import itertools
from wtforms import widgets
from wtforms.fields.core import Field
from wtforms.validators import ValidationError
__all__ = (
"SelectField",
"SelectMultipleField",
"RadioField",
)
class SelectFieldBase(Field):
    """
    Base class for fields which can be iterated to produce options.

    This isn't a field, but an abstract base class for fields which want to
    provide this functionality.
    """

    option_widget = widgets.Option()
def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
super().__init__(label, validators, **kwargs)
if option_widget is not None:
self.option_widget = option_widget
def iter_choices(self):
"""
Provides data for choice widget rendering. Must return a sequence or
iterable of (value, label, selected) tuples.
"""
raise NotImplementedError()
def has_groups(self):
return False
def iter_groups(self):
raise NotImplementedError()
def __iter__(self):
opts = dict(
widget=self.option_widget,
validators=self.validators,
name=self.name,
render_kw=self.render_kw,
_form=None,
_meta=self.meta,
)
for i, (value, label, checked) in enumerate(self.iter_choices()):
opt = self._Option(label=label, id="%s-%d" % (self.id, i), **opts)
opt.process(None, value)
opt.checked = checked
yield opt
class _Option(Field):
checked = False
def _value(self):
return str(self.data)
class SelectField(SelectFieldBase):
widget = widgets.Select()
def __init__(
self,
label=None,
validators=None,
coerce=str,
choices=None,
validate_choice=True,
**kwargs,
):
super().__init__(label, validators, **kwargs)
self.coerce = coerce
if callable(choices):
choices = choices()
if choices is not None:
self.choices = choices if isinstance(choices, dict) else list(choices)
else:
self.choices = None
self.validate_choice = validate_choice
def iter_choices(self):
if not self.choices:
choices = []
elif isinstance(self.choices, dict):
choices = list(itertools.chain.from_iterable(self.choices.values()))
else:
choices = self.choices
return self._choices_generator(choices)
def has_groups(self):
return isinstance(self.choices, dict)
def iter_groups(self):
if isinstance(self.choices, dict):
for label, choices in self.choices.items():
yield (label, self._choices_generator(choices))
def _choices_generator(self, choices):
if not choices:
_choices = []
elif isinstance(choices[0], (list, tuple)):
_choices = choices
else:
_choices = zip(choices, choices)
for value, label in _choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
try:
# If value is None, don't coerce to a value
self.data = self.coerce(value) if value is not None else None
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if not valuelist:
return
try:
self.data = self.coerce(valuelist[0])
except ValueError as exc:
raise ValueError(self.gettext("Invalid Choice: could not coerce.")) from exc
def pre_validate(self, form):
if self.choices is None:
raise TypeError(self.gettext("Choices cannot be None."))
if not self.validate_choice:
return
for _, _, match in self.iter_choices():
if match:
break
else:
raise ValidationError(self.gettext("Not a valid choice."))
class SelectMultipleField(SelectField):
"""
No different from a normal select field, except this one can take (and
validate) multiple choices. You'll need to specify the HTML `size`
attribute to the select field when rendering.
"""
widget = widgets.Select(multiple=True)
def _choices_generator(self, choices):
if choices:
if isinstance(choices[0], (list, tuple)):
_choices = choices
else:
_choices = zip(choices, choices)
else:
_choices = []
for value, label in _choices:
selected = self.data is not None and self.coerce(value) in self.data
yield (value, label, selected)
def process_data(self, value):
try:
self.data = list(self.coerce(v) for v in value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
try:
self.data = list(self.coerce(x) for x in valuelist)
except ValueError as exc:
raise ValueError(
self.gettext(
"Invalid choice(s): one or more data inputs could not be coerced."
)
) from exc
def pre_validate(self, form):
if self.choices is None:
raise TypeError(self.gettext("Choices cannot be None."))
if not self.validate_choice or not self.data:
return
acceptable = {c[0] for c in self.iter_choices()}
if any(d not in acceptable for d in self.data):
unacceptable = [str(d) for d in set(self.data) - acceptable]
raise ValidationError(
self.ngettext(
"'%(value)s' is not a valid choice for this field.",
"'%(value)s' are not valid choices for this field.",
len(unacceptable),
)
% dict(value="', '".join(unacceptable))
)
class RadioField(SelectField):
"""
Like a SelectField, except displays a list of radio buttons.
Iterating the field will produce subfields (each containing a label as
well) in order to allow custom rendering of the individual radio fields.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.RadioInput()
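# --- Usage sketch (illustrative; assumes a standard WTForms installation;
# the form and field names below are hypothetical) ---
if __name__ == "__main__":
    from wtforms.form import Form

    class _ExampleForm(Form):
        # (value, label) pairs; a plain list of strings is paired with itself
        # by _choices_generator.
        size = SelectField("Size", choices=[("s", "Small"), ("l", "Large")])
        toppings = SelectMultipleField("Toppings", choices=["ham", "mushroom", "onion"])
        crust = RadioField("Crust", choices=[("thin", "Thin"), ("deep", "Deep Dish")])

    form = _ExampleForm()
    for option in form.crust:  # iterating a select field yields _Option subfields
        print(option.label.text, option())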
#!/usr/bin/env python
"""Web browser interface for CladeCompare.
Input (GET):
Form for submitting FG, [BG, HMM].
Output (POST):
CladeReport (heat map) of submission.
"""
# TODO:
# - take boolean do_weight as a form option (checkbox?)
# ENH (in the report):
# - asterisks up top link to PDF pairlogos
# - below title, link to PyMOL script of PDB(s)
from __future__ import print_function
import logging
import os
import tempfile
import webbrowser
import bottle
from cladecomparelib import (core, pairlogo, pmlscript, urn, gtest, jsd,
phospho, report)
# ENH: include textarea as alternative to each file upload input
# ENH: dropdown alpha (or textinput w/ JS validation)
FORM_HTML = """\
<html>
<body>
<h1>CladeCompare</h1>
<form action="cladecompare" method="post" enctype="multipart/form-data">
<p>
Submission name:
<input type="text" name="name" />
</p>
<h2>Sequences</h2>
<p>
Sequence file 1 (required):
<br />
<input type="file" name="seqfile1" size=50 />
</p>
<p>
Sequence file 2:
<br />
<input type="file" name="seqfile2" size=50 />
</p>
<h2>Statistical strategy</h2>
<p>
<label>
<input type="radio" name="strategy" value="gtest" checked="checked" />
G-test (goodness-of-fit)
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="urn" />
Ball-in-urn model (binomial)
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="jsd" />
Jensen-Shannon divergence
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="phospho" />
Phosphorylation site conservation
</label>
</p>
<p>
Significance cutoff (alpha):
<input type="text" name="alpha" value="0.005" />
</p>
<h2>Alignment profile</h2>
<p>
HMM (.hmm) profile:
<br />
<input type="file" name="profile" size=50 />
</p>
<!--
<h2>Structure</h2>
<p>
PDB ID:
<input type="text" name="pdbid" />
<br />
or upload a
PDB file:
<br />
<input type="file" name="pdbfile" size=50 />
</p>
-->
<p />
<p><input type="submit" /></p>
</form>
<hr />
<p>Project page: <a
href="http://github.com/etal/cladecompare">http://github.com/etal/cladecompare</a></p>
<p>If you use this software in a publication, please cite our paper that
describes it:</p>
<blockquote>Talevich, E. & Kannan, N. (2013)
<a href="http://www.biomedcentral.com/1471-2148/13/117">Structural and
evolutionary adaptation of rhoptry kinases and pseudokinases, a family of
coccidian virulence factors</a>.
<i>BMC Evolutionary Biology</i> 13:117 doi:10.1186/1471-2148-13-117
</blockquote>
</body>
</html>
"""
# --- Routes ---
@bottle.get('/cladecompare')
def form():
return FORM_HTML
# TODO - routes for downloading .pml, .pdf -- use static_file
@bottle.post('/cladecompare')
def form_submit():
# ENH: pick a unique, informative name -- e.g. date or hostname
name = bottle.request.forms.name
seqfile1 = bottle.request.files.seqfile1
if not hasattr(seqfile1, 'file'):
return "Error: You need to specify at least one sequence file."
seq1fname = handle2temp(seqfile1.file,
suffix=('.cma' if seqfile1.filename.endswith('.cma')
else '.seq'))
# Optional second sequence set -- if missing, do single mode
seqfile2 = bottle.request.files.seqfile2
if hasattr(seqfile2, 'file'):
seq2fname = handle2temp(seqfile2.file,
suffix=('.cma' if
seqfile2.filename.endswith('.cma') else
'.seq'))
if not name:
name = "%s-vs-%s" % (seqfile1.filename.rsplit('.', 1)[0],
seqfile2.filename.rsplit('.', 1)[0])
else:
seq2fname = ''
if not name:
name = seqfile1.filename
# Optional HMM profile for alignment
profile = bottle.request.files.profile
if hasattr(profile, 'file'):
if not profile.filename.endswith('.hmm'):
return "HMM profile file name must end in .hmm"
profname = handle2temp(profile.file, suffix='.hmm')
logging.info("Aligning %s with profile %s", seq1fname, profname)
fg_aln = core.hmm_align_and_read(profname, seq1fname)
if seq2fname:
logging.info("Aligning %s with profile %s", seq2fname, profname)
bg_aln = core.hmm_align_and_read(profname, seq2fname)
else:
profname = ''
fg_aln = core.read_aln(seq1fname, 'fasta')
if seq2fname:
bg_aln = core.read_aln(seq2fname, 'fasta')
pdbfile = bottle.request.files.pdbfile
if hasattr(pdbfile, 'file'):
if not profname:
return ("Error: to generate a PyMOL script for a PDB file you must"
"also specify an HMM profile")
pdbfname = handle2temp(pdbfile.file)
logging.info("Aligning %s with profile %s", pdbfile.filename, profname)
pdb_rec, pdb_resnums, pdb_inserts = core.pdb_hmm(profname,
pdbfname)
pdb_data = [(pdbfname, pdb_rec, pdb_resnums, pdb_inserts)]
else:
pdbfname = ''
pdb_data = None
# Mutually exclusive with pdbfname (above):
pdbid = bottle.request.forms.pdbid
if pdbid:
# If PDB ID: .pml should use "fetch" instead of "load"?
# Can get this info w/o dl'ing actual PDB file (e.g. via FASTA)?
pass
stat_module = dict(gtest=gtest, urn=urn, jsd=jsd, phospho=phospho,
)[bottle.request.forms.strategy]
try:
alpha = float(bottle.request.forms.alpha)
if not 0.0 <= alpha <= 1.0:
raise ValueError
except ValueError:
return "Error: alpha must be a number between 0 and 1"
_fdo, tmp_output = tempfile.mkstemp(suffix='.out')
os.close(_fdo)
_fdp, tmp_pattern = tempfile.mkstemp(suffix='.pttrn')
os.close(_fdp)
# Run the algorithms...
if seq2fname:
# Pair mode
fg_clean, bg_clean, hits = core.process_pair(fg_aln, bg_aln,
stat_module, False)
core.process_output(fg_clean, bg_clean, hits, alpha,
tmp_output, tmp_pattern,
pdb_data)
else:
# Single mode
aln, hits = core.process_one(fg_aln, stat_module, False)
core.process_output(aln, None, hits, alpha,
tmp_output, tmp_pattern,
pdb_data)
# Get the HTML report data
contents = report.do_single(tmp_output, tmp_pattern)[1]
cleanup(seq1fname)
cleanup(seq2fname)
cleanup(profname)
cleanup(tmp_output)
cleanup(tmp_pattern)
return report.html_page_tpl % dict(title=name, contents=contents)
# --- Helpers ---
def handle2temp(handle, suffix=''):
"""Write file handle contents to a temporary file, return tempfile name."""
_fd, fname = tempfile.mkstemp(suffix=suffix)
os.write(_fd, handle.read())
os.close(_fd)
return fname
def cleanup(fname):
"""Remove a temporary file that may or may not exist."""
if os.path.isfile(fname):
try:
os.remove(fname)
print("Cleaned up", fname)
except OSError:
print("Failed to clean up", fname)
# --- Run ---
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format="%(module)s [@%(lineno)s]: %(message)s")
webbrowser.open("http://localhost:8080/cladecompare")
bottle.run(host='localhost', port=8080)
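# --- Usage sketch (illustrative; assumes the server above is running and the
# `requests` package is installed; the sequence file name is hypothetical) ---
#
#   import requests
#   with open("foreground.seq", "rb") as fg:
#       resp = requests.post(
#           "http://localhost:8080/cladecompare",
#           data={"name": "demo", "strategy": "gtest", "alpha": "0.005"},
#           files={"seqfile1": fg},
#       )
#   print(resp.text)  # HTML CladeReport page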
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
# updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@test_util.run_in_graph_and_eager_modes
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
# regularizers only supported in GRAPH mode.
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
variable = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
  def testReusePartitionedVariablesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
variable = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
def testNoEagerActivityRegularizer(self):
with context.eager_mode():
with self.assertRaisesRegexp(ValueError, 'activity_regularizer'):
core_layers.Dense(1, activity_regularizer=lambda *args, **kwargs: 0.)
@test_util.run_in_graph_and_eager_modes
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@test_util.run_in_graph_and_eager_modes
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._graph, layer._graph)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@test_util.run_in_graph_and_eager_modes
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return inputs
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if not context.executing_eagerly():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.executing_eagerly():
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
else:
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
def testActivityRegularizer(self):
regularizer = math_ops.reduce_sum
layer = base_layers.Layer(activity_regularizer=regularizer)
x = array_ops.placeholder('int32')
layer.apply(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
def testNameScopeIsConsistentWithVariableScope(self):
# Github issue 13429.
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return math_ops.multiply(inputs, self.my_var, name='my_op')
def _gen_layer(x, name=None):
layer = MyLayer(name=name)
out = layer.apply(x)
return layer, out
# unnamed layer
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x)
layer1, op1 = _gen_layer(op)
layer2, op2 = _gen_layer(op1)
self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
self.assertEqual(op.name, 'my_layer/my_op:0')
self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
self.assertEqual(op1.name, 'my_layer_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
self.assertEqual(op2.name, 'my_layer_2/my_op:0')
# name starts from zero
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name')
layer1, op1 = _gen_layer(op, name='name_1')
layer2, op2 = _gen_layer(op1, name='name_2')
self.assertEqual(layer.my_var.name, 'name/my_var:0')
self.assertEqual(op.name, 'name/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
self.assertEqual(op1.name, 'name_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
self.assertEqual(op2.name, 'name_2/my_op:0')
# name starts from one
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name_1')
layer1, op1 = _gen_layer(op, name='name_2')
layer2, op2 = _gen_layer(op1, name='name_3')
self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
self.assertEqual(op.name, 'name_1/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
self.assertEqual(op1.name, 'name_2/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
self.assertEqual(op2.name, 'name_3/my_op:0')
def testVariablesAreLiftedFromFunctionBuildingGraphs(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return inputs
outer_graph = ops.get_default_graph()
function_building_graph = ops.Graph()
function_building_graph._building_function = True
with outer_graph.as_default():
with function_building_graph.as_default():
layer = MyLayer()
# Create a variable by invoking build through __call__ and assert that
# it is both tracked and lifted into the outer graph.
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
layer.apply(inputs)
self.assertEqual(len(layer.variables), 1)
self.assertEqual(len(layer.trainable_variables), 1)
self.assertEqual(layer.variables[0].graph, outer_graph)
def testGetUpdateFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(None)), 1)
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
# Call same layer on new input, creating one more conditional update
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(None)), 1)
# Check that we are successfully filtering out irrelevant updates
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
def testGetLossesFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_loss(self.a)
self.built = True
def call(self, inputs):
self.add_loss(inputs, inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(None)), 1)
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
# Call same layer on new input, creating one more conditional loss
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(None)), 1)
# Check that we are successfully filtering out irrelevant losses
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
def testLayerGraphSetInFirstApply(self):
with ops.Graph().as_default():
# Graph at construction time is ignored
layer = core_layers.Dense(1)
with ops.Graph().as_default():
layer.apply(constant_op.constant([[1.]]))
# layer is now bound to second Graph
with ops.Graph().as_default(), self.assertRaisesRegexp(
ValueError, 'Input graph and Layer graph are not the same'):
layer.apply(constant_op.constant([[1.]]))
if __name__ == '__main__':
test.main()
import numpy as np
from astropy import units as u
from poliastro.core.elements import (
circular_velocity as circular_velocity_fast,
coe2rv as coe2rv_fast,
coe2rv_many as coe2rv_many_fast,
eccentricity_vector as eccentricity_vector_fast,
)
from poliastro.core.propagation.farnocchia import (
delta_t_from_nu as delta_t_from_nu_fast,
)
u_kms = u.km / u.s
u_km3s2 = u.km**3 / u.s**2
@u.quantity_input(k=u_km3s2, a=u.km)
def circular_velocity(k, a):
"""Circular velocity for a given body (k) and semimajor axis (a)."""
return circular_velocity_fast(k.to_value(u_km3s2), a.to_value(u.km)) * u_kms
@u.quantity_input(k=u_km3s2, a=u.km)
def mean_motion(k, a):
"""Mean motion given body (k) and semimajor axis (a)."""
return np.sqrt(k / abs(a**3)).to(1 / u.s) * u.rad
@u.quantity_input(k=u_km3s2, a=u.km)
def period(k, a):
"""Period given body (k) and semimajor axis (a)."""
n = mean_motion(k, a)
return 2 * np.pi * u.rad / n
@u.quantity_input(k=u_km3s2, r=u.km, v=u_kms)
def energy(k, r, v):
"""Specific energy."""
return v @ v / 2 - k / np.sqrt(r @ r)
@u.quantity_input(k=u_km3s2, r=u.km, v=u_kms)
def eccentricity_vector(k, r, v):
"""Eccentricity vector."""
return (
eccentricity_vector_fast(
k.to_value(u_km3s2), r.to_value(u.km), v.to_value(u_kms)
)
* u.one
)
@u.quantity_input(nu=u.rad, ecc=u.one, k=u_km3s2, r_p=u.km)
def t_p(nu, ecc, k, r_p):
"""Elapsed time since latest perifocal passage."""
t_p = (
delta_t_from_nu_fast(
nu.to_value(u.rad),
ecc.value,
k.to_value(u_km3s2),
r_p.to_value(u.km),
)
* u.s
)
return t_p
@u.quantity_input(
k=u_km3s2, R=u.km, J2=u.one, n_sunsync=1 / u.s, a=u.km, ecc=u.one, inc=u.rad
)
def heliosynchronous(k, R, J2, n_sunsync, a=None, ecc=None, inc=None):
with np.errstate(invalid="raise"):
if a is None and (ecc is not None) and (inc is not None):
a = (
-3
* R**2
* J2
* np.sqrt(k)
/ (2 * n_sunsync * (1 - ecc**2) ** 2)
* np.cos(inc)
) ** (2 / 7)
elif ecc is None and (a is not None) and (inc is not None):
ecc = np.sqrt(
1
- np.sqrt(
-3
* R**2
* J2
* np.sqrt(k)
* np.cos(inc)
/ (2 * a ** (7 / 2) * n_sunsync)
)
)
elif inc is None and (ecc is not None) and (a is not None):
# Inclination is the unknown variable
inc = np.arccos(
-2
* a ** (7 / 2)
* n_sunsync
* (1 - ecc**2) ** 2
/ (3 * R**2 * J2 * np.sqrt(k))
)
else:
raise ValueError("Two parameters of (a, ecc, inc) are required")
return a, ecc, inc
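# --- Usage sketch (illustrative) ---
# Solving for the Sun-synchronous inclination of a circular ~800 km Earth
# orbit. The constants are standard approximate Earth values and the mean
# Sun-synchronous nodal rate (2*pi rad per year); none of them come from
# this module.
#
#   k = 398600.4418 * u.km**3 / u.s**2   # Earth gravitational parameter
#   R = 6378.137 * u.km                  # Earth equatorial radius
#   J2 = 1.08263e-3 * u.one
#   n_ss = 1.991e-7 / u.s                # ~0.9856 deg/day nodal precession
#   _, _, inc = heliosynchronous(k, R, J2, n_ss, a=R + 800 * u.km, ecc=0 * u.one)
#   print(inc.to(u.deg))                 # roughly 98.6 deg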
@u.quantity_input(ecc=u.one)
def hyp_nu_limit(ecc, r_max_ratio=np.inf):
r"""Limit true anomaly for hyperbolic orbits.
Parameters
----------
ecc : ~astropy.units.Quantity
Eccentricity, should be larger than 1.
r_max_ratio : float, optional
Value of :math:`r_{\text{max}} / p` for this angle, default to infinity.
"""
return np.arccos(-(1 - 1 / r_max_ratio) / ecc)
@u.quantity_input(R=u.m, J2=u.one, J3=u.one, a=u.m, inc=u.rad)
def get_eccentricity_critical_argp(R, J2, J3, a, inc):
"""Cccentricity for frozen orbits when the argument of perigee is critical.
Parameters
----------
R : ~astropy.units.Quantity
Planet radius.
J2 : ~astropy.units.Quantity
Planet J2 coefficient.
J3 : ~astropy.units.Quantity
Planet J3 coefficient.
a : ~astropy.units.Quantity
        Orbit's semimajor axis.
inc : ~astropy.units.Quantity
Inclination.
"""
ecc = -J3 * R * np.sin(inc) / 2 / J2 / a
return ecc
@u.quantity_input(R=u.m, J2=u.one, J3=u.one, a=u.m, ecc=u.one)
def get_inclination_critical_argp(R, J2, J3, a, ecc):
"""Inclination for frozen orbits
when the argument of perigee is critical and the eccentricity is given.
Parameters
----------
R : ~astropy.units.Quantity
Planet radius.
J2 : ~astropy.units.Quantity
Planet J2 coefficient.
J3 : ~astropy.units.Quantity
Planet J3 coefficient.
a : ~astropy.units.Quantity
Semimajor axis.
ecc : ~astropy.units.Quantity
Eccentricity.
"""
inc = np.arcsin(-ecc * a * J2 * 2 / R / J3) * u.rad
return inc
@u.quantity_input(ecc=u.one)
def get_eccentricity_critical_inc(ecc=None):
"""Eccentricity for frozen orbits when the inclination is critical.
    If ecc is None, the Moon's eccentricity (0.0549) is used as a
    reasonable arbitrary default.
Parameters
----------
ecc : ~astropy.units.Quantity, optional
Eccentricity, default to None.
"""
if ecc is None:
ecc = 0.0549 * u.one
return ecc
def coe2rv(k, p, ecc, inc, raan, argp, nu):
rr, vv = coe2rv_fast(
k.to_value(u_km3s2),
p.to_value(u.km),
ecc.to_value(u.one),
inc.to_value(u.rad),
raan.to_value(u.rad),
argp.to_value(u.rad),
nu.to_value(u.rad),
)
rr = rr << u.km
vv = vv << (u.km / u.s)
return rr, vv
def coe2rv_many(k_arr, p_arr, ecc_arr, inc_arr, raan_arr, argp_arr, nu_arr):
rr_arr, vv_arr = coe2rv_many_fast(
k_arr.to_value(u_km3s2),
p_arr.to_value(u.km),
ecc_arr.to_value(u.one),
inc_arr.to_value(u.rad),
raan_arr.to_value(u.rad),
argp_arr.to_value(u.rad),
nu_arr.to_value(u.rad),
)
rr_arr = rr_arr << u.km
vv_arr = vv_arr << (u.km / u.s)
return rr_arr, vv_arr
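# --- Usage sketch (illustrative; run as a script, not on import) ---
if __name__ == "__main__":
    # Classical elements of a near-circular Sun-synchronous LEO; the Earth
    # gravitational parameter below is the standard value, assumed here.
    k = 398600.4418 * u.km**3 / u.s**2
    p = 7178 * u.km  # semi-latus rectum (~ semimajor axis for ecc ~ 0)
    rr, vv = coe2rv(
        k, p, 0.001 * u.one, 98.6 * u.deg, 0 * u.deg, 0 * u.deg, 0 * u.deg
    )
    print(rr, vv)  # position [km] and velocity [km/s] vectors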
"""
pixelStats.py
Compute multi-epoch (multi-day) statistics for each lat/lon pixel read from daily Level-3 grids.
Also do statistics roll-ups from daily to monthly, monthly to seasonal, seasonal to yearly,
yearly to multi-year, and multi-year to the total N-year period.
Simple code to be run using Spark or Dpark.
"""
import sys, os, urllib, re, time
import numpy as N
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as M
from netCDF4 import Dataset, default_fillvals
from variables import getVariables, close
from split import splitByMonth
from cache import retrieveFile, CachePath
#from pyspark import SparkContext # both imported below when needed
#import dpark
Modes = ['sequential', 'dpark', 'spark']
Accumulators = ['count', 'sum', 'sumsq', 'min', 'max']
Stats = ['count', 'mean', 'stddev', 'min', 'max']
GroupByKeys = ['month', 'season', 'year', '3-year', 'total']
TimeFromFilenameDOY = {'get': ('year', 'doy'), 'regex': re.compile(r'\/A(....)(...)')}
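# For example, the regex above extracts (year, day-of-year) from paths in the
# NASA ocean-color naming style (the path below is hypothetical):
#   TimeFromFilenameDOY['regex'].search('/A2003123.L3m_DAY_SST_sst_4km.nc').groups()
#   # -> ('2003', '123')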
def pixelStats(urls, variable, nPartitions, timeFromFilename=TimeFromFilenameDOY, groupByKeys=GroupByKeys, accumulators=Accumulators,
cachePath=CachePath, mode='dpark', modes=Modes):
    '''Compute global (or regional) per-pixel statistics in parallel, given a list of URLs pointing to netCDF files.'''
baseKey = groupByKeys[0]
if baseKey == 'month':
urlsByKey = splitByMonth(urls, timeFromFilename)
else:
print >>sys.stderr, 'pixelStats: Unrecognized groupByKey "%s". Must be in %s' % (baseKey, str(groupByKeys))
sys.exit(1)
if mode == 'sequential':
accum = [accumulate(u, variable, accumulators) for u in urlsByKey]
merged = reduce(combine, accum)
stats = statsFromAccumulators(merged)
elif mode == 'dpark':
import dpark
urls = dpark.parallelize(urlsByKey, nPartitions) # returns RDD of URL lists
accum = urls.map(lambda urls: accumulate(urls, variable, accumulators)) # returns RDD of stats accumulators
merged = accum.reduce(combine) # merged accumulators on head node
stats = statsFromAccumulators(merged) # compute final stats from accumulators
elif mode == 'spark':
from pyspark import SparkContext
sc = SparkContext(appName="PixelStats")
urls = sc.parallelize(urlsByKey, nPartitions) # returns RDD of URL lists
accum = urls.map(lambda urls: accumulate(urls, variable, accumulators)) # returns RDD of stats accumulators
merged = accum.reduce(combine) # merged accumulators on head node
stats = statsFromAccumulators(merged) # compute final stats from accumulators
else:
stats = None
if mode not in modes:
print >>sys.stderr, 'pixelStats: Unrecognized mode "%s". Must be in %s' % (mode, str(modes))
sys.exit(1)
return stats
def accumulate(urls, variable, accumulators, cachePath=CachePath):
'''Accumulate data into statistics accumulators like count, sum, sumsq, min, max, M3, M4, etc.'''
keys, urls = urls
accum = {}
for i, url in enumerate(urls):
try:
path = retrieveFile(url, cachePath)
fn = os.path.split(path)[1]
except:
print >>sys.stderr, 'accumulate: Error, continuing without file %s' % url
continue
try:
var, fh = getVariables(path, [variable], arrayOnly=True, set_auto_mask=True) # return dict of variable objects by name
v = var[variable] # masked array
close(fh)
except:
print >>sys.stderr, 'accumulate: Error, cannot read variable %s from file %s' % (variable, path)
continue
if i == 0:
for k in accumulators:
if k == 'min': accum[k] = default_fillvals['f8'] * N.ones(v.shape, dtype=N.float64)
elif k == 'max': accum[k] = -default_fillvals['f8'] * N.ones(v.shape, dtype=N.float64)
elif k == 'count': accum[k] = N.zeros(v.shape, dtype=N.int64)
else:
accum[k] = N.zeros(v.shape, dtype=N.float64)
if 'count' in accumulators:
accum['count'] += ~v.mask
if 'min' in accumulators:
accum['min'] = N.ma.minimum(accum['min'], v)
if 'max' in accumulators:
accum['max'] = N.ma.maximum(accum['max'], v)
v = N.ma.filled(v, 0.)
if 'sum' in accumulators:
accum['sum'] += v
if 'sumsq' in accumulators:
accum['sumsq'] += v*v
return (keys, accum)
def combine(a, b):
    '''Combine two accumulator dicts: sum the additive fields, take elementwise min/max.'''
    keys, a = a
    b = b[1]
    for k in a.keys():
        if k != 'min' and k != 'max':
            a[k] += b[k]
    if 'min' in a:
        a['min'] = N.ma.minimum(a['min'], b['min'])
    if 'max' in a:
        a['max'] = N.ma.maximum(a['max'], b['max'])
    return (('total',), a)
def statsFromAccumulators(accum):
'''Compute final statistics from accumulators.'''
keys, accum = accum
# Mask all of the accumulator arrays
accum['count'] = N.ma.masked_equal(accum['count'], 0, copy=False)
mask = accum['count'].mask
for k in accum:
if k != 'count':
accum[k] = N.ma.array(accum[k], copy=False, mask=mask)
# Compute stats (masked)
stats = {}
if 'count' in accum:
stats['count'] = accum['count']
if 'min' in accum:
stats['min'] = accum['min']
if 'max' in accum:
stats['max'] = accum['max']
if 'sum' in accum:
stats['mean'] = accum['sum'] / accum['count']
    if 'sumsq' in accum and 'sum' in accum:
        n = accum['count'].astype(N.float64)
        # Sample standard deviation from raw moments: sqrt((sumsq - sum**2/n) / (n - 1))
        stats['stddev'] = N.sqrt((accum['sumsq'] - accum['sum'] ** 2 / n) / (n - 1))
return (keys, stats)
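# --- Sanity-check sketch (illustrative; plain NumPy, no Spark/Dpark needed) ---
# Shows the count/sum/sumsq roll-up that accumulate()/combine() maintain per
# pixel, on a single synthetic time series:
#   x = N.array([1.0, 2.0, 4.0])
#   n, s, ss = len(x), x.sum(), (x * x).sum()
#   mean = s / n                                 # 2.333...
#   stddev = N.sqrt((ss - s * s / n) / (n - 1))  # sample stddev, ~1.528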
def writeStats(urls, variable, stats, outFile, copyToHdfsPath=None, format='NETCDF4', cachePath=CachePath):
'''Write out stats arrays to netCDF with some attributes.
'''
keys, stats = stats
dout = Dataset(outFile, 'w', format=format)
print >>sys.stderr, 'Writing %s ...' % outFile
dout.setncattr('variable', variable)
dout.setncattr('urls', str(urls))
dout.setncattr('level', str(keys))
inFile = retrieveFile(urls[0], cachePath)
din = Dataset(inFile, 'r')
try:
coordinates = din.variables[variable].getncattr('coordinates')
coordinates = coordinates.split()
except:
coordinates = ('lat', 'lon') # kludge: FIX ME
# Add dimensions and variables, copying data
coordDim = [dout.createDimension(coord, din.variables[coord].shape[0]) for coord in coordinates] # here lat, lon, alt, etc.
for coord in coordinates:
var = dout.createVariable(coord, din.variables[coord].dtype, (coord,))
var[:] = din.variables[coord][:]
# Add stats variables
for k,v in stats.items():
var = dout.createVariable(k, stats[k].dtype, coordinates)
var[:] = v[:]
din.close()
dout.close()
return outFile
def totalStats(args):
urlFile = args[0]
with open(urlFile, 'r') as f:
urls = [line.strip() for line in f]
variable = args[1]
mode = args[2]
nPartitions = int(args[3])
outFile = args[4]
stats = pixelStats(urls, variable, nPartitions, mode=mode)
outFile = writeStats(urls, variable, stats, outFile)
return outFile
def main(args):
return totalStats(args)
if __name__ == '__main__':
print main(sys.argv[1:])
# python pixelStats.py urls_sst_daynight_2003_3days.txt sst sequential 1 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst sequential 1 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst dpark 4 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst spark 4 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_2015.txt sst dpark 16 modis_sst_stats.nc
# python pixelStats.py urls_sst_daynight_2003_2015.txt sst spark 16 modis_sst_stats.nc
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations(object):
"""ServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databox.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_available_skus(
self,
location, # type: str
available_sku_request, # type: "_models.AvailableSkuRequest"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableSkusResult"]
"""This method provides the list of available skus for the given subscription and location.
:param location: The location of the resource.
:type location: str
:param available_sku_request: Filters for showing the available skus.
:type available_sku_request: ~azure.mgmt.databox.models.AvailableSkuRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.AvailableSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_skus.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/availableSkus'} # type: ignore
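    # --- Usage sketch (illustrative; model/attribute names follow the
    # azure-mgmt-databox package for this API version, but check them against
    # your installed SDK; some versions also require a transfer_type field) ---
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.databox import DataBoxManagementClient
    #   from azure.mgmt.databox import models as databox_models
    #
    #   client = DataBoxManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   sku_request = databox_models.AvailableSkuRequest(country="US", location="westus")
    #   for sku_info in client.service.list_available_skus("westus", sku_request):
    #       print(sku_info)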
def list_available_skus_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
available_sku_request, # type: "_models.AvailableSkuRequest"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableSkusResult"]
"""This method provides the list of available skus for the given subscription, resource group and
location.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param location: The location of the resource.
:type location: str
:param available_sku_request: Filters for showing the available skus.
:type available_sku_request: ~azure.mgmt.databox.models.AvailableSkuRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.AvailableSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_skus_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_skus_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/locations/{location}/availableSkus'} # type: ignore
def validate_address(
self,
location, # type: str
validate_address, # type: "_models.ValidateAddress"
**kwargs # type: Any
):
# type: (...) -> "_models.AddressValidationOutput"
"""[DEPRECATED NOTICE: This operation will soon be removed] This method validates the customer
        shipping address and provides alternate addresses, if any.
:param location: The location of the resource.
:type location: str
:param validate_address: Shipping address of the customer.
:type validate_address: ~azure.mgmt.databox.models.ValidateAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressValidationOutput, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.AddressValidationOutput
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressValidationOutput"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_address.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validate_address, 'ValidateAddress')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AddressValidationOutput', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_address.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/validateAddress'} # type: ignore
def validate_inputs_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
validation_request, # type: "_models.ValidationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ValidationResponse"
"""This method does all necessary pre-job creation validation under resource group.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param location: The location of the resource.
:type location: str
:param validation_request: Inputs of the customer.
:type validation_request: ~azure.mgmt.databox.models.ValidationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ValidationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.ValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_inputs_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validation_request, 'ValidationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_inputs_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/locations/{location}/validateInputs'} # type: ignore
def validate_inputs(
self,
location, # type: str
validation_request, # type: "_models.ValidationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ValidationResponse"
"""This method does all necessary pre-job creation validation under subscription.
:param location: The location of the resource.
:type location: str
:param validation_request: Inputs of the customer.
:type validation_request: ~azure.mgmt.databox.models.ValidationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ValidationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.ValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_inputs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validation_request, 'ValidationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_inputs.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/validateInputs'} # type: ignore
def region_configuration(
self,
location, # type: str
region_configuration_request, # type: "_models.RegionConfigurationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.RegionConfigurationResponse"
"""This API provides configuration details specific to given region/location.
:param location: The location of the resource.
:type location: str
:param region_configuration_request: Request body to get the configuration for the region.
:type region_configuration_request: ~azure.mgmt.databox.models.RegionConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegionConfigurationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.RegionConfigurationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegionConfigurationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.region_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(region_configuration_request, 'RegionConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegionConfigurationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
region_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/regionConfiguration'} # type: ignore
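# --- Usage sketch (illustrative; not part of the generated client) ---
# A minimal, hedged example of calling the validation operations above through
# the public client. The subscription id, location, and the pre-built
# ``validation_request`` model instance are assumptions.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.databox import DataBoxManagementClient
#
#     client = DataBoxManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     response = client.service.validate_inputs("westus", validation_request)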
# Copyright 2022 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the class-based evaluation."""
import concurrent
import functools
import inspect
import itertools
import time
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Type
from absl import logging
from seqio import dataset_providers
from seqio import feature_converters
from seqio import loggers as loggers_lib
from seqio import metrics as metrics_lib
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import typing_extensions
Task = dataset_providers.Task
EncDecFeatureConverter = feature_converters.EncDecFeatureConverter
FeatureConverter = feature_converters.FeatureConverter
AllOutputTokensType = Mapping[str, Sequence[Sequence[int]]]
AllOutputScoresType = Mapping[str, Sequence[float]]
AllMetricsType = Mapping[str, Mapping[str, Any]]
class AllMetricsFuture(typing_extensions.Protocol):
def result(self) -> AllMetricsType:
...
MetricsAndOutputsType = Tuple[AllMetricsFuture, # metrics
AllOutputTokensType, # output_tokens
AllOutputScoresType] # output_scores
def get_valid_eval_tasks(tasks: Sequence[Task], split: str) -> Sequence[Task]:
"""Get tasks that have the specified split and a metric function."""
valid_tasks = []
for task in tasks:
if split not in task.splits:
logging.info(
"Task %s has no '%s' split; skipping eval.", task.name, split
)
continue
if not task.metric_fns:
logging.info("Task %s has no metric_fns; skipping eval.", task.name)
continue
metric_types = []
if task.predict_metric_fns:
metric_types.append("predict")
if task.score_metric_fns:
metric_types.append("score")
logging.info("Adding task '%s' with %s metric_fn(s).", task.name,
" and ".join(metric_types))
valid_tasks.append(task)
return valid_tasks
def get_targets_and_examples(
tasks: Sequence[Task],
dataset_fn: Callable[[Task], tf.data.Dataset],
sequence_dims: Mapping[str, int],
num_examples: Optional[int] = None,
use_memory_cache: bool = True,
target_field_name: str = "targets"
) -> Tuple[
Mapping[str, Any],
Mapping[str, tf.data.Dataset],
Mapping[str, int]]:
"""Get targets, cached datasets, and maximum sequence lengths per feature.
Args:
tasks: tasks objects to get targets and examples for.
dataset_fn: function, returns the dataset from the task object.
sequence_dims: dict of feature names to their sequence dimension.
num_examples: an optional maximum number of examples to take from the
beginning of each task dataset.
    use_memory_cache: whether to use tf.data.Dataset#cache. May cause
      memory issues for large datasets.
target_field_name: Field name of the target in the input dataset examples.
Returns:
cached_targets: unpreprocessed targets for each task
cached_task_datasets: cached datasets for each task, with cardinality set
max_sequence_length: maximum sequence lengths for inputs and targets across
all tasks.
"""
# Pre-load in all of the targets once before entering continuous eval loop
cached_targets = {}
cached_task_datasets = {}
max_sequence_length = {k: 0 for k in tasks[0].output_features.keys()}
for task in tasks:
assert max_sequence_length.keys() == task.output_features.keys(), (
"all tasks must have the same features")
for task in tasks:
ds = dataset_fn(task)
if num_examples:
ds = ds.take(num_examples)
if use_memory_cache:
ds = ds.cache()
targets = []
for ex in tfds.as_numpy(ds):
for k in max_sequence_length:
sequence_dim = sequence_dims.get(k, 0)
sequence_length = ex[k].shape[sequence_dim]
max_sequence_length[k] = max(max_sequence_length[k], sequence_length)
# Create list of postprocessed targets
pretokenized_target_field_name = target_field_name + "_pretokenized"
if pretokenized_target_field_name in ex:
target = ex[pretokenized_target_field_name]
else:
target = task.output_features[target_field_name].vocabulary.decode(
[int(x) for x in ex[target_field_name]])
if isinstance(target, bytes):
target = target.decode("utf-8")
targets.append(task.postprocess_fn(target, example=ex, is_target=True))
cached_targets[task.name] = targets
cached_task_datasets[task.name] = ds.apply(
tf.data.experimental.assert_cardinality(len(targets)))
return cached_targets, cached_task_datasets, max_sequence_length
class PredictFnCallable(typing_extensions.Protocol):
def __call__(
self,
dataset: tf.data.Dataset,
model_feature_shapes: Optional[Mapping[str, int]]
) -> Sequence[Tuple[int, Sequence[int]]]:
...
class ScoreFnCallable(typing_extensions.Protocol):
def __call__(
self,
dataset: tf.data.Dataset,
model_feature_shapes: Optional[Mapping[str, int]]
) -> Sequence[Tuple[int, float]]:
...
class Evaluator:
"""A class to encapsulate all eval-related information.
Users should define `predict_fn` and then pass it to `evaluate` method.
`predict_fn` should operate with enumerated tf.data.Dataset. See `evaluate`
method for more detail.
  Evaluation data is cached once and reused for an arbitrary number of
  evaluation runs.
  If none of the evaluation tasks has metric functions defined, the evaluation
  will be skipped and `Evaluator.evaluate` will return ({}, {}), assuming that
  `compute_metrics` is True.
Note that we cache two versions of the datasets. The first version
(self.cached_task_datasets) has the task features (e.g., "inputs" and
"targets"), which are returned from `seqio.Task.get_dataset`. The second
version (self.cached_model_datasets) has model features (e.g.,
"decoder_target_tokens"). This is returned from the feature converter. The
former is used for postprocessing associated with the Task that requires the
original task datasets. The latter is passed to `predict_fn` for evaluation.
Attributes:
eval_tasks: a mapping from a mixture or a task name to seqio.Task object(s).
cached_model_datasets: cached evaluation datasets with model features.
cached_task_datasets: cached evaluation datasets with task features.
cached_targets: cached evaluation targets.
model_feature_shapes: mapping from model feature to its shape in the
`cached_model_datasets`.
loggers: a sequence of subclasses of `Logger`.
"""
def __init__(self,
mixture_or_task_name: str,
feature_converter: FeatureConverter,
eval_split: str = "validation",
use_cached: bool = False,
seed: Optional[int] = 42,
sequence_length: Optional[Mapping[str, int]] = None,
num_examples: Optional[int] = None,
shuffle: bool = False,
logger_cls: Sequence[Type[loggers_lib.Logger]] = (),
log_dir: Optional[str] = None,
use_memory_cache: bool = True,
target_field_name: str = "targets"):
"""Evaluator constructor.
Args:
mixture_or_task_name: a registered task or mixture name.
feature_converter: a feature converter object to use to convert the task
features to model features. Must be a subclass of
seqio.FeatureConverter.
eval_split: evaluation split. Typically "validation" or "test".
use_cached: whether to use the cached dataset instead of processing it on
the fly.
seed: random seed used for dataset shuffle and preprocessing. This
is usually not needed since eval datasets aren't shuffled and shouldn't
        use stochastic operations. It is only useful in certain data sources
such as `FewshotDataSource` where the training examples are randomly
selected during evaluation.
sequence_length: an optional length specification. If specified, these
will be the hard-limit on the evaluation data used for prediction. If
none of the preprocessors depend on the sequence length, it can be left
unspecified and the maximum length for each feature will be used. These
lengths are computed while caching the datasets.
num_examples: an optional maximum number of examples to take from the
beginning of each Task dataset for evaluation.
shuffle: whether to shuffle the Task datasets. Only useful when
`num_examples` is also set in order to get a semi-random subsample of
the examples. Note that the shuffle will only be applied once during
initialization (using `seed`) and the same subsample will be used on
call to `evaluate`.
logger_cls: a set of subclasses of `Logger` to write results with.
log_dir: the directory to log outputs to. Required if `logger_cls` is
non-empty.
      use_memory_cache: whether to use tf.data.Dataset#cache. May cause
        memory issues for large datasets.
target_field_name: Field name of the target in the input dataset examples.
Raises:
ValueError if `sequence_length` is None but a preprocessor depends on its
value.
"""
logging.info("Initializing Evaluator for '%s'", mixture_or_task_name)
eval_tasks = dataset_providers.get_subtasks(
dataset_providers.get_mixture_or_task(mixture_or_task_name))
self._eval_tasks = get_valid_eval_tasks(eval_tasks, eval_split)
self._metrics_executor = concurrent.futures.ThreadPoolExecutor(
max_workers=1)
self._metrics_future = None
self._target_field_name = target_field_name
if not self._eval_tasks:
logging.warning(
"No eval task with valid split and metric fn found. Skipping eval.")
return
# Determine if sequence_length arg is required. This occurs when any of the
# task preprocessors have a `sequence_length` arg with no default value.
sequence_length_required = False
for task in self._eval_tasks:
for prep in task.preprocessors:
prep_params = inspect.signature(prep).parameters
if ("sequence_length" in prep_params and
prep_params["sequence_length"].default == inspect.Parameter.empty):
if sequence_length is None:
if isinstance(prep, functools.partial):
prep_name = prep.func.__name__
else:
prep_name = prep.__name__
raise ValueError(
f"Preprocessor '{prep_name}' in task '{task.name}' has a "
"`sequence_length` argument, making it incompatible with "
"automatic sequence length detection. Pass a valid "
"`sequence_length` to `Evaluator` and try again.")
sequence_length_required = True
break
def dataset_fn(task: Task) -> tf.data.Dataset:
return task.get_dataset(
sequence_length=sequence_length,
split=eval_split,
shuffle=shuffle,
num_epochs=1,
seed=seed,
use_cached=use_cached)
# `task_datasets` have the output features from seqio.Task.get_dataset.
# These features will be converted to "model features" by the feature
# converter before being cached.
sequence_dims = {
k: v.sequence_dim for k, v in feature_converter.TASK_FEATURES.items()
}
cached_targets, cached_task_datasets, max_lengths = (
get_targets_and_examples(
tasks=self._eval_tasks,
dataset_fn=dataset_fn,
sequence_dims=sequence_dims,
num_examples=num_examples,
use_memory_cache=use_memory_cache,
target_field_name=self._target_field_name))
if sequence_length is None:
logging.info("Setting sequence lengths to %s", max_lengths)
sequence_length = max_lengths
else:
log_long_warning = False
log_same_warning = False
sequence_length = {
k: sequence_length.get(k, max_lengths[k]) for k in max_lengths}
assert set(sequence_length.keys()) == set(max_lengths.keys()), (
"sequence_length=%s limits must match the detected max_lengths=%s" % (
sequence_length.keys(), max_lengths.keys()))
for k, l in sequence_length.items():
if l is None:
continue
if isinstance(l, (tuple, list)):
logging.warning(
"Automatic length checking is not supported when lengths are"
"specified with a tuple for feature %s = %s. Please make "
"sure your max lengths are not removing parts of your inputs.",
k, l
)
elif l > max_lengths[k]:
log_long_warning = True
elif not sequence_length_required and l == max_lengths[k]:
log_same_warning = True
if log_long_warning:
logging.warning(
"Given sequence lengths are longer than necessary for some "
"evaluation inputs or targets, resulting in wasted computation. "
"Consider passing `None` for `sequence_length` to have them be "
"automatically computed.\n Got: %s,\n Max Lengths: %s",
sequence_length, max_lengths)
elif log_same_warning:
logging.warning(
"Given sequence lengths *may be* insufficient for some evaluation "
"inputs or targets. Such sequences will be truncated to fit, "
"likely leading to sub-optimal results. Consider passing `None` "
"for `sequence_length` to have them be automatically computed.\n "
" Got: %s,\n Max Lengths: %s", sequence_length, max_lengths)
self._cached_model_datasets = {}
if feature_converter.pack:
raise ValueError("During evaluation, packing can't be used.")
# Convert the task features to model features
for task in self._eval_tasks:
eval_ds = feature_converter(
cached_task_datasets[task.name], sequence_length)
# The eval dataset is enumerated to ensure that the order is preserved
# throughout the entire evaluation process.
self._cached_model_datasets[task.name] = eval_ds.enumerate()
self._cached_targets = cached_targets
self._cached_task_datasets = cached_task_datasets
self._model_feature_shapes = {
k: tuple(spec.shape)
for k, spec in eval_ds.element_spec.items() if spec.shape.rank > 0
}
if logger_cls and not log_dir:
raise ValueError(
"'log_dir' must be provided to `Evaluator` if `logger_cls` is "
"non-empty.")
self._loggers = tuple(cls(output_dir=log_dir) for cls in logger_cls) # pytype:disable=not-instantiable
def __del__(self):
"""Wait for metrics to be written before deletion."""
self._metrics_executor.shutdown(wait=True)
def evaluate(self,
*,
compute_metrics: bool,
step: Optional[int] = None,
predict_fn: PredictFnCallable,
score_fn: ScoreFnCallable) -> MetricsAndOutputsType:
"""Predict and score self.eval_tasks.
Evaluation must preserve the example ordering. This requirement is satisfied
by using enumerated dataset. Each of the cached eval task datasets is an
enumerated tf.data.Dataset where each element has (index, example) format.
Therefore, each index serves as a unique integer id for the example.
`predict_fn` takes as input the cached eval dataset. The output must be of
the form Sequence[(index, token_ids)] where `token_ids` is the sequence of
token ids output by the model with the input `example` whose index matches
`index`. Therefore, even if `predict_fn` mixes the order of the examples
during prediction, the order can be corrected as long as the correct index
for each example is maintained.
Similarly, `score_fn` takes the cached eval dataset as input and returns
Sequence[(index, score)] where `score` is the sequence of log likelihood
scores for the targets in the eval dataset.
A common example is the multi-host setup where the evaluation dataset is
split into multiple hosts that independently make predictions and combine
the results during which the ordering can be mixed.
There are 4 steps involved in the evaluation using predicted tokens:
1. Model returns indices and output_tokens: Sequence[Tuple[int,
Sequence[int]]]
2. output tokens are decoded by `vocab.decode`
3. Postprocessors are applied to the decoded output. These are denoted as
predictions.
4. Each metric function is applied to the predictions and the cached
targets.
There are 2 steps involved in the evaluation using scores:
1. Model returns indices and scores: Sequence[Tuple[int, Sequence[float]]]
2. Each metric function is applied to the scores and the cached targets.
Args:
compute_metrics: whether to compute metrics.
step: an optional step number of the current evaluation. If unspecified, a
dummy value of -1 will be used.
predict_fn: a user-defined function, which takes in a tf.data.Dataset and
outputs the sequence of predicted tokens. Only called if predict metrics
exist for the tasks.
score_fn: a user-defined function, which takes in a tf.data.Dataset and
outputs the log likelihood score of the targets. Only called if score
metrics exist for the task.
Returns:
metrics: a Future containing a mapping from task name to computed metrics,
or None if `compute_metrics` is False.
predicted_tokens: a mapping from task name to the output tokens
from `predict_fn`, for tasks that have `predict_metric_fns`.
scores: a mapping from task name to the output scores from
        `score_fn` for tasks that have `score_metric_fns`.
"""
all_output_tokens = {}
all_output_scores = {}
def _infer_and_sort_outputs(infer_fn, task_name):
indices_and_outputs = infer_fn(self.cached_model_datasets[task_name])
if len(indices_and_outputs[0]) != 2:
raise ValueError(
"Expected a sequence of length-2 tuples with (index, *) format.")
return [x[1] for x in sorted(indices_and_outputs, key=lambda x: x[0])]
for task in self.eval_tasks:
logging.info("Evaluating %s", task.name)
if task.predict_metric_fns:
# output_tokens is a list of token_ids where each token_ids
# corresponds to the model output of the input example.
all_output_tokens[task.name] = _infer_and_sort_outputs(
predict_fn, task.name)
if task.score_metric_fns:
all_output_scores[task.name] = _infer_and_sort_outputs(
score_fn, task.name)
if compute_metrics:
if self._metrics_future:
# Ensure previous step's metrics are finished and raise any exceptions
# that may have occurred.
tick = time.time()
self._metrics_future.result()
logging.info("Time waiting for previous metrics run: %f secs.",
time.time() - tick)
def compute_metrics_fn():
tick = time.time()
metrics = self._compute_metrics(all_output_tokens, all_output_scores,
step)
logging.info("Time computing metrics: %f secs.", time.time() - tick)
return metrics
def wrap_graph(fn):
graph = tf.compat.v1.get_default_graph()
def wrapped_fn():
with graph.as_default():
return fn()
return wrapped_fn
if not tf.executing_eagerly():
compute_metrics_fn = wrap_graph(compute_metrics_fn)
self._metrics_future = self._metrics_executor.submit(compute_metrics_fn)
all_metrics = self._metrics_future
else:
all_metrics = concurrent.futures.Future()
all_metrics.set_result(None)
return all_metrics, all_output_tokens, all_output_scores
def _compute_metrics(
self,
predicted_tokens: AllOutputTokensType,
scores: AllOutputScoresType,
step: Optional[int] = None) -> AllMetricsType:
"""Computes and logs metrics given the predicted tokens and scores.
Args:
predicted_tokens: a mapping from task name to the output tokens from
`predict_fn`, for tasks that have `predict_metric_fns`.
scores: a mapping from task name to the output scores from
        `score_fn` for tasks that have `score_metric_fns`.
step: an optional step number of the current evaluation. If unspecified, a
dummy value of -1 will be used.
Returns:
A mapping from task name to computed metrics.
"""
all_metrics = {}
for task in self.eval_tasks:
logging.info("Computing metrics for %s", task.name)
task_dataset = self.cached_task_datasets[task.name]
targets = self.cached_targets[task.name]
task_metrics = []
inferences = {}
if task.predict_metric_fns:
task_vocab = task.output_features[self._target_field_name].vocabulary
task_predicted_tokens = predicted_tokens[task.name]
if len(targets) != len(task_predicted_tokens):
raise ValueError(
f"len(targets)({len(targets)}) != "
f"len(predictions)({len(task_predicted_tokens)})")
outputs = [
task_vocab.decode([int(token) for token in tokens])
for tokens in task_predicted_tokens
]
inferences["output"] = outputs
task_predictions = [
task.postprocess_fn(d, example=ex, is_target=False)
for d, ex in zip(outputs, tfds.as_numpy(task_dataset))
]
inferences["prediction"] = task_predictions
task_metrics.extend([
metric_fn(targets, task_predictions) for metric_fn in
task.predict_metric_fns
])
if task.score_metric_fns:
task_scores = scores[task.name]
if len(targets) != len(task_scores):
raise ValueError(f"len(targets)({len(targets)}) != "
f"len(task_scores)({len(task_scores)})")
task_metrics.extend([
metric_fn(targets, task_scores)
for metric_fn in task.score_metric_fns
])
inferences["score"] = task_scores
all_metrics[task.name] = {}
for k, v in itertools.chain(*[m.items() for m in task_metrics]):
if k in all_metrics[task.name]:
raise ValueError(
f"Duplicate metric key '{k}' in Task '{task.name}'.")
all_metrics[task.name][k] = v
metrics = {
k: metrics_lib.Scalar(v)
if not isinstance(v, metrics_lib.MetricValue) else v
for k, v in all_metrics[task.name].items()
}
for logger in self.loggers:
logger(task_name=task.name, step=step, metrics=metrics,
dataset=task_dataset, inferences=inferences, targets=targets)
return all_metrics
@property
def eval_tasks(self) -> Sequence[Task]:
return self._eval_tasks
@property
def cached_model_datasets(self) -> Mapping[str, tf.data.Dataset]:
return self._cached_model_datasets
@property
def cached_task_datasets(self) -> Mapping[str, tf.data.Dataset]:
return self._cached_task_datasets
@property
def cached_targets(self) -> Mapping[str, Sequence[str]]:
return self._cached_targets
@property
def model_feature_shapes(self) -> Mapping[str, Tuple[int, ...]]:
return self._model_feature_shapes
@property
  def loggers(self) -> Tuple[loggers_lib.Logger, ...]:
return tuple(self._loggers)
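# --- Usage sketch (illustrative; not part of the library) ---
# `predict_fn` and `score_fn` must echo back each example's enumeration index
# so the Evaluator can restore dataset order. The dummy functions below
# satisfy that contract with constant outputs; in real use they would wrap a
# trained model. All names here are assumptions.
def _dummy_predict_fn(dataset, model_feature_shapes=None):
  # Return an (index, token_ids) pair for every enumerated example.
  return [(int(index), [0]) for index, _ in dataset.as_numpy_iterator()]
def _dummy_score_fn(dataset, model_feature_shapes=None):
  # Return an (index, log_likelihood_score) pair for every enumerated example.
  return [(int(index), 0.0) for index, _ in dataset.as_numpy_iterator()]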
''' Provider that returns PostGIS vector tiles in GeoJSON or MVT format.
VecTiles is intended for rendering, and returns tiles with contents simplified,
precision reduced and often clipped.
For a more general implementation, try the Vector provider:
http://tilestache.org/doc/#vector-provider
'''
from math import pi
from urlparse import urljoin, urlparse
from urllib import urlopen
from os.path import exists
from shapely.wkb import dumps
from shapely.wkb import loads
import json
from ... import getTile
from ...Core import KnownUnknown
from TileStache.Config import loadClassPath
try:
from psycopg2.extras import RealDictCursor
from psycopg2 import connect
from psycopg2.extensions import TransactionRollbackError
except ImportError, err:
# Still possible to build the documentation without psycopg2
def connect(*args, **kwargs):
raise err
from . import mvt, geojson, topojson, oscimap
from ...Geography import SphericalMercator
from ModestMaps.Core import Point
tolerances = [6378137 * 2 * pi / (2 ** (zoom + 8)) for zoom in range(22)]
def make_transform_fn(transform_fns):
if not transform_fns:
return None
def transform_fn(shape, properties, fid, zoom):
for fn in transform_fns:
shape, properties, fid = fn(shape, properties, fid, zoom)
return shape, properties, fid
return transform_fn
def resolve_transform_fns(fn_dotted_names):
if not fn_dotted_names:
return None
return map(loadClassPath, fn_dotted_names)
class Provider:
''' VecTiles provider for PostGIS data sources.
Parameters:
dbinfo:
Required dictionary of Postgres connection parameters. Should
include some combination of 'host', 'user', 'password', and 'database'.
queries:
Required list of Postgres queries, one for each zoom level. The
last query in the list is repeated for higher zoom levels, and null
queries indicate an empty response.
Query must use "__geometry__" for a column name, and must be in
spherical mercator (900913) projection. A query may include an
"__id__" column, which will be used as a feature ID in GeoJSON
instead of a dynamically-generated hash of the geometry. A query
can additionally be a file name or URL, interpreted relative to
the location of the TileStache config file.
If the query contains the token "!bbox!", it will be replaced with
            a constant bounding box geometry like this:
"ST_SetSRID(ST_MakeBox2D(ST_MakePoint(x, y), ST_MakePoint(x, y)), <srid>)"
This behavior is modeled on Mapnik's similar bbox token feature:
https://github.com/mapnik/mapnik/wiki/PostGIS#bbox-token
clip:
Optional boolean flag determines whether geometries are clipped to
tile boundaries or returned in full. Default true: clip geometries.
srid:
Optional numeric SRID used by PostGIS for spherical mercator.
Default 900913.
simplify:
Optional floating point number of pixels to simplify all geometries.
Useful for creating double resolution (retina) tiles set to 0.5, or
set to 0.0 to prevent any simplification. Default 1.0.
simplify_until:
Optional integer specifying a zoom level where no more geometry
simplification should occur. Default 16.
suppress_simplification:
Optional list of zoom levels where no dynamic simplification should
occur.
geometry_types:
Optional list of geometry types that constrains the results of what
kind of features are returned.
transform_fns:
Optional list of transformation functions. It will be
passed a shapely object, the properties dictionary, and
the feature id. The function should return a tuple
consisting of the new shapely object, properties
dictionary, and feature id for the feature.
sort_fn:
Optional function that will be used to sort features
fetched from the database.
Sample configuration, for a layer with no results at zooms 0-9, basic
selection of lines with names and highway tags for zoom 10, a remote
URL containing a query for zoom 11, and a local file for zooms 12+:
"provider":
{
"class": "TileStache.Goodies.VecTiles:Provider",
"kwargs":
{
"dbinfo":
{
"host": "localhost",
"user": "gis",
"password": "gis",
"database": "gis"
},
"queries":
[
null, null, null, null, null,
null, null, null, null, null,
"SELECT way AS __geometry__, highway, name FROM planet_osm_line -- zoom 10+ ",
"http://example.com/query-z11.pgsql",
"query-z12-plus.pgsql"
]
}
}
'''
def __init__(self, layer, dbinfo, queries, clip=True, srid=900913, simplify=1.0, simplify_until=16, suppress_simplification=(), geometry_types=None, transform_fns=None, sort_fn=None, simplify_before_intersect=False):
'''
'''
self.layer = layer
keys = 'host', 'user', 'password', 'database', 'port', 'dbname'
self.dbinfo = dict([(k, v) for (k, v) in dbinfo.items() if k in keys])
self.clip = bool(clip)
self.srid = int(srid)
self.simplify = float(simplify)
self.simplify_until = int(simplify_until)
self.suppress_simplification = set(suppress_simplification)
self.geometry_types = None if geometry_types is None else set(geometry_types)
self.transform_fn_names = transform_fns
self.transform_fn = make_transform_fn(resolve_transform_fns(transform_fns))
if sort_fn:
self.sort_fn_name = sort_fn
self.sort_fn = loadClassPath(sort_fn)
else:
self.sort_fn_name = None
self.sort_fn = None
self.simplify_before_intersect = simplify_before_intersect
self.queries = []
self.columns = {}
for query in queries:
if query is None:
self.queries.append(None)
continue
#
# might be a file or URL?
#
url = urljoin(layer.config.dirpath, query)
scheme, h, path, p, q, f = urlparse(url)
if scheme in ('file', '') and exists(path):
query = open(path).read()
elif scheme == 'http' and ' ' not in url:
query = urlopen(url).read()
self.queries.append(query)
def renderTile(self, width, height, srs, coord):
''' Render a single tile, return a Response instance.
'''
try:
query = self.queries[coord.zoom]
except IndexError:
query = self.queries[-1]
ll = self.layer.projection.coordinateProj(coord.down())
ur = self.layer.projection.coordinateProj(coord.right())
bounds = ll.x, ll.y, ur.x, ur.y
if not query:
return EmptyResponse(bounds)
if query not in self.columns:
self.columns[query] = query_columns(self.dbinfo, self.srid, query, bounds)
if coord.zoom in self.suppress_simplification:
tolerance = None
else:
tolerance = self.simplify * tolerances[coord.zoom] if coord.zoom < self.simplify_until else None
return Response(self.dbinfo, self.srid, query, self.columns[query], bounds, tolerance, coord.zoom, self.clip, coord, self.layer.name(), self.geometry_types, self.transform_fn, self.sort_fn, self.simplify_before_intersect)
def getTypeByExtension(self, extension):
''' Get mime-type and format by file extension, one of "mvt", "json" or "topojson".
'''
if extension.lower() == 'mvt':
return 'application/x-protobuf', 'MVT'
elif extension.lower() == 'json':
return 'application/json', 'JSON'
elif extension.lower() == 'topojson':
return 'application/json', 'TopoJSON'
elif extension.lower() == 'vtm':
return 'image/png', 'OpenScienceMap' # TODO: make this proper stream type, app only seems to work with png
else:
raise ValueError(extension + " is not a valid extension")
class MultiProvider:
''' VecTiles provider to gather PostGIS tiles into a single multi-response.
Returns a MultiResponse object for GeoJSON or TopoJSON requests.
names:
List of names of vector-generating layers from elsewhere in config.
ignore_cached_sublayers:
True if cache provider should not save intermediate layers
in cache.
Sample configuration, for a layer with combined data from water
and land areas, both assumed to be vector-returning layers:
"provider":
{
"class": "TileStache.Goodies.VecTiles:MultiProvider",
"kwargs":
{
"names": ["water-areas", "land-areas"]
}
}
'''
def __init__(self, layer, names, ignore_cached_sublayers=False):
self.layer = layer
self.names = names
self.ignore_cached_sublayers = ignore_cached_sublayers
def __call__(self, layer, names, ignore_cached_sublayers=False):
self.layer = layer
self.names = names
self.ignore_cached_sublayers = ignore_cached_sublayers
def renderTile(self, width, height, srs, coord):
''' Render a single tile, return a Response instance.
'''
return MultiResponse(self.layer.config, self.names, coord, self.ignore_cached_sublayers)
def getTypeByExtension(self, extension):
''' Get mime-type and format by file extension, "json" or "topojson" only.
'''
if extension.lower() == 'json':
return 'application/json', 'JSON'
elif extension.lower() == 'topojson':
return 'application/json', 'TopoJSON'
elif extension.lower() == 'vtm':
return 'image/png', 'OpenScienceMap' # TODO: make this proper stream type, app only seems to work with png
elif extension.lower() == 'mvt':
return 'application/x-protobuf', 'MVT'
else:
raise ValueError(extension + " is not a valid extension for responses with multiple layers")
class Connection:
''' Context manager for Postgres connections.
See http://www.python.org/dev/peps/pep-0343/
and http://effbot.org/zone/python-with-statement.htm
'''
def __init__(self, dbinfo):
self.dbinfo = dbinfo
def __enter__(self):
conn = connect(**self.dbinfo)
conn.set_session(readonly=True, autocommit=True)
self.db = conn.cursor(cursor_factory=RealDictCursor)
return self.db
def __exit__(self, type, value, traceback):
self.db.connection.close()
class Response:
'''
'''
def __init__(self, dbinfo, srid, subquery, columns, bounds, tolerance, zoom, clip, coord, layer_name, geometry_types, transform_fn, sort_fn, simplify_before_intersect):
''' Create a new response object with Postgres connection info and a query.
bounds argument is a 4-tuple with (xmin, ymin, xmax, ymax).
'''
self.dbinfo = dbinfo
self.bounds = bounds
self.zoom = zoom
self.clip = clip
self.coord = coord
self.layer_name = layer_name
self.geometry_types = geometry_types
self.transform_fn = transform_fn
self.sort_fn = sort_fn
geo_query = build_query(srid, subquery, columns, bounds, tolerance, True, clip, simplify_before_intersect=simplify_before_intersect)
tol_idx = coord.zoom if 0 <= coord.zoom < len(tolerances) else -1
tol_val = tolerances[tol_idx]
oscimap_query = build_query(srid, subquery, columns, bounds, tolerance, False, clip, oscimap.padding * tol_val, oscimap.extents, simplify_before_intersect=simplify_before_intersect)
mvt_query = build_query(srid, subquery, columns, bounds, tolerance, False, clip, mvt.padding * tol_val, mvt.extents, simplify_before_intersect=simplify_before_intersect)
self.query = dict(TopoJSON=geo_query, JSON=geo_query, MVT=mvt_query, OpenScienceMap=oscimap_query)
def save(self, out, format):
'''
'''
features = get_features(self.dbinfo, self.query[format], self.geometry_types, self.transform_fn, self.sort_fn, self.coord.zoom)
if format == 'MVT':
mvt.encode(out, features, self.coord, self.layer_name)
elif format == 'JSON':
geojson.encode(out, features, self.zoom)
elif format == 'TopoJSON':
ll = SphericalMercator().projLocation(Point(*self.bounds[0:2]))
ur = SphericalMercator().projLocation(Point(*self.bounds[2:4]))
topojson.encode(out, features, (ll.lon, ll.lat, ur.lon, ur.lat))
elif format == 'OpenScienceMap':
oscimap.encode(out, features, self.coord, self.layer_name)
else:
raise ValueError(format + " is not supported")
class EmptyResponse:
''' Simple empty response renders valid MVT or GeoJSON with no features.
'''
def __init__(self, bounds):
self.bounds = bounds
def save(self, out, format):
'''
'''
if format == 'MVT':
mvt.encode(out, [], None)
elif format == 'JSON':
geojson.encode(out, [], 0)
elif format == 'TopoJSON':
ll = SphericalMercator().projLocation(Point(*self.bounds[0:2]))
ur = SphericalMercator().projLocation(Point(*self.bounds[2:4]))
topojson.encode(out, [], (ll.lon, ll.lat, ur.lon, ur.lat))
elif format == 'OpenScienceMap':
oscimap.encode(out, [], None)
else:
raise ValueError(format + " is not supported")
class MultiResponse:
'''
'''
def __init__(self, config, names, coord, ignore_cached_sublayers):
''' Create a new response object with TileStache config and layer names.
'''
self.config = config
self.names = names
self.coord = coord
self.ignore_cached_sublayers = ignore_cached_sublayers
def save(self, out, format):
'''
'''
if format == 'TopoJSON':
topojson.merge(out, self.names, self.get_tiles(format), self.config, self.coord)
elif format == 'JSON':
geojson.merge(out, self.names, self.get_tiles(format), self.config, self.coord)
elif format == 'OpenScienceMap':
feature_layers = []
layers = [self.config.layers[name] for name in self.names]
for layer in layers:
width, height = layer.dim, layer.dim
tile = layer.provider.renderTile(width, height, layer.projection.srs, self.coord)
                if isinstance(tile, EmptyResponse): continue
feature_layers.append({'name': layer.name(), 'features': get_features(tile.dbinfo, tile.query["OpenScienceMap"], layer.provider.geometry_types, layer.provider.transform_fn, layer.provider.sort_fn, self.coord.zoom)})
oscimap.merge(out, feature_layers, self.coord)
elif format == 'MVT':
feature_layers = []
layers = [self.config.layers[name] for name in self.names]
for layer in layers:
width, height = layer.dim, layer.dim
tile = layer.provider.renderTile(width, height, layer.projection.srs, self.coord)
                if isinstance(tile, EmptyResponse): continue
feature_layers.append({'name': layer.name(), 'features': get_features(tile.dbinfo, tile.query["MVT"], layer.provider.geometry_types, layer.provider.transform_fn, layer.provider.sort_fn, self.coord.zoom)})
mvt.merge(out, feature_layers, self.coord)
else:
raise ValueError(format + " is not supported for responses with multiple layers")
def get_tiles(self, format):
unknown_layers = set(self.names) - set(self.config.layers.keys())
if unknown_layers:
raise KnownUnknown("%s.get_tiles didn't recognize %s when trying to load %s." % (__name__, ', '.join(unknown_layers), ', '.join(self.names)))
layers = [self.config.layers[name] for name in self.names]
mimes, bodies = zip(*[getTile(layer, self.coord, format.lower(), self.ignore_cached_sublayers, self.ignore_cached_sublayers) for layer in layers])
bad_mimes = [(name, mime) for (mime, name) in zip(mimes, self.names) if not mime.endswith('/json')]
if bad_mimes:
raise KnownUnknown('%s.get_tiles encountered a non-JSON mime-type in %s sub-layer: "%s"' % ((__name__, ) + bad_mimes[0]))
tiles = map(json.loads, bodies)
bad_types = [(name, topo['type']) for (topo, name) in zip(tiles, self.names) if topo['type'] != ('FeatureCollection' if (format.lower()=='json') else 'Topology')]
if bad_types:
raise KnownUnknown('%s.get_tiles encountered a non-%sCollection type in %s sub-layer: "%s"' % ((__name__, ('Feature' if (format.lower()=='json') else 'Topology'), ) + bad_types[0]))
return tiles
def query_columns(dbinfo, srid, subquery, bounds):
''' Get information about the columns returned for a subquery.
'''
with Connection(dbinfo) as db:
bbox = 'ST_MakeBox2D(ST_MakePoint(%f, %f), ST_MakePoint(%f, %f))' % bounds
bbox = 'ST_SetSRID(%s, %d)' % (bbox, srid)
query = subquery.replace('!bbox!', bbox)
# newline is important here, to break out of comments.
db.execute(query + '\n LIMIT 0')
column_names = set(x.name for x in db.description)
return column_names
def get_features(dbinfo, query, geometry_types, transform_fn, sort_fn, zoom,
n_try=1):
features = []
with Connection(dbinfo) as db:
try:
db.execute(query)
except TransactionRollbackError:
if n_try >= 5:
print 'TransactionRollbackError occurred 5 times'
raise
else:
return get_features(dbinfo, query, geometry_types,
transform_fn, sort_fn, zoom,
n_try=n_try + 1)
for row in db.fetchall():
assert '__geometry__' in row, 'Missing __geometry__ in feature result'
assert '__id__' in row, 'Missing __id__ in feature result'
wkb = bytes(row.pop('__geometry__'))
id = row.pop('__id__')
shape = loads(wkb)
if geometry_types is not None:
if shape.type not in geometry_types:
                    #print 'found %s which is not in: %s' % (shape.type, geometry_types)
continue
props = dict((k, v) for k, v in row.items() if v is not None)
if transform_fn:
shape, props, id = transform_fn(shape, props, id, zoom)
wkb = dumps(shape)
features.append((wkb, props, id))
if sort_fn:
features = sort_fn(features, zoom)
return features
def build_query(srid, subquery, subcolumns, bounds, tolerance, is_geo, is_clipped, padding=0, scale=None, simplify_before_intersect=False):
    ''' Build and return a PostGIS query.
'''
# bounds argument is a 4-tuple with (xmin, ymin, xmax, ymax).
bbox = 'ST_MakeBox2D(ST_MakePoint(%.12f, %.12f), ST_MakePoint(%.12f, %.12f))' % (bounds[0] - padding, bounds[1] - padding, bounds[2] + padding, bounds[3] + padding)
bbox = 'ST_SetSRID(%s, %d)' % (bbox, srid)
geom = 'q.__geometry__'
# Special care must be taken when simplifying certain geometries (like those
# in the earth/water layer) to prevent tile border "seams" from forming:
# these occur when a geometry is split across multiple tiles (like a
# continuous strip of land or body of water) and thus, for any such tile,
# the part of that geometry inside of it lines up along one or more of its
# edges. If there's any kind of fine geometric detail near one of these
# edges, simplification might remove it in a way that makes the edge of the
# geometry move off the edge of the tile. See this example of a tile
# pre-simplification:
# https://cloud.githubusercontent.com/assets/4467604/7937704/aef971b4-090f-11e5-91b9-d973ef98e5ef.png
# and post-simplification:
# https://cloud.githubusercontent.com/assets/4467604/7937705/b1129dc2-090f-11e5-9341-6893a6892a36.png
# at which point a seam formed.
#
# To get around this, for any given tile bounding box, we find the
# contained/overlapping geometries and simplify them BEFORE
# cutting out the precise tile bounding bbox (instead of cutting out the
# tile and then simplifying everything inside of it, as we do with all of
# the other layers).
if simplify_before_intersect:
# Simplify, then cut tile.
if tolerance is not None:
# The problem with simplifying all contained/overlapping geometries
# for a tile before cutting out the parts that actually lie inside
# of it is that we might end up simplifying a massive geometry just
# to extract a small portion of it (think simplifying the border of
# the US just to extract the New York City coastline). To reduce the
# performance hit, we actually identify all of the candidate
# geometries, then cut out a bounding box *slightly larger* than the
# tile bbox, THEN simplify, and only then cut out the tile itself.
# This still allows us to perform simplification of the geometry
# edges outside of the tile, which prevents any seams from forming
# when we cut it out, but means that we don't have to simplify the
# entire geometry (just the small bits lying right outside the
# desired tile).
simplification_padding = padding + (bounds[3] - bounds[1]) * 0.1
simplification_bbox = (
'ST_MakeBox2D(ST_MakePoint(%.12f, %.12f), '
'ST_MakePoint(%.12f, %.12f))' % (
bounds[0] - simplification_padding,
bounds[1] - simplification_padding,
bounds[2] + simplification_padding,
bounds[3] + simplification_padding))
simplification_bbox = 'ST_SetSrid(%s, %d)' % (
simplification_bbox, srid)
geom = 'ST_Intersection(%s, %s)' % (geom, simplification_bbox)
geom = 'ST_MakeValid(ST_SimplifyPreserveTopology(%s, %.12f))' % (
geom, tolerance)
assert is_clipped, 'If simplify_before_intersect=True, ' \
'is_clipped should be True as well'
geom = 'ST_Intersection(%s, %s)' % (geom, bbox)
else:
# Cut tile, then simplify.
if is_clipped:
geom = 'ST_Intersection(%s, %s)' % (geom, bbox)
if tolerance is not None:
geom = 'ST_SimplifyPreserveTopology(%s, %.12f)' % (geom, tolerance)
if is_geo:
geom = 'ST_Transform(%s, 4326)' % geom
if scale:
# scale applies to the un-padded bounds, e.g. geometry in the padding area "spills over" past the scale range
geom = ('ST_TransScale(%s, %.12f, %.12f, %.12f, %.12f)'
% (geom, -bounds[0], -bounds[1],
scale / (bounds[2] - bounds[0]),
scale / (bounds[3] - bounds[1])))
subquery = subquery.replace('!bbox!', bbox)
columns = ['q."%s"' % c for c in subcolumns if c not in ('__geometry__', )]
if '__geometry__' not in subcolumns:
raise Exception("There's supposed to be a __geometry__ column.")
if '__id__' not in subcolumns:
columns.append('Substr(MD5(ST_AsBinary(q.__geometry__)), 1, 10) AS __id__')
columns = ', '.join(columns)
return '''SELECT %(columns)s,
ST_AsBinary(%(geom)s) AS __geometry__
FROM (
%(subquery)s
) AS q
WHERE ST_IsValid(q.__geometry__)
AND ST_Intersects(q.__geometry__, %(bbox)s)''' \
% locals()
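def _example_build_query():
    ''' Illustrative sketch (assumed inputs; not part of the provider).
        Returns the SQL that build_query() assembles for a made-up subquery
        at zoom 15, with clipping and geographic (EPSG:4326) output enabled.
    '''
    example_bounds = (-10018754.17, 0.0, -8766409.90, 1252344.27)
    return build_query(900913,
                       'SELECT way AS __geometry__, name FROM planet_osm_line',
                       set(['__geometry__', 'name']),
                       example_bounds, tolerances[15],
                       True, True)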
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/message_set_extensions.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/message_set_extensions.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3')
)
MESSAGE_SET_EXTENSION3_FIELD_NUMBER = 98418655
message_set_extension3 = _descriptor.FieldDescriptor(
name='message_set_extension3', full_name='google.protobuf.internal.message_set_extension3', index=0,
number=98418655, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TESTMESSAGESET = _descriptor.Descriptor(
name='TestMessageSet',
full_name='google.protobuf.internal.TestMessageSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001')),
is_extendable=True,
syntax='proto2',
extension_ranges=[(4, 2147483647), ],
oneofs=[
],
serialized_start=83,
serialized_end=113,
)
_TESTMESSAGESETEXTENSION1 = _descriptor.Descriptor(
name='TestMessageSetExtension1',
full_name='google.protobuf.internal.TestMessageSetExtension1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='i', full_name='google.protobuf.internal.TestMessageSetExtension1.i', index=0,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension1.message_set_extension', index=0,
number=98418603, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=281,
)
_TESTMESSAGESETEXTENSION2 = _descriptor.Descriptor(
name='TestMessageSetExtension2',
full_name='google.protobuf.internal.TestMessageSetExtension2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='str', full_name='google.protobuf.internal.TestMessageSetExtension2.str', index=0,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension2.message_set_extension', index=0,
number=98418634, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=284,
serialized_end=451,
)
_TESTMESSAGESETEXTENSION3 = _descriptor.Descriptor(
name='TestMessageSetExtension3',
full_name='google.protobuf.internal.TestMessageSetExtension3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='google.protobuf.internal.TestMessageSetExtension3.text', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=493,
)
DESCRIPTOR.message_types_by_name['TestMessageSet'] = _TESTMESSAGESET
DESCRIPTOR.message_types_by_name['TestMessageSetExtension1'] = _TESTMESSAGESETEXTENSION1
DESCRIPTOR.message_types_by_name['TestMessageSetExtension2'] = _TESTMESSAGESETEXTENSION2
DESCRIPTOR.message_types_by_name['TestMessageSetExtension3'] = _TESTMESSAGESETEXTENSION3
DESCRIPTOR.extensions_by_name['message_set_extension3'] = message_set_extension3
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TestMessageSet = _reflection.GeneratedProtocolMessageType('TestMessageSet', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESET,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSet)
))
_sym_db.RegisterMessage(TestMessageSet)
TestMessageSetExtension1 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension1', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION1,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension1)
))
_sym_db.RegisterMessage(TestMessageSetExtension1)
TestMessageSetExtension2 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension2', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION2,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension2)
))
_sym_db.RegisterMessage(TestMessageSetExtension2)
TestMessageSetExtension3 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension3', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION3,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension3)
))
_sym_db.RegisterMessage(TestMessageSetExtension3)
message_set_extension3.message_type = _TESTMESSAGESETEXTENSION3
TestMessageSet.RegisterExtension(message_set_extension3)
_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION1
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'])
_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION2
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'])
_TESTMESSAGESET.has_options = True
_TESTMESSAGESET._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001'))
# @@protoc_insertion_point(module_scope)
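# --- Usage sketch (illustrative appendix; not emitted by protoc) ---
# MessageSet-style extensions on the generated messages are accessed through
# the `Extensions` map rather than as regular fields; the value 42 is an
# arbitrary example.
if __name__ == '__main__':
  example_msg = TestMessageSet()
  example_msg.Extensions[TestMessageSetExtension1.message_set_extension].i = 42
  print(len(example_msg.SerializeToString()))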
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import Max
from django.forms.fields import CharField
from django.template.loader import select_template
from django.utils.html import format_html, strip_tags, strip_entities
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
try:
from html.parser import HTMLParser # py3
except ImportError:
from HTMLParser import HTMLParser # py2
from cms.plugin_pool import plugin_pool
from djangocms_text_ckeditor.widgets import TextEditorWidget
from djangocms_text_ckeditor.utils import plugin_tags_to_user_html
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.link.cms_plugins import TextLinkPlugin
from cmsplugin_cascade.link.forms import LinkForm, TextLinkFormMixin
from cmsplugin_cascade.link.plugin_base import LinkElementMixin
from cmsplugin_cascade.bootstrap3.buttons import BootstrapButtonMixin
from shop import settings as shop_settings
from shop.models.cart import CartModel
from shop.modifiers.pool import cart_modifiers_pool
from .plugin_base import ShopButtonPluginBase, DialogFormPluginBase
class ProceedButtonForm(TextLinkFormMixin, LinkForm):
link_content = CharField(label=_("Button Content"))
LINK_TYPE_CHOICES = (('cmspage', _("CMS Page")), ('RELOAD_PAGE', _("Reload Page")), ('PURCHASE_NOW', _("Purchase Now")),)
class ShopProceedButton(BootstrapButtonMixin, ShopButtonPluginBase):
"""
This button is used to proceed from one checkout step to the next one.
"""
name = _("Proceed Button")
parent_classes = ('BootstrapColumnPlugin',)
model_mixins = (LinkElementMixin,)
def get_form(self, request, obj=None, **kwargs):
kwargs.update(form=ProceedButtonForm)
return super(ShopProceedButton, self).get_form(request, obj, **kwargs)
def get_render_template(self, context, instance, placeholder):
template_names = [
'{}/checkout/proceed-button.html'.format(shop_settings.APP_LABEL),
'shop/checkout/proceed-button.html',
]
return select_template(template_names)
plugin_pool.register_plugin(ShopProceedButton)
class CustomerFormPlugin(DialogFormPluginBase):
"""
    Provides the form to edit customer-specific data stored in the model `Customer`.
"""
name = _("Customer Form")
form_class = 'shop.forms.checkout.CustomerForm'
template_leaf_name = 'customer.html'
def get_form_data(self, request):
return {'instance': request.user}
DialogFormPluginBase.register_plugin(CustomerFormPlugin)
class GuestFormPlugin(CustomerFormPlugin):
name = _("Guest Form")
form_class = 'shop.forms.checkout.GuestForm'
DialogFormPluginBase.register_plugin(GuestFormPlugin)
class CheckoutAddressPluginBase(DialogFormPluginBase):
def get_form_data(self, request):
filter_args = {'user': request.user, '{}__isnull'.format(self.FormClass.priority_field): False}
AddressModel = self.FormClass.get_model()
address = AddressModel.objects.filter(**filter_args).order_by(self.FormClass.priority_field).first()
if address:
return {'instance': address}
else:
aggr = AddressModel.objects.filter(user=request.user).aggregate(Max(self.FormClass.priority_field))
initial = {'priority': aggr['{}__max'.format(self.FormClass.priority_field)] or 0}
return {'initial': initial}
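# Illustrative sketch, not part of the original code: how the priority lookup
# in CheckoutAddressPluginBase.get_form_data() behaves, assuming a
# hypothetical user with two stored addresses whose priority fields are 1 and 3:
#
#   get_form_data(request)  ->  {'instance': <address with priority=1>}
#
# For a user without any stored address, the Max() aggregate returns None and
# the `or 0` fallback kicks in:
#
#   get_form_data(request)  ->  {'initial': {'priority': 0}}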
class ShippingAddressFormPlugin(CheckoutAddressPluginBase):
name = _("Shipping Address Form")
form_class = 'shop.forms.checkout.ShippingAddressForm'
template_leaf_name = 'shipping-address.html'
DialogFormPluginBase.register_plugin(ShippingAddressFormPlugin)
class BillingAddressFormPlugin(CheckoutAddressPluginBase):
name = _("Billing Address Form")
form_class = 'shop.forms.checkout.BillingAddressForm'
template_leaf_name = 'billing-address.html'
DialogFormPluginBase.register_plugin(BillingAddressFormPlugin)
class PaymentMethodFormPlugin(DialogFormPluginBase):
name = _("Payment Method Form")
form_class = 'shop.forms.checkout.PaymentMethodForm'
template_leaf_name = 'payment-method.html'
def get_form_data(self, request):
# if there is only one payment method available, always set it as default
payment_modifier_choices = self.FormClass.base_fields['payment_modifier'].choices
if len(payment_modifier_choices) == 1:
default_payment_modifier = payment_modifier_choices[0][0]
else:
default_payment_modifier = None
cart = CartModel.objects.get_from_request(request)
initial = {'payment_modifier': cart.extra.get('payment_modifier', default_payment_modifier)}
return {'initial': initial}
def render(self, context, instance, placeholder):
super(PaymentMethodFormPlugin, self).render(context, instance, placeholder)
for payment_modifier in cart_modifiers_pool.get_payment_modifiers():
payment_modifier.update_render_context(context)
return context
if cart_modifiers_pool.get_payment_modifiers():
# Plugin is registered only if at least one payment modifier exists
DialogFormPluginBase.register_plugin(PaymentMethodFormPlugin)
class ShippingMethodFormPlugin(DialogFormPluginBase):
name = _("Shipping Method Form")
form_class = 'shop.forms.checkout.ShippingMethodForm'
template_leaf_name = 'shipping-method.html'
def get_form_data(self, request):
# if there is only one shipping method available, always set it as default
shipping_modifier_choices = self.FormClass.base_fields['shipping_modifier'].choices
if len(shipping_modifier_choices) == 1:
default_shipping_modifier = shipping_modifier_choices[0][0]
else:
default_shipping_modifier = None
cart = CartModel.objects.get_from_request(request)
initial = {'shipping_modifier': cart.extra.get('shipping_modifier', default_shipping_modifier)}
return {'initial': initial}
def render(self, context, instance, placeholder):
super(ShippingMethodFormPlugin, self).render(context, instance, placeholder)
for shipping_modifier in cart_modifiers_pool.get_shipping_modifiers():
shipping_modifier.update_render_context(context)
return context
if cart_modifiers_pool.get_shipping_modifiers():
# Plugin is registered only if at least one shipping modifier exists
DialogFormPluginBase.register_plugin(ShippingMethodFormPlugin)
class ExtraAnnotationFormPlugin(DialogFormPluginBase):
name = _("Extra Annotation Form")
form_class = 'shop.forms.checkout.ExtraAnnotationForm'
template_leaf_name = 'extra-annotation.html'
def get_form_data(self, request):
cart = CartModel.objects.get_from_request(request)
initial = {'annotation': cart.extra.get('annotation', '')}
return {'initial': initial}
DialogFormPluginBase.register_plugin(ExtraAnnotationFormPlugin)
class AcceptConditionFormPlugin(DialogFormPluginBase):
"""
Provides the form to accept any condition.
"""
name = _("Accept Condition")
form_class = 'shop.forms.checkout.AcceptConditionForm'
template_leaf_name = 'accept-condition.html'
html_parser = HTMLParser()
change_form_template = 'cascade/admin/text_plugin_change_form.html'
@classmethod
def get_identifier(cls, instance):
identifier = super(AcceptConditionFormPlugin, cls).get_identifier(instance)
html_content = cls.html_parser.unescape(instance.glossary.get('html_content', ''))
html_content = strip_entities(strip_tags(html_content))
html_content = Truncator(html_content).words(3, truncate=' ...')
return format_html('{}{}', identifier, html_content)
def get_form(self, request, obj=None, **kwargs):
if obj:
html_content = self.html_parser.unescape(obj.glossary.get('html_content', ''))
obj.glossary.update(html_content=html_content)
text_editor_widget = TextEditorWidget(installed_plugins=[TextLinkPlugin], pk=obj.pk,
placeholder=obj.placeholder, plugin_language=obj.language)
kwargs['glossary_fields'] = (
PartialFormField('html_content', text_editor_widget, label=_("HTML content")),
)
return super(AcceptConditionFormPlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
super(AcceptConditionFormPlugin, self).render(context, instance, placeholder)
accept_condition_form = context['accept_condition_form.plugin_{}'.format(instance.id)]
html_content = self.html_parser.unescape(instance.glossary.get('html_content', ''))
html_content = plugin_tags_to_user_html(html_content, context, placeholder)
# transfer the stored HTML content into the widget's label
accept_condition_form['accept'].field.widget.choice_label = mark_safe(html_content)
context['accept_condition_form'] = accept_condition_form
return context
DialogFormPluginBase.register_plugin(AcceptConditionFormPlugin)
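# Illustrative sketch, not part of the original code: the minimal pattern the
# dialog plugins above all follow. `NewsletterForm` and its template name are
# hypothetical and do not exist in this project.
#
#   class NewsletterFormPlugin(DialogFormPluginBase):
#       name = _("Newsletter Form")
#       form_class = 'myshop.forms.checkout.NewsletterForm'  # hypothetical
#       template_leaf_name = 'newsletter.html'
#
#       def get_form_data(self, request):
#           cart = CartModel.objects.get_from_request(request)
#           return {'initial': {'newsletter': cart.extra.get('newsletter', False)}}
#
#   DialogFormPluginBase.register_plugin(NewsletterFormPlugin)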
# ---------------------------------------------------------------------------
#
# Albow - Root widget
#
#---------------------------------------------------------------------------
import sys
import pygame
from pygame import key
from pygame.locals import *
#from pygame.time import get_ticks
from pygame.event import Event
from glbackground import *
import widget
from widget import Widget
from datetime import datetime, timedelta
from albow.dialogs import wrapped_label
from albow.translate import _
from pymclevel.box import Vector
#-# This needs to be changed. We need albow.translate in the config module.
#-# The solution can be a set of functions which let us define the needed MCEdit 'config' data
#-# without importing it.
#-# It can be a 'config' module built only for albow.
from config import config
#-#
import os
import directories
import time
from dialogs import Dialog, Label, Button, Row, Column
start_time = datetime.now()
mod_cmd = KMOD_LCTRL | KMOD_RCTRL | KMOD_LMETA | KMOD_RMETA
double_click_time = timedelta(0, 0, 300000)  # days, seconds, microseconds (0.3 s)
import logging
log = logging.getLogger(__name__)
modifiers = dict(
shift=False,
ctrl=False,
alt=False,
meta=False,
)
modkeys = {
K_LSHIFT: 'shift', K_RSHIFT: 'shift',
K_LCTRL: 'ctrl', K_RCTRL: 'ctrl',
K_LALT: 'alt', K_RALT: 'alt',
K_LMETA: 'meta', K_RMETA: 'meta',
}
MUSIC_END_EVENT = USEREVENT + 1
last_mouse_event = Event(0, pos=(0, 0), local=(0, 0))
last_mouse_event_handler = None
root_widget = None # Root of the containment hierarchy
top_widget = None # Initial dispatch target
clicked_widget = None # Target of mouse_drag and mouse_up events
#---------------------------------------------------------------------------
class Cancel(Exception):
pass
#---------------------------------------------------------------------------
def set_modifier(key, value):
attr = modkeys.get(key)
if attr:
modifiers[attr] = value
def add_modifiers(event):
d = event.dict
d.update(modifiers)
d['cmd'] = event.ctrl or event.meta
def get_root():
return root_widget
def get_top_widget():
return top_widget
def get_focus():
return top_widget.get_focus()
#---------------------------------------------------------------------------
class RootWidget(Widget):
# surface Pygame display surface
# is_gl True if OpenGL surface
redraw_every_frame = False
bonus_draw_time = False
_is_gl_container = True
def __init__(self, surface):
global root_widget
Widget.__init__(self, surface.get_rect())
self.surface = surface
root_widget = self
widget.root_widget = self
self.is_gl = surface.get_flags() & OPENGL != 0
self.idle_handlers = []
self.editor = None
self.selectTool = None
self.movementMath = [-1, 1, 1, -1, 1, -1]
self.movementNum = [0, 0, 2, 2, 1, 1]
self.cameraMath = [-1., 1., -1., 1.]
self.cameraNum = [0, 0, 1, 1]
self.notMove = False
self.nudge = None
self.testTime = None
self.nudgeDirection = None
self.sessionStolen = False
def get_nudge_block(self):
return self.selectTool.panel.nudgeBlocksButton
def take_screenshot(self):
try:
os.mkdir(os.path.join(directories.getCacheDir(), "screenshots"))
except OSError:
pass
screenshot_name = os.path.join(directories.getCacheDir(), "screenshots", time.strftime("%Y-%m-%d (%I-%M-%S-%p)")+".png")
pygame.image.save(pygame.display.get_surface(), screenshot_name)
self.diag = Dialog()
lbl = Label(_("Screenshot taken and saved as '%s'")%screenshot_name, doNotTranslate=True)
folderBtn = Button("Open Folder", action=self.open_screenshots_folder)
btn = Button("Ok", action=self.screenshot_notify)
buttonsRow = Row((btn,folderBtn))
col = Column((lbl,buttonsRow))
self.diag.add(col)
self.diag.shrink_wrap()
self.diag.present()
@staticmethod
def open_screenshots_folder():
from mcplatform import platform_open
platform_open(os.path.join(directories.getCacheDir(), "screenshots"))
def screenshot_notify(self):
self.diag.dismiss()
@staticmethod
def set_timer(ms):
pygame.time.set_timer(USEREVENT, ms)
def run(self):
self.run_modal(None)
captured_widget = None
def capture_mouse(self, widget):
#put the mouse in "virtual mode" and pass mouse moved events to the
#specified widget
if widget:
pygame.mouse.set_visible(False)
pygame.event.set_grab(True)
self.captured_widget = widget
else:
pygame.mouse.set_visible(True)
pygame.event.set_grab(False)
self.captured_widget = None
frames = 0
hover_widget = None
def fix_sticky_ctrl(self):
self.ctrlClicked = -1
def run_modal(self, modal_widget):
if self.editor is None:
self.editor = self.mcedit.editor
self.selectTool = self.editor.toolbar.tools[0]
old_captured_widget = None
if self.captured_widget:
old_captured_widget = self.captured_widget
self.capture_mouse(None)
global last_mouse_event, last_mouse_event_handler
global top_widget, clicked_widget
is_modal = modal_widget is not None
modal_widget = modal_widget or self
from OpenGL import GL
try:
old_top_widget = top_widget
top_widget = modal_widget
was_modal = modal_widget.is_modal
modal_widget.is_modal = True
modal_widget.modal_result = None
if not modal_widget.focus_switch:
modal_widget.tab_to_first()
if clicked_widget:
clicked_widget = modal_widget
num_clicks = 0
last_click_time = start_time
last_click_button = False
self.bonus_draw_time = False
while modal_widget.modal_result is None:
try:
if not self.mcedit.version_checked:
if not self.mcedit.version_lock.locked():
self.mcedit.version_checked = True
self.mcedit.check_for_version()
self.hover_widget = self.find_widget(pygame.mouse.get_pos())
if not self.bonus_draw_time:
self.bonus_draw_time = True
if self.is_gl:
self.gl_clear()
self.gl_draw_all(self, (0, 0))
GL.glFlush()
else:
self.draw_all(self.surface)
pygame.display.flip()
self.frames += 1
#events = [pygame.event.wait()]
events = [pygame.event.poll()]
events.extend(pygame.event.get())
for event in events:
#if event.type:
#log.debug("%s", event)
type = event.type
if type == QUIT:
self.quit()
elif type == MOUSEBUTTONDOWN:
self.bonus_draw_time = False
t = datetime.now()
if t - last_click_time <= double_click_time and event.button == last_click_button:
num_clicks += 1
else:
num_clicks = 1
last_click_button = event.button
last_click_time = t
event.dict['num_clicks'] = num_clicks
add_modifiers(event)
mouse_widget = self.find_widget(event.pos)
if self.captured_widget:
mouse_widget = self.captured_widget
if not mouse_widget.is_inside(modal_widget):
mouse_widget = modal_widget
#if event.button == 1:
clicked_widget = mouse_widget
last_mouse_event_handler = mouse_widget
last_mouse_event = event
mouse_widget.notify_attention_loss()
mouse_widget.handle_mouse('mouse_down', event)
elif type == MOUSEMOTION:
self.bonus_draw_time = False
add_modifiers(event)
modal_widget.dispatch_key('mouse_delta', event)
last_mouse_event = event
mouse_widget = self.update_tooltip(event.pos)
if clicked_widget:
last_mouse_event_handler = clicked_widget
clicked_widget.handle_mouse('mouse_drag', event)
else:
if not mouse_widget.is_inside(modal_widget):
mouse_widget = modal_widget
last_mouse_event_handler = mouse_widget
mouse_widget.handle_mouse('mouse_move', event)
elif type == MOUSEBUTTONUP:
add_modifiers(event)
self.bonus_draw_time = False
mouse_widget = self.find_widget(event.pos)
if self.captured_widget:
mouse_widget = self.captured_widget
if clicked_widget:
last_mouse_event_handler = clicked_widget
event.dict['clicked_widget'] = clicked_widget
else:
last_mouse_event_handler = mouse_widget
event.dict['clicked_widget'] = None
last_mouse_event = event
clicked_widget = None
last_mouse_event_handler.handle_mouse('mouse_up', event)
elif type == KEYDOWN:
key = event.key
set_modifier(key, True)
add_modifiers(event)
self.bonus_draw_time = False
keyname = self.getKey(event)
if keyname == config.keys.takeAScreenshot.get():
self.take_screenshot()
self.send_key(modal_widget, 'key_down', event)
if last_mouse_event_handler:
event.dict['pos'] = last_mouse_event.pos
event.dict['local'] = last_mouse_event.local
last_mouse_event_handler.setup_cursor(event)
elif type == KEYUP:
key = event.key
set_modifier(key, False)
add_modifiers(event)
self.bonus_draw_time = False
keyname = self.getKey(event)
if keyname == config.keys.showBlockInfo.get() and self.editor.toolbar.tools[0].infoKey == 1:
self.editor.toolbar.tools[0].infoKey = 0
if self.nudgeDirection is not None:
keyname = self.getKey(movement=True, keyname=pygame.key.name(key))
for i, key in enumerate(self.editor.movements):
if keyname == key and i == self.nudgeDirection:
self.nudgeDirection = None
self.testTime = None
self.send_key(modal_widget, 'key_up', event)
if last_mouse_event_handler:
event.dict['pos'] = last_mouse_event.pos
event.dict['local'] = last_mouse_event.local
last_mouse_event_handler.setup_cursor(event)
elif type == MUSIC_END_EVENT:
self.music_end()
elif type == USEREVENT:
make_scheduled_calls()
if not is_modal:
if self.redraw_every_frame:
self.bonus_draw_time = False
else:
self.bonus_draw_time = True
if last_mouse_event_handler:
event.dict['pos'] = last_mouse_event.pos
event.dict['local'] = last_mouse_event.local
add_modifiers(event)
last_mouse_event_handler.setup_cursor(event)
self.begin_frame()
elif type == VIDEORESIZE:
#add_modifiers(event)
self.bonus_draw_time = False
self.size = (event.w, event.h)
#self.dispatch_key('reshape', event)
elif type == ACTIVEEVENT:
add_modifiers(event)
self.dispatch_key('activeevent', event)
elif type == NOEVENT:
add_modifiers(event)
self.call_idle_handlers(event)
if not self.sessionStolen:
try:
if self.editor.level is not None and hasattr(self.editor.level, "checkSessionLock"):
self.editor.level.checkSessionLock()
except Exception, e:
log.warn(u"Error reading chunk: %s", e)
if not config.session.override.get():
self.sessionStolen = True
else:
self.editor.level.acquireSessionLock()
if self.editor.level is not None:
self.editor.cameraInputs = [0., 0., 0., 0., 0., 0.]
self.editor.cameraPanKeys = [0., 0., 0., 0.]
allKeys = pygame.key.get_pressed()
allKeysWithData = enumerate(allKeys)
def useKeys((i, keys)):
if not keys:
return
keyName = self.getKey(movement=True, keyname=pygame.key.name(i))
if self.editor.level:
for j, key in enumerate(self.editor.movements):
if keyName == key and not allKeys[pygame.K_LCTRL] and not allKeys[pygame.K_RCTRL] and not allKeys[pygame.K_RMETA] and not allKeys[pygame.K_LMETA]:
self.changeMovementKeys(j, keyName)
for k, key in enumerate(self.editor.cameraPan):
if keyName == key and not allKeys[pygame.K_LCTRL] and not allKeys[pygame.K_RCTRL] and not allKeys[pygame.K_RMETA] and not allKeys[pygame.K_LMETA]:
self.changeCameraKeys(k)
map(useKeys, allKeysWithData)
except Cancel:
pass
finally:
modal_widget.is_modal = was_modal
top_widget = old_top_widget
if old_captured_widget:
self.capture_mouse(old_captured_widget)
clicked_widget = None
@staticmethod
def getKey(evt=None, movement=False, keyname=None):
if keyname is None:
keyname = key.name(evt.key)
if 'left' in keyname and len(keyname) > 5:
keyname = keyname[5:]
elif 'right' in keyname and len(keyname) > 6:
keyname = keyname[6:]
try:
keyname = keyname.replace(keyname[0], keyname[0].upper(), 1)
finally:
if keyname == 'Meta':
keyname = 'Ctrl'
if not movement:
newKeyname = ""
if evt.shift and keyname != "Shift":
newKeyname += "Shift-"
if (evt.ctrl or evt.cmd) and keyname != "Ctrl":
newKeyname += "Ctrl-"
if evt.alt and keyname != "Alt":
newKeyname += "Alt-"
keyname = newKeyname + keyname
if not newKeyname:
if sys.platform == 'linux2':
test_key = getattr(evt, 'scancode', None)
tool_keys = [10, 11, 12, 13, 14, 15, 16, 17, 18]
else:
test_key = keyname
tool_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
if test_key in tool_keys:
keyname = str(tool_keys.index(test_key) + 1)
elif test_key == 19:
keyname = '0'
if keyname == 'Enter':
keyname = 'Return'
return keyname
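    # Illustrative sketch, not part of the original code: how getKey()
    # normalizes pygame key names, assuming KEYDOWN events:
    #
    #   'left shift'          -> 'Shift'    ('left ' stripped, first letter upper-cased)
    #   'right ctrl'          -> 'Ctrl'
    #   'left meta'           -> 'Ctrl'     (Meta is folded into Ctrl)
    #   'a' with Ctrl held    -> 'Ctrl-A'   (modifier prefixes are prepended)
    #   'enter'               -> 'Return'   ('Enter' is normalized to 'Return')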
def changeMovementKeys(self, keyNum, keyname):
if self.editor.level is not None and not self.notMove:
self.editor.cameraInputs[self.movementNum[keyNum]] += self.movementMath[keyNum]
elif self.notMove and self.nudge is not None and (self.testTime is None or datetime.now() - self.testTime >= timedelta(seconds=0.1)):
self.bonus_draw_time = False
self.testTime = datetime.now()
if keyname == self.editor.movements[4]:
self.nudge.nudge(Vector(0, 1, 0))
if keyname == self.editor.movements[5]:
self.nudge.nudge(Vector(0, -1, 0))
Z = self.editor.mainViewport.cameraVector
absZ = map(abs, Z)
if absZ[0] < absZ[2]:
forward = (0, 0, (-1 if Z[2] < 0 else 1))
else:
forward = ((-1 if Z[0] < 0 else 1), 0, 0)
back = map(int.__neg__, forward)
left = forward[2], forward[1], -forward[0]
right = map(int.__neg__, left)
if keyname == self.editor.movements[2]:
self.nudge.nudge(Vector(*forward))
if keyname == self.editor.movements[3]:
self.nudge.nudge(Vector(*back))
if keyname == self.editor.movements[0]:
self.nudge.nudge(Vector(*left))
if keyname == self.editor.movements[1]:
self.nudge.nudge(Vector(*right))
for i, key in enumerate(self.editor.movements):
if key == keyname:
self.nudgeDirection = i
def changeCameraKeys(self, keyNum):
if self.editor.level is not None and not self.notMove:
self.editor.cameraPanKeys[self.cameraNum[keyNum]] = self.cameraMath[keyNum]
def call_idle_handlers(self, event):
def call(ref):
widget = ref()
if widget:
widget.idleevent(event)
else:
print "Idle ref died!"
return bool(widget)
self.idle_handlers = filter(call, self.idle_handlers)
def add_idle_handler(self, widget):
from weakref import ref
self.idle_handlers.append(ref(widget))
def remove_idle_handler(self, widget):
from weakref import ref
self.idle_handlers.remove(ref(widget))
@staticmethod
def send_key(widget, name, event):
widget.dispatch_key(name, event)
def begin_frame(self):
pass
def get_root(self):
return self
labelClass = lambda s, t: wrapped_label(t, 45)
def show_tooltip(self, widget, pos):
if hasattr(self, 'currentTooltip'):
if self.currentTooltip is not None:
self.remove(self.currentTooltip)
self.currentTooltip = None
def TextTooltip(text):
tooltipBacking = Panel()
tooltipBacking.bg_color = (0.0, 0.0, 0.0, 0.8)
tooltipBacking.add(self.labelClass(text))
tooltipBacking.shrink_wrap()
return tooltipBacking
def showTip(tip):
tip.topleft = pos
tip.top += 20
if (tip.bottom > self.bottom) or hasattr(widget, 'tooltipsUp'):
tip.bottomleft = pos
tip.top -= 4
if tip.right > self.right:
tip.right = pos[0]
self.add(tip)
self.currentTooltip = tip
if widget.tooltip is not None:
tip = widget.tooltip
showTip(tip)
else:
ttext = widget.tooltipText
if ttext is not None:
tip = TextTooltip(ttext)
showTip(tip)
def update_tooltip(self, pos=None):
if pos is None:
pos = pygame.mouse.get_pos()
if self.captured_widget:
mouse_widget = self.captured_widget
pos = mouse_widget.center
else:
mouse_widget = self.find_widget(pos)
self.show_tooltip(mouse_widget, pos)
return mouse_widget
def has_focus(self):
return True
def quit(self):
if self.confirm_quit():
self.capture_mouse(None)
sys.exit(0)
@staticmethod
def confirm_quit():
return True
@staticmethod
def get_mouse_for(widget):
last = last_mouse_event
event = Event(0, last.dict)
event.dict['local'] = widget.global_to_local(event.pos)
add_modifiers(event)
return event
def gl_clear(self):
from OpenGL import GL
bg = self.bg_color
if bg:
r = bg[0] / 255.0
g = bg[1] / 255.0
b = bg[2] / 255.0
GL.glClearColor(r, g, b, 0.0)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
@staticmethod
def music_end():
import music
music.music_end()
#-# Used for debugging the resize stuff.
# def resized(self, *args, **kwargs):
# Widget.resized(self, *args, **kwargs)
# print self.size
#---------------------------------------------------------------------------
from bisect import insort
scheduled_calls = []
def make_scheduled_calls():
sched = scheduled_calls
    t = time.time()
while sched and sched[0][0] <= t:
sched[0][1]()
sched.pop(0)
def schedule(delay, func):
"""Arrange for the given function to be called after the specified
delay in seconds. Scheduled functions are called synchronously from
the event loop, and only when the frame timer is running."""
    t = time.time() + delay
insort(scheduled_calls, (t, func))
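# Illustrative sketch, not part of the original code: scheduled calls fire
# from the USEREVENT branch of RootWidget.run_modal(), so the frame timer
# must be running (see RootWidget.set_timer). A hypothetical usage:
#
#   def autosave():
#       log.info("autosave fired")
#       schedule(60.0, autosave)  # re-arm for one minute later
#
#   schedule(60.0, autosave)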
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovoo_base
from sqlalchemy import exc
from sqlalchemy.orm import joinedload
from tacker._i18n import _
from tacker.common import exceptions
from tacker.common import utils
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.db.vnfm import vnfm_db
from tacker import objects
from tacker.objects import base
from tacker.objects import common
from tacker.objects import fields
from tacker.objects import vnf_instantiated_info
from tacker.objects import vnf_package as vnf_package_obj
from tacker.objects import vnf_package_vnfd
LOG = logging.getLogger(__name__)
@db_api.context_manager.reader
def _vnf_instance_get_by_id(context, vnf_instance_id, columns_to_join=None,
read_deleted="no"):
query = api.model_query(context, models.VnfInstance,
read_deleted=read_deleted,
project_only=True). \
filter_by(id=vnf_instance_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
result = query.first()
if not result:
raise exceptions.VnfInstanceNotFound(id=vnf_instance_id)
return result
@db_api.context_manager.writer
def _vnf_instance_create(context, values):
vnf_instance = models.VnfInstance()
vnf_instance.update(values)
vnf_instance.save(context.session)
return _vnf_instance_get_by_id(context, vnf_instance.id,
columns_to_join=["instantiated_vnf_info"])
@db_api.context_manager.writer
def _vnf_instance_update(context, vnf_instance_id, values,
columns_to_join=None):
vnf_instance = _vnf_instance_get_by_id(context, vnf_instance_id,
columns_to_join=columns_to_join)
vnf_instance.update(values)
vnf_instance.save(session=context.session)
return vnf_instance
@db_api.context_manager.writer
def _destroy_vnf_instance(context, uuid):
now = timeutils.utcnow()
updated_values = {'deleted': True,
'deleted_at': now
}
api.model_query(context, models.VnfInstantiatedInfo). \
filter_by(vnf_instance_id=uuid). \
update(updated_values, synchronize_session=False)
api.model_query(context, models.VnfInstance).\
filter_by(id=uuid). \
update(updated_values, synchronize_session=False)
@db_api.context_manager.reader
def _vnf_instance_list(context, columns_to_join=None):
query = api.model_query(context, models.VnfInstance, read_deleted="no",
project_only=True)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
@db_api.context_manager.reader
def _vnf_instance_list_by_filter(context, columns_to_join=None,
filters=None):
query = api.model_query(context, models.VnfInstance,
read_deleted="no",
project_only=True)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
if filters:
query = common.apply_filters(query, filters)
return query.all()
def _make_vnf_instance_list(context, vnf_instance_list, db_vnf_instance_list,
expected_attrs):
vnf_instance_cls = VnfInstance
vnf_instance_list.objects = []
for db_vnf_instance in db_vnf_instance_list:
vnf_instance_obj = vnf_instance_cls._from_db_object(
context, vnf_instance_cls(context), db_vnf_instance,
expected_attrs=expected_attrs)
vnf_instance_list.objects.append(vnf_instance_obj)
vnf_instance_list.obj_reset_changes()
return vnf_instance_list
# Decorator to convert SQLAlchemy errors into DBAccessError.
def _wrap_object_error(method):
def wrapper(*args, **kwargs):
try:
            return method(*args, **kwargs)
except exc.SQLAlchemyError:
raise exceptions.DBAccessError
return wrapper
@db_api.context_manager.reader
def _get_vnf_instance(context, id):
vnf_instance = api.model_query(
context, models.VnfInstance).filter_by(
vnfd_id=id).first()
return vnf_instance
@db_api.context_manager.reader
def _vnf_instance_get(context, vnfd_id, columns_to_join=None):
query = api.model_query(context, models.VnfInstance, read_deleted="no",
project_only=True).filter_by(vnfd_id=vnfd_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.first()
def _merge_vim_connection_info(
pre_vim_connection_info_list,
update_vim_connection_info_list):
def update_nested_element(pre_data, update_data):
for key, val in update_data.items():
if not isinstance(val, dict):
pre_data[key] = val
continue
if key in pre_data:
pre_data[key].update(val)
else:
pre_data.update({key: val})
result = []
clone_pre_list = copy.deepcopy(pre_vim_connection_info_list)
for update_vim_connection in update_vim_connection_info_list:
pre_data = None
        for i in range(len(clone_pre_list)):
            if clone_pre_list[i].id == update_vim_connection.get('id'):
                pre_data = clone_pre_list.pop(i)
                break
if pre_data is None:
# new elm.
result.append(objects.VimConnectionInfo._from_dict(
update_vim_connection))
continue
convert_dict = pre_data.to_dict()
update_nested_element(convert_dict, update_vim_connection)
result.append(objects.VimConnectionInfo._from_dict(
convert_dict))
# Reflecting unupdated data
result.extend(clone_pre_list)
return result
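# Illustrative sketch, not part of the original code: how the merge above
# behaves for a hypothetical pair of inputs. Nested dict values are updated
# key by key, plain values are overwritten, and unmatched entries pass
# through unchanged.
#
#   pre:    [VimConnectionInfo(id='vim1',
#                              access_info={'user': 'u1', 'region': 'r1'})]
#   update: [{'id': 'vim1', 'access_info': {'region': 'r2'}}]
#   result: [VimConnectionInfo(id='vim1',
#                              access_info={'user': 'u1', 'region': 'r2'})]
#
# An update entry whose id matches nothing in the pre list is appended as a
# new VimConnectionInfo built from the dict.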
@db_api.context_manager.writer
def _update_vnf_instances(
context,
vnf_lcm_opoccs,
body_data,
vnfd_pkg_data,
vnfd_id):
updated_values = {}
updated_values['vnf_instance_name'] = body_data.get('vnf_instance_name')
updated_values['vnf_instance_description'] = body_data.get(
'vnf_instance_description')
# get vnf_instances
vnf_instance = _get_vnf_instance(context, vnfd_id)
if body_data.get('metadata'):
vnf_instance.vnf_metadata.update(body_data.get('metadata'))
updated_values['vnf_metadata'] = vnf_instance.vnf_metadata
if body_data.get('vim_connection_info'):
merge_vim_connection_info = _merge_vim_connection_info(
vnf_instance.vim_connection_info,
body_data.get('vim_connection_info'))
updated_values['vim_connection_info'] = merge_vim_connection_info
if vnfd_pkg_data and len(vnfd_pkg_data) > 0:
updated_values['vnfd_id'] = vnfd_pkg_data.get('vnfd_id')
updated_values['vnf_provider'] = vnfd_pkg_data.get('vnf_provider')
updated_values['vnf_product_name'] = vnfd_pkg_data.get(
'vnf_product_name')
updated_values['vnf_software_version'] = vnfd_pkg_data.get(
'vnf_software_version')
updated_values['vnf_pkg_id'] = vnfd_pkg_data.get('package_uuid')
api.model_query(context, models.VnfInstance). \
filter_by(id=vnf_lcm_opoccs.get('vnf_instance_id')). \
update(updated_values, synchronize_session=False)
vnf_now = timeutils.utcnow()
if (body_data.get('vnfd_id') or body_data.get('vnf_pkg_id')):
# update vnf
if body_data.get('vnfd_id'):
updated_values = {'vnfd_id': body_data.get('vnfd_id'),
'updated_at': vnf_now
}
elif body_data.get('vnf_pkg_id'):
updated_values = {'vnfd_id': vnfd_pkg_data.get('vnfd_id'),
'updated_at': vnf_now
}
api.model_query(context, vnfm_db.VNF).\
filter_by(id=vnf_lcm_opoccs.get('vnf_instance_id')). \
update(updated_values, synchronize_session=False)
# get vnf_packages
id = vnfd_pkg_data.get('package_uuid')
try:
vnf_package = vnf_package_obj.VnfPackage.get_by_id(context, id)
except exceptions.VnfPackageNotFound:
raise exceptions.VnfPackageNotFound(id=id)
if vnf_package.usage_state == 'NOT_IN_USE':
# update vnf_packages
now = timeutils.utcnow()
updated_values = {'usage_state': 'IN_USE',
'updated_at': now
}
api.model_query(context, models.VnfPackage).\
filter_by(id=id). \
update(updated_values, synchronize_session=False)
# get vnf_instances
vnf_instance = _get_vnf_instance(context, vnfd_id)
if not vnf_instance:
# get vnf_package_vnfd
vnfd_data = vnf_package_vnfd.VnfPackageVnfd.get_by_vnfdId(
context, vnfd_id)
# update vnf_packages
now = timeutils.utcnow()
updated_values = {'usage_state': 'NOT_IN_USE',
'updated_at': now
}
api.model_query(context, models.VnfPackage).\
filter_by(id=vnfd_data.package_uuid). \
update(updated_values, synchronize_session=False)
return vnf_now
@base.TackerObjectRegistry.register
class VnfInstance(base.TackerObject, base.TackerPersistentObject,
base.TackerObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.UUIDField(nullable=False),
'vnf_instance_name': fields.StringField(nullable=True),
'vnf_instance_description': fields.StringField(nullable=True),
'instantiation_state':
fields.VnfInstanceStateField(
nullable=False,
default=fields.VnfInstanceState.NOT_INSTANTIATED),
'task_state': fields.StringField(nullable=True, default=None),
'vnfd_id': fields.StringField(nullable=False),
'vnf_provider': fields.StringField(nullable=False),
'vnf_product_name': fields.StringField(nullable=False),
'vnf_software_version': fields.StringField(nullable=False),
'vnfd_version': fields.StringField(nullable=False),
'vim_connection_info': fields.ListOfObjectsField(
'VimConnectionInfo', nullable=True, default=[]),
'tenant_id': fields.StringField(nullable=False),
'vnf_metadata': fields.DictOfStringsField(nullable=True, default={}),
'vnf_pkg_id': fields.StringField(nullable=False),
'instantiated_vnf_info': fields.ObjectField('InstantiatedVnfInfo',
nullable=True, default=None)
}
ALL_ATTRIBUTES = {
'id': ('id', "uuid", 'VnfInstance'),
'vnfInstanceName': ('vnf_instance_name', 'string', 'VnfInstance'),
'vnfInstanceDescription': (
'vnf_instance_description', 'string', 'VnfInstance'),
'instantiationState': ('instantiation_state', 'string', 'VnfInstance'),
'taskState': ('task_state', 'string', 'VnfInstance'),
'vnfdId': ('vnfd_id', 'uuid', 'VnfInstance'),
'vnfProvider': ('vnf_provider', 'string', 'VnfInstance'),
'vnfProductName': ('vnf_product_name', 'string', 'VnfInstance'),
'vnfSoftwareVersion': (
'vnf_software_version', 'string', 'VnfInstance'),
'vnfdVersion': ('vnfd_version', 'string', 'VnfInstance'),
'tenantId': ('tenant_id', 'string', 'VnfInstance'),
'vnfPkgId': ('vnf_pkg_id', 'uuid', 'VnfInstance'),
'vimConnectionInfo/*': ('vim_connection_info', 'key_value_pair',
{"key_column": "key", "value_column": "value",
"model": "VnfInstance"}),
'metadata/*': ('vnf_metadata', 'key_value_pair',
{"key_column": "key", "value_column": "value",
"model": "VnfInstance"}),
}
ALL_ATTRIBUTES.update(
vnf_instantiated_info.InstantiatedVnfInfo.ALL_ATTRIBUTES)
FLATTEN_ATTRIBUTES = utils.flatten_dict(ALL_ATTRIBUTES.copy())
def __init__(self, context=None, **kwargs):
super(VnfInstance, self).__init__(context, **kwargs)
self.obj_set_defaults()
@staticmethod
def _from_db_object(context, vnf_instance, db_vnf_instance,
expected_attrs=None):
special_fields = ["instantiated_vnf_info", "vim_connection_info"]
for key in vnf_instance.fields:
if key in special_fields:
continue
setattr(vnf_instance, key, db_vnf_instance[key])
VnfInstance._load_instantiated_vnf_info_from_db_object(
context, vnf_instance, db_vnf_instance)
vim_connection_info = db_vnf_instance['vim_connection_info']
vim_connection_list = [objects.VimConnectionInfo.obj_from_primitive(
vim_info, context) for vim_info in vim_connection_info]
vnf_instance.vim_connection_info = vim_connection_list
vnf_instance._context = context
vnf_instance.obj_reset_changes()
return vnf_instance
@staticmethod
def _load_instantiated_vnf_info_from_db_object(context, vnf_instance,
db_vnf_instance):
if db_vnf_instance['instantiated_vnf_info']:
inst_vnf_info = objects.InstantiatedVnfInfo.obj_from_db_obj(
context, db_vnf_instance['instantiated_vnf_info'])
vnf_instance.instantiated_vnf_info = inst_vnf_info
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason=_('already created'))
updates = self.obj_get_changes()
if 'id' not in updates:
updates['id'] = uuidutils.generate_uuid()
self.id = updates['id']
# add default vnf_instance_name if not specified
# format: 'vnf' + <vnf instance id>
if 'vnf_instance_name' not in updates or \
not updates.get("vnf_instance_name"):
updates['vnf_instance_name'] = 'vnf-' + self.id
self.vnf_instance_name = updates['vnf_instance_name']
db_vnf_instance = _vnf_instance_create(self._context, updates)
expected_attrs = ["instantiated_vnf_info"]
self._from_db_object(self._context, self, db_vnf_instance,
expected_attrs=expected_attrs)
@base.remotable
@_wrap_object_error
def save(self):
context = self._context
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(self.fields[field], fields.ObjectField)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception('No save handler for %s', field)
elif (self.obj_attr_is_set(field) and
isinstance(self.fields[field], fields.ListOfObjectsField)):
field_list = getattr(self, field)
updates[field] = [obj.obj_to_primitive() for obj in field_list]
elif field in changes:
if (field == 'vnf_instance_name' and
not self[field]):
self.vnf_instance_name = 'vnf-' + self.id
updates[field] = self[field]
expected_attrs = ["instantiated_vnf_info"]
db_vnf_instance = _vnf_instance_update(self._context,
self.id, updates,
columns_to_join=expected_attrs)
self._from_db_object(self._context, self, db_vnf_instance)
def _save_instantiated_vnf_info(self, context):
if self.instantiated_vnf_info:
with self.instantiated_vnf_info.obj_alternate_context(context):
self.instantiated_vnf_info.save()
@base.remotable
@_wrap_object_error
def update_metadata(self, data):
_metadata = copy.deepcopy(self['vnf_metadata'])
_metadata.update(data)
self['vnf_metadata'] = _metadata
self.save()
@base.remotable
@_wrap_object_error
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='destroy',
reason='no uuid')
_destroy_vnf_instance(context, self.id)
def to_dict(self):
data = {'id': self.id,
'vnf_instance_name': self.vnf_instance_name,
'vnf_instance_description': self.vnf_instance_description,
'instantiation_state': self.instantiation_state,
'vnfd_id': self.vnfd_id,
'vnf_provider': self.vnf_provider,
'vnf_product_name': self.vnf_product_name,
'vnf_software_version': self.vnf_software_version,
'vnfd_version': self.vnfd_version,
'vnf_pkg_id': self.vnf_pkg_id,
'vnf_metadata': self.vnf_metadata}
if (self.instantiation_state == fields.VnfInstanceState.INSTANTIATED
and self.instantiated_vnf_info):
data.update({'instantiated_vnf_info':
self.instantiated_vnf_info.to_dict()})
vim_connection_info_list = []
for vim_connection_info in self.vim_connection_info:
vim_connection_info_list.append(vim_connection_info.to_dict())
data.update({'vim_connection_info': vim_connection_info_list})
return data
@base.remotable
def update(
self,
context,
vnf_lcm_opoccs,
body_data,
vnfd_pkg_data,
vnfd_id):
# update vnf_instances
return _update_vnf_instances(
context,
vnf_lcm_opoccs,
body_data,
vnfd_pkg_data,
vnfd_id)
@base.remotable_classmethod
def get_by_id(cls, context, id, read_deleted="no"):
expected_attrs = ["instantiated_vnf_info"]
db_vnf_instance = _vnf_instance_get_by_id(
context, id, columns_to_join=expected_attrs,
read_deleted=read_deleted)
return cls._from_db_object(context, cls(), db_vnf_instance,
expected_attrs=expected_attrs)
@base.TackerObjectRegistry.register
class VnfInstanceList(ovoo_base.ObjectListBase, base.TackerObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('VnfInstance')
}
@base.remotable_classmethod
def get_all(cls, context, expected_attrs=None):
expected_attrs = ["instantiated_vnf_info"]
db_vnf_instances = _vnf_instance_list(context,
columns_to_join=expected_attrs)
return _make_vnf_instance_list(context, cls(), db_vnf_instances,
expected_attrs)
@base.remotable_classmethod
def get_by_filters(cls, context, filters=None,
expected_attrs=None):
expected_attrs = ["instantiated_vnf_info"]
db_vnf_instances = _vnf_instance_list_by_filter(
context, columns_to_join=expected_attrs,
filters=filters)
return _make_vnf_instance_list(context, cls(), db_vnf_instances,
expected_attrs)
@base.remotable_classmethod
def vnf_instance_list(cls, vnfd_id, context):
# get vnf_instance data
expected_attrs = ["instantiated_vnf_info"]
db_vnf_instances = _vnf_instance_get(context, vnfd_id,
columns_to_join=expected_attrs)
        vnf_instance_cls = VnfInstance
        vnf_instance_obj = vnf_instance_cls._from_db_object(
            context, vnf_instance_cls(context), db_vnf_instances,
            expected_attrs=expected_attrs)
        return vnf_instance_obj
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run stress test in C++"""
from __future__ import print_function
import argparse
import atexit
import dockerjob
import itertools
import jobset
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid
# 'docker run -i' can leave the terminal with echo disabled, so restore it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT = 8080
_DEFAULT_METRICS_PORT = 8081
_DEFAULT_TEST_CASES = 'empty_unary:20,large_unary:20,client_streaming:20,server_streaming:20,empty_stream:20'
_DEFAULT_NUM_CHANNELS_PER_SERVER = 5
_DEFAULT_NUM_STUBS_PER_CHANNEL = 10
# 15 mins default
_DEFAULT_TEST_DURATION_SECS = 900
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/stress_test'] + args
def server_cmd(self, args):
return ['bins/opt/interop_server'] + args
def global_env(self):
return {}
def __str__(self):
return 'c++'
_LANGUAGES = {'c++': CXXLanguage(),}
# languages supported as cloud_to_cloud servers
_SERVERS = ['c++']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
docker_cmdline = ['docker', 'run', '-i', '--rm=true']
# turn environ into -e docker args
if environ:
for k, v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k, v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ['-w', workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
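# Illustrative sketch, not part of the original code: the shape of the
# generated command line, assuming a hypothetical image name.
#
#   docker_run_cmdline(['bins/opt/stress_test'],
#                      image='grpc_interop_stress_cxx:latest',
#                      environ={'FOO': 'bar'},
#                      cwd='test/cpp')
#   -> ['docker', 'run', '-i', '--rm=true',
#       '-e', 'FOO=bar',
#       '-w', '/var/local/git/grpc/test/cpp',
#       'grpc_interop_stress_cxx:latest', 'bins/opt/stress_test']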
def bash_login_cmdline(cmdline):
"""Creates bash -l -c cmdline from args list."""
# Use login shell:
# * rvm and nvm require it
# * makes error messages clearer if executables are missing
return ['bash', '-l', '-c', ' '.join(cmdline)]
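# For example (not part of the original code):
#
#   bash_login_cmdline(['bins/opt/interop_server', '--port=8080'])
#   -> ['bash', '-l', '-c', 'bins/opt/interop_server --port=8080']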
def _job_kill_handler(job):
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
    # When the job times out and we decide to kill it,
    # we need to wait a bit before restarting the job
    # to prevent a "container name already in use" error.
    # TODO(jtattermusch): figure out a cleaner way to do this.
time.sleep(2)
def cloud_to_cloud_jobspec(language,
test_cases,
                           server_name,
                           server_addresses,
test_duration_secs,
num_channels_per_server,
num_stubs_per_channel,
metrics_port,
docker_image=None):
"""Creates jobspec for cloud-to-cloud interop test"""
cmdline = bash_login_cmdline(language.client_cmd([
'--test_cases=%s' % test_cases, '--server_addresses=%s' %
server_addresses, '--test_duration_secs=%s' % test_duration_secs,
'--num_stubs_per_channel=%s' % num_stubs_per_channel,
'--num_channels_per_server=%s' % num_channels_per_server,
'--metrics_port=%s' % metrics_port
]))
print(cmdline)
cwd = language.client_cwd
  environ = language.global_env()
  container_name = None
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' %
language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=['--net=host', '--name', container_name])
cwd = None
test_job = jobset.JobSpec(cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='cloud_to_cloud:%s:%s_server:stress_test' % (
language, server_name),
timeout_seconds=test_duration_secs * 2,
flake_retries=0,
timeout_retries=0,
kill_handler=_job_kill_handler)
test_job.container_name = container_name
return test_job
def server_jobspec(language, docker_image, test_duration_secs):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name('interop_server_%s' %
language.safename)
cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %
_DEFAULT_SERVER_PORT]))
environ = language.global_env()
docker_cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])
server_job = jobset.JobSpec(cmdline=docker_cmdline,
environ=environ,
shortname='interop_server_%s' % language,
timeout_seconds=test_duration_secs * 3)
server_job.container_name = container_name
return server_job
def build_interop_stress_image_jobspec(language, tag=None):
"""Creates jobspec for building stress test docker image for a language"""
if not tag:
tag = 'grpc_interop_stress_%s:%s' % (language.safename, uuid.uuid4())
env = {'INTEROP_IMAGE': tag,
'BASE_NAME': 'grpc_interop_stress_%s' % language.safename}
build_job = jobset.JobSpec(cmdline=['tools/run_tests/dockerize/build_interop_stress_image.sh'],
environ=env,
shortname='build_docker_%s' % (language),
timeout_seconds=30 * 60)
build_job.tag = tag
return build_job
argp = argparse.ArgumentParser(description='Run stress tests.')
argp.add_argument('-l',
'--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
'-s',
'--server',
choices=['all'] + sorted(_SERVERS),
action='append',
help='Run cloud_to_cloud servers in a separate docker ' + 'image.',
default=[])
argp.add_argument(
'--override_server',
action='append',
type=lambda kv: kv.split('='),
help=
'Use servername=HOST:PORT to explicitly specify a server. E.g. '
'csharp=localhost:50000',
default=[])
argp.add_argument('--test_duration_secs',
                  help='The duration of the test in seconds',
                  type=int,
                  default=_DEFAULT_TEST_DURATION_SECS)
args = argp.parse_args()
servers = set(
s
for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
for x in args.server))
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(_LANGUAGES.iterkeys(
) if x == 'all' else [x] for x in args.language))
docker_images = {}
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k]
for k in set([str(l) for l in languages] + [s for s in servers]))
build_jobs = []
for l in languages_to_build:
job = build_interop_stress_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message('START', 'Building interop docker images.', do_newline=True)
num_failures, _ = jobset.run(build_jobs,
newline_on_success=True,
maxjobs=args.jobs)
if num_failures == 0:
jobset.message('SUCCESS',
'All docker images built successfully.',
do_newline=True)
else:
jobset.message('FAILED',
'Failed to build interop docker images.',
do_newline=True)
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
# Start interop servers.
server_jobs = {}
server_addresses = {}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang), args.test_duration_secs)
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = ('localhost',
job.mapped_port(_DEFAULT_SERVER_PORT))
jobs = []
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
for language in languages:
test_job = cloud_to_cloud_jobspec(
language,
          _DEFAULT_TEST_CASES,
          server_name,
('%s:%s' % (server_host, server_port)),
args.test_duration_secs,
_DEFAULT_NUM_CHANNELS_PER_SERVER,
_DEFAULT_NUM_STUBS_PER_CHANNEL,
_DEFAULT_METRICS_PORT,
docker_image=docker_images.get(str(language)))
jobs.append(test_job)
if not jobs:
print('No jobs to run.')
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
maxjobs=args.jobs)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
finally:
# Check if servers are still running.
for server, job in server_jobs.items():
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
for image in docker_images.itervalues():
print('Removing docker image %s' % image)
dockerjob.remove_image(image)
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
import platform
import sys
try:
import koji
except ImportError:
import inspect
import sys
# Find our mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji
from atomic_reactor.constants import IMAGE_TYPE_DOCKER_ARCHIVE
from atomic_reactor.core import DockerTasker
from atomic_reactor.plugins.post_koji_upload import (KojiUploadLogger,
KojiUploadPlugin)
from atomic_reactor.plugin import PostBuildPluginsRunner, PluginFailedException
from atomic_reactor.inner import DockerBuildWorkflow, TagConf, PushConf
from atomic_reactor.util import ImageName, ManifestDigest
from atomic_reactor.rpm_util import parse_rpm_output
from atomic_reactor.source import GitSource
from atomic_reactor.build import BuildResult
from tests.constants import SOURCE, MOCK
from flexmock import flexmock
import pytest
from tests.docker_mock import mock_docker
import subprocess
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from six import string_types
NAMESPACE = 'mynamespace'
BUILD_ID = 'build-1'
KOJI_UPLOAD_DIR = 'upload'
LOCAL_ARCH = platform.processor()
if sys.version_info[0] == 2:
B_LOCAL_ARCH = bytes(LOCAL_ARCH)
elif sys.version_info[0] == 3:
B_LOCAL_ARCH = LOCAL_ARCH.encode()
def noop(*args, **kwargs): return None
# temp workaround until this API is added to osbs-client
OSBS.create_config_map = noop
OSBS.get_config_map = noop
class X(object):
pass
class MockedOSBS(OSBS):
def __init__(self, logs_return_bytes=True):
self.configmap = {}
if logs_return_bytes:
logs = b'build logs - \xe2\x80\x98 \xe2\x80\x97 \xe2\x80\x99'
else:
logs = 'build logs - \u2018 \u2017 \u2019'
(flexmock(OSBS)
.should_receive('get_build_logs')
.with_args(BUILD_ID)
.and_return(logs))
(flexmock(OSBS)
.should_receive('get_pod_for_build')
.with_args(BUILD_ID)
.and_return(MockedPodResponse()))
(flexmock(OSBS)
.should_receive('create_config_map')
.with_args(BUILD_ID+'-md', dict)
.replace_with(self.create_config_map))
(flexmock(OSBS)
.should_receive('get_config_map')
.with_args(BUILD_ID+'-md')
.replace_with(self.get_config_map))
def create_config_map(self, name, data):
assert isinstance(data, dict)
assert is_string_type(name)
self.configmap[name] = data
def get_config_map(self, name):
assert name in self.configmap
return self.configmap[name]
class MockedPodResponse(object):
def get_container_image_ids(self):
return {'buildroot:latest': '0123456'}
class MockedClientSession(object):
TAG_TASK_ID = 1234
DEST_TAG = 'images-candidate'
def __init__(self, hub, opts=None, task_states=None):
self.uploaded_files = []
self.build_tags = {}
self.task_states = task_states or ['FREE', 'ASSIGNED', 'CLOSED']
self.task_states = list(self.task_states)
self.task_states.reverse()
self.tag_task_state = self.task_states.pop()
def krb_login(self, principal=None, keytab=None, proxyuser=None):
return True
def ssl_login(self, cert, ca, serverca, proxyuser=None):
return True
def logout(self):
pass
def uploadWrapper(self, localfile, path, name=None, callback=None,
blocksize=1048576, overwrite=True):
self.uploaded_files.append(name)
self.blocksize = blocksize
assert path.split(os.path.sep, 1)[0] == KOJI_UPLOAD_DIR
def CGImport(self, metadata, server_dir):
self.metadata = metadata
self.server_dir = server_dir
return {"id": "123"}
def getBuildTarget(self, target):
return {'dest_tag_name': self.DEST_TAG}
def tagBuild(self, tag, build, force=False, fromtag=None):
self.build_tags[build] = tag
return self.TAG_TASK_ID
def getTaskInfo(self, task_id, request=False):
assert task_id == self.TAG_TASK_ID
# For extra code coverage, imagine Koji denies the task ever
# existed.
if self.tag_task_state is None:
return None
return {'state': koji.TASK_STATES[self.tag_task_state]}
def taskFinished(self, task_id):
try:
self.tag_task_state = self.task_states.pop()
except IndexError:
# No more state changes
pass
return self.tag_task_state in ['CLOSED', 'FAILED', 'CANCELED', None]
FAKE_SIGMD5 = b'0' * 32
FAKE_RPM_OUTPUT = (
b'name1;1.0;1;' + B_LOCAL_ARCH + b';0;' + FAKE_SIGMD5 + b';(none);'
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID abcdef01234567\n'
b'gpg-pubkey;01234567;01234567;(none);(none);(none);(none);(none)\n'
b'gpg-pubkey-doc;01234567;01234567;noarch;(none);' + FAKE_SIGMD5 +
b';(none);(none)\n'
b'name2;2.0;2;' + B_LOCAL_ARCH + b';0;' + FAKE_SIGMD5 + b';' +
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID bcdef012345678;(none)\n'
b'\n')
FAKE_OS_OUTPUT = 'fedora-22'
def fake_subprocess_output(cmd):
if cmd.startswith('/bin/rpm'):
return FAKE_RPM_OUTPUT
elif 'os-release' in cmd:
return FAKE_OS_OUTPUT
else:
raise RuntimeError
class MockedPopen(object):
def __init__(self, cmd, *args, **kwargs):
self.cmd = cmd
def wait(self):
return 0
def communicate(self):
return (fake_subprocess_output(self.cmd), '')
def fake_Popen(cmd, *args, **kwargs):
return MockedPopen(cmd, *args, **kwargs)
def fake_digest(image):
tag = image.to_str(registry=False)
return 'sha256:{0:032x}'.format(len(tag))
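# Illustrative sketch, not part of the original code: the fake digest is the
# length of the registry-less image string rendered as 32 hex digits,
# assuming ImageName.to_str(registry=False) yields 'repo:tag'.
#
#   fake_digest(ImageName(repo='ns/name', tag='1.0'))
#   -> 'sha256:0000000000000000000000000000000b'    # len('ns/name:1.0') == 11 == 0xb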
def is_string_type(obj):
return any(isinstance(obj, strtype)
for strtype in string_types)
def mock_environment(tmpdir, session=None, name=None,
component=None, version=None, release=None,
source=None, build_process_failed=False,
docker_registry=True, pulp_registries=0,
blocksize=None, task_states=None,
additional_tags=None, has_config=None,
prefer_schema1_digest=True):
if session is None:
        session = MockedClientSession('', task_states=task_states)
if source is None:
source = GitSource('git', 'git://hostname/path')
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
base_image_id = '123456parent-id'
setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', '123456imageid')
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
setattr(workflow.builder.source, 'dockerfile_path', None)
setattr(workflow.builder.source, 'path', None)
setattr(workflow, 'tag_conf', TagConf())
with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
df.write('FROM base\n'
'LABEL BZComponent={component} com.redhat.component={component}\n'
'LABEL Version={version} version={version}\n'
'LABEL Release={release} release={release}\n'
.format(component=component, version=version, release=release))
setattr(workflow.builder, 'df_path', df.name)
if name and version:
workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
.format(v=version))
if name and version and release:
workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
version,
release),
"{0}:{1}".format(name, version),
"{0}:latest".format(name)])
if additional_tags:
workflow.tag_conf.add_primary_images(["{0}:{1}".format(name, tag)
for tag in additional_tags])
flexmock(subprocess, Popen=fake_Popen)
flexmock(koji, ClientSession=lambda hub, opts: session)
flexmock(GitSource)
setattr(workflow, 'source', source)
setattr(workflow.source, 'lg', X())
setattr(workflow.source.lg, 'commit_id', '123456')
setattr(workflow, 'push_conf', PushConf())
if docker_registry:
docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')
for image in workflow.tag_conf.images:
tag = image.to_str(registry=False)
if pulp_registries and prefer_schema1_digest:
docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
v2='sha256:not-used')
else:
docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
v2=fake_digest(image))
if has_config:
docker_reg.config = {
'config': {'architecture': LOCAL_ARCH},
'container_config': {}
}
for pulp_registry in range(pulp_registries):
workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')
with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
fp.write('x' * 2**12)
setattr(workflow, 'exported_image_sequence', [{'path': fp.name,
'type': IMAGE_TYPE_DOCKER_ARCHIVE}])
if build_process_failed:
workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
fail_reason="not built")
else:
workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
image_id="id1234")
workflow.prebuild_plugins_conf = {}
workflow.image_components = parse_rpm_output([
"name1;1.0;1;" + LOCAL_ARCH + ";0;2000;" + FAKE_SIGMD5.decode() + ";23000;"
"RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
"name2;2.0;1;" + LOCAL_ARCH + ";0;3000;" + FAKE_SIGMD5.decode() + ";24000"
"RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
])
return tasker, workflow
@pytest.fixture
def os_env(monkeypatch):
monkeypatch.setenv('BUILD', json.dumps({
"metadata": {
"creationTimestamp": "2015-07-27T09:24:00Z",
"namespace": NAMESPACE,
"name": BUILD_ID,
}
}))
monkeypatch.setenv('OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE', 'buildroot:latest')
def create_runner(tasker, workflow, ssl_certs=False, principal=None,
keytab=None, blocksize=None, target=None,
prefer_schema1_digest=None, platform=None,
multiple=None):
args = {
'kojihub': '',
'url': '/',
'build_json_dir': '',
'koji_upload_dir': KOJI_UPLOAD_DIR,
}
if ssl_certs:
args['koji_ssl_certs_dir'] = '/'
if principal:
args['koji_principal'] = principal
if keytab:
args['koji_keytab'] = keytab
if blocksize:
args['blocksize'] = blocksize
if target:
args['target'] = target
args['poll_interval'] = 0
if prefer_schema1_digest is not None:
args['prefer_schema1_digest'] = prefer_schema1_digest
if platform is not None:
args['platform'] = platform
if multiple is not None:
args['report_multiple_digests'] = multiple
plugins_conf = [
{'name': KojiUploadPlugin.key, 'args': args},
]
workflow.postbuild_plugins_conf = plugins_conf
runner = PostBuildPluginsRunner(tasker, workflow, plugins_conf)
return runner
def get_metadata(workflow, osbs):
cm_annotations = workflow.postbuild_results[KojiUploadPlugin.key]
if not cm_annotations:
return {}
assert "metadata_fragment" in cm_annotations
assert "metadata_fragment_key" in cm_annotations
cmlen = len("configmap/")
cm_key = cm_annotations["metadata_fragment"][cmlen:]
cm_frag_key = cm_annotations["metadata_fragment_key"]
cm_data = osbs.get_config_map(cm_key)
return cm_data[cm_frag_key]
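# Illustrative example of the annotation shape get_metadata() consumes
# (values are hypothetical): with
#     cm_annotations == {'metadata_fragment': 'configmap/build-1-md',
#                        'metadata_fragment_key': 'metadata.json'}
# it strips the 'configmap/' prefix and returns
#     osbs.get_config_map('build-1-md')['metadata.json']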
class TestKojiUploadLogger(object):
@pytest.mark.parametrize('totalsize', [0, 1024])
def test_with_zero(self, totalsize):
logger = flexmock()
logger.should_receive('debug').once()
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
@pytest.mark.parametrize(('totalsize', 'step', 'expected_times'), [
(10, 1, 11),
(12, 1, 7),
(12, 3, 5),
])
def test_with_defaults(self, totalsize, step, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
for offset in range(step, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
@pytest.mark.parametrize(('totalsize', 'step', 'notable', 'expected_times'), [
(10, 1, 10, 11),
(10, 1, 20, 6),
(10, 1, 25, 5),
(12, 3, 25, 5),
])
def test_with_notable(self, totalsize, step, notable, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger, notable_percent=notable)
for offset in range(0, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
class TestKojiUpload(object):
def test_koji_upload_failed_build(self, tmpdir, os_env):
session = MockedClientSession('')
osbs = MockedOSBS()
tasker, workflow = mock_environment(tmpdir,
session=session,
build_process_failed=True,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
runner.run()
# Must not have uploaded this build
metadata = get_metadata(workflow, osbs)
assert not metadata
def test_koji_upload_no_tagconf(self, tmpdir, os_env):
tasker, workflow = mock_environment(tmpdir)
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_upload_no_build_env(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD environment variable
monkeypatch.delenv("BUILD", raising=False)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_upload' raised an exception: KeyError" in str(exc)
def test_koji_upload_no_build_metadata(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD metadata
monkeypatch.setenv("BUILD", json.dumps({}))
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize('params', [
{
'should_raise': False,
'principal': None,
'keytab': None,
},
{
'should_raise': False,
'principal': 'principal@EXAMPLE.COM',
'keytab': 'FILE:/var/run/secrets/mysecret',
},
{
'should_raise': True,
'principal': 'principal@EXAMPLE.COM',
'keytab': None,
},
{
'should_raise': True,
'principal': None,
'keytab': 'FILE:/var/run/secrets/mysecret',
},
])
def test_koji_upload_krb_args(self, tmpdir, params, os_env):
session = MockedClientSession('')
expectation = flexmock(session).should_receive('krb_login').and_return(True)
name = 'name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release)
runner = create_runner(tasker, workflow,
principal=params['principal'],
keytab=params['keytab'])
if params['should_raise']:
expectation.never()
with pytest.raises(PluginFailedException):
runner.run()
else:
expectation.once()
runner.run()
def test_koji_upload_krb_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('krb_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_upload_ssl_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('ssl_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow, ssl_certs=True)
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize('fail_method', [
'get_build_logs',
'get_pod_for_build',
])
def test_koji_upload_osbs_fail(self, tmpdir, os_env, fail_method):
tasker, workflow = mock_environment(tmpdir,
name='name',
version='1.0',
release='1')
(flexmock(OSBS)
.should_receive(fail_method)
.and_raise(OsbsException))
runner = create_runner(tasker, workflow)
runner.run()
@staticmethod
def check_components(components):
assert isinstance(components, list)
assert len(components) > 0
for component_rpm in components:
assert isinstance(component_rpm, dict)
assert set(component_rpm.keys()) == set([
'type',
'name',
'version',
'release',
'epoch',
'arch',
'sigmd5',
'signature',
])
assert component_rpm['type'] == 'rpm'
assert component_rpm['name']
assert is_string_type(component_rpm['name'])
assert component_rpm['name'] != 'gpg-pubkey'
assert component_rpm['version']
assert is_string_type(component_rpm['version'])
assert component_rpm['release']
epoch = component_rpm['epoch']
assert epoch is None or isinstance(epoch, int)
assert is_string_type(component_rpm['arch'])
assert component_rpm['signature'] != '(none)'
def validate_buildroot(self, buildroot):
assert isinstance(buildroot, dict)
assert set(buildroot.keys()) == set([
'id',
'host',
'content_generator',
'container',
'tools',
'components',
'extra',
])
host = buildroot['host']
assert isinstance(host, dict)
assert set(host.keys()) == set([
'os',
'arch',
])
assert host['os']
assert is_string_type(host['os'])
assert host['arch']
assert is_string_type(host['arch'])
assert host['arch'] != 'amd64'
content_generator = buildroot['content_generator']
assert isinstance(content_generator, dict)
assert set(content_generator.keys()) == set([
'name',
'version',
])
assert content_generator['name']
assert is_string_type(content_generator['name'])
assert content_generator['version']
assert is_string_type(content_generator['version'])
container = buildroot['container']
assert isinstance(container, dict)
assert set(container.keys()) == set([
'type',
'arch',
])
assert container['type'] == 'docker'
assert container['arch']
assert is_string_type(container['arch'])
assert isinstance(buildroot['tools'], list)
assert len(buildroot['tools']) > 0
for tool in buildroot['tools']:
assert isinstance(tool, dict)
assert set(tool.keys()) == set([
'name',
'version',
])
assert tool['name']
assert is_string_type(tool['name'])
assert tool['version']
assert is_string_type(tool['version'])
self.check_components(buildroot['components'])
extra = buildroot['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'osbs',
])
assert 'osbs' in extra
osbs = extra['osbs']
assert isinstance(osbs, dict)
assert set(osbs.keys()) == set([
'build_id',
'builder_image_id',
])
assert is_string_type(osbs['build_id'])
assert is_string_type(osbs['builder_image_id'])
def validate_output(self, output, has_config,
expect_digest):
assert isinstance(output, dict)
assert 'buildroot_id' in output
assert 'filename' in output
assert output['filename']
assert is_string_type(output['filename'])
assert 'filesize' in output
assert int(output['filesize']) > 0
assert 'arch' in output
assert output['arch']
assert is_string_type(output['arch'])
assert 'checksum' in output
assert output['checksum']
assert is_string_type(output['checksum'])
assert 'checksum_type' in output
assert output['checksum_type'] == 'md5'
assert is_string_type(output['checksum_type'])
assert 'type' in output
if output['type'] == 'log':
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
])
assert output['arch'] == LOCAL_ARCH
else:
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'components',
'extra',
])
assert output['type'] == 'docker-image'
assert is_string_type(output['arch'])
assert output['arch'] != 'noarch'
assert output['arch'] in output['filename']
self.check_components(output['components'])
extra = output['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'image',
'docker',
])
image = extra['image']
assert isinstance(image, dict)
assert set(image.keys()) == set([
'arch',
])
assert image['arch'] == output['arch'] # what else?
assert 'docker' in extra
docker = extra['docker']
assert isinstance(docker, dict)
expected_keys_set = set([
'parent_id',
'id',
'repositories',
'layer_sizes',
'tags',
])
if has_config:
expected_keys_set.add('config')
assert set(docker.keys()) == expected_keys_set
assert is_string_type(docker['parent_id'])
assert is_string_type(docker['id'])
repositories = docker['repositories']
assert isinstance(repositories, list)
repositories_digest = list(filter(lambda repo: '@sha256' in repo, repositories))
repositories_tag = list(filter(lambda repo: '@sha256' not in repo, repositories))
assert len(repositories_tag) == 1
if expect_digest:
assert len(repositories_digest) == 1
else:
assert not repositories_digest
# check for duplicates
assert sorted(repositories_tag) == sorted(set(repositories_tag))
assert sorted(repositories_digest) == sorted(set(repositories_digest))
for repository in repositories_tag:
assert is_string_type(repository)
image = ImageName.parse(repository)
assert image.registry
assert image.namespace
assert image.repo
assert image.tag and image.tag != 'latest'
if expect_digest:
digest_pullspec = image.to_str(tag=False) + '@' + fake_digest(image)
assert digest_pullspec in repositories_digest
tags = docker['tags']
assert isinstance(tags, list)
assert all(is_string_type(tag) for tag in tags)
if has_config:
config = docker['config']
assert isinstance(config, dict)
assert 'container_config' not in [x.lower() for x in config.keys()]
assert all(is_string_type(entry) for entry in config)
def test_koji_upload_import_fail(self, tmpdir, os_env, caplog):
session = MockedClientSession('')
(flexmock(OSBS)
.should_receive('create_config_map')
.and_raise(OsbsException))
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
tasker, workflow = mock_environment(tmpdir,
name=name,
version=version,
release=release,
session=session)
runner = create_runner(tasker, workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
assert 'metadata:' in caplog.text()
@pytest.mark.parametrize('additional_tags', [
None,
['3.2'],
])
def test_koji_upload_image_tags(self, tmpdir, os_env, additional_tags):
osbs = MockedOSBS()
session = MockedClientSession('')
version = '3.2.1'
release = '4'
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version=version,
release=release,
session=session,
additional_tags=additional_tags)
runner = create_runner(tasker, workflow)
runner.run()
data = get_metadata(workflow, osbs)
# Find the docker output section
outputs = data['output']
docker_outputs = [output for output in outputs
if output['type'] == 'docker-image']
assert len(docker_outputs) == 1
output = docker_outputs[0]
# Check the extra.docker.tags field
docker = output['extra']['docker']
assert isinstance(docker, dict)
assert 'tags' in docker
tags = docker['tags']
assert isinstance(tags, list)
expected_tags = set([version,
"{}-{}".format(version, release),
'latest',
"{}-timestamp".format(version)])
if additional_tags:
expected_tags.update(additional_tags)
assert set(tags) == expected_tags
@pytest.mark.parametrize(('apis',
'docker_registry',
'pulp_registries',
'blocksize',
'target'), [
('v1-only',
False,
1,
None,
'images-docker-candidate'),
('v1+v2',
True,
2,
10485760,
None),
('v2-only',
True,
1,
None,
None),
('v1+v2',
True,
0,
10485760,
None),
])
@pytest.mark.parametrize('has_config', (True, False))
@pytest.mark.parametrize('prefer_schema1_digest', (True, False))
def test_koji_upload_success(self, tmpdir, apis, docker_registry,
pulp_registries, blocksize, target,
os_env, has_config, prefer_schema1_digest):
osbs = MockedOSBS()
session = MockedClientSession('')
component = 'component'
name = 'ns/name'
version = '1.0'
release = '1'
if has_config and not docker_registry:
# Not a valid combination
has_config = False
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
component=component,
version=version,
release=release,
docker_registry=docker_registry,
pulp_registries=pulp_registries,
blocksize=blocksize,
has_config=has_config,
prefer_schema1_digest=prefer_schema1_digest,
)
runner = create_runner(tasker, workflow, blocksize=blocksize, target=target,
prefer_schema1_digest=prefer_schema1_digest)
runner.run()
data = get_metadata(workflow, osbs)
assert set(data.keys()) == set([
'metadata_version',
'buildroots',
'output',
])
assert data['metadata_version'] in ['0', 0]
buildroots = data['buildroots']
assert isinstance(buildroots, list)
assert len(buildroots) > 0
output_files = data['output']
assert isinstance(output_files, list)
for buildroot in buildroots:
self.validate_buildroot(buildroot)
# Unique within buildroots in this metadata
assert len([b for b in buildroots
if b['id'] == buildroot['id']]) == 1
for output in output_files:
self.validate_output(output, has_config,
expect_digest=docker_registry)
buildroot_id = output['buildroot_id']
# References one of the buildroots
assert len([buildroot for buildroot in buildroots
if buildroot['id'] == buildroot_id]) == 1
files = session.uploaded_files
# There should be a file in the list for each output
assert isinstance(files, list)
expected_uploads = len(output_files)
assert len(files) == expected_uploads
# The correct blocksize argument should have been used
if blocksize is not None:
assert blocksize == session.blocksize
def test_koji_upload_pullspec(self, tmpdir, os_env):
osbs = MockedOSBS()
session = MockedClientSession('')
name = 'ns/name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release,
pulp_registries=1,
)
runner = create_runner(tasker, workflow)
runner.run()
metadata = get_metadata(workflow, osbs)
docker_outputs = [
output
for output in metadata['output']
if output['type'] == 'docker-image'
]
assert len(docker_outputs) == 1
docker_output = docker_outputs[0]
digest_pullspecs = [
repo
for repo in docker_output['extra']['docker']['repositories']
if '@sha256' in repo
]
assert len(digest_pullspecs) == 1
tag_pullspecs = [
repo
for repo in docker_output['extra']['docker']['repositories']
if '@sha256' not in repo
]
assert len(tag_pullspecs) == 1
pullspec = tag_pullspecs[0]
nvr_tag = '{}:{}-{}'.format(name, version, release)
assert pullspec.endswith(nvr_tag)
@pytest.mark.parametrize('logs_return_bytes', [
True,
False,
])
@pytest.mark.parametrize('platform,expected_logs', [
(None, set(['x86_64-build.log'])),
('foo', set(['foo-build.log'])),
])
def test_koji_upload_logs(self, tmpdir, os_env, logs_return_bytes,
platform, expected_logs):
MockedOSBS(logs_return_bytes=logs_return_bytes)
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
name='name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow, platform=platform)
runner.run()
log_files = set(f for f in session.uploaded_files
if f.endswith('.log'))
assert log_files == expected_logs
images = [f for f in session.uploaded_files
if f not in log_files]
assert len(images) == 1
if platform is None:
platform = 'x86_64'
assert images[0].endswith(platform + ".tar.xz")
@pytest.mark.parametrize('multiple', [False, True])
def test_koji_upload_multiple_digests(self, tmpdir, os_env,
multiple):
server = MockedOSBS()
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
name='name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow, platform='x86_64',
multiple=multiple)
runner.run()
for data in server.configmap.values():
break
else:
raise RuntimeError("no configmap found")
outputs = data['metadata.json']['output']
output = [op for op in outputs if op['type'] == 'docker-image'][0]
repositories = output['extra']['docker']['repositories']
pullspecs = [pullspec for pullspec in repositories
if '@' in pullspec]
if multiple:
assert len(pullspecs) > 1
else:
assert len(pullspecs) == 1
#!/usr/bin/python
# coding:utf-8
from celery import Celery, platforms
from flask import Flask, current_app
import random
import time
import json
import redis
import uuid  # used by salt_mark_status() and db_salt_nodes_sync() below
import logging
import base64
import psycopg2
import datetime
from celery.signals import task_prerun
from datetime import timedelta
from celery.schedules import crontab
from weblib.libpepper import Pepper, PepperException
from weblib.indbapi import Indb
from weblib.sensuapi import SensuAPI
from node import Perf, Perf_Node, Perf_Cpu, Perf_Mem, Perf_TCP, Perf_Disk, Perf_System_Load, Perf_Socket, Perf_Process_Count, Perf_Netif, Perf_Ping, Statistics
from api import Masterdb, Nodedb, Location
from user import User
from collections import defaultdict
from sqlalchemy.sql import func
from sqlalchemy import desc
try:
    from prod import config
except ImportError:
    pass
from functools import wraps
from utils import convert
from extensions import celery, db
from requests import post
from flask_socketio import SocketIO
from statistics import mean
#import app
#tapp,session = app.create_socket_celery()
# celery.init_app(tapp)
celery.config_from_object('celery_socket_config')
logger = logging.getLogger('task')
logger.setLevel(10)
#celery, session = create_celery_app()
#celery.config_from_object('prod', silent=True)
# load config from celery_config.py , store other api information in prod.py
# NOTE: the influx/sensu sync tasks below use a bare `session`; alias the
# Flask-SQLAlchemy session so they work without create_celery_app().
session = db.session
indbapi = Indb(config['INDB_HOST'] + ':' + config['INDB_PORT'])
sensuapi = SensuAPI(config['SENSU_HOST'] + ':' + config['SENSU_PORT'])
#master = session.query(Masterdb).first()
# try:
# saltapi = Pepper(master.ret_api())
# user = master.username
# pawd = convert(base64.b64decode(master.password))
# except:
saltapi = Pepper(config['SALTAPI_HOST'])
user = config['SALTAPI_USER']
pawd = config['SALTAPI_PASS']
redisapi = redis.StrictRedis(host=config['REDIS_HOST'], port=config[
'REDIS_PORT'], db=config['REDIS_DB'])
'''
### DOC ###
Celery function description
*self test*
### END ###
'''
socketio = SocketIO(message_queue='redis://localhost:6379/0')
def ret_master():
master = db.session.query(Masterdb).first()
return master
def socket_emit(meta=None, event='others', room=None):
try:
if room:
socketio.emit(event, meta, room=room, namespace='/deyunio')
else:
room = 'all'
socketio.emit(event, meta, room='all', namespace='/deyunio')
    except Exception as e:
        logger.warning('error in emitting ' + str(event) + ' to room: ' +
                       str(room) + ' ' + str(e) + ' ' + str(meta))
logger.exception(e)
return {'failed': e}
logger.info({('sent ' + str(event)): str(room)})
return {('sent ' + str(event)): str(room)}
@celery.task
def self_test(x=16, y=16, url=None):
x = int(x)
y = int(y)
res = x + y
context = {"id": "test", "x": x, "y": y}
result = "add((x){}, (y){})".format(context['x'], context['y'])
goto = "{}".format(context['id'])
time.sleep(10)
meta = json.dumps({'result': result, 'goto': goto})
#post(url, json=meta)
socketio = SocketIO(message_queue='redis://localhost:6379/0')
socketio.emit('connect', meta, namespace='/deyunio')
    #socketio.emit(event='hackerlist', meta=json.dumps({'emit_msg': 'self test finished', 'type': 'success'}))
return meta
'''
emit index page data
'''
@celery.task
def db_update_node_tag():
try:
data = db.session.query(Nodedb).all()
master_data = db.session.query(Masterdb).all()
        for q in data:
            logger.debug(q)
            # NOTE: Tag is assumed to come from the same models module as
            # Nodedb; it is not imported at the top of this file.
            tag = Tag(
                node_id=q.id,
                node=q,
                name='Salt Node',
                type='default',
                url='fa fa-soundcloud'
            )
db.session.add(tag)
            for p in master_data:
                if p.master_name == q.node_name:
                    tag = Tag(
                        node_id=q.id,
                        node=q,
                        name='Master Node',
                        type='primary',
                        url='fa fa-soundcloud'
                    )
                    db.session.add(tag)  # this add was missing, so the master tag was never saved
    except Exception as e:
        logger.warning('error in creating tag: ' + str(e))
        return {'failed': e}
else:
db.session.commit()
logger.info('db tags created')
return {'ok': 'db tags created'}
@celery.task
def redis_update_nodelist():
try:
result = []
data = {}
node_data = Nodedb.query.all()
for q in node_data:
taglist = []
for x in q.tags:
taglist.append(
'<span class="label label-' + x.type + '"><i class="' +
x.url + '"></i> ' + x.name + '</span>'
)
#'<button class="btn btn-'+ x.type +' btn-circle" type="button" data-container="body" data-toggle="popover" data-placement="top" data-content="' + x.name + '" data-original-title="" title=""><i class="' + x.url + '"></i></button>')
data['Name'] = q.node_name
data['Tag'] = taglist
if q.status == 'up':
data['Status'] = '<p><span class="label label-primary">' + \
q.status + '</span></p>'
elif q.status == 'down':
data['Status'] = '<p><span class="label label-warning">' + \
q.status + '</span></p>'
            else:
                data['Status'] = '<p><span class="label ">' + \
                    'unknown' + '</span></p>'
data['Type'] = q.os
data['Information'] = q.cpu + ' ' + q.mem + 'M'
data['Note'] = q.bio
data['Operator'] = q.master.master_name
data['Date'] = str(q.create_at)
tasklist = []
for y in q.tasks:
tasklist.append(
'<button onclick="open_confirm(\'' +
y.task_name + "\',\'" + y.tgt + "\',\'" + y.info +
#'<strong><p>Task: ' + y.task_name +'</p></strong>' + '<strong><p>TGT:' + y.tgt +'</p></strong>' + y.info +
'\')" type="button" class="btn btn-warning btn-rounded btn-xs" data-toggle="modal" data-target="#myModal6"><i class="fa fa-twitch"></i> ' + y.task_name + '</button>'
)
data['Task'] = tasklist
result.append(data)
data = {}
meta = json.dumps(result)
    except Exception as e:
        logger.warning('error in syncing nodelist')
        logger.exception(e)
        return {'failed': e}
else:
redisapi.set('node_list', meta)
logger.info('redis node list updated' + str(meta))
return {'ok': 'redis node list updated'}
@celery.task
def emit_nodelist(room=None):
try:
data = convert(redisapi.hgetall('salt_node_list'))
except Exception as e:
logger.warning('error in loading nodelist ')
logger.exception(e)
return {'failed': e}
meta = json.dumps(data)
if room:
socket_emit(meta=meta, event='nodelist', room=room)
#socket_emit(meta=json.dumps({'emit_msg':'master status updated','type':'success'}),event='hackerlist',room=room)
logger.info({'ok': 'emit_nodelist' + str(room)})
else:
socket_emit(meta=meta, event='nodelist')
logger.info({'ok': 'emit_nodelist to all'})
return {'ok': 'emit_nodelist'}
def get_toplogy():
m_node = Masterdb.query.all()
s_node = Nodedb.query.all()
node_list = []
for item in s_node:
node_list.append({'data': {'id': item.node_name}})
for item in m_node:
node_list.append({'data': {'id': item.master_name}})
edge_list = []
for item in s_node:
edge_list.append(
{'data': {'source': item.node_name, 'target': item.master.master_name}})
data = {
'nodes': node_list,
'edges': edge_list
}
logger.info({'ok': 'get_toplogy'})
return json.dumps(data)
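# Output shape (illustrative, hypothetical names): get_toplogy() returns
# cytoscape.js-style elements, e.g.
#     '{"nodes": [{"data": {"id": "node-1"}}, {"data": {"id": "master-1"}}],
#       "edges": [{"data": {"source": "node-1", "target": "master-1"}}]}'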
@celery.task
def redis_master_status_update():
try:
master = Masterdb.query.first()
r = indbapi.ret_point_24h(
table='memory_percent_usedWOBuffersCaches', db='graphite', host=master.master_name)
p = indbapi.ret_point_24h(
table='cpu_user', db='graphite', host=master.master_name)
index_data = {
'top': get_toplogy(),
'master': {'name': master.master_name, 'mem': r, 'cpu': p}
}
    except Exception as e:
        logger.warning('error in writing master status: ' + str(e))
        logger.exception(e)
        return {'failed': str(e)}
else:
redisapi.set('index_data', json.dumps(index_data))
emit_master_status.delay(room='all')
logger.info({'ok': index_data})
socket_emit(meta=json.dumps(
{'emit_msg': 'redis status updated', 'type': 'success'}), event='hackerlist')
return {"ok": index_data}
@celery.task
def emit_master_status(room=None):
try:
data = json.loads(convert(redisapi.get('index_data')))
    except Exception as e:
        logger.warning('error in loading index_data')
        logger.exception(e)
        return {'failed': e}
meta = json.dumps(data)
if room:
socket_emit(meta=meta, event='m_status', room=room)
#socket_emit(meta=json.dumps({'emit_msg':'master status updated','type':'success'}),event='hackerlist',room=room)
logger.info({'ok': 'emit_master_status' + str(room)})
else:
socket_emit(meta=meta, event='m_status')
logger.info({'ok': 'emit_master_status to all'})
return {'ok': 'emit_master_status'}
'''
emit the site status data by socketio
'''
def mean_status(data):
'''
return the mean value for the value[1]
'''
j = json.loads(data)
r = mean([x[1] for x in j]) * 100
return '{:.2f}'.format(r)
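# Illustrative input/output (hypothetical values): the stored series is a JSON
# list of [timestamp, ratio] pairs, so
#     mean_status('[["2017-01-01", 0.10], ["2017-01-02", 0.20]]')  ->  '15.00'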
def spark_data():
ret = {}
a = db.session.query(Statistics.managed_nodes).order_by(
desc(Statistics.update_at)).limit(8).all()
ret['n'] = [r for r, in a]
b = db.session.query(Statistics.registered_master).order_by(
desc(Statistics.update_at)).limit(8).all()
ret['m'] = [r for r, in b]
return json.dumps(ret)
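# Result shape (illustrative): the last eight Statistics rows, newest first,
# e.g. '{"n": [42, 41, ...], "m": [3, 3, ...]}' where "n" is managed node
# counts and "m" is registered master counts.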
def ret_socket_sitestatus():
d = convert(redisapi.hgetall('sitestatus'))
d['service_level'] = str(100.0 - float(mean_status(d['service_level'])))
d['system_utilization'] = str(mean_status(d['system_utilization']))
a = db.session.query(Statistics.managed_nodes).order_by(
desc(Statistics.update_at)).limit(8).all()
d['n'] = [r for r, in a]
b = db.session.query(Statistics.registered_master).order_by(
desc(Statistics.update_at)).limit(8).all()
d['m'] = [r for r, in b]
return d
@celery.task
def emit_site_status(room=None):
try:
data = ret_socket_sitestatus()
except Exception as e:
logger.warning('error in loading sitestatus to ' + str(room))
logger.exception(e)
return {'failed': e}
meta = json.dumps(data)
if room:
socket_emit(meta=meta, event='sitestatus', room=room)
logger.info({'ok': 'emit_site_status to ' + str(room)})
else:
socket_emit(meta=meta, event='sitestatus')
logger.info({'ok': 'emit_site_status to all'})
return {'ok': 'emit_site_status'}
'''
### DOC ###
Celery function description
*to obtain token from saltstack api, based on pepper*
### END ###
'''
def salttoken():
    try:
        if redisapi.hexists(name='salt', key='token'):
            # the cached token is still valid while its expiry lies in the future
            if (time.time() - float(bytes.decode(redisapi.hget(name='salt', key='expire')))) < 0.0:
                ret = convert(redisapi.hget(name='salt', key='token'))
                return convert(ret)
            else:
                return saltlogin(saltapi.login(user, pawd, 'pam'))
        else:
            return saltlogin(saltapi.login(user, pawd, 'pam'))
    except Exception as e:
        return {'failed': e}
def saltlogin(loginresult=None):
if loginresult:
for k in loginresult.keys():
redisapi.hset(name='salt', key=k, value=loginresult[k])
else:
raise Exception('require login string')
return salttoken()
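# Cache layout (as used by salttoken/saltlogin above): the redis hash 'salt'
# stores the raw Pepper login result verbatim, e.g. (hypothetical values)
#     {'token': 'd0d1...', 'expire': 1500000000.0, 'user': 'saltapi', 'eauth': 'pam'}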
'''
### DOC ###
Celery function description
*salt api wrapper for the saltstack api; the token is stored in the redis cache and refreshed when expired*
### END ###
'''
def salt_command(f):
@wraps(f)
def wrapper(*args, **kwds):
try:
saltkey = salttoken()
saltapi.auth['token'] = saltkey
return f(*args, **kwds)
except Exception as e:
return {'failed': e}
return wrapper
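# Illustrative usage of @salt_command (the task name below is hypothetical):
# the wrapper fetches a cached token, refreshing it via saltlogin() when
# expired, and attaches it to the shared Pepper client before the call runs.
#
#     @salt_command
#     def my_salt_probe():
#         return saltapi.req_get(path='stats')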
'''
### DOC ###
This task should follow the flow below:
1. obtain the jid from the salt api (salt-api can only return the jid via the async functions)
2. broadcast the information by websocket ("initialized task")
3. rest a while (so the state change is visible in the web UI when debugging)
4. poll the salt api for the task result (emitting "running")
5. after the api returns the result, emit the final result
'''
def db_lookup_jid(jid):
try:
posconn = psycopg2.connect(dbname=config['POSTGRESQL_DB'], user=config[
'POSTGRESQL_USER'], host=config['POSTGRESQL_HOST'], password=config['POSTGRESQL_PASSWD'])
cur = posconn.cursor()
cur.execute(
'SELECT return FROM redis_exec_list WHERE redis_exec_list.tag LIKE %s;', [jid])
if cur.fetchone():
return saltapi.lookup_jid(jid)
else:
return {'return': [{}]}
except Exception as e:
logger.exception(e)
return saltapi.lookup_jid(jid)
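# Rationale (as implemented above): lookup_jid is only called once the postgres
# returner table already has a row for the jid, which avoids hammering the Salt
# API while the job is still running; on any DB error it falls back to the API.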
@salt_command
def salt_exec_func(tgt='*', func='test.ping', arg=None, kwarg=None, room=None,info=None):
try:
result = saltapi.local_async(tgt=tgt, fun=func, arg=arg, kwarg=kwarg)
jid = result['return'][0]['jid']
tgt = result['return'][0]['minions']
meta = json.dumps({'msg': 'started','type':'success', 'tgt': tgt, 'func': func,'jid':jid,'info':info,'progress':'0'})
socket_emit(meta=meta, event='salt_task_warn', room=room)
socket_emit(meta=meta, event='salt_task_menu', room=room)
#i = int(redisapi.hlen('salt_exec_list')) + 1
one = {}
one['jid'] = jid
one['start'] = ''
one['end'] = ''
one['fun'] = func
one['arg'] = arg
one['kwarg'] = kwarg
one['tgt'] = tgt
one['ret'] = ''
        one['status'] = '<button type="button" class="btn btn-xs btn-outline btn-primary animated infinite flash "><i class="fa fa-send-o"></i> Executing</button>'
one['text'] = 'text-warning '
redisapi.hset('salt_exec_list', jid, json.dumps(one))
redisapi.expire('salt_exec_list',1800)
socket_emit(meta=json.dumps(
{'func': 'salt_task_list'}), event='func_init', room='all')
    except Exception as e:
        if 'jid' in locals():
            redisapi.hdel('salt_exec_list', jid)
        meta = json.dumps({'msg': 'Saltstack API not working. Please try later.',
                           'progress': '0', 'type': 'danger', 'tgt': tgt, 'func': func, 'jid': 'FAIL'})
        socket_emit(meta=meta, event='salt_task_warn', room=room)
        logger.exception(e)
        logger.warning('error in getting saltstack jid: ' + str(e))
        return 1
try:
i = 0
t = redisapi.hget('task_timer', str(tgt)+':'+str(func))
rt = float(convert(t)) if t else 1000
while(i < 600):
try:
i = i + 1
j = (i * 10 / rt) * 100
'''
Query db instead of API.
'''
#ret = saltapi.lookup_jid(jid['return'])
ret = db_lookup_jid(jid)
if room:
meta = json.dumps(
{'msg': 'running '+ str(j) + '%','progress':str(j), 'type': 'info', 'tgt': tgt, 'func': func,'jid':jid,'info':info})
socket_emit(meta=meta, event='salt_task_warn', room=room)
socket_emit(meta=meta, event='salt_task_menu', room=room)
if ret['return'] != [{}]:
redis_salt_task_sync.delay()
meta = json.dumps(
{'msg': 'completed','progress':'100', 'type': 'success', 'tgt': tgt, 'func': func,'jid':jid,'info':info})
socket_emit(meta=meta, event='salt_task_warn', room=room)
socket_emit(meta=meta, event='salt_task_menu', room=room)
rt = (rt + i * 10)/2 if t else i * 10
redisapi.hset('task_timer', str(tgt)+':'+str(func), rt)
break
except PepperException as e:
pass
time.sleep(3)
        else:
            # TODO timeout
            return {'failed': 'Task Running Timeout'}
    except Exception as e:
        redisapi.hdel('salt_exec_list', jid)
        logger.warning('error in getting job status: ' + str(e))
        logger.exception(e)
        return 1
redisapi.hdel('salt_exec_list', jid)
logger.info({'ok': str(jid) + ' : ' + str(tgt)})
socket_emit(meta=json.dumps(
{'func': 'salt_task_list'}), event='func_init', room='all')
return 0
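# Illustrative call (hypothetical arguments): kick off a ping against all web
# minions and stream progress to the caller's socket.io room:
#     salt_exec_func(tgt='web*', func='test.ping', room='all', info='connectivity check')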
@celery.task
def emit_salt_task_list(room=None):
try:
data = {}
data['el'] = convert(redisapi.hgetall('salt_exec_list'))
data['tl'] = convert(redisapi.hgetall('salt_task_list'))
    except Exception as e:
        logger.warning('error in loading salt_task_list: ' + str(e))
logger.exception(e)
return {'failed': e}
meta = json.dumps(data)
if room:
socket_emit(meta=meta, event='salt_task_list', room=room)
logger.info({'ok': 'emit_salt_task_list ' + str(room)})
else:
socket_emit(meta=meta, event='salt_task_list')
logger.info({'ok': 'emit_salt_task_list to all'})
return {'ok': 'emit_salt_task_list'}
@celery.task
@salt_command
def emit_salt_jid(jid, room):
try:
meta = json.dumps(
{'msg': 'initialization completed, loading data...', 'jid': jid})
socket_emit(meta=meta, event='salt_jid', room=room)
ret = saltapi.lookup_jid(jid)
except Exception as e:
logger.exception(e)
meta = json.dumps(
{'msg': 'error, please try again later...', 'jid': jid})
socket_emit(meta=meta, event='salt_jid', room=room)
return 1
else:
logger.info({'ok': 'emit_salt_jid'})
meta = json.dumps({'msg': 'job info loaded.', 'jid': jid})
socket_emit(meta=meta, event='salt_jid', room=room)
meta = json.dumps(ret)
socket_emit(meta=meta, event='salt_jid', room=room)
return 0
@celery.task
@salt_command
def emit_salt_ping(room, tgt, func,info):
try:
        if convert(redisapi.hget('salt_task_lock', room + tgt)) == func:
            meta = json.dumps({'msg': 'Task executing. Waiting for result.',
                               'type': 'warning', 'tgt': tgt, 'func': func, 'jid': 'Job Waiting', 'info': info})
            socket_emit(meta=meta, event='salt_task_warn', room=room)
            return 1
        else:
            redisapi.hset('salt_task_lock', room + tgt, func)
            logger.info({'task': 'emit_salt_ping', 'room': room, 'tgt': tgt})
            salt_exec_func(tgt=tgt, func='test.ping', room=room, info=info)
            redisapi.hdel('salt_task_lock', room + tgt)
            return 0
except Exception as e:
logger.exception(e)
        meta = json.dumps({'msg': 'Task canceled for an unknown reason. Please contact the admin.',
                           'type': 'warning', 'tgt': tgt, 'func': func, 'jid': 'Job Error', 'info': info})
socket_emit(meta=meta, event='salt_task_warn', room=room)
return 1
'''
### DOC ###
Celery function description
*Get minion status from saltstack api and store in redis cache*
### END ###
'''
@celery.task
@salt_command
def redis_salt_minion_status_update():
try:
ret = saltapi.runner('manage.status')
result = []
count = 0
if len(ret['return'][0]['up']) > 0:
for node in ret['return'][0]['up']:
count += redisapi.hset(name='status', key=node, value='up')
result.append(node)
if len(ret['return'][0]['down']) > 0:
for node in ret['return'][0]['down']:
count += redisapi.hset(name='status', key=node, value='down')
result.append(node)
except Exception as e:
        logger.warning('error in updating minion status in redis: ' + str(e))
logger.exception(e)
return {'failed': e}
logger.info('minion status updated')
return {'ok': 'redis_salt_minion_status_update' + ' updated with redis return: ' + str(count)}
'''
### DOC ###
Celery function description
*check saltstack api status*
### END ###
'''
@celery.task
@salt_command
def salt_api_status():
try:
ret = saltapi.req_get(path='stats')
except Exception as e:
logger.exception(e)
return {'failed': e}
return ret
'''
### DOC ###
Celery function description
*update status subtask when syncing*
### END ###
'''
@salt_command
def salt_minion(node_name):
try:
ret = saltapi.req_get('/minions/' + node_name)
except Exception as e:
logger.exception(e)
return {'failed': e}
return ret
@celery.task
def salt_mark_status(k, v):
target_node = db.session.query(
Nodedb).filter_by(node_name=k).first()
master = ret_master()
# TODO
if target_node:
target_node.status = v
else:
target_node = Nodedb(
id=uuid.uuid4(),
node_name=k,
node_ip=u'1.1.1.1',
bio=u'Down',
master=master,
status=v
)
db.session.add(target_node)
db.session.commit()
'''
### DOC ###
Celery function description
*search the cmdb first, then try to update the information when available*
this task is based on the result of salt_minion_status and may return none
### END ###
'''
@celery.task
@salt_command
def db_salt_nodes_sync():
result = []
count = 0
data = redisapi.hgetall(name='status')
if not data:
return {'failed': 'no status data in redis cache '}
try:
for (k, v) in convert(data).items():
if v == 'down':
salt_mark_status(k, v)
continue
target_node = db.session.query(
Nodedb).filter_by(node_name=k).first()
node_data = salt_minion(k)
db_data = node_data['return'][0][k]
master = ret_master()
# TODO
try:
if target_node:
target_node.minion_data = db_data
                    target_node.node_ip = db_data.get('ipv4', '1.1.1.1')
                    target_node.os = db_data.get('lsb_distrib_description') or (
                        db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease'))
                    target_node.cpu = str(db_data[
                        'num_cpus']) + ' * ' + str(db_data['cpu_model'])
                    target_node.kenel = db_data['kernelrelease']  # 'kenel' matches the model column name
                    target_node.core = int(db_data['num_cpus'])
target_node.mem = db_data['mem_total']
target_node.host = db_data['host']
target_node.status = v
target_node.master = master
else:
target_node = Nodedb(
id=uuid.uuid4(),
node_name=db_data['id'],
node_ip=db_data.get('ipv4', '1.1.1.1'),
minion_data=db_data,
                        os=db_data.get('lsb_distrib_description') or (
                            db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease')),
cpu=str(db_data['num_cpus']) + ' * ' +
str(db_data['cpu_model']),
kenel=db_data['kernelrelease'],
core=int(db_data['num_cpus']),
mem=db_data['mem_total'],
host=db_data['host'],
master=master,
status=v
)
except KeyError as e:
logger.warning('updating ' + k + ' with error:' + str(e.args))
continue
result.append(target_node)
db.session.add(target_node)
except Exception as e:
        logger.warning('Error while updating ' + str((k, v)) + ': ' + str(e.args))
logger.exception(e)
db.session.commit()
return {'ok': 'db_salt_nodes_sync' + ' updated with redis return: ' + str(count)}
'''
### DOC ###
Celery function description
*influx syncing tasks*
### END ###
'''
@celery.task
def sync_node_from_influxdb():
try:
data = sensuapi.get('clients')
result = []
except Exception as e:
return {'failed': e}
for item in data:
try:
sensunode = session.query(Perf_Node).filter_by(
sensu_node_name=item["name"]).first()
except Exception as e:
return {'failed': e}
try:
if sensunode:
sensunode.sensu_subscriptions = item["address"]
sensunode.sensu_version = item["version"]
sensunode.sensu_timestamp = item["timestamp"]
result.append(sensunode)
else:
sensunode = Perf_Node(
sensu_node_name=item["name"],
sensu_subscriptions=item["address"],
sensu_version=item["version"],
sensu_timestamp=item["timestamp"]
)
result.append(sensunode)
except Exception as e:
return {'failed': e}
session.add(sensunode)
try:
session.commit()
except Exception as e:
logger.exception(e)
return {'failed': e}
return {'successed': 'sync_node_from_influxdb'}
#@celery.task
def sync_praser_data(data):
result = defaultdict(list)
for row in data:
for item in row:
result[item['tags']['host']].append(item['values'][0][1])
return result
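# Shape handled above (illustrative): each `row` is a list of series dicts like
#     {'tags': {'host': 'node-1'}, 'values': [[<time>, 0.42], ...]}
# and the result maps 'node-1' -> [0.42, ...], one value per queried metric,
# in the order the metrics were appended by the callers below.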
@celery.task
def sync_cpu_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('cpu_user'))
praser.append(indbapi.get_sync_data('cpu_nice'))
praser.append(indbapi.get_sync_data('cpu_system'))
praser.append(indbapi.get_sync_data('cpu_idle'))
praser.append(indbapi.get_sync_data('cpu_iowait'))
praser.append(indbapi.get_sync_data('cpu_irq'))
praser.append(indbapi.get_sync_data('cpu_softirq'))
praser.append(indbapi.get_sync_data('cpu_steal'))
praser.append(indbapi.get_sync_data('cpu_guest'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Cpu(
node_name=k,
cpu_user=v[0],
cpu_nice=v[1],
cpu_system=v[2],
cpu_idle=v[3],
cpu_iowait=v[4],
cpu_irq=v[5],
cpu_softirq=v[6],
cpu_steal=v[7],
cpu_guest=v[8],
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Cpu ' + str(data))
return {'successed': 'sync_cpu_from_influxdb'}
@celery.task
def sync_mem_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('memory_percent_usedWOBuffersCaches'))
praser.append(indbapi.get_sync_data('memory_percent_freeWOBuffersCaches'))
praser.append(indbapi.get_sync_data('memory_percent_swapUsed'))
praser.append(indbapi.get_sync_data('memory_percent_free'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Mem(
node_name=k,
mem_usedWOBuffersCaches=v[0],
mem_freeWOBuffersCaches=v[1],
mem_swapUsed=v[2]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Mem ' + str(data))
return {'successed': 'sync_mem_from_influxdb'}
@celery.task
def sync_tcp_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('tcp_UNKNOWN'))
praser.append(indbapi.get_sync_data('tcp_ESTABLISHED'))
praser.append(indbapi.get_sync_data('tcp_SYN_SENT'))
praser.append(indbapi.get_sync_data('tcp_SYN_RECV'))
praser.append(indbapi.get_sync_data('tcp_FIN_WAIT1'))
praser.append(indbapi.get_sync_data('tcp_FIN_WAIT2'))
praser.append(indbapi.get_sync_data('tcp_CLOSE'))
praser.append(indbapi.get_sync_data('tcp_CLOSE_WAIT'))
praser.append(indbapi.get_sync_data('tcp_LAST_ACK'))
praser.append(indbapi.get_sync_data('tcp_LISTEN'))
praser.append(indbapi.get_sync_data('tcp_CLOSING'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_TCP(
node_name=k,
tcp_UNKNOWN=v[0],
tcp_ESTABLISHED=v[1],
tcp_SYN_SENT=v[2],
tcp_SYN_RECV=v[3],
tcp_FIN_WAIT1=v[4],
tcp_FIN_WAIT2=v[5],
tcp_CLOSE=v[6],
tcp_CLOSE_WAIT=v[7],
tcp_LAST_ACK=v[8],
tcp_LISTEN=v[9],
tcp_CLOSING=v[10],
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_TCP ' + str(data))
return {'successed': 'sync_tcp_from_influxdb'}
@celery.task
def sync_disk_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('disk_usage_root_used'))
praser.append(indbapi.get_sync_data('disk_usage_root_avail'))
praser.append(indbapi.get_sync_data('disk_usage_root_used_percentage'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Disk(
node_name=k,
disk_usage_root_used=v[0],
disk_usage_root_avail=v[1],
disk_usage_root_used_percentage=v[2]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Disk ' + str(data))
return {'successed': 'sync_disk_from_influxdb'}
@celery.task
def sync_load_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('load_load_avg_one'))
praser.append(indbapi.get_sync_data('load_load_avg_five'))
praser.append(indbapi.get_sync_data('load_load_avg_fifteen'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_System_Load(
node_name=k,
load_avg_one=v[0],
load_avg_five=v[1],
load_avg_fifteen=v[2]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_System_Load ' + str(data))
return {'successed': 'sync_load_from_influxdb'}
@celery.task
def sync_socket_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('sockets_total_used'))
praser.append(indbapi.get_sync_data('sockets_TCP_inuse'))
praser.append(indbapi.get_sync_data('sockets_TCP_orphan'))
praser.append(indbapi.get_sync_data('sockets_TCP_tw'))
praser.append(indbapi.get_sync_data('sockets_TCP_alloc'))
praser.append(indbapi.get_sync_data('sockets_TCP_mem'))
praser.append(indbapi.get_sync_data('sockets_UDP_inuse'))
praser.append(indbapi.get_sync_data('sockets_UDP_mem'))
praser.append(indbapi.get_sync_data('sockets_UDPLITE_inuse'))
    praser.append(indbapi.get_sync_data('sockets_RAW_inuse'))
    # the second query was duplicated as RAW_inuse; FRAG_inuse is presumably intended,
    # since the model below stores sockets_FRAG_inuse from this slot
    praser.append(indbapi.get_sync_data('sockets_FRAG_inuse'))
    praser.append(indbapi.get_sync_data('sockets_FRAG_memory'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Socket(
node_name=k,
sockets_total_used=v[0],
sockets_TCP_inuse=v[1],
sockets_TCP_orphan=v[2],
sockets_TCP_tw=v[3],
sockets_TCP_alloc=v[4],
sockets_TCP_mem=v[5],
sockets_UDP_inuse=v[6],
sockets_UDP_mem=v[7],
sockets_UDPLITE_inuse=v[8],
sockets_RAW_inuse=v[9],
                sockets_FRAG_inuse=v[10],
                sockets_FRAG_memory=v[11],
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Socket ' + str(data))
return {'successed': 'sync_socket_from_influxdb'}
@celery.task
def sync_process_from_influxdb():
praser = []
result = []
praser.append(indbapi.get_sync_data('process_count'))
# return sync_praser_data(praser)
try:
data = sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Process_Count(
node_name=k,
process_count=v[0]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Process_Count ' + str(data))
return {'successed': 'sync_process_from_influxdb'}
@celery.task
def sync_netif_from_influxdb(netif='eth0'):
praser = []
result = []
praser.append(indbapi.get_sync_data('net_' + netif + '_tx_bytes'))
praser.append(indbapi.get_sync_data('net_' + netif + '_rx_bytes'))
praser.append(indbapi.get_sync_data('net_' + netif + '_tx_packets'))
praser.append(indbapi.get_sync_data('net_' + netif + '_rx_packets'))
    # the model below stores netif_rx_errors, so rx_errors is presumably the intended metric
    praser.append(indbapi.get_sync_data('net_' + netif + '_rx_errors'))
praser.append(indbapi.get_sync_data('net_' + netif + '_if_speed'))
try:
data = sync_praser_data(praser)
# return sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Netif(
node_name=k,
netif=netif,
netif_tx_bytes=v[0],
netif_rx_bytes=v[1],
netif_tx_packets=v[2],
netif_rx_packets=v[3],
netif_rx_errors=v[4],
netif_speed=v[5]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
logger.warning('error in writing data ' + str(data))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Netif')
return {'successed': 'sync_netif_from_influxdb'}
@celery.task
def sync_ping_from_influxdb(node='master'):
praser = []
result = []
praser.append(indbapi.get_sync_data('ping_' + node + '_packet_loss'))
praser.append(indbapi.get_sync_data('ping_' + node + '_avg'))
try:
data = sync_praser_data(praser)
# return sync_praser_data(praser)
    except Exception as e:
        logger.error(
            'error while parsing data from influxdb: ' + str(praser))
        return {'failed': e}
try:
for (k, v) in data.items():
target_node = Perf_Ping(
node_name=k,
ping_target=node,
ping_packet_loss=v[0],
ping_avg=v[1]
)
result.append(target_node)
session.add(target_node)
except Exception as e:
logger.warning('error in creating data ' +
str((k, v)) + ' in ' + str(data))
logger.exception(e)
return {'failed': e}
try:
session.commit()
except Exception as e:
        logger.warning('error in writing data: ' + str(e))
logger.exception(e)
return {'failed': e}
    logger.info('Completed in writing data to Perf_Ping ' + str(result))
return {'successed': result}
'''
### DOC ###
Update statistics hash in redis
### END ###
'''
@celery.task
def db_statistics_sync():
result = []
data = convert(redisapi.hgetall(name='sitestatus'))
if not data:
logger.warning('no site status data in redis cache')
return {'failed': 'no site status data in redis cache'}
    try:
        state = Statistics(
            system_capacity=data['system_capacity'],
            managed_nodes=data['managed_nodes'],
            system_utilization=data.get('system_utilization', ''),
            user_count=data['user_count'],
            registered_master=data['registered_master'],
            total_task=data['total_task'],
            service_level=data.get('service_level', ''),
            uptime=data['uptime'],
            page_visit_count=data.get('page_visit_count', 0),
            api_visit_count=data.get('api_visit_count', 0)
        )
db.session.add(state)
db.session.commit()
result.append(state)
except Exception as e:
        logger.warning('error in creating data in statistics: ' + str(e))
logger.exception(e)
return {'failed': e}
logger.info('Completed in writing data to statistics' + str(result))
return {'successed': 'db_statistics_sync'}
@celery.task
def statistics_page_visit():
try:
data = convert(redisapi.hgetall(name='sitestatus'))
if not data:
logger.warning('no site status data in redis cache')
return {'failed': 'no site status data in redis cache'}
if data.get('page_visit_count', None):
page_visit_count = int(data['page_visit_count'])
else:
page_visit_count = 0
redisapi.hset('sitestatus', 'page_visit_count', page_visit_count + 1)
except Exception as e:
logger.exception(e)
return {'failed': e}
return {'successed': 'page visit updated'}
@celery.task
def statistics_api_visit():
try:
data = convert(redisapi.hgetall(name='sitestatus'))
if not data:
logger.warning('no site status data in redis cache')
return {'failed': 'no site status data in redis cache'}
if data.get('api_visit_count', None):
page_visit_count = int(data['api_visit_count'])
else:
page_visit_count = 0
redisapi.hset('sitestatus', 'api_visit_count', page_visit_count + 1)
except Exception as e:
logger.exception(e)
return {'failed': e}
    return {'successed': 'api visit updated'}
@celery.task
def redis_statistics_update():
try:
redisapi.hset('sitestatus', 'managed_nodes', Nodedb.get_count())
redisapi.hset('sitestatus', 'system_capacity', db.session.query(
func.sum(Nodedb.core).label('average')).all()[0][0])
redisapi.hset('sitestatus', 'system_utilization', json.dumps(db.session.query(
Perf_System_Load.node_name, func.avg(
Perf_System_Load.load_avg_fifteen).label('average')
).group_by('node_name').all()))
redisapi.hset('sitestatus', 'user_count', User.get_count())
redisapi.hset('sitestatus', 'registered_master', Masterdb.get_count())
redisapi.hset('sitestatus', 'total_task', 0)
redisapi.hset('sitestatus', 'service_level', json.dumps(db.session.query(
Perf_Ping.node_name, func.avg(
Perf_Ping.ping_packet_loss).label('average')
).group_by('node_name').all()))
redisapi.hset('sitestatus', 'uptime', (datetime.datetime.utcnow() - db.session.query(
Masterdb.create_at).first()[0]).days)
except Exception as e:
        logger.warning('error in writing sitestatus: ' + str(e))
logger.exception(e)
return {'failed': e}
logger.info('Completed in updating site status')
emit_site_status.delay(room='all')
'''
Text Color:
text-danger text-navy text-primary text-success text-info text-warning text-muted text-white
'''
@celery.task
def redis_salt_task_sync():
try:
posconn = psycopg2.connect(dbname=config['POSTGRESQL_DB'], user=config[
'POSTGRESQL_USER'], host=config['POSTGRESQL_HOST'], password=config['POSTGRESQL_PASSWD'])
cur = posconn.cursor()
cur.execute("SELECT * FROM redis_task_list LIMIT 20;")
i = 100
for line in cur:
one = {}
one['jid'] = line[0]
one['start'] = str(line[1].replace(microsecond=0)) if type(
line[1]) is datetime.datetime else ''
one['end'] = str(line[2].replace(microsecond=0)) if type(
line[2]) is datetime.datetime else ''
one['fun'] = line[3]
one['arg'] = line[4]
one['kwarg'] = line[5]
one['tgt'] = line[6]
#one['ret'] = line[7]
one['status'] = '<button type="button" class="btn btn-xs btn-outline btn-success "><i class="fa fa-check-circle-o"></i> Completed</button>' if line[
8] is True else '<button type="button" class="btn btn-xs btn-outline btn-warning "><i class="fa fa-times-circle-o"></i> Failed</button>'
one['text'] = 'text-success' if line[8] is True else 'text-danger'
redisapi.hset('salt_task_list', i, json.dumps(one))
i -= 1
except Exception as e:
posconn.close()
        logger.warning('error in syncing redis_salt_task_sync: ' + str(e))
logger.exception(e)
return {'failed': e}
posconn.close()
logger.info('Completed in syncing redis_salt_task_sync ')
return str(100 - i) + ' records synced'
@celery.task
def redis_salt_event_sync():
try:
# posconn = psycopg2.connect(
# dbname='salt', user='salt', host='123.56.195.220', password='salt')
posconn = psycopg2.connect(dbname=config['POSTGRESQL_DB'], user=config[
'POSTGRESQL_USER'], host=config['POSTGRESQL_HOST'], password=config['POSTGRESQL_PASSWD'])
cur = posconn.cursor()
cur.execute("SELECT * FROM salt_events LIMIT 100;")
i = 0
ret = {}
for line in cur:
one = []
for col in line:
                if isinstance(col, datetime.datetime):
col = str(col.replace(microsecond=0))
one.append(col)
            redisapi.hset('salt_event_list', i, json.dumps(one))
i += 1
except Exception as e:
posconn.close()
        logger.warning('error in syncing redis_salt_event_sync: ' + str(e))
logger.exception(e)
return {'failed': e}
posconn.close()
logger.info('Completed in syncing redis_salt_event_sync ')
return str(i) + ' records synced'
# -*- coding: utf-8 -*-
import uuid
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for user in orm.User.objects.all():
orm.UserAvatar.objects.create(user=user,
file=None,
ident=uuid.uuid4().hex,
avatar_type=2)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 5, 18, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
symmetrical = True
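# Note (a sketch, not part of the original migration): on a large auth_user
# table the forwards() loop above loads every User at once, and re-running it
# would violate the unique constraint on UserAvatar.user. A more defensive
# variant could be:
#
#     for user in orm.User.objects.all().iterator():
#         orm.UserAvatar.objects.get_or_create(
#             user=user,
#             defaults={'file': None,
#                       'ident': uuid.uuid4().hex,
#                       'avatar_type': 2})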
| |
from django.core import serializers
from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseForbidden,HttpResponseNotModified, HttpResponseNotFound, JsonResponse
from django.template import RequestContext
from gitorial.models import User, Tutorial, Step
import django.contrib.auth
import social.apps.django_app.views
import json
import requests
from config import settings
from datetime import datetime, timedelta
from . import diff
# Create your views here.
def index(request):
return render_to_response('index.html', {},
context_instance=RequestContext(request))
def logout(request):
django.contrib.auth.logout(request)
return redirect('/')
def callback(request):
    if request.user is not None and request.user.is_authenticated():
        return redirect('/#/' + request.user.username)
    return redirect('/')
def session(request):
if request.method == 'GET':
if (request.user is not None and
        request.user.is_authenticated()):
username = request.user.username
else:
username = ''
return HttpResponse(json.dumps({
'username': username
}),
content_type="application/json")
else:
return HttpResponseNotAllowed(['GET'])
def user_view(request, username):
if request.method == 'POST':
user, is_new = User.objects.get_or_create(username=username)
if is_new:
api_r = requests.get('https://api.github.com/users/%s?client_id=%s&client_secret=%s' % (username, settings.SOCIAL_AUTH_GITHUB_KEY, settings.SOCIAL_AUTH_GITHUB_SECRET))
# Log how many requests are remaining
print(api_r.headers['X-RateLimit-Remaining'])
response_json = api_r.json()
try:
user.name = response_json['name']
except KeyError:
    pass
user.avatar_url = response_json['avatar_url']
user.last_modified = datetime.now()
user.save()
return HttpResponse(status=201)
else:
return HttpResponseForbidden()
elif request.method == 'GET':
try:
user = User.objects.get(username=username)
result = user.getDict()
if request.user.username == username:
repo_r = requests.get('https://api.github.com/users/%s/repos?client_id=%s&client_secret=%s&sort=pushed' % (username, settings.SOCIAL_AUTH_GITHUB_KEY, settings.SOCIAL_AUTH_GITHUB_SECRET))
repo_r_json = repo_r.json()
result['repos'] = [{
'title': repo['name'],
'description': repo['description']
} for repo in repo_r_json]
result['tutorials'] = [{
'id': tutorial.id,
'title': tutorial.title,
'description': tutorial.description,
'repo_url': tutorial.repo_url
} for tutorial in Tutorial.objects.filter(owner=user)]
return HttpResponse(json.dumps(result),
content_type="application/json")
except Exception as e:
print(e)
return HttpResponseNotFound()
elif request.method == 'DELETE':
try:
User.objects.get(username=username).delete()
return HttpResponse()
except Exception as e:
print(e)
return HttpResponseNotFound()
elif request.method == 'PATCH':
try:
user = User.objects.get(username=username)
if (datetime.now() - user.last_modified) > timedelta(hours=1):
api_r = requests.get('https://api.github.com/users/%s?client_id=%s&client_secret=%s' % (username, settings.SOCIAL_AUTH_GITHUB_KEY, settings.SOCIAL_AUTH_GITHUB_SECRET))
# Log how many requests are remaining
print(api_r.headers['X-RateLimit-Remaining'])
response_json = api_r.json()
try:
user.name = response_json['name']
except KeyError:
    pass
user.avatar_url = response_json['avatar_url']
user.last_modified = datetime.now()
user.save()
return HttpResponse()
else:
return HttpResponseNotModified()
except Exception as e:
print(e)
return HttpResponseNotFound()
else:
return HttpResponseNotAllowed(['POST', 'GET', 'DELETE', 'PATCH'])
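# The client_id/client_secret query-string pattern above recurs in several
# views; a small helper like this (hypothetical, not in the original module)
# would centralize it. Uses the `requests` and `settings` imports already
# present in this file.
def github_get(path, **params):
    """GET a GitHub API path with the app's OAuth credentials attached."""
    params.setdefault('client_id', settings.SOCIAL_AUTH_GITHUB_KEY)
    params.setdefault('client_secret', settings.SOCIAL_AUTH_GITHUB_SECRET)
    return requests.get('https://api.github.com/' + path.lstrip('/'),
                        params=params)
# Example: github_get('users/octocat') instead of building the URL by hand.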
def build_tutorials(user):
user_tutorials = Tutorial.objects.filter(owner=user).values('title', 'description', 'repo_url').order_by('id').reverse()
return [{'title': item['title'],
'description': item['description'],
'url': item['repo_url']}
for item in user_tutorials]
def build_steps(username, repo_name, tutorial, commits_data):
index = 0
for commit_data in commits_data:
index += 1
step, is_new = Step.objects.get_or_create(index=index, tutorial=tutorial)
if is_new:
step.title = commit_data['title']
step.content_before = commit_data['message']
step.content_after = ''
step.diff_url = commit_data['diff_url']
step.code_url = commit_data['code_url']
api_r = requests.get(
'https://api.github.com/repos/%s/%s/commits/%s?client_id=%s&client_secret=%s' % (
username,
repo_name,
commit_data['sha'],
settings.SOCIAL_AUTH_GITHUB_KEY,
settings.SOCIAL_AUTH_GITHUB_SECRET
), headers={'Accept': 'application/vnd.github.diff'})
step.files = json.dumps(diff.parse(api_r.text))
step.save()
def tutorial_new(request, username, repo):
if request.method == 'POST':
user = User.objects.get(username=username)
repo_r_json = requests.get(
'https://api.github.com/repos/%s/%s?client_id=%s&client_secret=%s' % (
username,
repo,
settings.SOCIAL_AUTH_GITHUB_KEY,
settings.SOCIAL_AUTH_GITHUB_SECRET)
).json()
tut_entry, is_new = Tutorial.objects.get_or_create(id=repo_r_json['id'], owner=user)
tut_entry.title = repo_r_json['name']
tut_entry.description = repo_r_json['description']
tut_entry.repo_name = repo_r_json['name']
tut_entry.repo_url = repo_r_json['url']
tut_entry.owner = user
tut_entry.save()
# Get all commits, most recent first
commits_r_json = requests.get(
repo_r_json['commits_url'].replace('{/sha}', '') +
('?client_id=%s&client_secret=%s' % (
settings.SOCIAL_AUTH_GITHUB_KEY,
settings.SOCIAL_AUTH_GITHUB_SECRET))
).json()
commits_data = []
for commit in commits_r_json:
(title_raw, _, message_raw) = commit['commit']['message'].partition('\n')
commits_data.insert(0, {
'sha': commit['sha'],
'title': title_raw[:50],
'message': message_raw,
'diff_url': commit['html_url'],
'code_url': 'https://github.com/%s/%s/tree/%s' % (username, repo, commit['sha'])
})
build_steps(username, repo, tut_entry, commits_data)
return JsonResponse({'tutorial_id': tut_entry.id})
else:
return HttpResponseNotAllowed(['POST'])
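# Caveat (a sketch, not original code): the commits request above fetches only
# one page of results (30 commits by default), so long histories are
# truncated. A paginating fetch could look like this; the helper name and
# parameters are assumptions for illustration.
def fetch_all_commits(commits_url, client_id, client_secret):
    """Page through a GitHub commits endpoint until an empty page is returned."""
    commits = []
    page = 1
    while True:
        batch = requests.get(commits_url, params={
            'client_id': client_id,
            'client_secret': client_secret,
            'page': page,
        }).json()
        if not batch:
            break
        commits.extend(batch)
        page += 1
    return commits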
def tutorial(request, username, tutnum):
if request.method == 'GET':
# tut is an ID (number)
tut_entry = Tutorial.objects.get(id=tutnum)
response = {
'id': tutnum,
'title': tut_entry.title,
'description': tut_entry.description,
'repo_name': tut_entry.repo_name,
'repo_url': tut_entry.repo_url,
'steps': [{
'title': step.title,
'content_before': step.content_before,
'content_after': step.content_after,
'diff_url': step.diff_url,
'code_url': step.code_url,
'files': json.loads(step.files)
} for step in Step.objects.filter(tutorial=tutnum).order_by('index')]
}
return JsonResponse(response)
elif request.method == 'DELETE':
# tut is an ID (number)
Tutorial.objects.get(id=tutnum).delete()
return HttpResponse()
elif request.method == 'PATCH':
patch = json.loads(request.body)
try:
tut_entry = Tutorial.objects.get(id=tutnum)
tut_entry.title = patch['title']
tut_entry.description = patch['description']
tut_entry.save()
index = 0
for step_json in patch['steps']:
index += 1
step = Step.objects.get(tutorial=tut_entry, index=index)
                step.title = step_json['title']
                step.content_before = step_json['content_before']
                step.content_after = step_json['content_after']
                step.save()
            return HttpResponse()
        except Tutorial.DoesNotExist:
            return HttpResponseNotFound()
else:
return HttpResponseNotAllowed(['POST', 'GET', 'DELETE', 'PATCH'])
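# Hypothetical URL wiring for the views above (a sketch; the project's real
# urls.py is not shown here and the exact patterns are assumptions):
#
#     from django.conf.urls import url
#     from gitorial import views
#
#     urlpatterns = [
#         url(r'^$', views.index),
#         url(r'^logout/$', views.logout),
#         url(r'^api/session/$', views.session),
#         url(r'^api/users/(?P<username>[^/]+)/$', views.user_view),
#         url(r'^api/users/(?P<username>[^/]+)/repos/(?P<repo>[^/]+)/$', views.tutorial_new),
#         url(r'^api/users/(?P<username>[^/]+)/tutorials/(?P<tutnum>\d+)/$', views.tutorial),
#     ]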
| |
# -*- coding: utf-8 -*-
'''
Control of entries in SSH authorized_key files
==============================================
The information stored in a user's SSH authorized key file can be easily
controlled via the ssh_auth state. Defaults can be set by the enc, options,
and comment keys. These defaults can be overridden by including them in the
name.
Since the YAML specification limits the length of simple keys to 1024
characters, and since SSH keys are often longer than that, you may have
to use a YAML 'explicit key', as demonstrated in the second example below.
.. code-block:: yaml
AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==:
ssh_auth.present:
- user: root
- enc: ssh-dss
? AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==...
:
ssh_auth.present:
- user: root
- enc: ssh-dss
thatch:
ssh_auth.present:
- user: root
- source: salt://ssh_keys/thatch.id_rsa.pub
- config: '%h/.ssh/authorized_keys'
sshkeys:
ssh_auth.present:
- user: root
- enc: ssh-rsa
- options:
- option1="value1"
- option2="value2 flag2"
- comment: myuser
- names:
- AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==
- ssh-dss AAAAB3NzaCL0sQ9fJ5bYTEyY== user@domain
- option3="value3" ssh-dss AAAAB3NzaC1kcQ9J5bYTEyY== other@testdomain
- AAAAB3NzaC1kcQ9fJFF435bYTEyY== newcomment
'''
# Import python libs
from __future__ import absolute_import
import re
import sys
# Import 3rd-party libs
import salt.ext.six as six
def _present_test(user, name, enc, comment, options, source, config):
'''
Run checks for "present"
'''
result = None
if source:
keys = __salt__['ssh.check_key_file'](
user,
source,
config,
saltenv=__env__)
if keys:
comment = ''
for key, status in six.iteritems(keys):
if status == 'exists':
continue
comment += 'Set to {0}: {1}\n'.format(status, key)
if comment:
return result, comment
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
return False, err
else:
return (
True,
'All host keys in file {0} are already present'.format(source)
)
else:
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split()
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split()
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options,
config)
if check == 'update':
comment = (
'Key {0} for user {1} is set to be updated'
).format(name, user)
elif check == 'add':
comment = (
'Key {0} for user {1} is set to be added'
).format(name, user)
elif check == 'exists':
result = True
comment = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
return result, comment
def _absent_test(user, name, enc, comment, options, source, config):
'''
Run checks for "absent"
'''
result = None
if source:
keys = __salt__['ssh.check_key_file'](
user,
source,
config,
saltenv=__env__)
if keys:
comment = ''
for key, status in list(keys.items()):
if status == 'exists':
continue
comment += 'Set to {0}: {1}\n'.format(status, key)
if comment:
return result, comment
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
return False, err
else:
return (
True,
'All host keys in file {0} are already absent'.format(source)
)
else:
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split()
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split()
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options,
config)
if check == 'update' or check == 'exists':
    comment = 'Key {0} for user {1} is set for removal'.format(name, user)
else:
    comment = 'Key is already absent'
    result = True
return result, comment
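# Illustration of the parsing regex shared by the two helpers above: it splits
# an authorized_keys line into an optional options prefix (group 1) and the
# "{enc} {key} [comment]" tail (group 2). The values below are made up.
#
#     >>> import re
#     >>> sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
#     >>> m = sshre.search('no-pty,no-agent-forwarding ssh-rsa AAAAB3NzaFAKE== user@host')
#     >>> m.group(1)
#     'no-pty,no-agent-forwarding'
#     >>> m.group(2)
#     'ssh-rsa AAAAB3NzaFAKE== user@host'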
def present(
name,
user,
enc='ssh-rsa',
comment='',
source='',
options=None,
config='.ssh/authorized_keys',
**kwargs):
'''
Verifies that the specified SSH key is present for the specified user
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ed25519, ecdsa, ssh-rsa
or ssh-dss
comment
The comment to be placed with the SSH public key
source
The source file for the key(s). Can contain any number of public keys,
in standard "authorized_keys" format. If this is set, comment and enc
will be ignored.
.. note::
The source file must contain keys in the format ``<enc> <key>
<comment>``. If you have generated a keypair using PuTTYgen, then you
will need to do the following to retrieve an OpenSSH-compatible public
key.
1. In PuTTYgen, click ``Load``, and select the *private* key file (not
the public key), and click ``Open``.
2. Copy the public key from the box labeled ``Public key for pasting
into OpenSSH authorized_keys file``.
3. Paste it into a new file.
options
The options passed to the key, pass a list object
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys". Token expansion %u and
%h for username and home path supported.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if source == '':
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split(None, 1)
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split(None, 2)
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
if __opts__['test']:
ret['result'], ret['comment'] = _present_test(
user,
name,
enc,
comment,
options or [],
source,
config,
)
return ret
    # Get only the path to the file, without env references, to check if it exists
if source != '':
source_path = __salt__['cp.get_url'](
source,
None,
saltenv=__env__)
if source != '' and not source_path:
data = 'no key'
elif source != '' and source_path:
key = __salt__['cp.get_file_str'](
source,
saltenv=__env__)
filehasoptions = False
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(ssh\-|ecds).*')
key = key.rstrip().split('\n')
for keyline in key:
filehasoptions = sshre.match(keyline)
if not filehasoptions:
data = __salt__['ssh.set_auth_key_from_file'](
user,
source,
config,
saltenv=__env__)
else:
                # Split keyline to get key and comment
keyline = keyline.split(' ')
key_type = keyline[0]
key_value = keyline[1]
key_comment = keyline[2] if len(keyline) > 2 else ''
data = __salt__['ssh.set_auth_key'](
user,
key_value,
key_type,
key_comment,
options or [],
config)
else:
data = __salt__['ssh.set_auth_key'](
user,
name,
enc,
comment,
options or [],
config)
if data == 'replace':
ret['changes'][name] = 'Updated'
ret['comment'] = ('The authorized host key {0} for user {1} was '
'updated'.format(name, user))
return ret
elif data == 'no change':
ret['comment'] = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
elif data == 'new':
ret['changes'][name] = 'New'
ret['comment'] = ('The authorized host key {0} for user {1} was added'
.format(name, user))
elif data == 'no key':
ret['result'] = False
ret['comment'] = ('Failed to add the ssh key. Source file {0} is '
'missing'.format(source))
elif data == 'fail':
ret['result'] = False
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
ret['comment'] = err
else:
ret['comment'] = ('Failed to add the ssh key. Is the home '
'directory available, and/or does the key file '
'exist?')
elif data == 'invalid':
ret['result'] = False
ret['comment'] = 'Invalid public ssh key, most likely has spaces'
return ret
def absent(name,
user,
enc='ssh-rsa',
comment='',
source='',
options=None,
config='.ssh/authorized_keys'):
'''
Verifies that the specified SSH key is absent
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ed25519, ecdsa, ssh-rsa
or ssh-dss
comment
The comment to be placed with the SSH public key
options
        The options to set for the key; pass them as a list object
source
The source file for the key(s). Can contain any number of public keys,
in standard "authorized_keys" format. If this is set, comment, enc and
options will be ignored.
.. versionadded:: 2015.8.0
config
The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/authorized_keys". The tokens ``%u`` and
        ``%h`` are expanded to the username and home path, respectively.
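    A minimal, illustrative example state (public key value shortened):
    .. code-block:: yaml
        AAAAB3NzaC1yc2EAAAADAQABAAAB...snip...==:
          ssh_auth.absent:
            - user: root
            - enc: ssh-rsa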
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if __opts__['test']:
ret['result'], ret['comment'] = _absent_test(
user,
name,
enc,
comment,
options or [],
source,
config,
)
return ret
# Extract Key from file if source is present
if source != '':
key = __salt__['cp.get_file_str'](
source,
saltenv=__env__)
filehasoptions = False
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(ssh\-|ecds).*')
key = key.rstrip().split('\n')
for keyline in key:
filehasoptions = sshre.match(keyline)
if not filehasoptions:
ret['comment'] = __salt__['ssh.rm_auth_key_from_file'](user,
source,
config,
saltenv=__env__)
else:
# Split keyline to get key
keyline = keyline.split(' ')
ret['comment'] = __salt__['ssh.rm_auth_key'](user,
keyline[1],
config)
else:
# Get just the key
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split(None, 1)
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split()
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
ret['comment'] = __salt__['ssh.rm_auth_key'](user, name, config)
if ret['comment'] == 'User authorized keys file not present':
ret['result'] = False
return ret
elif ret['comment'] == 'Key removed':
ret['changes'][name] = 'Removed'
return ret
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test CSV soft fork activation.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
mine 83 blocks whose coinbases will be used to generate inputs for our tests
mine 344 blocks, then seed the block chain with the 83 inputs we will use for our tests in one more block at height 428
mine 2 blocks and verify soft fork not yet activated
mine 1 block and test that soft fork is activated (rules enforced for next block)
Test BIP 113 is enforced
Mine 4 blocks so next height is 436 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 437 and test BIP 68 now passes time but not height
Mine 1 block so next height is 438 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
bip112tx_emptystack - test empty stack (= no argument) OP_CSV
"""
from decimal import Decimal
from itertools import product
from io import BytesIO
import time
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import ToHex, CTransaction
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
softfork_active,
)
TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to generate as inputs for our txs
BASE_RELATIVE_LOCKTIME = 10
CSV_ACTIVATION_HEIGHT = 432
SEQ_DISABLE_FLAG = 1 << 31
SEQ_RANDOM_HIGH_BIT = 1 << 25
SEQ_TYPE_FLAG = 1 << 22
SEQ_RANDOM_LOW_BIT = 1 << 18
def relative_locktime(sdf, srhb, stf, srlb):
"""Returns a locktime with certain bits set."""
locktime = BASE_RELATIVE_LOCKTIME
if sdf:
locktime |= SEQ_DISABLE_FLAG
if srhb:
locktime |= SEQ_RANDOM_HIGH_BIT
if stf:
locktime |= SEQ_TYPE_FLAG
if srlb:
locktime |= SEQ_RANDOM_LOW_BIT
return locktime
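# Illustrative example (not part of the original test): with the constants
# above, relative_locktime(True, False, True, False) returns
# BASE_RELATIVE_LOCKTIME | SEQ_DISABLE_FLAG | SEQ_TYPE_FLAG == 0x8040000a,
# a sequence value whose relative lock time BIP 68 treats as disabled.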
def all_rlt_txs(txs):
return [tx['tx'] for tx in txs]
def sign_transaction(node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransactionwithwallet(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def create_bip112special(node, input, txversion, address):
tx = create_transaction(node, input, address, amount=Decimal("49.98"))
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112emptystack(node, input, txversion, address):
tx = create_transaction(node, input, address, amount=Decimal("49.98"))
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
signtx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49.99")))))
def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0):
"""Returns a list of bip68 transactions with different bits set."""
txs = []
assert len(bip68inputs) >= 16
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip68inputs[i], address, amount=Decimal("49.98"))
tx.nVersion = txversion
tx.vin[0].nSequence = locktime + locktime_delta
tx = sign_transaction(node, tx)
tx.rehash()
txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
return txs
def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0):
"""Returns a list of bip68 transactions with different bits set."""
txs = []
assert len(bip112inputs) >= 16
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip112inputs[i], address, amount=Decimal("49.98"))
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = locktime + locktime_delta
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
        signtx.rehash()
txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf})
return txs
class BIP68_112_113Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[
'-whitelist=noban@127.0.0.1',
'-blockversion=4',
'-addresstype=legacy',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def generate_blocks(self, number):
test_blocks = []
for i in range(number):
block = self.create_test_block([])
test_blocks.append(block)
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = 4
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def send_blocks(self, blocks, success=True, reject_reason=None):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.log.info("Generate blocks in the past for coinbase outputs.")
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(COINBASE_BLOCK_COUNT) # blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = COINBASE_BLOCK_COUNT  # height of the current chain tip
self.last_block_time = long_past_time
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()
# Activation height is hardcoded
# We advance to block height five below BIP112 activation for the following tests
test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT-5 - COINBASE_BLOCK_COUNT)
self.send_blocks(test_blocks)
assert not softfork_active(self.nodes[0], 'csv')
        # Inputs at height = 428
        #
        # Put inputs for all tests in the chain at height 428 (tip now = 427) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
        bip112emptystackinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0]  # 1 block generated for inputs to be in chain at height 428
self.nodes[0].setmocktime(0)
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2)
self.send_blocks(test_blocks)
assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
self.log.info("Height = {}, CSV not yet active (will activate for block {}, not {})".format(self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
assert not softfork_active(self.nodes[0], 'csv')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress)
bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress)
bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
# (empty stack) OP_CSV input
bip112tx_emptystack_v1 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 1, self.nodeaddress)
bip112tx_emptystack_v2 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 2, self.nodeaddress)
self.log.info("TESTING")
self.log.info("Pre-Soft Fork Tests. All txs should pass.")
self.log.info("Test version 1 txs")
success_txs = []
# BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
success_txs.append(bip112tx_emptystack_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
success_txs = []
# BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
success_txs.append(bip112tx_emptystack_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # 1 more version 4 block to get the tip to height 431, so the fork is active for the next block (432)
assert not softfork_active(self.nodes[0], 'csv')
test_blocks = self.generate_blocks(1)
self.send_blocks(test_blocks)
assert softfork_active(self.nodes[0], 'csv')
self.log.info("Post-Soft Fork Tests.")
self.log.info("BIP 113 tests")
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.send_blocks([self.create_test_block([bip113tx])])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Next block height = 436 after 4 blocks of random version
test_blocks = self.generate_blocks(4)
self.send_blocks(test_blocks)
self.log.info("BIP 68 tests")
self.log.info("Test version 1 txs - all should still pass")
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
self.send_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
for tx in bip68timetxs:
self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
for tx in bip68heighttxs:
self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
        # Advance one block to 437
test_blocks = self.generate_blocks(1)
self.send_blocks(test_blocks)
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
self.send_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
        # Advance one block to 438
test_blocks = self.generate_blocks(1)
self.send_blocks(test_blocks)
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
self.send_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")
# -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
for tx in fail_txs:
self.send_blocks([self.create_test_block([tx])], success=False,
reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
self.log.info("Test version 2 txs")
# -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
for tx in fail_txs:
self.send_blocks([self.create_test_block([tx])], success=False,
reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
for tx in fail_txs:
self.send_blocks([self.create_test_block([tx])], success=False,
reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If sequencelock types mismatch, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
for tx in fail_txs:
self.send_blocks([self.create_test_block([tx])], success=False,
reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# Remaining txs should pass, just test masking works properly
success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
self.send_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Additional test: check that comparison of two time types works properly
time_txs = []
for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
signtx = sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
self.send_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
if __name__ == '__main__':
BIP68_112_113Test().main()
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import argparse
import logging
import os
# Component, , represents an installed component, internal
from yotta.lib import component
# access, , get components, internal
from yotta.lib import access
# access_common, , things shared between component access modules, internal
from yotta.lib import access_common
# folders, , get places to install things, internal
from yotta.lib import folders
# --config option, , , internal
from yotta import options
def addOptions(parser):
options.config.addTo(parser)
parser.add_argument('component', default=None, nargs='?',
help='If specified, install this module instead of installing '+
'the dependencies of the current module.'
)
parser.add_argument('--test-dependencies', dest='install_test_deps',
choices=('none', 'all', 'own'), default='own',
help='Control the installation of dependencies necessary for building tests.'
)
group = parser.add_mutually_exclusive_group()
group.add_argument('--global', '-g', dest='act_globally', default=False, action='store_true',
help='Install globally instead of in the current working directory.'
)
# Deprecated options, these now do nothing! --save behavior is the default,
# and --save-target has been removed.
group.add_argument('--save', dest='save', action='store_true',
default=False, help=argparse.SUPPRESS
)
group.add_argument('--save-target', dest='save_target',
action='store_true', default=False, help=argparse.SUPPRESS
)
def execCommand(args, following_args):
if not hasattr(args, 'install_test_deps'):
vars(args)['install_test_deps'] = 'none'
if getattr(args, 'save', None):
logging.warning('the --save option is now the default and is ignored. It will be removed soon.')
if getattr(args, 'save_target', None):
        logging.warning('the --save-target option is now ignored. It will be removed soon.')
cwd = os.getcwd()
c = component.Component(cwd)
if args.component is None:
return installDeps(args, c)
elif c or c.exists():
return installComponentAsDependency(args, c)
else:
return installComponent(args)
def checkPrintStatus(errors, components, top_component, target):
status = 0
for error in errors:
logging.error(error)
status = 1
for c in list(components.values()) + [top_component]:
if c and c.getError():
logging.error('%s %s', c.getName(), c.getError())
status = 1
leaf_target = None
if target and target.hierarchy:
for t in target.hierarchy:
if not leaf_target:
leaf_target = t
if t and t.getError():
if t is leaf_target:
logging.error('target %s %s', t.getName(), t.getError())
else:
logging.error('base target %s of %s %s', t.getName(), leaf_target.getName(), t.getError())
status = 1
return status
def installDeps(args, current_component):
# settings, , load and save settings, internal
from yotta.lib import settings
logging.debug('install deps for %s' % current_component)
if not current_component:
logging.debug(str(current_component.getError()))
logging.error('The current directory does not contain a valid module.')
return 1
# warn if the target hasn't been explicitly specified when running a build:
# this is likely user-error
if not settings.getProperty('build', 'targetSetExplicitly') and not \
getattr(args, '_target_set_explicitly', False):
logging.warning(
'The build target has not been set, so the default (%s) is being ' +
'used. You can use `yotta target <targetname>` to set the build ' +
'target. See http://yottadocs.mbed.com/tutorial/targets.html for '
'more information on using targets.',
args.target
)
target, errors = current_component.satisfyTarget(args.target, additional_config=args.config)
if errors:
for error in errors:
logging.error(error)
return 1
if args.act_globally:
# the npm behaviour here would be to install the working directory
# module into the global modules dir
raise NotImplementedError()
else:
# satisfyDependenciesRecursive will always prefer to install
# dependencies in the yotta_modules directory of the top-level module,
# so it's safe to set traverse_links here when we're only *installing*
# modules (not updating them). This will never result in
# Spooky-Action-Through-A-Symlink.
components, errors = current_component.satisfyDependenciesRecursive(
target = target,
traverse_links = True,
available_components = [(current_component.getName(), current_component)],
test = {'own':'toplevel', 'all':True, 'none':False}[args.install_test_deps]
)
return checkPrintStatus(errors, components, current_component, target)
def installComponentAsDependency(args, current_component):
logging.debug('install component %s as dependency of %s' % (args.component, current_component))
if not current_component:
logging.debug(str(current_component.getError()))
logging.error('The current directory does not contain a valid module.')
return -1
target, errors = current_component.satisfyTarget(args.target, additional_config=args.config)
if errors:
for error in errors:
logging.error(error)
return 1
modules_dir = current_component.modulesPath()
from yotta.lib import sourceparse
# check if we have both a name and specification
component_name, component_spec = sourceparse.parseModuleNameAndSpec(args.component)
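    # e.g. (illustrative): an argument of the form "simplelog@~0.0.1" would
    # parse to component_name="simplelog" and component_spec="~0.0.1"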
logging.info('%s, %s', component_name, component_spec)
if component_name == current_component.getName():
logging.error('will not install module %s as a dependency of itself', component_name)
return -1
try:
installed = access.satisfyVersion(
component_name,
component_spec,
available = {current_component.getName():current_component},
search_paths = [modules_dir],
working_directory = modules_dir
)
except access_common.AccessException as e:
logging.error(e)
return 1
# We always add the component to the dependencies of the current component
# (if it is not already present), and write that back to disk. Without
# writing to disk the dependency wouldn't be usable.
if installed and not current_component.hasDependency(component_name):
saved_spec = current_component.saveDependency(installed)
current_component.writeDescription()
logging.info('dependency %s: %s written to module.json', component_name, saved_spec)
else:
logging.info('dependency %s is already present in module.json', component_name)
# !!! should only install dependencies necessary for the one thing that
# we're installing (but existing components should be made available to
# satisfy dependencies)
components, errors = current_component.satisfyDependenciesRecursive(
target = target,
available_components = [(current_component.getName(), current_component)],
test = {'own':'toplevel', 'all':True, 'none':False}[args.install_test_deps]
)
return checkPrintStatus(errors, components, current_component, target)
def installComponent(args):
path = folders.globalInstallDirectory() if args.act_globally else os.getcwd()
logging.debug('install component %s to %s' % (args.component, path))
from yotta.lib import sourceparse
# check if we have both a name and specification
component_name, component_spec = sourceparse.parseModuleNameAndSpec(args.component)
try:
access.satisfyVersion(
component_name,
component_spec,
available = dict(),
search_paths = [path],
working_directory = path
)
except access_common.AccessException as e:
logging.error('%s', e)
return 1
os.chdir(component_name)
return installDeps(args, component.Component(os.getcwd()))
#!/usr/bin/env python
""" compile.py
Build the pyjamas-showcase web application.
We firstly scan through all the demo modules, pulling out the module
docstrings and the source code, and adding this information to the
demoInfo.py module. We then call Pyjamas to compile the application, and
finally open a web browser window to show the compiled application.
"""
import cStringIO
import os
import os.path
import sys
import webbrowser
import pyColourize
#############################################################################
# Modify the following to refer to the full path to your Pyjamas installation,
# relative to the pyjamas-showcase source directory:
here = os.path.dirname(os.path.abspath(__file__))
PATH_TO_PYJAMAS = os.path.dirname(os.path.dirname(here))
#############################################################################
def main():
""" Our main program.
"""
# Extract the source code to each demo file, storing the results into the
# demoInfo.py file.
# Load all our demonstrations into memory.
demoInfo = {}
for section in ["widgets", "panels"]:
for fName in os.listdir(os.path.join("src", "demos_" + section)):
if fName.startswith(".") or not fName.endswith(".py"):
continue
f = file(os.path.join("src", "demos_" + section, fName), "r")
src = f.read()
f.close()
demoName = fName[:-3]
docstring,htmlSrc = parseDemo(fName, src)
demoInfo[demoName] = {'section' : section,
'doc' : docstring,
'src' : src,
'htmlSrc' : htmlSrc}
# Calculate the list of imports to use for the combined module.
imports = set()
for demo in demoInfo.values():
imports.update(extractImports(demo['src']))
# Write the combined demos into a single file.
s = []
s.append('""" demoInfo.py')
s.append('')
s.append(' DO NOT EDIT THE CONTENTS OF THIS FILE!')
s.append('')
s.append(' This file is created automatically by the compile.py')
s.append(' script out of the various demonstration modules.')
s.append('"""')
s.append('')
for imp in imports:
        s.append(imp)
s.append('')
s.append('')
for demo in demoInfo.keys():
s.append(removeDocstringAndImports(demoInfo[demo]['src']))
s.append('')
s.append('')
s.append('def getDemos():')
s.append(' demos = []')
sortKeys = []
for name in demoInfo.keys():
sortKeys.append((demoInfo[name]['section'], name))
sortKeys.sort()
for section,name in sortKeys:
demo = demoInfo[name]
capName = name[0].upper() + name[1:]
s.append(' demos.append({"name" : "' + name + '",')
s.append(' "title" : "ui.' + capName + '",')
s.append(' "section" : "' + demo['section'] + '",')
s.append(' "doc" : """' + demo['doc'] + '""",')
s.append(' "src" : """' + demo['htmlSrc'] + '""",')
s.append(' "example" : ' + capName + 'Demo()})')
s.append('')
s.append(' return demos')
s.append('')
f = file(os.path.join("src", "demoInfo.py"), "w")
f.write("\n".join(s))
f.close()
options = " ".join(sys.argv[1:])
# Compile the application using Pyjamas.
stmt = (os.path.join(PATH_TO_PYJAMAS, 'bin', 'pyjsbuild') +
" " + options +
" -o " + os.path.join(here,'build') + " " +
" -I " + os.path.join(here, 'src') + " " +
'Showcase' +
" > /dev/null")
if os.system(stmt) != 0: return
# Finally, launch a web browser to show the compiled application.
#webbrowser.open("file://" + os.path.abspath("build/Showcase.html"))
#############################################################################
def parseDemo(fName, src):
""" Parse the given demonstration file.
'fName' is the name of the demonstration module, and 'src' is a copy of
the module's contents.
We return a (docstring, src) tuple, containing the documentation string
and source code for the given demonstration module.
"""
if src.startswith('"""'):
# We have a docstring. Search for the ending docstring, and pull the
# remaining source code out.
i = src.find('"""', 3)
if i == -1:
docstring = src[3:]
src = ""
else:
docstring = src[3:i].lstrip()
src = src[i+3:].lstrip()
else:
# We have no docstring at all.
docstring = ""
# Tidy up the paragraphs in the docstring. We do the following:
#
# * If a paragraph starts with " * ", treat it as an unordered list item.
#
# * If all the lines in a paragraph start with " ", treat it as an
# indented code block.
#
# * Otherwise, join the lines in the paragraph together to avoid line
# break issues.
paragraphs = docstring.split("\n\n")
for i in range(len(paragraphs)):
indented = True
for line in paragraphs[i].split("\n"):
if not line.startswith(" "):
indented = False
if indented:
# Treat the paragraph as a code block.
paragraphs[i] = "<blockquote><pre>" + paragraphs[i] + \
"</pre></blockquote>"
else:
paragraphs[i] = paragraphs[i].replace("\n", " ")
if paragraphs[i].startswith(" * "):
paragraphs[i] = "<ul><li>" + paragraphs[i][3:] + "</li></ul>"
docstring = "\n\n".join(paragraphs)
# Colourize the source code.
buff = cStringIO.StringIO()
pyColourize.Parser(src, pyColourize._Eriks_Style,
fName, buff).format(None, None)
html = buff.getvalue()
# Replace any quotes in the source code with the equivalent HTML entity
# codes.
html = html.replace('"""', '"""')
# That's all, folks!
return docstring,html
#############################################################################
def extractImports(src):
""" Extract the set of import statements in the given source code.
We return a set object containing the import statements in the given
source code.
"""
imports = set()
for line in src.split("\n"):
if line.startswith("import"):
imports.add(line)
elif line.startswith("from "):
i = line.find(" import ")
if i > -1:
fromModule = line[5:i]
modules = line[i+8:].split(",")
for module in modules:
imports.add("from "+fromModule+" import "+module.strip())
return imports
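# Illustrative example (not executed): given a source line
#   "from ui import Button, Label"
# extractImports() yields {"from ui import Button", "from ui import Label"}.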
#############################################################################
def removeDocstringAndImports(src):
""" Remove the docstring and import statements from the given code.
We return the source code, minus the initial docstring (if any) and any
import statements.
"""
if src.startswith('"""'):
# We have a docstring. Search for the ending docstring, and pull the
# remaining source code out.
i = src.find('"""', 3)
if i == -1:
src = ""
else:
src = src[i+3:].lstrip()
dst = []
for line in src.split("\n"):
if not line.startswith("import") and not line.startswith("from"):
dst.append(line)
return "\n".join(dst)
#############################################################################
if __name__ == "__main__":
main()
#!/usr/bin/env python3
import sys, getopt, time
import pyndn, ipaddress
import subprocess
import pyautogui
import logging
import pickle
from Crypto import Random
from Crypto.Cipher import AES
import hashlib
from datetime import datetime
def main(argv):
logging_filename = "ndnMouse-log.txt"
logging_level = logging.ERROR
default_address = "192.168.1.2"
def printUsage():
print("Usage: ndnMouse-client-udp.py [logging_level_flag]")
print("No flag specified: logging level defaults to errors only")
print(" -h help/print usage")
print(" -d logging level debug")
print(" -i logging level info")
print(" -n logging level none")
# Parse arguments
    if len(argv) != 0:
try:
opts, args = getopt.getopt(argv, "dhin")
except getopt.GetoptError:
printUsage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printUsage()
sys.exit()
            elif opt == '-d':
                logging_level = logging.DEBUG
            elif opt == '-i':
                logging_level = logging.INFO
            elif opt == '-n':
                logging_level = logging.CRITICAL  # critical level never used
# Set logging level based on flag provided (default to error only)
logging.basicConfig(filename=logging_filename, level=logging_level)
# Prompt user for server address (port is always 6363 for NFD)
server_address = getServerAddress(default_address)
# Prompt user for password
password = getPassword()
# Create a route from this PC's NFD to phone's NFD
if NFDIsRunning():
if not setupNFD(server_address):
print("Error: could not set up NFD route!\nExiting...")
logging.info("{0} Error: could not set up NFD route!\nExiting...".format(datetime.now()))
exit(1)
else:
print("Error: NFD is not running!\nRun \"nfd-start\" and try again.\nExiting...")
logging.info("{0} Error: NFD is not running!\nRun \"nfd-start\" and try again.\nExiting...".format(datetime.now()))
exit(1)
# Create server and run it
if not password:
server = ndnMouseClientNDN(server_address)
else:
server = ndnMouseClientNDNSecure(server_address, password)
try:
server.run()
except KeyboardInterrupt:
print("\nExiting...")
logging.info("{0} Exiting...".format(datetime.now()))
finally:
server.shutdown()
################################################################################
# Class ndnMouseClientNDN
################################################################################
class ndnMouseClientNDN():
# pyautogui variables
transition_time = 0
pyautogui.FAILSAFE = False
pyautogui.PAUSE = 0
# NDN variables
interest_timeout = 50
sleep_time = 0.020
def __init__(self, addr):
# Create face to work with NFD
self.face = pyndn.face.Face()
self.server_address = addr
def run(self):
print("Use ctrl+c quit at anytime....")
print("Routing /ndnmouse interests to Face udp://{0}.".format(self.server_address))
logging.info("{0} Use ctrl+c quit at anytime....".format(datetime.now()))
logging.info("{0} Routing /ndnmouse interests to Face udp://{1}.".format(datetime.now(), self.server_address))
# Make interest to get movement data
interest_move = pyndn.interest.Interest(pyndn.name.Name("/ndnmouse/update"))
interest_move.setInterestLifetimeMilliseconds(self.interest_timeout)
interest_move.setMustBeFresh(True)
# Send interests
self.face.expressInterest(interest_move, self._onData, self._onTimeout)
# Loop forever, processing data as it comes back
# Additional interests are sent by _onData and _onTimeout callbacks
while True:
self.face.processEvents()
time.sleep(self.sleep_time)
def shutdown(self):
self.face.shutdown()
############################################################################
# Interest Callbacks
############################################################################
# Callback for when data is returned for an interest
def _onData(self, interest, data):
msg = bytes(data.getContent().buf())
try:
self._handle(msg)
logging.info("{0} Got returned data from {1}: {2}".format(datetime.now(), data.getName().toUri(), msg))
except UnicodeDecodeError:
logging.error("{0} Failed to parse data. Password on server?".format(datetime.now()))
# Resend interest to get move/click data
self.face.expressInterest(interest, self._onData, self._onTimeout)
# Callback for when interest times out
def _onTimeout(self, interest):
# logging.info("{0} TIMEOUT: {1}".format(datetime.now(), interest.getName().toUri()))
# Resend interest to get move/click data
self.face.expressInterest(interest, self._onData, self._onTimeout)
############################################################################
# Handle Mouse Functions
############################################################################
# General handler
# Returns true if message could be handled, otherwise false
def _handle(self, msg):
if msg.startswith(b"M") or msg.startswith(b"A"):
self._handleMove(msg)
elif msg.startswith(b"S"):
self._handleScroll(msg)
elif msg.startswith(b"C"):
_, click, updown = msg.decode().split('_')
self._handleClick(click, updown)
elif msg.startswith(b"K"):
_, keypress, updown = msg.decode().split('_')
self._handleKeypress(keypress, updown)
elif msg.startswith(b"T"):
self._handleTypeMessage(msg)
elif msg.startswith(b"BEAT"):
pass # Ignore, out of order heartbeat response
else:
logging.error("{0} Bad command received. Password on server?".format(datetime.now()))
return False
return True
# Handle click commands
def _handleClick(self, click, updown):
if updown == "U": # Up
pyautogui.mouseUp(button=click)
elif updown == "D": # Down
pyautogui.mouseDown(button=click)
elif updown == "F": # Full
pyautogui.click(button=click)
else:
logging.error("{0} Invalid click type: {1} {2}".format(datetime.now(), click, updown))
# Handle keypress commands
def _handleKeypress(self, keypress, updown):
# Decompress certain longer commands
if keypress == "bspace":
keypress = "backspace"
if updown == "U": # UP
pyautogui.keyUp(keypress)
elif updown == "D": # DOWN
pyautogui.keyDown(keypress)
elif updown == "F": # FULL
pyautogui.press(keypress)
else:
logging.error("{0} Invalid keypress type: {1} {2}".format(datetime.now(), keypress, updown))
# Handle custom typed message
# Format of commands: T<msg-to-type> (msg-to-type can be up to 10B)
# b"Thello" (type "hello" on client)
def _handleTypeMessage(self, msg):
type_string = msg.decode()[1:]
pyautogui.typewrite(type_string)
# Handle movement commands
# Format of commands: M<x-4B><y-4B>
# b"A\x00\x00\x01\x90\x00\x00\x01\xf4" (move to absolute pixel coordinate x=400, y=500)
# b"M\xff\xff\xff\xb5\x00\x00\x00\x19" (move 75 left, 25 up relative to current pixel position)
def _handleMove(self, data):
move_type = data[:1]
x = intFromBytes(data[1:5])
y = intFromBytes(data[5:9])
# Move mouse according to move_type (relative or absolute)
if (move_type == b"M"):
pyautogui.moveRel(x, y, self.transition_time)
elif (move_type == b"A"):
pyautogui.moveTo(x, y, self.transition_time)
# Handle two-finger scroll commands
# Format of commands: S<x-4B><y-4B>
# b"S\xff\xff\xff\xb5\x00\x00\x00\x19" (scroll 75 right, 25 up)
def _handleScroll(self, data):
move_type = data[:1]
x = intFromBytes(data[1:5])
y = intFromBytes(data[5:9])
        # Work around a pyautogui bug where an x value between -9 and -1 causes
        # opposite horizontal scrolling behavior
        # https://github.com/asweigart/pyautogui/issues/154
        if not (-9 <= x <= -1):
pyautogui.hscroll(x)
if y:
pyautogui.vscroll(y)
################################################################################
# Class ndnMouseClientNDNSecure
################################################################################
# Packet description
# 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
# -----------------------------------------------------------------
# | IV | Seq | Message (PKCS5 pad) |
# -----------------------------------------------------------------
# <~~~~~~~~~ plaintext ~~~~~~~~~~~><~~~~~~~~~ ciphertext ~~~~~~~~~>
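# Illustrative construction of one 48-byte packet under the layout above: the
# sender draws a fresh 16-byte IV, prepends the 4-byte big-endian seq number
# to the message, PKCS5-pads the result to 32 bytes, AES-CBC-encrypts it with
# the key derived from the password and salt, and transmits iv + ciphertext.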
class ndnMouseClientNDNSecure(ndnMouseClientNDN):
# Constants
seq_num_bytes = 4
iv_bytes = 16
salt_bytes = 16
key_bytes = 16
aes_block_size = 16
packet_bytes = 48
max_bad_responses = 5
max_seq_num = 2147483647
def __init__(self, addr, password):
super().__init__(addr)
self.password = password
self.rndfile = Random.new()
self.seq_num = 0
self.bad_response_count = 0
self.pending_sync = False
def run(self):
print("Use ctrl+c quit at anytime....")
print("Routing /ndnmouse interests to Face udp://{0}.".format(self.server_address))
logging.info("{0} Use ctrl+c quit at anytime....".format(datetime.now()))
logging.info("{0} Routing /ndnmouse interests to Face udp://{1}.".format(datetime.now(), self.server_address))
# Request password salt from producer
self._requestSalt()
# Wait for salt data to come back
while not self.salt_received:
self.face.processEvents()
time.sleep(self.sleep_time)
# Make interest to get movement data
interest_move = pyndn.interest.Interest(pyndn.name.Name("/ndnmouse/update"))
interest_move.setInterestLifetimeMilliseconds(self.interest_timeout)
interest_move.setMustBeFresh(True)
# Send move and click interests
self.face.expressInterest(interest_move, self._onData, self._onTimeout)
# Loop forever, processing data as it comes back
# Additional interests are sent by _onData and _onTimeout callbacks
while True:
self.face.processEvents()
time.sleep(self.sleep_time)
def shutdown(self):
self.face.shutdown()
############################################################################
# Interest Callbacks and Helpers
############################################################################
# Callback when data is returned for a general mouse command interest
def _onData(self, interest, data):
data_bytes = bytes(data.getContent().buf())
server_iv = data_bytes[:self.iv_bytes]
encrypted = data_bytes[self.iv_bytes:]
try:
decrypted = self._decryptData(encrypted, server_iv)
server_seq_num = intFromBytes(decrypted[:self.seq_num_bytes])
# If decrypted response has a valid seq num...
if server_seq_num > self.seq_num or self.seq_num == self.max_seq_num:
msg = decrypted[self.seq_num_bytes:]
# Only update seq num if we handled the command, also reset bad response count
if self._handle(msg):
self.seq_num = server_seq_num
self.bad_response_count = 0
logging.debug("{0} Got returned data from {1}: {2}".format(datetime.now(), data.getName().toUri(), msg))
else:
logging.error("{0} Bad sequence number received!".format(datetime.now()))
self.bad_response_count += 1
if self.bad_response_count > self.max_bad_responses:
# Send special interest to update server's seq num
self._syncWithServer()
except (UnicodeDecodeError, ValueError):
logging.error("{0} Failed to decrypt data. Wrong password?".format(datetime.now()))
self.bad_response_count += 1
if self.bad_response_count > self.max_bad_responses:
self._syncWithServer()
# Resend interest to get move/click data
self.face.expressInterest(interest, self._onData, self._onTimeout)
# Callback when timeout for a general mouse command interest
def _onTimeout(self, interest):
# logging.info("{0} TIMEOUT: {1}".format(datetime.now(), interest.getName().toUri()))
# Resend interest to get move/click data
self.face.expressInterest(interest, self._onData, self._onTimeout)
# Send a salt request interest
def _requestSalt(self):
logging.info("{0} Sending salt request interest: /ndnmmouse/salt".format(datetime.now()))
self.salt_received = False
interest_salt = pyndn.interest.Interest(pyndn.name.Name("/ndnmouse/salt"))
interest_salt.setInterestLifetimeMilliseconds(self.interest_timeout)
interest_salt.setMustBeFresh(True)
self.face.expressInterest(interest_salt, self._onSaltData, self._onSaltTimeout)
# Callback when data is returned for getting password salt from producer
def _onSaltData(self, interest, data):
# Validate salt is correct length
salt = bytes(data.getContent().buf())
logging.info(str(datetime.now()).encode() + b" Received salt data: " + salt)
if len(salt) == self.salt_bytes:
# Get key from password and salt
self.key = self._getKeyFromPassword(self.password, salt)
self.salt_received = True
else:
# Otherwise try requesting salt again
self.face.expressInterest(interest, self._onSaltData, self._onSaltTimeout)
# Callback when timeout for getting password salt from producer
def _onSaltTimeout(self, interest):
# logging.info("{0} TIMEOUT: /ndnmouse/salt".format(datetime.now()))
# Just resend interest
self.face.expressInterest(interest, self._onSaltData, self._onSaltTimeout)
    # Send a special interest to synchronize the server's seq num with the consumer's
def _setServerSeqNum(self):
# If seq num passed INT_MAX, then reset to 0
if self.seq_num >= self.max_seq_num:
self.seq_num = 0
# Make interest name
iv = self._getNewIV()
msg = intToBytes(self.seq_num) + b"SEQ"
encrypted_seq_num = self._encryptData(msg, iv)
interest_name = pyndn.name.Name("/ndnmouse/seq/")
interest_name.append(pyndn.name.Name.Component(iv + encrypted_seq_num))
# Make interest and send out face
interest_update_seq = pyndn.interest.Interest(interest_name)
interest_update_seq.setInterestLifetimeMilliseconds(self.interest_timeout)
interest_update_seq.setMustBeFresh(True)
logging.debug("{0} Sending set seq num interest: ".format(datetime.now()) + interest_name.toUri())
self.face.expressInterest(interest_update_seq, self._onUpdateSeqData, self._onUpdateSeqTimeout)
# Callback when data is returned for an update seq num interest
def _onUpdateSeqData(self, interest, data):
data_bytes = bytes(data.getContent().buf())
server_iv = data_bytes[:self.iv_bytes]
encrypted = data_bytes[self.iv_bytes:]
decrypted = self._decryptData(encrypted, server_iv)
server_seq_num = intFromBytes(decrypted[:self.seq_num_bytes])
# If decrypted response has a valid seq num...
if server_seq_num > self.seq_num or self.seq_num == self.max_seq_num:
try:
msg = decrypted[self.seq_num_bytes:]
# Good response received, no additional update seq interests needed
if msg.startswith(b"SEQ-ACK"):
self.seq_num = server_seq_num
return
except UnicodeDecodeError:
logging.error("{0} Failed to decrypt data. Wrong password?".format(datetime.now()))
self.bad_response_count += 1
if self.bad_response_count > self.max_bad_responses:
self._syncWithServer()
else:
logging.error("{0} Bad sequence number received!".format(datetime.now()))
# Resend update seq interest, because we didn't get proper response back
self.face.expressInterest(interest, self._onUpdateSeqData, self._onUpdateSeqTimeout)
# Callback when timeout for an update seq num interest
def _onUpdateSeqTimeout(self, interest):
# logging.info("{0} TIMEOUT: /ndnmouse/seq".format(datetime.now()))
# Resend interest to try to synchronize seq nums again
self.face.expressInterest(interest, self._onUpdateSeqData, self._onUpdateSeqTimeout)
# If consumer gets bad responses, sync with server by re-getting password
# salt and setting the server's seq num
def _syncWithServer(self):
# Don't try to sync if sync is already pending
if self.pending_sync:
return
self.pending_sync = True
logging.info("{0} Attempting to synchronize with server".format(datetime.now()))
# Get password salt
self._requestSalt()
# Wait for salt data to return
while not self.salt_received:
self.face.processEvents()
time.sleep(self.sleep_time)
# Set server's seq num
self._setServerSeqNum()
# Reset bad response count and sync is complete
self.bad_response_count = 0
self.pending_sync = False
# General handler
# Returns true if message could be handled, otherwise false
def _handle(self, msg):
if msg.startswith(b"M") or msg.startswith(b"A"):
self._handleMove(msg)
elif msg.startswith(b"S"):
self._handleScroll(msg)
elif msg.startswith(b"C"):
_, click, updown = msg.decode().split('_')
self._handleClick(click, updown)
elif msg.startswith(b"K"):
_, keypress, updown = msg.decode().split('_')
self._handleKeypress(keypress, updown)
elif msg.startswith(b"T"):
self._handleTypeMessage(msg)
elif msg.startswith(b"BEAT"):
pass # Ignore, out of order heartbeat response
else:
logging.error("{0} Bad response data received. Wrong password?".format(datetime.now()))
self.bad_response_count += 1
if self.bad_response_count > self.max_bad_responses:
self._syncWithServer()
return False
return True
############################################################################
# Encryption Helpers
############################################################################
# Encrypt data, message and iv are byte strings
def _encryptData(self, message, iv):
logging.info(str(datetime.now()).encode() + b" Data SENT: " + message)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
message = self._PKCS5Pad(message)
encrypted = cipher.encrypt(message)
logging.debug(str(datetime.now()).encode() + b" Encrypting data SENT: " + encrypted)
return encrypted
# Decrypt data, encrypted and iv are byte strings
def _decryptData(self, encrypted, iv):
logging.debug(str(datetime.now()).encode() + b" Encrypted data RECEIVED: " + encrypted)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
decrypted = self._PKCS5Unpad(cipher.decrypt(encrypted))
logging.info(str(datetime.now()).encode() + b" Data RECEIVED: " + decrypted)
return decrypted
# Get a new random initialization vector (IV), return byte string
def _getNewIV(self):
return self.rndfile.read(self.iv_bytes)
# Hash password and salt (if provided) into key
# password: string
# salt: byte string
def _getKeyFromPassword(self, password, salt=b""):
sha = hashlib.sha256()
sha.update(password.encode() + salt)
# Only take first 128 bits (16 B)
return sha.digest()[:self.key_bytes]
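# For example (illustrative values): _getKeyFromPassword("hunter2", b"salt")
# hashes b"hunter2" + b"salt" with SHA-256 and keeps the first 16 bytes, so
# the same password and salt always derive the same AES-128 key.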
# PKCS5Padding padder, allows for longer than 16 byte pads by specifying maxPad
def _PKCS5Pad(self, s, maxPad=aes_block_size):
pad_len = maxPad - len(s) % maxPad
# bytes([pad_len]) stays correct even for pad lengths over 127, where chr(pad_len).encode() would emit two UTF-8 bytes
return s + bytes([pad_len]) * pad_len
# PKCS5Padding unpadder
def _PKCS5Unpad(self, s):
# The last byte encodes the pad length; strip that many bytes
return s[:-s[-1]]
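# Worked example (illustrative): with a 16-byte block, padding b"hello" adds
# eleven 0x0b bytes; unpadding reads the final byte (0x0b = 11) and strips
# that many bytes, restoring b"hello".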
################################################################################
# User Input Functions
################################################################################
# Prompt user for server address and port, and validate them
def getServerAddress(default_addr):
last_ip_addr = "ndnMouse-temp.pkl"
# Try to get pickle of last IP address
try:
with open(last_ip_addr, 'rb') as fp:
last_addr = pickle.load(fp)
except IOError:
last_addr = default_addr
addr = pyautogui.prompt(text="Enter server IP address", title="Server Address", default=last_addr)
# Validate address
try:
ipaddress.ip_address(addr)
except ValueError:
pyautogui.alert(text="Address \"{0}\" is not valid!".format(addr), title="Invalid Address", button='Exit')
sys.exit(1)
# Save the last used IP address to pickle file
with open(last_ip_addr, 'wb') as fp:
pickle.dump(addr, fp)
return addr
# Prompt user for password, and validate it
def getPassword():
password = pyautogui.password(text="Enter the server's password (optional)", title="Password", mask='*')
# if not password:
# pyautogui.alert(text="Password should not be empty!", title="Invalid Password", button='Exit')
# sys.exit(1)
return password
################################################################################
# NFD Functions
################################################################################
# Checks if NFD is running
def NFDIsRunning():
process = subprocess.Popen(["nfd-status"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if err.startswith(b"error"):
return False
else:
return True
# Setup NFD's route to the phone server's NFD
def setupNFD(addr):
process = subprocess.Popen(["nfdc", "register", "/ndnmouse", "udp://{0}".format(addr)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if out.startswith(b"Successful"):
return True
else:
return False
################################################################################
# Helper Functions
################################################################################
# Takes signed integer and transforms to byte string (truncating if necessary)
def intToBytes(x):
try:
return x.to_bytes(4, 'big', signed=True)
except OverflowError:
# Wrap into the non-negative signed 32-bit range (2**31) so it fits in 4 bytes
x %= 2147483648
return x.to_bytes(4, 'big', signed=True)
# Takes byte string and transforms to signed integer
def intFromBytes(xbytes):
return int.from_bytes(xbytes, 'big', signed=True)
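# For example, intToBytes(-2) == b'\xff\xff\xff\xfe' and
# intFromBytes(b'\xff\xff\xff\xfe') == -2 (big-endian, two's complement).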
# Strip off script name in arg list
if __name__ == "__main__":
main(sys.argv[1:])
| |
import base64, simplejson, time, datetime
json = simplejson
# Static text.
TEXT_NAME = 'TV-Headend Next Generation'
TEXT_TITLE = 'TV-Headend'
# Image resources.
ICON_DEFAULT = 'icon-default.png'
ART_DEFAULT = 'art-default.jpg'
ICON_ALLCHANS = R('icon-allchans.png')
ICON_BOUQUETS = R('icon-bouquets.png')
# Other definitions.
PLUGIN_PREFIX = '/video/tvheadend-ng'
debug = True
debug_epg = False
# MINIMUM API version to run this channel.
req_api_version = 15
# themovieDB
BASE_URL = None
SIZES = None
debug_db = False
####################################################################################################
def Start():
ObjectContainer.art = R(ART_DEFAULT)
HTTP.CacheTime = 1
####################################################################################################
@handler(PLUGIN_PREFIX, TEXT_TITLE, ICON_DEFAULT, ART_DEFAULT)
def MainMenu():
oc = ObjectContainer(no_cache=True)
result = checkConfig()
if result['status'] == True:
if debug == True: Log("Configuration OK!")
oc.title1 = TEXT_TITLE
oc.header = None
oc.message = None
if Prefs['tvheadend_allchans'] != False:
oc.add(DirectoryObject(key=Callback(getChannels, title=L('allchans')), title=L('allchans'), thumb=ICON_ALLCHANS))
if Prefs['tvheadend_tagchans'] != False:
oc.add(DirectoryObject(key=Callback(getChannelsByTag, title=L('tagchans')), title=L('tagchans'), thumb=ICON_BOUQUETS))
if Prefs['tvheadend_recordings'] != False:
oc.add(DirectoryObject(key=Callback(getRecordings, title=L('recordings')), title=L('recordings'), thumb=ICON_BOUQUETS))
oc.add(PrefsObject(title=L('preferences')))
else:
if debug == True: Log("Configuration error! Displaying error message: " + result['message'])
oc.title1 = None
oc.header = L('header_attention')
oc.message = result['message']
oc.add(PrefsObject(title=L('preferences')))
return oc
####################################################################################################
@route(PLUGIN_PREFIX + '/ValidatePrefs')
def ValidatePrefs():
return True
@route(PLUGIN_PREFIX + '/checkConfig')
def checkConfig():
global req_api_version
result = {
'status':False,
'message':''
}
if Prefs['tvheadend_user'] != "" and Prefs['tvheadend_pass'] != "" and Prefs['tvheadend_host'] != "" and Prefs['tvheadend_web_port'] != "" and Prefs['tvheadend_user'] != None and Prefs['tvheadend_pass'] != None and Prefs['tvheadend_host'] != None and Prefs['tvheadend_web_port'] != None:
# To validate the tvheadend connection and api version.
json_data = getTVHeadendJson('getServerVersion', '')
if json_data != False:
# if debug == True: Log("Server running API version: " + json_data['api_version'])
if json_data['api_version'] >= req_api_version:
result['status'] = True
result['message'] = ''
return result
else:
result['status'] = False
result['message'] = L('error_api_version')
return result
else:
result['status'] = False
result['message'] = L('error_connection')
return result
else:
if Prefs['tvheadend_user'] == "" or Prefs['tvheadend_pass'] == "" or Prefs['tvheadend_user'] == None or Prefs['tvheadend_pass'] == None:
result['status'] = False
result['message'] = L('error_no_anonymous')
return result
else:
result['status'] = False
result['message'] = L('error_connection')
return result
@route(PLUGIN_PREFIX + '/getTVHeadendJson')
def getTVHeadendJson(apirequest, arg1):
if debug == True: Log("JSON-Request: " + apirequest)
api = dict(
getChannelGrid='api/channel/grid?start=0&limit=999999',
getEpgGrid='api/epg/events/grid?start=0&limit=2000',
getIdNode='api/idnode/load?uuid=' + arg1,
getServiceGrid='api/mpegts/service/grid?start=0&limit=999999',
getMuxGrid='api/mpegts/mux/grid?start=0&limit=999999',
getChannelTags='api/channeltag/grid?start=0&limit=999999',
getServerVersion='api/serverinfo',
getRecordings='api/dvr/entry/grid_finished'
)
try:
url = 'http://%s:%s%s%s' % (Prefs['tvheadend_host'], Prefs['tvheadend_web_port'], Prefs['tvheadend_web_rootpath'], api[apirequest])
authstring = base64.encodestring('%s:%s' % (Prefs['tvheadend_user'], Prefs['tvheadend_pass'])).replace('\n', '')
headers = dict()
headers['Authorization'] = "Basic %s" % (authstring)
json_data = ""
try:
json_data = JSON.ObjectFromURL(encoding='utf-8', url=url, headers=headers, values=None)
except:
try:
json_data = JSON.ObjectFromURL(url=url, headers=headers, values=None)
except:
raise Exception("JSON encoding error")
except Exception, e:
if debug == True: Log("JSON-Request failed: " + str(e))
return False
if debug == True: Log("JSON-Request successfull!")
return json_data
####################################################################################################
@route(PLUGIN_PREFIX + '/getEPG')
def getEPG():
json_data = getTVHeadendJson('getEpgGrid','')
if json_data != False:
if debug_epg == True: Log("Got EPG: " + json.dumps(json_data))
else:
if debug_epg == True: Log("Failed to fetch EPG!")
return json_data
@route(PLUGIN_PREFIX + '/getServices')
def getServices():
json_data = getTVHeadendJson('getServiceGrid','')
if json_data == False:
if debug == True: Log("Failed to fetch DVB services!")
return json_data
@route(PLUGIN_PREFIX + '/getChannelInfo')
def getChannelInfo(uuid, services, json_epg, json_services):
result = {
'service_encrypted':None,
'service_type':'',
'epg_title':'',
'epg_description':'',
'epg_duration':0,
'epg_start':0,
'epg_stop':0,
'epg_summary':'',
}
if debug == True: Log("Merging channel informations (EPG and services) for channel uuid: " + uuid)
if json_epg == False: Log("No EPG informations available for channel uuid: " + uuid)
# Get dvb informations.
for service in json_services['entries']:
if service['uuid'] == services[0]:
result['service_type'] = str(service['dvb_servicetype'])
result['service_encrypted'] = service['encrypted']
# Check if we have data within the json_epg object.
if json_epg != False and json_epg.get('entries'):
if debug == True: Log("Looking up EPG informations for channel uuid: " + uuid)
for epg in json_epg['entries']:
if epg['channelUuid'] == uuid and time.time() > int(epg['start']) and time.time() < int(epg['stop']):
if debug == True: Log("Found EPG informations for channel uuid: " + uuid)
if epg.get('title'):
result['epg_title'] = epg['title']
if epg.get('description'):
result['epg_description'] = epg['description']
if epg.get('start'):
result['epg_start'] = time.strftime("%H:%M", time.localtime(int(epg['start'])))
if epg.get('stop'):
result['epg_stop'] = time.strftime("%H:%M", time.localtime(int(epg['stop'])))
if epg.get('start') and epg.get('stop'):
# Plex expects durations in milliseconds
result['epg_duration'] = (epg.get('stop')-epg.get('start'))*1000
return result
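# Illustrative, framework-free sketch (not part of the original plugin): the
# "currently running" test used in getChannelInfo above. An EPG entry matches
# when the current time falls between its start and stop timestamps. The
# helper name is hypothetical.
def _epg_entry_is_current(epg_entry, now=None):
    now = time.time() if now is None else now
    return int(epg_entry['start']) < now < int(epg_entry['stop'])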
####################################################################################################
@route(PLUGIN_PREFIX + '/getChannelsByTag')
def getChannelsByTag(title):
json_data = getTVHeadendJson('getChannelTags', '')
tagList = ObjectContainer(no_cache=True)
if json_data != False:
tagList.title1 = L('tagchans')
tagList.header = None
tagList.message = None
for tag in sorted(json_data['entries'], key=lambda t: t['name']):
if tag['internal'] == False:
if debug == True: Log("Getting channellist for tag: " + tag['name'])
tagList.add(DirectoryObject(key=Callback(getChannels, title=tag['name'], tag=tag['uuid']), title=tag['name']))
else:
if debug == True: Log("Could not create tagelist! Showing error.")
tagList.title1 = None
tagList.header = L('error')
tagList.message = L('error_request_failed')
if debug == True: Log("Count of configured tags within TV-Headend: " + str(len(tagList)))
if ( len(tagList) == 0 ):
tagList.header = L('attention')
tagList.message = L('error_no_tags')
return tagList
#@route(PLUGIN_PREFIX + '/getChannels', tag=int, t=int)
def getChannels(title, tag=int(0)):
json_data = getTVHeadendJson('getChannelGrid', '')
json_epg = getEPG()
json_services = getServices()
channelList = ObjectContainer(no_cache=True)
if json_data != False and json_epg != False and json_services != False:
channelList.title1 = title
channelList.header = None
channelList.message = None
for channel in sorted(json_data['entries'], key=lambda t: float(t['number'])):
if tag > 0:
tags = channel['tags']
for tids in tags:
if (tag == tids):
if debug == True: Log("Got channel with tag: " + channel['name'])
chaninfo = getChannelInfo(channel['uuid'], channel['services'], json_epg, json_services)
channelList.add(createTVChannelObject(
channel['name'],
channel['uuid'],
channel['icon_public_url'],
chaninfo['epg_start'],
chaninfo['epg_stop'],
chaninfo['epg_duration'],
chaninfo['epg_title'],
chaninfo['epg_description'],
chaninfo['service_type'],
))
else:
chaninfo = getChannelInfo(channel['uuid'], channel['services'], json_epg, json_services)
channelList.add(createTVChannelObject(
channel['name'],
channel['uuid'],
channel['icon_public_url'],
chaninfo['epg_start'],
chaninfo['epg_stop'],
chaninfo['epg_duration'],
chaninfo['epg_title'],
chaninfo['epg_description'],
chaninfo['service_type'],
))
else:
if debug == True: Log("Could not create channellist! Showing error.")
channelList.title1 = None
channelList.header = L('error')
channelList.message = L('error_request_failed')
return channelList
@route(PLUGIN_PREFIX + '/getRecordings')
def getRecordings(title):
json_data = getTVHeadendJson('getRecordings', '')
recordingsList = ObjectContainer(no_cache=True)
if json_data != False:
recordingsList.title1 = L('recordings')
recordingsList.header = None
recordingsList.message = None
for recording in sorted(json_data['entries'], key=lambda t: t['title']):
if debug == True: Log("Got recording with title: " + str(recording['title']))
recordingsList.add(createRecordingObject(recording, Client.Product, Client.Platform))
else:
if debug == True: Log("Could not create recordings list! Showing error.")
recordingsList.title1 = None
recordingsList.header = L('error')
recordingsList.message = L('error_request_failed')
if debug == True: Log("Count of recordings within TV-Headend: " + str(len(recordingsList)))
if ( len(recordingsList) == 0 ):
recordingsList.header = L('attention')
recordingsList.message = L('error_no_recordings')
return recordingsList
####################################################################################################
@route(PLUGIN_PREFIX + '/PlayMedia')
def PlayMedia(url):
Log("Redirecting Client to " + str(url) )
return Redirect(url)
@route(PLUGIN_PREFIX + '/createMediaContainer')
def createMediaContainer(mctype, args):
mco = None
if debug == True: Log("Building VideoClip object")
if mctype == 'videoclip':
mco = VideoClipObject(
key = args['key'],
rating_key = args['rating_key'],
title = args['title'],
summary = args['summary'],
duration = args['duration'],
thumb = args['thumb'],
art = args['art'],
source_title = 'TVHeadend',
)
if debug == True: Log("Building AudioTrack object")
if mctype == 'audiotrack':
mco = TrackObject(
key = args['key'],
rating_key = args['rating_key'],
title = args['title'],
summary = args['summary'],
duration = args['duration'],
thumb = args['thumb'],
art = args['art'],
artist = args['artist'],
album = args['album'],
source_title = 'TVHeadend'
)
stream_defined = False
# Decide if we have to stream for native streaming devices or if we have to transcode the content.
if (Prefs['tvheadend_mpegts_passthrough'] == True) or (stream_defined == False and (args['cproduct'] == "Plex Home Theater" or args['cproduct'] == "PlexConnect")):
mco = addMediaObject(mco, args['url'] + '?profile=pass')
stream_defined = True
# Custom streaming profile for iOS.
if stream_defined == False and (Prefs['tvheadend_custprof_ios'] != None and args['cplatform'] == "iOS"):
mco = addMediaObject(mco, args['url'] + '?profile=' + Prefs['tvheadend_custprof_ios'])
stream_defined = True
# Custom streaming profile for Android.
if stream_defined == False and (Prefs['tvheadend_custprof_android'] != None and args['cplatform'] == "Android"):
mco = addMediaObject(mco, args['url'] + '?profile=' + Prefs['tvheadend_custprof_android'])
stream_defined = True
# Custom default streaming.
if stream_defined == False and (Prefs['tvheadend_custprof_default']):
mco = addMediaObject(mco, args['url'] + '?profile=' + Prefs['tvheadend_custprof_default'])
stream_defined = True
# Default streaming.
if stream_defined == False:
mco = addMediaObject(mco, args['url'])
stream_defined = True
# Log the product and platform which requested a stream.
if args['cproduct'] != None and args['cplatform'] != None:
if debug == True: Log("Created MediaObject for plex product: " + args['cproduct'] + " on " + args['cplatform'])
else:
if debug == True: Log("Created MediaObject for plex product: UNDEFINED")
return mco
@route(PLUGIN_PREFIX + '/addMediaObject')
def addMediaObject(mco, vurl):
media = MediaObject(
optimized_for_streaming = True,
#parts = [PartObject(key = vurl)],
parts = [PartObject(key = Callback(PlayMedia, url=vurl))],
#video_codec = VideoCodec.H264,
#audio_codec = AudioCodec.AAC,
)
mco.add(media)
if debug == True: Log("Creating MediaObject for streaming with URL: " + vurl)
return mco
@route(PLUGIN_PREFIX + '/createTVChannelObject')
def createTVChannelObject(chan_name, chan_id, chan_icon, epg_start, epg_stop, epg_duration, epg_title, epg_description, service_type, container = False):
if debug == True: Log("Creating TVChannelObject. Container: " + str(container))
summary = None
duration = None
# Handle channel icon.
icon = None
try:
if Prefs['tvheadend_channelicons'] == True and chan_icon.startswith('imagecache'):
icon = 'http://%s:%s@%s:%s%s%s' % (Prefs['tvheadend_user'], Prefs['tvheadend_pass'], Prefs['tvheadend_host'], Prefs['tvheadend_web_port'], Prefs['tvheadend_web_rootpath'], chan_icon)
except KeyError:
pass
#themovieDB
banner = None
if Prefs['tvheadend_use_themovieDB']:
tempArt = getArt(epg_title)
if tempArt['poster'] != '':
icon = tempArt['poster']
Log.Info("Setting icon to: " + str(icon))
if tempArt['banner'] != '':
banner = tempArt['banner']
Log.Info("Setting banner to: " + str(banner))
# Add epg data. Otherwise leave the fields blank by default.
if epg_title != "" and epg_start != 0 and epg_stop != 0 and epg_duration != 0:
if container == False:
chan_name = chan_name + " (" + epg_title + ") - (" + epg_start + " - " + epg_stop + ")"
summary = epg_title + "\n\n" + epg_description
if container == True:
summary = epg_title + "\n\n" + epg_description + "\n\n" + epg_start + " - " + epg_stop
duration = epg_duration
# Build streaming url.
url_structure = 'stream/channel'
url = 'http://%s:%s@%s:%s%s%s/%s' % (Prefs['tvheadend_user'], Prefs['tvheadend_pass'], Prefs['tvheadend_host'], Prefs['tvheadend_web_port'], Prefs['tvheadend_web_rootpath'], url_structure, chan_id)
# Create and return MediaContainer.
args = dict()
args['cproduct'] = Client.Product
args['cplatform'] = Client.Platform
args['url'] = url
if service_type != '2':
if debug == True: Log("Creating media object with type: VIDEO")
args['key'] = Callback(
createTVChannelObject,
chan_name = chan_name,
chan_id = chan_id,
chan_icon = chan_icon,
epg_start = epg_start,
epg_stop = epg_stop,
epg_duration = epg_duration,
epg_title = epg_title,
epg_description = epg_description,
service_type = service_type,
container = True
)
args['rating_key'] = chan_id
args['title'] = chan_name
args['summary'] = summary
args['duration'] = duration
args['thumb'] = icon
args['art'] = banner
args['epg_title'] = epg_title
mco = createMediaContainer('videoclip', args)
else:
if debug == True: Log("Creating media object with type: AUDIO")
args['key'] = Callback(
createTVChannelObject,
chan_name = chan_name,
chan_id = chan_id,
chan_icon = chan_icon,
epg_start = epg_start,
epg_stop = epg_stop,
epg_duration = epg_duration,
epg_title = epg_title,
epg_description = epg_description,
service_type = service_type,
container = True
)
args['rating_key'] = chan_id
args['title'] = chan_name
args['summary'] = summary
args['duration'] = duration
args['thumb'] = icon
args['art'] = banner
args['artist'] = ' '
args['album'] = epg_title
mco = createMediaContainer('audiotrack', args)
if container:
return ObjectContainer(objects = [mco])
return mco
def createRecordingObject(recording, cproduct, cplatform, container = False):
if debug == True: Log("Creating RecordingObject. Container: " + str(container))
name = recording['disp_title']
id = recording['uuid']
summary = None
duration = None
# Handle recording icon.
icon = None
if Prefs['tvheadend_channelicons'] == True and recording['channel_icon'].startswith('imagecache'):
icon = 'http://%s:%s@%s:%s%s%s' % (Prefs['tvheadend_user'], Prefs['tvheadend_pass'], Prefs['tvheadend_host'], Prefs['tvheadend_web_port'], Prefs['tvheadend_web_rootpath'], recording['channel_icon'])
#themovieDB
banner = None
if Prefs['tvheadend_use_themovieDB']:
tempArt = getArt(name)
if tempArt['poster'] != '':
icon = tempArt['poster']
Log.Info("Setting icon to: " + str(icon))
if tempArt['banner'] != '':
banner = tempArt['banner']
Log.Info("Setting banner to: " + str(banner))
# Add recording information. Otherwise leave the fields blank by default.
if debug == True: Log("Info for mediaobject: " + str(recording))
if recording['disp_title'] != "" and recording['start'] != 0 and recording['stop'] != 0:
start = datetime.datetime.fromtimestamp(recording['start']).strftime('%d-%m-%Y %H:%M')
stop = datetime.datetime.fromtimestamp(recording['stop']).strftime('%d-%m-%Y %H:%M')
duration = (recording['stop']-recording['start'])*1000
if container == False:
name = name + " (" + start + ")"
summary = recording['disp_subtitle']
if container == True:
summary = recording['disp_subtitle'] + "\n\n" + recording['disp_description'] + "\n\n" + start
# Build streaming url.
url_structure = 'dvrfile'
url = 'http://%s:%s@%s:%s%s%s/%s' % (Prefs['tvheadend_user'], Prefs['tvheadend_pass'], Prefs['tvheadend_host'], Prefs['tvheadend_web_port'], Prefs['tvheadend_web_rootpath'], url_structure, id)
# Create and return MediaContainer.
mco = None
args = dict()
args['cproduct'] = cproduct
args['cplatform'] = cplatform
args['url'] = url
if debug == True: Log("Creating media object with type: VIDEO")
args['key'] = Callback(createRecordingObject, recording = recording, cproduct = cproduct, cplatform = cplatform, container = True)
args['rating_key'] = id
args['title'] = name
args['summary'] = summary
args['duration'] = duration
args['thumb'] = icon
args['art'] = banner
mco = createMediaContainer('videoclip', args)
if container:
return ObjectContainer(objects = [mco])
else:
return mco
return mco
####################################################################################################
def getConfig():
global BASE_URL, SIZES
headers = {
'Accept': 'application/json',
}
URL = 'http://api.themoviedb.org/3/configuration?api_key=%s' % Prefs['tvheadend_themovieDB_key']
try:
config = JSON.ObjectFromURL( URL , headers=headers , values=None )
except:
Log.Warn("Error connecting to themovieDB API")
return
BASE_URL = config['images']['base_url']
SIZES = config['images']['poster_sizes']
return
def searchDB(query):
Log("Searching themovieDB for: " + str(query))
headers = {
'Accept': 'application/json'
}
URL = 'http://api.themoviedb.org/3/search/multi?api_key=%s&query=%s' % (Prefs['tvheadend_themovieDB_key'], String.Quote(query))
try:
return JSON.ObjectFromURL( URL , headers=headers , values=None )
except Exception, e:
Log("Error: failed to get results -> " + str(e))
return
def getArt(show):
global BASE_URL
poster = None
banner = None
API_RESULTS = searchDB(show)
if debug_db == True: print json.dumps(API_RESULTS, indent=4, separators=(',',': '))
if BASE_URL is None:
getConfig()
if API_RESULTS != None and int(API_RESULTS['total_results']) > 0:
for result in API_RESULTS['results']:
try:
if result['name'] == show and ( result['poster_path'] != None or result['backdrop_path'] != None ):
poster = result['poster_path']
banner = result['backdrop_path']
Log.Debug("Found result on themovieDB: { name: " + str(result['name']) + ", poster: " + str(poster) + ", banner: " + str(banner) + " }")
break
except KeyError:
try:
if result['title'] == show and ( result['poster_path'] != 'null' or result['backdrop_path'] != 'null' ):
poster = result['poster_path']
banner = result['backdrop_path']
Log.Debug("Found result on themovieDB: { title: " + str(result['title']) + ", poster: " + str(poster) + ", banner: " + str(banner) + " }")
break
except:
pass
except:
pass
else:
Log.Info("No Results on themovieDB")
return { 'poster': '', 'banner': '' }
if poster != 'null' and poster != None:
poster_url = str(BASE_URL) + 'w342' + str(poster)
else:
poster_url = ''
if banner != 'null' and banner != None:
banner_url = str(BASE_URL) + 'original' + str(banner)
else:
banner_url = ''
return { 'poster': poster_url, 'banner': banner_url }
| |
# python imports
import math
# django imports
from django import forms
from django.db import models
# lfs imports
import lfs
from lfs.payment.settings import PM_ORDER_IMMEDIATELY # NOQA
from lfs.payment.settings import PM_ORDER_ACCEPTED # NOQA
from lfs.payment.settings import PM_MSG_TOP # NOQA
from lfs.payment.settings import PM_MSG_FORM # NOQA
from lfs.order.settings import PAID # NOQA
class OrderNumberGenerator(models.Model):
"""
Base class from which all order number generators should inherit.
**Attributes:**
id
The unique id of the order number generator.
"""
id = models.CharField(primary_key=True, max_length=20)
class Meta:
abstract = True
def init(self, request, order):
"""
Initializes the order number generator. This method is called
automatically from LFS.
"""
self.request = request
self.order = order
self.user = request.user
self.customer = lfs.customer.utils.get_customer(request)
self.cart = lfs.cart.utils.get_cart(request)
def get_next(self, formatted=True):
"""
Returns the next order number as string. Derived classes must implement
this method.
**Parameters:**
formatted
If True the number will be returned within the stored format, which
is based on Python default string formatting operators, e.g.
``%04d``.
"""
raise NotImplementedError
def exclude_form_fields(self):
"""
Returns a list of fields, which are excluded from the model form, see
also ``get_form``.
"""
return ("id", )
def get_form(self, **kwargs):
"""
Returns the form which is used within the shop preferences management
interface.
All parameters are passed to the form.
"""
class OrderNumberGeneratorForm(forms.ModelForm):
class Meta:
model = self.__class__  # ModelForm expects the model class, not an instance
exclude = self.exclude_form_fields()
return OrderNumberGeneratorForm(**kwargs)
class PaymentMethodProcessor(object):
"""
Base class from which all 3rd-party payment method processors should inherit.
**Attributes:**
request
The current request.
cart
The current cart. This is only set, when create order time is ACCEPTED.
order
The current order. This is only set, when create order time is
IMMEDIATELY.
"""
def __init__(self, request, cart=None, order=None):
self.request = request
self.cart = cart
self.order = order
def process(self):
"""
Implements the processing of the payment method. Returns a dictionary
with several status codes, see below.
**Return Values:**
These values are returned within a dictionary.
accepted (mandatory)
Indicates whether the payment is accepted or not. If this is
``False`` the customer stays on the checkout page and ``message``
(if given) is displayed below. If this is ``True`` the customer will
be redirected to ``next_url`` (if given).
message (optional)
This message is displayed on the checkout page, when the order is
not accepted.
message_location (optional)
The location, where the message is displayed.
next_url (optional)
The url to which the user is redirected after the payment has been
processed. If this is not given the customer is redirected to the
default thank-you page.
order_state (optional)
The state in which the order should be set. Currently the only
supported value is PAID. If it's not given the order stays in
SUBMITTED.
"""
raise NotImplementedError
def get_create_order_time(self):
"""
Returns the time when the order should be created. It is one of:
PM_ORDER_IMMEDIATELY
The order is created immediately before the payment is processed.
PM_ORDER_ACCEPTED
The order is created when the payment has been processed and
accepted.
"""
raise NotImplementedError
def get_pay_link(self):
"""
Returns a link to the payment service to pay the current order, which
is displayed on the thank-you page and the order confirmation mail. In
this way the customer can pay the order again if something has gone
wrong.
"""
return None
class PriceCalculator(object):
"""
This is the base class that pricing calculators must inherit from.
**Attributes:**
product
The product for which the price is calculated.
request
The current request.
"""
def __init__(self, request, product, **kwargs):
self.request = request
self.product = product
def get_effective_price(self, amount=1):
""" Effective price is used for sorting and filtering.
Usually it is same as value from get_price but in some cases it might differ (eg. if we add eco tax to
product price)
**Parameters:**
amount
The amount of products for which the price is calculated.
"""
return self.get_price(amount)
def get_price(self, with_properties=True, amount=1):
"""
Returns the stored price of the product without any tax calculations.
It takes variants, properties and sale prices into account, though.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
object = self.product
if object.is_product_with_variants() and object.get_default_variant():
object = object.get_default_variant()
if object.get_for_sale():
if object.is_variant() and not object.active_for_sale_price:
price = object.parent.get_for_sale_price(self.request, with_properties)
else:
price = object.get_for_sale_price(self.request, with_properties)
else:
if object.is_variant() and not object.active_price:
price = object.parent.price
else:
price = object.price
return price
def get_price_net(self, with_properties=True, amount=1):
"""
Returns the net price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_price_gross(self, with_properties=True, amount=1):
"""
Returns the real gross price of the product. This is the base of
all price and tax calculations.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_standard_price(self, with_properties=True, amount=1):
"""
Always returns the stored standard price for the product, independent of
whether the product is for sale or not. If you want the real price of
the product use ``get_price`` instead.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
object = self.product
if object.is_product_with_variants() and object.get_default_variant():
object = object.get_default_variant()
if object.is_variant() and not object.active_price:
object = object.parent
price = object.price
if with_properties and object.is_configurable_product():
price += object.get_default_properties_price()
return price
def get_standard_price_net(self, with_properties=True, amount=1):
"""
Returns always the standard net price for the product. Independent
whether the product is for sale or not. If you want the real net price
of the product use ``get_price_net`` instead.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_standard_price_gross(self, with_properties=True, amount=1):
"""
Always returns the gross standard price for the product, independent of
whether the product is for sale or not. If you want the real gross
price of the product use ``get_price_gross`` instead.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_for_sale_price(self, with_properties=True, amount=1):
"""
Returns the sale price for the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
object = self.product
if object.is_product_with_variants() and object.get_default_variant():
object = object.get_default_variant()
if object.is_variant() and not object.active_for_sale_price:
object = object.parent
price = object.for_sale_price
if with_properties and object.is_configurable_product():
price += object.get_default_properties_price()
return price
def get_for_sale_price_net(self, with_properties=True, amount=1):
"""
Returns the sale net price for the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_for_sale_price_gross(self, with_properties=True, amount=1):
"""
Returns the sale gross price for the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
raise NotImplementedError
def get_base_price(self, with_properties=True, amount=1):
"""
Returns the base price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
try:
return self.get_price(with_properties, amount) / self.product.get_base_price_amount()
except (TypeError, ZeroDivisionError):
return 0.0
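# Worked example (illustrative values): a price of 10.0 with a base price
# amount of 4 (e.g. a four-pack) yields a base price of 10.0 / 4 = 2.5 per
# base unit.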
def get_base_price_net(self, with_properties=True, amount=1):
"""
Returns the net base price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
try:
return self.get_price_net(with_properties, amount) / self.product.get_base_price_amount()
except (TypeError, ZeroDivisionError):
return 0.0
def get_base_price_gross(self, with_properties=True, amount=1):
"""
Returns the gross base price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
try:
return self.get_price_gross(with_properties, amount) / self.product.get_base_price_amount()
except (TypeError, ZeroDivisionError):
return 0.0
def get_base_packing_price(self, with_properties=True, amount=1):
"""
Returns the base packing price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
return self.get_price(with_properties, amount) * self._calc_packing_amount()
def get_base_packing_price_net(self, with_properties=True, amount=1):
"""
Returns the base packing net price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
return self.get_price_net(with_properties, amount) * self._calc_packing_amount()
def get_base_packing_price_gross(self, with_properties=True, amount=1):
"""
Returns the base packing gross price of the product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the prices of the default properties are added to the price.
amount
The amount of products for which the price is calculated.
"""
return self.get_price_gross(with_properties, amount) * self._calc_packing_amount()
def get_customer_tax_rate(self):
"""
Returns the tax rate for the current customer and product.
"""
from lfs.customer_tax.utils import get_customer_tax_rate
return get_customer_tax_rate(self.request, self.product)
def get_customer_tax(self, with_properties=True, amount=1):
"""
Returns the calculated tax for the current customer and product.
**Parameters:**
with_properties
If the instance is a configurable product and with_properties is
True the taxes of the default properties are added to the price.
amount
The amount of products for which the tax is calculated.
"""
return self.get_price_gross(with_properties, amount) - self.get_price_net(with_properties, amount)
def get_product_tax_rate(self):
"""
Returns the stored tax rate of the product. If the product is a variant
it returns the parent's tax rate.
"""
from django.core.cache import cache
if self.product.is_variant():
obj = self.product.parent
else:
obj = self.product
if obj.tax_id:
cache_key = u'tax_rate_{}'.format(obj.tax_id)
tax_rate = cache.get(cache_key)
if tax_rate is None:
tax_rate = obj.tax.rate
cache.set(cache_key, tax_rate)
return tax_rate
return 0.0
def get_product_tax(self, with_properties=True):
"""
Returns the calculated tax for the current product independent of the
customer.
"""
raise NotImplementedError
def price_includes_tax(self):
"""
Returns True if stored price includes tax. False if not.
"""
raise NotImplementedError
def _calc_product_tax_rate(self):
"""
Returns the default tax rate for the product.
"""
tax_rate = self.get_product_tax_rate()
return ((tax_rate + 100.0) / 100.0)
def _calc_customer_tax_rate(self):
"""
Returns the tax rate for the current customer.
"""
return (self.get_customer_tax_rate() + 100.0) / 100.0
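# Worked example (illustrative): a tax rate of 19.0 yields the multiplier
# (19.0 + 100.0) / 100.0 = 1.19, i.e. gross = net * 1.19.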
def _calc_packing_amount(self):
packing_amount, packing_unit = self.product.get_packing_info()
if not packing_amount:
return 1
packs = math.ceil(1 / packing_amount)
return packs * packing_amount
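# Worked example (illustrative): with packing_amount = 0.25 this returns
# ceil(1 / 0.25) * 0.25 = 1.0, the smallest whole number of packs covering
# one unit; with packing_amount = 6 it returns ceil(1 / 6) * 6 = 6.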
class ShippingMethodPriceCalculator(object):
"""
Base class from which all 3rd-party shipping method prices should inherit.
**Attributes:**
request
The current request.
shipping_method
The shipping method for which the price is calculated.
"""
def __init__(self, request, shipping_method):
self.shipping_method = shipping_method
self.request = request
def get_tax_rate(self):
from lfs.criteria.utils import get_first_valid
from lfs.customer_tax.models import CustomerTax
from django.core.cache import cache
customer_tax = get_first_valid(self.request, CustomerTax.objects.all(), self.shipping_method)
if customer_tax:
return customer_tax.rate
cache_key = 'shipping_method_tax_{}'.format(self.shipping_method.pk)
tax_rate = cache.get(cache_key)
if tax_rate is None:
if self.shipping_method.tax_id is None:
tax_rate = 0
else:
tax_rate = self.shipping_method.tax.rate
cache.set(cache_key, tax_rate, 60)
return tax_rate
def get_price(self):
"""
Returns the stored price without any calculations.
"""
from lfs.criteria import utils as criteria_utils
price = criteria_utils.get_first_valid(self.request,
self.shipping_method.prices.all())
if price:
return price.price
return self.shipping_method.price
def get_price_net(self):
"""
Returns the net price of the shipping method.
"""
raise NotImplementedError
def get_price_gross(self):
"""
Returns the gross price of the shipping method.
"""
raise NotImplementedError
def get_tax(self):
"""
Returns the total tax of the shipping method.
"""
return self.get_price_gross() - self.get_price_net()
| |
#!/usr/bin/env python
# coding:utf-8
import os
import base64
import time
import re
import thread
import urllib2
import urlparse
import simple_http_server
from xlog import getLogger
xlog = getLogger("gae_proxy")
from config import config
default_pacfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), config.PAC_FILE)
user_pacfile = os.path.join(config.DATA_PATH, config.PAC_FILE)
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
data_root = os.path.join(root_path, 'data')
gae_proxy_listen = "GOAGENT_LISTEN"
pac_listen = "PAC_LISTEN"
def get_serving_pacfile():
if not os.path.isfile(user_pacfile):
serving_pacfile = default_pacfile
else:
serving_pacfile = user_pacfile
return serving_pacfile
def get_opener():
autoproxy = '127.0.0.1:8087'
import ssl
if getattr(ssl, "create_default_context", None):
cafile = os.path.join(data_root, "gae_proxy", "CA.crt")
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile)
https_handler = urllib2.HTTPSHandler(context=context)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}), https_handler)
else:
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}))
return opener
class PacUtil(object):
"""GAEProxy Pac Util"""
@staticmethod
def update_pacfile(filename):
opener = get_opener()
listen_ip = config.LISTEN_IP
autoproxy = gae_proxy_listen
blackhole = pac_listen
default = 'DIRECT'
if config.PAC_ADBLOCK:
try:
xlog.info('try download %r to update_pacfile(%r)', config.PAC_ADBLOCK, filename)
adblock_content = opener.open(config.PAC_ADBLOCK).read()
except Exception as e:
xlog.warn("pac_update download adblock fail:%r", e)
return
try:
xlog.info('try download %r to update_pacfile(%r)', config.PAC_GFWLIST, filename)
pac_content = opener.open(config.PAC_GFWLIST).read()
except Exception as e:
xlog.warn("pac_update download gfwlist fail:%r", e)
return
content = ''
need_update = True
with open(get_serving_pacfile(), 'rb') as fp:
content = fp.read()
try:
placeholder = '// AUTO-GENERATED RULES, DO NOT MODIFY!'
content = content[:content.index(placeholder)+len(placeholder)]
content = re.sub(r'''blackhole\s*=\s*['"]PROXY [\.\w:]+['"]''', 'blackhole = \'PROXY %s\'' % blackhole, content)
content = re.sub(r'''autoproxy\s*=\s*['"]PROXY [\.\w:]+['"]''', 'autoproxy = \'PROXY %s\'' % autoproxy, content)
if content.startswith('//'):
line = '// Proxy Auto-Config file generated by autoproxy2pac, %s\r\n' % time.strftime('%Y-%m-%d %H:%M:%S')
content = line + '\r\n'.join(content.splitlines()[1:])
except ValueError:
need_update = False
try:
if config.PAC_ADBLOCK:
xlog.info('%r downloaded, try convert it with adblock2pac', config.PAC_ADBLOCK)
jsrule = PacUtil.adblock2pac(adblock_content, 'FindProxyForURLByAdblock', blackhole, default)
content += '\r\n' + jsrule + '\r\n'
xlog.info('%r downloaded and parsed', config.PAC_ADBLOCK)
else:
content += '\r\nfunction FindProxyForURLByAdblock(url, host) {return "DIRECT";}\r\n'
except Exception as e:
need_update = False
xlog.exception('update_pacfile failed: %r', e)
return
try:
autoproxy_content = base64.b64decode(pac_content)
xlog.info('%r downloaded, try convert it with autoproxy2pac', config.PAC_GFWLIST)
jsrule = PacUtil.autoproxy2pac(autoproxy_content, 'FindProxyForURLByAutoProxy', autoproxy, default)
content += '\r\n' + jsrule + '\r\n'
xlog.info('%r downloaded and parsed', config.PAC_GFWLIST)
except Exception as e:
need_update = False
xlog.exception('update_pacfile failed: %r', e)
return
if need_update:
with open(user_pacfile, 'wb') as fp:
fp.write(content)
xlog.info('%r successfully updated', user_pacfile)
@staticmethod
def autoproxy2pac(content, func_name='FindProxyForURLByAutoProxy', proxy=gae_proxy_listen, default='DIRECT', indent=4):
"""Autoproxy to Pac, based on https://github.com/iamamac/autoproxy2pac"""
jsLines = []
for line in content.splitlines()[1:]:
if line and not line.startswith("!"):
use_proxy = True
if line.startswith("@@"):
line = line[2:]
use_proxy = False
return_proxy = 'PROXY %s' % proxy if use_proxy else default
if line.startswith('/') and line.endswith('/'):
jsLine = 'if (/%s/i.test(url)) return "%s";' % (line[1:-1], return_proxy)
elif line.startswith('||'):
domain = line[2:].lstrip('.')
if len(jsLines) > 0 and ('host.indexOf(".%s") >= 0' % domain in jsLines[-1] or 'host.indexOf("%s") >= 0' % domain in jsLines[-1]):
jsLines.pop()
jsLine = 'if (dnsDomainIs(host, ".%s") || host == "%s") return "%s";' % (domain, domain, return_proxy)
elif line.startswith('|'):
jsLine = 'if (url.indexOf("%s") == 0) return "%s";' % (line[1:], return_proxy)
elif '*' in line:
jsLine = 'if (shExpMatch(url, "*%s*")) return "%s";' % (line.strip('*'), return_proxy)
elif '/' not in line:
jsLine = 'if (host.indexOf("%s") >= 0) return "%s";' % (line, return_proxy)
else:
jsLine = 'if (url.indexOf("%s") >= 0) return "%s";' % (line, return_proxy)
jsLine = ' ' * indent + jsLine
if use_proxy:
jsLines.append(jsLine)
else:
jsLines.insert(0, jsLine)
function = 'function %s(url, host) {\r\n%s\r\n%sreturn "%s";\r\n}' % (func_name, '\n'.join(jsLines), ' '*indent, default)
return function
@staticmethod
def urlfilter2pac(content, func_name='FindProxyForURLByUrlfilter', proxy='127.0.0.1:8086', default='DIRECT', indent=4):
"""urlfilter.ini to Pac, based on https://github.com/iamamac/autoproxy2pac"""
jsLines = []
for line in content[content.index('[exclude]'):].splitlines()[1:]:
if line and not line.startswith(';'):
use_proxy = True
if line.startswith("@@"):
line = line[2:]
use_proxy = False
return_proxy = 'PROXY %s' % proxy if use_proxy else default
if '*' in line:
jsLine = 'if (shExpMatch(url, "%s")) return "%s";' % (line, return_proxy)
else:
jsLine = 'if (url == "%s") return "%s";' % (line, return_proxy)
jsLine = ' ' * indent + jsLine
if use_proxy:
jsLines.append(jsLine)
else:
jsLines.insert(0, jsLine)
function = 'function %s(url, host) {\r\n%s\r\n%sreturn "%s";\r\n}' % (func_name, '\n'.join(jsLines), ' '*indent, default)
return function
@staticmethod
def adblock2pac(content, func_name='FindProxyForURLByAdblock', proxy='127.0.0.1:8086', default='DIRECT', indent=4):
"""adblock list to Pac, based on https://github.com/iamamac/autoproxy2pac"""
jsLines = []
for line in content.splitlines()[1:]:
if not line or line.startswith('!') or '##' in line or '#@#' in line:
continue
use_proxy = True
use_start = False
use_end = False
use_domain = False
use_postfix = []
if '$' in line:
posfixs = line.split('$')[-1].split(',')
if any('domain' in x for x in posfixs):
continue
if 'image' in posfixs:
use_postfix += ['.jpg', '.gif']
elif 'script' in posfixs:
use_postfix += ['.js']
else:
continue
line = line.split('$')[0]
if line.startswith("@@"):
line = line[2:]
use_proxy = False
if '||' == line[:2]:
line = line[2:]
if '/' not in line:
use_domain = True
else:
if not line.startswith('http://'):
line = 'http://' + line
use_start = True
elif '|' == line[0]:
line = line[1:]
if not line.startswith('http://'):
line = 'http://' + line
use_start = True
if line[-1] in ('^', '|'):
line = line[:-1]
if not use_postfix:
use_end = True
return_proxy = 'PROXY %s' % proxy if use_proxy else default
line = line.replace('^', '*').strip('*')
if use_start and use_end:
if '*' in line:
jsLine = 'if (shExpMatch(url, "%s")) return "%s";' % (line, return_proxy)
else:
jsLine = 'if (url == "%s") return "%s";' % (line, return_proxy)
elif use_start:
if '*' in line:
if use_postfix:
jsCondition = ' || '.join('shExpMatch(url, "%s*%s")' % (line, x) for x in use_postfix)
jsLine = 'if (%s) return "%s";' % (jsCondition, return_proxy)
else:
jsLine = 'if (shExpMatch(url, "%s*")) return "%s";' % (line, return_proxy)
else:
jsLine = 'if (url.indexOf("%s") == 0) return "%s";' % (line, return_proxy)
elif use_domain and use_end:
if '*' in line:
jsLine = 'if (shExpMatch(host, "%s*")) return "%s";' % (line, return_proxy)
else:
jsLine = 'if (host == "%s") return "%s";' % (line, return_proxy)
elif use_domain:
if line.split('/')[0].count('.') <= 1:
if use_postfix:
jsCondition = ' || '.join('shExpMatch(url, "http://*.%s*%s")' % (line, x) for x in use_postfix)
jsLine = 'if (%s) return "%s";' % (jsCondition, return_proxy)
else:
jsLine = 'if (shExpMatch(url, "http://*.%s*")) return "%s";' % (line, return_proxy)
else:
if '*' in line:
if use_postfix:
jsCondition = ' || '.join('shExpMatch(url, "http://%s*%s")' % (line, x) for x in use_postfix)
jsLine = 'if (%s) return "%s";' % (jsCondition, return_proxy)
else:
jsLine = 'if (shExpMatch(url, "http://%s*")) return "%s";' % (line, return_proxy)
else:
if use_postfix:
jsCondition = ' || '.join('shExpMatch(url, "http://%s*%s")' % (line, x) for x in use_postfix)
jsLine = 'if (%s) return "%s";' % (jsCondition, return_proxy)
else:
jsLine = 'if (url.indexOf("http://%s") == 0) return "%s";' % (line, return_proxy)
else:
if use_postfix:
jsCondition = ' || '.join('shExpMatch(url, "*%s*%s")' % (line, x) for x in use_postfix)
jsLine = 'if (%s) return "%s";' % (jsCondition, return_proxy)
else:
jsLine = 'if (shExpMatch(url, "*%s*")) return "%s";' % (line, return_proxy)
jsLine = ' ' * indent + jsLine
if use_proxy:
jsLines.append(jsLine)
else:
jsLines.insert(0, jsLine)
function = 'function %s(url, host) {\r\n%s\r\n%sreturn "%s";\r\n}' % (func_name, '\n'.join(jsLines), ' '*indent, default)
return function
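# Illustrative sketch (not part of the original module): feeding a tiny,
# hypothetical AutoProxy list through the converter above. The first line of
# an AutoProxy file is a header, which autoproxy2pac skips. The helper name
# is hypothetical.
def _demo_autoproxy2pac():
    sample = "[AutoProxy 0.2.9]\n||example.com\n@@||ok.example.com"
    # Yields a FindProxyForURLByAutoProxy function that routes example.com
    # through the proxy and whitelists ok.example.com as DIRECT.
    return PacUtil.autoproxy2pac(sample, proxy="127.0.0.1:8087")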
class PACServerHandler(simple_http_server.HttpServerHandler):
onepixel = b'GIF89a\x01\x00\x01\x00\x80\xff\x00\xc0\xc0\xc0\x00\x00\x00!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
def address_string(self):
return '%s:%s' % self.client_address[:2]
def do_CONNECT(self):
self.wfile.write(b'HTTP/1.1 403\r\nConnection: close\r\n\r\n')
def do_GET(self):
xlog.info('PAC from:%s %s %s ', self.address_string(), self.command, self.path)
path = urlparse.urlparse(self.path).path # '/proxy.pac'
filename = os.path.normpath('./' + path) # proxy.pac
if self.path.startswith(('http://', 'https://')):
data = b'HTTP/1.1 200\r\nCache-Control: max-age=86400\r\nExpires: Sun, 01 Aug 2100 00:00:00 GMT\r\nConnection: close\r\n'
if filename.endswith(('.jpg', '.gif', '.jpeg', '.bmp')):
data += b'Content-Type: image/gif\r\n\r\n' + self.onepixel
else:
data += b'\r\n This is the Pac server, not proxy port, use 8087 as proxy port.'
self.wfile.write(data)
xlog.info('%s "%s %s HTTP/1.1" 200 -', self.address_string(), self.command, self.path)
return
# check for '..', which will leak file
if re.search(r'(\.{2})', self.path) is not None:
self.wfile.write(b'HTTP/1.1 404\r\n\r\n')
xlog.warn('%s %s %s hacking attempt', self.address_string(), self.command, self.path)
return
if filename != 'proxy.pac':
xlog.warn("pac_server GET %s fail", filename)
self.wfile.write(b'HTTP/1.1 404\r\n\r\n')
return
mimetype = 'text/plain'
if self.path.endswith('.pac?flush') or time.time() - os.path.getmtime(get_serving_pacfile()) > config.PAC_EXPIRED:
thread.start_new_thread(PacUtil.update_pacfile, (user_pacfile,))
pac_filename = get_serving_pacfile()
with open(pac_filename, 'rb') as fp:
data = fp.read()
host = self.headers.getheader('Host')
# The Host header may or may not include a port; keep only the hostname
if ":" in host:
host = host.rpartition(":")[0]
gae_proxy_proxy = host + ":" + str(config.LISTEN_PORT)
pac_proxy = host + ":" + str(config.PAC_PORT)
data = data.replace(gae_proxy_listen, gae_proxy_proxy)
data = data.replace(pac_listen, pac_proxy)
self.wfile.write(('HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, len(data))).encode())
self.wfile.write(data)
def send_file(self, filename, mimetype):
with open(filename, 'rb') as fp:
data = fp.read()
if data:
self.wfile.write(('HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, len(data))).encode())
self.wfile.write(data)
| |
import json
import os
import random
import six
import socket
from collections import defaultdict
from contextlib import contextmanager
from coverage.files import PathAliases
from coverage.files import relative_filename
from coverage.files import set_relative_directory
from portalocker import Lock
from smother.python import InvalidPythonFile
from smother.python import PythonFile
def create_path_aliases_from_coverage(coverage):
aliases = PathAliases()
if coverage and coverage.config.paths:
for paths in coverage.config.paths.values():
result = paths[0]
for pattern in paths[1:]:
aliases.add(pattern, result)
return aliases
def get_smother_filename(base_name, parallel_mode):
if parallel_mode:
suffix = "%s.%s.%06d" % (
socket.gethostname(), os.getpid(),
random.randint(0, 999999)
)
base_name += "." + suffix
return base_name
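# For example, with parallel_mode=True a base name of ".smother" becomes
# something like ".smother.myhost.12345.000042" (hostname, pid, random
# suffix), so concurrent test processes never clobber each other's files.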
@contextmanager
def noclose(file):
"""
A "no-op" contextmanager that prevents files from closing.
"""
try:
yield file
finally:
pass
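# Illustrative sketch (not part of the original module, helper name is
# hypothetical): noclose lets one `with` block accept either a path we open
# ourselves or a caller-owned file object, without closing the caller's file.
def _demo_noclose():
    import io
    fh = io.StringIO(u'{"context": {}}')
    with noclose(fh) as f:
        f.read()
    assert not fh.closed  # the caller's file is still open afterwards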
class QueryResult(object):
def __init__(self, contexts):
self.contexts = contexts
def report(self):
print("\n".join(sorted(self.contexts)))
class Smother(object):
def __init__(self, coverage=None):
self.coverage = coverage
self.data = {}
self.aliases = create_path_aliases_from_coverage(self.coverage)
def start(self):
self.coverage.collector.reset()
self.coverage.start()
def save_context(self, label):
self.data[label] = {
key: sorted(map(int, val.keys()))
for key, val in self.coverage.collector.data.items()
}
def write_coverage(self):
# coverage won't write data if it hasn't been started.
self.coverage.start()
self.coverage.stop()
data = {}
for cover in six.itervalues(self.data):
for path, lines in six.iteritems(cover):
data.setdefault(path, {}).update(
{line: None for line in lines}
)
self.coverage.collector.data = data
self.coverage.save()
def write(self, file_or_path, append=False, timeout=10):
"""
Write Smother results to a file.
Parameters
----------
file_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `file_or_path`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
"""
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh)
@classmethod
def load(cls, file_or_path):
if isinstance(file_or_path, six.string_types):
infile = open(file_or_path)
else:
infile = noclose(file_or_path)
with infile as fh:
data = json.load(fh)
result = cls()
result.data = data
return result
@classmethod
def convert_to_relative_paths(cls, smother_obj):
data = defaultdict(lambda: dict())
set_relative_directory()
for ctx, cover in smother_obj.data.items():
for src, lines in cover.items():
src = relative_filename(src)
data[ctx][src] = lines
result = cls()
result.data = dict(data)
return result
def __ior__(self, other):
for ctx, cover in other.data.items():
for src, lines in cover.items():
src = self.aliases.map(src)
old = self.data.setdefault(ctx, {}).setdefault(src, [])
self.data[ctx][src] = sorted(set(old + lines))
return self
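# Illustrative merge (hypothetical data): {"t1": {"a.py": [1, 2]}} |=
# {"t1": {"a.py": [2, 3]}} yields {"t1": {"a.py": [1, 2, 3]}}; per-file
# line lists are unioned and kept sorted.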
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result)
def _invert(self):
"""
Invert coverage data from {test_context: {file: line}}
to {file: {test_context: line}}
"""
result = defaultdict(dict)
for test_context, src_context in six.iteritems(self.data):
for src, lines in six.iteritems(src_context):
result[src][test_context] = lines
return result
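# e.g. {"test_a": {"src.py": [1]}, "test_b": {"src.py": [1, 2]}} inverts to
# {"src.py": {"test_a": [1], "test_b": [1, 2]}}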
def iter_records(self, semantic=False, sort=True):
inverted = self._invert()
for src, coverage in six.iteritems(inverted):
if semantic:
try:
pf = PythonFile(src)
except IOError:
continue
source2test = defaultdict(set)
for test_context, lines in six.iteritems(coverage):
for line in lines:
if semantic:
# coverage line count is 1-based
src_context = pf.context(line)
else:
src_context = "{}:{}".format(src, line)
source2test[src_context].add(test_context)
for src_context in sorted(source2test) if sort else source2test:
test_contexts = source2test[src_context]
if sort:
test_contexts = sorted(test_contexts)
for test_context in test_contexts:
yield src_context, test_context
| |
"""
Tests for `attr._make`.
"""
from __future__ import absolute_import, division, print_function
import pytest
from . import simple_attr
from attr import _config
from attr._compat import PY3
from attr._make import (
Attribute,
NOTHING,
_CountingAttr,
_transform_attrs,
attr,
attributes,
fields,
make_class,
validate,
)
class TestCountingAttr(object):
"""
Tests for `attr`.
"""
def test_returns_Attr(self):
"""
Returns an instance of _CountingAttr.
"""
a = attr()
assert isinstance(a, _CountingAttr)
def make_tc():
class TransformC(object):
z = attr()
y = attr()
x = attr()
a = 42
return TransformC
class TestTransformAttrs(object):
"""
Tests for `_transform_attrs`.
"""
def test_normal(self):
"""
Transforms every `_CountingAttr` and leaves others (a) be.
"""
C = make_tc()
_transform_attrs(C, None)
assert ["z", "y", "x"] == [a.name for a in C.__attrs_attrs__]
def test_empty(self):
"""
No attributes works as expected.
"""
@attributes
class C(object):
pass
_transform_attrs(C, None)
assert () == C.__attrs_attrs__
@pytest.mark.parametrize("attribute", [
"z",
"y",
"x",
])
def test_transforms_to_attribute(self, attribute):
"""
All `_CountingAttr`s are transformed into `Attribute`s.
"""
C = make_tc()
_transform_attrs(C, None)
assert isinstance(getattr(C, attribute), Attribute)
def test_conflicting_defaults(self):
"""
Raises `ValueError` if attributes with defaults are followed by
mandatory attributes.
"""
class C(object):
x = attr(default=None)
y = attr()
with pytest.raises(ValueError) as e:
_transform_attrs(C, None)
assert (
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: Attribute"
"(name='y', default=NOTHING, validator=None, repr=True, "
"cmp=True, hash=True, init=True)",
) == e.value.args
def test_these(self):
"""
If these is passed, use it and ignore body.
"""
class C(object):
y = attr()
_transform_attrs(C, {"x": attr()})
assert (
simple_attr("x"),
) == C.__attrs_attrs__
assert isinstance(C.y, _CountingAttr)
def test_recurse(self):
"""
Collect attributes from all sub-classes.
"""
class A(object):
pass
class C(A):
x = attr()
_transform_attrs(C, None)
class D(C):
y = attr()
_transform_attrs(D, None)
assert (
simple_attr("x"),
simple_attr("y"),
) == D.__attrs_attrs__
class TestAttributes(object):
"""
Tests for the `attributes` class decorator.
"""
@pytest.mark.skipif(PY3, reason="No old-style classes in Py3")
def test_catches_old_style(self):
"""
Raises TypeError on old-style classes.
"""
with pytest.raises(TypeError) as e:
@attributes
class C:
pass
assert ("attrs only works with new-style classes.",) == e.value.args
def test_sets_attrs(self):
"""
Sets the `__attrs_attrs__` class attribute with a list of `Attribute`s.
"""
@attributes
class C(object):
x = attr()
assert "x" == C.__attrs_attrs__[0].name
assert all(isinstance(a, Attribute) for a in C.__attrs_attrs__)
def test_empty(self):
"""
No attributes, no problems.
"""
@attributes
class C3(object):
pass
assert "C3()" == repr(C3())
assert C3() == C3()
@pytest.mark.parametrize("method_name", [
"__repr__",
"__eq__",
"__hash__",
"__init__",
])
def test_adds_all_by_default(self, method_name):
"""
If no further arguments are supplied, all add_XXX functions are
applied.
"""
# Set the method name to a sentinel and check whether it has been
# overwritten afterwards.
sentinel = object()
class C1(object):
x = attr()
setattr(C1, method_name, sentinel)
C1 = attributes(C1)
class C2(object):
x = attr()
setattr(C2, method_name, sentinel)
C2 = attributes(C2)
assert sentinel != getattr(C1, method_name)
assert sentinel != getattr(C2, method_name)
@pytest.mark.parametrize("arg_name, method_name", [
("repr", "__repr__"),
("cmp", "__eq__"),
("hash", "__hash__"),
("init", "__init__"),
])
def test_respects_add_arguments(self, arg_name, method_name):
"""
        If a certain `XXX` argument is `False`, the corresponding `__XXX__`
        method is not added to the class.
"""
# Set the method name to a sentinel and check whether it has been
# overwritten afterwards.
sentinel = object()
am_args = {
"repr": True,
"cmp": True,
"hash": True,
"init": True
}
am_args[arg_name] = False
class C(object):
x = attr()
setattr(C, method_name, sentinel)
C = attributes(**am_args)(C)
assert sentinel == getattr(C, method_name)
@pytest.mark.skipif(not PY3, reason="__qualname__ is PY3-only.")
def test_repr_qualname(self):
"""
On Python 3, the name in repr is the __qualname__.
"""
@attributes
class C(object):
@attributes
class D(object):
pass
assert "C.D()" == repr(C.D())
assert "GC.D()" == repr(GC.D())
def test_repr_fake_qualname(self):
"""
Setting repr_ns overrides a potentially guessed namespace.
"""
@attributes
class C(object):
@attributes(repr_ns="C")
class D(object):
pass
assert "C.D()" == repr(C.D())
@attributes
class GC(object):
@attributes
class D(object):
pass
class TestAttribute(object):
"""
Tests for `Attribute`.
"""
def test_missing_argument(self):
"""
Raises `TypeError` if an Argument is missing.
"""
with pytest.raises(TypeError) as e:
Attribute(default=NOTHING, validator=None)
assert ("Missing argument 'name'.",) == e.value.args
def test_too_many_arguments(self):
"""
Raises `TypeError` if extra arguments are passed.
"""
with pytest.raises(TypeError) as e:
Attribute(name="foo", default=NOTHING,
factory=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
assert ("Too many arguments.",) == e.value.args
class TestMakeClass(object):
"""
Tests for `make_class`.
"""
@pytest.mark.parametrize("ls", [
list,
tuple
])
def test_simple(self, ls):
"""
Passing a list of strings creates attributes with default args.
"""
C1 = make_class("C1", ls(["a", "b"]))
@attributes
class C2(object):
a = attr()
b = attr()
assert C1.__attrs_attrs__ == C2.__attrs_attrs__
def test_dict(self):
"""
Passing a dict of name: _CountingAttr creates an equivalent class.
"""
C1 = make_class("C1", {"a": attr(default=42), "b": attr(default=None)})
@attributes
class C2(object):
a = attr(default=42)
b = attr(default=None)
assert C1.__attrs_attrs__ == C2.__attrs_attrs__
def test_attr_args(self):
"""
attributes_arguments are passed to attributes
"""
C = make_class("C", ["x"], repr=False)
assert repr(C(1)).startswith("<attr._make.C object at 0x")
def test_catches_wrong_attrs_type(self):
"""
Raise `TypeError` if an invalid type for attrs is passed.
"""
with pytest.raises(TypeError) as e:
make_class("C", object())
assert (
"attrs argument must be a dict or a list.",
) == e.value.args
class TestFields(object):
"""
Tests for `fields`.
"""
def test_instance(self, C):
"""
Raises `TypeError` on non-classes.
"""
with pytest.raises(TypeError) as e:
fields(C(1, 2))
assert "Passed object must be a class." == e.value.args[0]
def test_handler_non_attrs_class(self, C):
"""
        Raises `ValueError` if passed a non-``attrs`` class.
"""
with pytest.raises(ValueError) as e:
fields(object)
assert (
"{o!r} is not an attrs-decorated class.".format(o=object)
) == e.value.args[0]
def test_fields(self, C):
"""
        Returns a list of `Attribute`s.
"""
assert all(isinstance(a, Attribute) for a in fields(C))
def test_copies(self, C):
"""
Returns a new list object with new `Attribute` objects.
"""
assert C.__attrs_attrs__ is not fields(C)
assert all(new == original and new is not original
for new, original
in zip(fields(C), C.__attrs_attrs__))
class TestValidate(object):
"""
Tests for `validate`.
"""
def test_success(self):
"""
        If the validator succeeds, nothing gets raised.
"""
C = make_class("C", {"x": attr(validator=lambda *a: None),
"y": attr()})
validate(C(1, 2))
def test_propagates(self):
"""
The exception of the validator is handed through.
"""
def raiser(_, __, value):
if value == 42:
raise FloatingPointError
C = make_class("C", {"x": attr(validator=raiser)})
i = C(1)
i.x = 42
with pytest.raises(FloatingPointError):
validate(i)
def test_run_validators(self):
"""
Setting `_run_validators` to False prevents validators from running.
"""
_config._run_validators = False
obj = object()
def raiser(_, __, ___):
raise Exception(obj)
C = make_class("C", {"x": attr(validator=raiser)})
assert 1 == C(1).x
_config._run_validators = True
with pytest.raises(Exception) as e:
C(1)
assert (obj,) == e.value.args
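# For orientation, a minimal sketch of the API these tests exercise
# (class name and values are illustrative, not from the test suite):
#
#     @attributes
#     class Point(object):
#         x = attr(default=0)
#         y = attr(default=0)
#
#     p = Point(1, 2)           # generated __init__
#     assert p == Point(1, 2)   # generated __eq__ (cmp=True)
#     repr(p)                   # "Point(x=1, y=2)" via generated __repr__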
| |
#!/usr/bin/env python
import argparse
import os
import numpy as np
from os.path import isfile
from os.path import join
import sys, signal
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from time import time
import itertools
from multiprocessing import Pool
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def abspath_to_resource(path):
return os.path.abspath(os.path.join(SCRIPT_DIR, path))
os.environ['GLOG_minloglevel'] = '2' # Hides Caffe's debug printing...
# Dynamically load the correct Caffe from our submodule
def load_caffe():
import imp
caffe_fp, caffe_pathname, caffe_description = imp.find_module('caffe', [abspath_to_resource('../deps/simnets/python')])
try:
return imp.load_module('caffe', caffe_fp, caffe_pathname, caffe_description)
finally:
if caffe_fp:
caffe_fp.close()
caffe = load_caffe()
######### External Code ##########
'''
Code for im2col modified from Standford's CS231n Course. License:
The MIT License (MIT)
Copyright (c) 2015 Andrej Karpathy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
out_height = (H + 2 * padding - field_height) / stride + 1
out_width = (W + 2 * padding - field_width) / stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1, precomputed_indices=None):
""" An implementation of im2col based on some fancy indexing """
x_padded = None
if padding > 0:
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
else:
x_padded = np.copy(x)
if precomputed_indices is None:
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)
else:
k, i, j = precomputed_indices
cols = x_padded[k, i, j]
return cols
######### End of External Code ##########
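# Quick sanity sketch for im2col_indices above (values illustrative). Note this
# variant expects a 3-D (C, H, W) input with padding=0, as used by
# whiten_similarity below: a 1-channel 4x4 image with 2x2 patches and stride 2
# yields 4 patch locations, each flattened to length C*kh*kw = 4:
#
#     x = np.arange(16, dtype=float).reshape(1, 4, 4)
#     cols = im2col_indices(x, 2, 2, padding=0, stride=2)
#     assert cols.shape == (4, 4)   # (C*kh*kw, out_h*out_w)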
def stable_normalize_logspace_mat(X_in):
h,w = np.shape(X_in)
c = np.max(X_in, axis=1)
X_shifted = X_in - np.tile(c,(w,1)).transpose()
exp_X_shifted = np.exp(X_shifted)
sums = np.sum(exp_X_shifted, axis=1)
return exp_X_shifted / (np.tile(sums,(w,1)).transpose())
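# The shift-by-row-max above is the standard numerically stable softmax:
# exp(x - max(x)) never overflows, while a naive np.exp(x) would for large
# logits. Illustrative check (hypothetical values):
#
#     X = np.array([[1000., 1001.], [0., 0.]])
#     P = stable_normalize_logspace_mat(X)   # each row sums to 1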
def get_image_data_and_labels(index_file, get_full_path=True, as_list=True):
if not os.path.exists(index_file):
print 'Error, no index file at path ', index_file
return [],[]
index_file_dir = os.path.dirname(index_file)
data = np.genfromtxt(index_file, dtype='str')
labels = data[:,1].astype(int)
if as_list:
im_data= list(data[:,0])
else:
im_data = data[:,0]
if get_full_path:
im_data_f = [join(index_file_dir,im) for im in im_data ]
if not as_list:
im_data_f = np.array(im_data_f)
else:
im_data_f = im_data
return im_data_f,labels
def hash_bool_array(x):
    h = 0  # Arrays here have fewer than 20 entries, so a plain int suffices (no longs needed)
for i in xrange(x.shape[0]):
h = (h << 1) + x[i]
return h
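# The mask hash packs the boolean array into an int bit by bit, e.g.
# hash_bool_array(np.array([True, False, True])) == 0b101 == 5.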
def bool_value(x):
if x == 'y':
return True
elif x == 'n':
return False
else:
raise ValueError("Bool parameter must be either 'y' or 'n'.")
def init_worker(num_instances, kernel_h, kernel_w, pad, stride, indices, pdfs):
global g_num_instances
g_num_instances = num_instances
global g_kernel_h
g_kernel_h = kernel_h
global g_kernel_w
g_kernel_w = kernel_w
global g_pad
g_pad = pad
global g_stride
g_stride = stride
global g_indices
g_indices = indices
global g_pdfs
g_pdfs = pdfs
signal.signal(signal.SIGINT, signal.SIG_IGN)
np.random.seed(None)
def whiten_similarity(params):
i, processed_data = params
cols = im2col_indices(processed_data, g_kernel_h, g_kernel_w, g_pad, g_stride, g_indices).transpose()
masks = ~np.isnan(cols)
marginal_xs = [None] * cols.shape[0]
hashs = [None] * cols.shape[0]
sim_out = np.zeros((g_num_instances, cols.shape[0]))
for k in xrange(cols.shape[0]):
hashs[k] = hash_bool_array(masks[k])
x = cols[k]
marginal_xs[k] = x[masks[k]]
for j in xrange(g_num_instances):
instance_pdfs = g_pdfs[j]
for k in xrange(cols.shape[0]):
marginal_x = marginal_xs[k]
if marginal_x.shape[0] == 0:
# When marginalizing over all vars the probability is 1 (0 in logspace)
continue
rv = instance_pdfs[hashs[k]]
logprob = rv.logpdf(marginal_x)
sim_out[j, k] = logprob
return (i, sim_out)
class ModelAnalyzer(object):
def __init__(self, net_fn, param_fn, out_layer_name, num_classes, raw_scale=1, is_color=True, use_whitened_similarity=False):
sys.stderr.write('Loading model...\n')
if is_color:
self._channel_swap = (2,1,0)
self._base_net = caffe.Classifier(net_fn,param_fn,channel_swap = self._channel_swap,raw_scale=raw_scale)
else:
self._channel_swap = 0
self._base_net = caffe.Classifier(net_fn,param_fn,raw_scale=raw_scale)
self._out_layer_name = out_layer_name
self._K = num_classes
self._tested = False
self._is_color = is_color
self._use_whitened_similarity = use_whitened_similarity
self._batch_size = self._base_net.blobs['data'].data.shape[0]
if self._use_whitened_similarity:
sys.stderr.write('Processing whitened similarity...\n')
            similarity_index, similarity_layer = next(((i, l) for i, l in enumerate(self._base_net.layers) if l.type == 'Similarity'))
            self._after_similarity_name = self._base_net._layer_names[similarity_index + 1]
            self._similarity_name = self._base_net._layer_names[similarity_index]
self._similarity_output = self._base_net.top_names[self._similarity_name][0]
self._similarity_output_shape = self._base_net.blobs[self._similarity_output].data.shape
self._whitening_name = self._base_net.bottom_names[self._similarity_name][0]
whitening_index = list(self._base_net._layer_names).index(self._whitening_name)
self._last_preprocess_layer_index = whitening_index - 1
self._last_preprocess_layer = self._base_net._layer_names[self._last_preprocess_layer_index]
self._whitening_input = self._base_net.bottom_names[self._whitening_name][0]
self._whitening_input_shape = self._base_net.blobs[self._whitening_input].data.shape
whitening_layer = self._base_net.layers[whitening_index]
self._patch_shape = whitening_layer.blobs[0].data.shape[1:]
W = whitening_layer.blobs[0].data
W = W.reshape((W.shape[0], -1))
b = whitening_layer.blobs[1].data
b = b.reshape((b.shape[0], 1))
# Wy + b = x => Wy = x - b => y = W^-1 (x - b) = W^-1 x - W^-1 b
# y = Bx + c, where x ~ N(mu, sigma).
# Then y ~ N(c + B*mu, B * sigma * B^T)
B = np.linalg.pinv(W)
c = - np.dot(B, b)
mus = np.squeeze(similarity_layer.blobs[0].data).transpose()
new_mus = c + np.dot(B, mus)
new_mus = map(lambda x: x.flatten(), np.split(new_mus, new_mus.shape[1], axis=1))
sigmas = (1.0 / np.squeeze(similarity_layer.blobs[1].data)).transpose()
sigmas = np.split(sigmas, sigmas.shape[1], axis=1) # each sigma in its own array
new_sigmas = map(lambda x: np.dot(B, np.dot(np.diag(x.flatten()), B.transpose())), sigmas)
self._corrected_means = new_mus
self._corrected_covs = new_sigmas
self._num_instances = len(sigmas)
self._pdfs = None
with open(param_fn, 'rb') as f:
net = caffe.proto.caffe_pb2.NetParameter()
net.MergeFromString(f.read())
self._conv_param = next((l for l in net.layer if l.name == self._whitening_name)).convolution_param
if len(self._conv_param.kernel_size) > 0:
self._conv_param.kernel_h = self._conv_param.kernel_size[0]
self._conv_param.kernel_w = self._conv_param.kernel_size[0]
if len(self._conv_param.pad) == 0:
self._conv_param.pad.append(0)
if len(self._conv_param.stride) == 0:
self._conv_param.stride.append(1)
temp_shape = self._whitening_input_shape[1:]
self._im2col_indices = get_im2col_indices(temp_shape,
self._conv_param.kernel_h, self._conv_param.kernel_w, self._conv_param.pad[0], self._conv_param.stride[0])
def classify(self,X,Y, use_normalized=True, mask=None):
if self._use_whitened_similarity:
self.precompute_marginals()
self._pool = Pool(initializer=init_worker, initargs=(self._num_instances,
self._conv_param.kernel_h, self._conv_param.kernel_w, self._conv_param.pad[0],
self._conv_param.stride[0], self._im2col_indices, self._pdfs))
probs, preds = self.collect_probs(X, Y, use_normalized, mask=mask)
self._prob_mat = probs
self._Y_hat = preds
self._Y = Y
self._tested = True
if self._use_whitened_similarity:
self._pool.close()
self._pool.join()
self._pool = None
self._pdfs = None
def precompute_marginals(self):
sys.stderr.write('Precomputing marginals...\n')
self._pdfs = [None] * self._num_instances
# precomputing all possible marginals
for i in xrange(self._num_instances):
mean = self._corrected_means[i]
cov = self._corrected_covs[i]
self._pdfs[i] = [None] * (2 ** mean.shape[0])
for marginal_pattern in itertools.product([False, True], repeat=mean.shape[0]):
marginal_length = marginal_pattern.count(True)
if marginal_length == 0:
continue
m = np.array(marginal_pattern)
marginal_mean = mean[m]
mm = m[:, np.newaxis]
marginal_cov = cov[np.dot(mm, mm.transpose())].reshape((marginal_length, marginal_length))
self._pdfs[i][hash_bool_array(m)] = multivariate_normal(mean=marginal_mean, cov=marginal_cov)
def batch_get_probs(self, use_normalized=True):
out = None
start_time = time()
if not self._use_whitened_similarity:
out = self._base_net.forward()
else:
self._base_net._forward(0, self._last_preprocess_layer_index)
processed_data = self._base_net.blobs[self._whitening_input].data
sim_out = np.zeros(self._similarity_output_shape).reshape((self._batch_size, self._num_instances, -1))
results = self._pool.imap_unordered(whiten_similarity,
[(i, processed_data[i]) for i in xrange(processed_data.shape[0])], chunksize=2)
for i, (img_index, res) in enumerate(results):
sys.stderr.write('evaluating image: %d \r' % (i + 1))
sys.stdout.flush()
sim_out[img_index] = res
sys.stderr.write('\n')
sim_out = sim_out.reshape(self._similarity_output_shape)
self._base_net.blobs[self._similarity_output].data[...] = sim_out
out = self._base_net.forward(start=self._after_similarity_name)
end_time = time()
sys.stderr.write('Total time for last batch (in sec): %f\n' % (end_time - start_time))
sys.stderr.write('Time per image (in sec): %f\n' % ((end_time - start_time) / self._batch_size))
sys.stderr.write('\n')
acts = self._base_net.blobs[self._out_layer_name].data
acts = acts[:,:,0,0]
if use_normalized:
return acts
else:
return stable_normalize_logspace_mat(acts)
"""
Collect channel assignment probabilities of dataset samples.
** Assuming batch size divides dataset size **
"""
def collect_probs(self, X, Y, use_normalized=True, mask=None):
batch_size = self._base_net.blobs['data'].data.shape[0]
dataset_size = len(Y)
        num_batches = dataset_size / batch_size
        all_probs = np.zeros((dataset_size, self._K))
for j in range(num_batches):
start = j*batch_size
sys.stderr.write('Testing on batch %d of %d\n' % (j + 1, num_batches))
batch_data = map(lambda x: self._base_net.transformer.preprocess('data',caffe.io.load_image(x, color=self._is_color)), X[start:(start+batch_size)])
batch_data = np.stack(batch_data)
if mask:
batch_masks = np.stack(map(lambda x: caffe.io.load_image(x, color=self._is_color).transpose((2,0,1)), mask[start:(start+batch_size)]))
if self._is_color:
batch_masks = batch_masks[:, self._channel_swap, :, :]
chosen = None
m = np.logical_and(batch_masks[:,1,:,:] == 1, batch_masks[:,2,:,:] == 0)
(batch_data[:,1,:,:])[m] = (batch_data[:,2,:,:])[m]
(batch_masks[:,1,:,:])[m] = (batch_masks[:,2,:,:])[m]
m = np.logical_and(batch_masks[:,2,:,:] == 1, batch_masks[:,1,:,:] == 0)
(batch_data[:,2,:,:])[m] = (batch_data[:,1,:,:])[m]
(batch_masks[:,2,:,:])[m] = (batch_masks[:,1,:,:])[m]
batch_data[batch_masks == 1] = np.nan # Missing data will turn into NaN and the rest will stay the same
self._base_net.blobs['data'].data[...] = batch_data
probs = self.batch_get_probs(use_normalized)
all_probs[start:(start+batch_size),:] = probs
labels = np.argmax(all_probs, axis=1).astype(int)
return (all_probs, labels)
def get_classification_hist(self):
if not self._tested:
            print 'You must run the classifier on data first to obtain statistics'
return []
wrong_preds = self._Y[self._Y != self._Y_hat]
wr_hist,bins = np.histogram(wrong_preds,np.arange(self._K+1))
fig = plt.figure(figsize=(5, 4))
ax = fig.add_subplot(1,1,1)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
n_wr_hist = wr_hist.astype(float) / np.sum(wr_hist)
ax.bar(center, n_wr_hist, align='center', width=width)
return n_wr_hist
if __name__ == '__main__':
description = ('Script for analyzing performance of a trained Caffe network')
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"prototxt_path",
help="path to caffe prototxt file. This should be a deploy prototxt file"
" (see Caffe documentation)",
type=str
)
parser.add_argument(
"caffemodel_path",
help="path to caffemodel",
type=str
)
parser.add_argument(
"data_index_file",
help="path to index file of data",
type=str
)
parser.add_argument(
"--missing_data_index_file",
help="path to index file of missing data masks",
type=str
)
parser.add_argument(
"out_layer",
help="name of output layer to be used for classification.",
type=str
)
parser.add_argument(
"--starts_with_whitened_similarity",
help="If true then assumes the model is probabilistic and assumes it begins with conv + similarity. True is represented by 'y', and false otherwise.",
type=bool_value, default=False
)
args = parser.parse_args()
script_dir = os.getcwd()
if not isfile(join(script_dir,args.caffemodel_path)) or not isfile(join(script_dir,args.prototxt_path)):
        print 'Caffemodel/prototxt do not exist at specified path. Exiting...'
exit(-1)
if not isfile(join(script_dir,args.data_index_file)):
        print 'Data index file does not exist at specified path. Exiting...'
exit(-1)
if args.missing_data_index_file and len(args.missing_data_index_file) > 0 and not isfile(join(script_dir,args.missing_data_index_file)):
        print 'Missing data index file does not exist at specified path. Exiting...'
exit(-1)
caffe.set_device(0)
caffe.set_mode_gpu()
# Example use
# Load validation and test data
X, Y = get_image_data_and_labels(args.data_index_file)
M = None
if args.missing_data_index_file and len(args.missing_data_index_file) > 0:
M, _ = get_image_data_and_labels(args.missing_data_index_file)
# Load pre-trained network
net_fn = args.prototxt_path
param_fn = args.caffemodel_path
net_w = ModelAnalyzer(net_fn, param_fn, args.out_layer, np.max(Y)+1, use_whitened_similarity=args.starts_with_whitened_similarity)
net_w.classify(X, Y, mask=M)
acc = float(np.sum(net_w._Y_hat == net_w._Y)) / len(net_w._Y)
print 'Accuracy: %f ' % (acc)
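# Example invocation (script and file names illustrative; out_layer must name
# a layer in the deploy prototxt):
#   python analyze_model.py deploy.prototxt model.caffemodel index.txt prob \
#       --starts_with_whitened_similarity y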
| |
# Copied from croniter
# https://github.com/taichino/croniter
# Licensed under MIT license
# Pyflakes warnings corrected
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from time import time, mktime
from datetime import datetime
from dateutil.relativedelta import relativedelta
search_re = re.compile(r'^([^-]+)-([^-/]+)(/(.*))?$')
only_int_re = re.compile(r'^\d+$')
any_int_re = re.compile(r'^\d+')
star_or_int_re = re.compile(r'^(\d+|\*)$')
__all__ = ('croniter',)
class croniter(object):
RANGES = (
(0, 59),
(0, 23),
(1, 31),
(1, 12),
(0, 6),
(0, 59)
)
DAYS = (
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
)
ALPHACONV = (
{ },
{ },
{ },
{ 'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6,
'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 },
        { 'sun':0, 'mon':1, 'tue':2, 'wed':3, 'thu':4, 'fri':5, 'sat':6 },
{ }
)
LOWMAP = (
{},
{},
{0: 1},
{0: 1},
{7: 0},
{},
)
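    # Column order for the tables above: minute, hour, day-of-month, month,
    # day-of-week, second. LOWMAP folds out-of-range low values, e.g.
    # day-of-month and month 0 -> 1, and weekday 7 -> 0 (Sunday).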
    bad_length = 'Exactly 5 or 6 columns have to be specified for iterator ' \
                 'expression.'
    def __init__(self, expr_format, start_time=None):
        if start_time is None:
            start_time = time()  # evaluated per call, not once at import time
        if isinstance(start_time, datetime):
            start_time = mktime(start_time.timetuple())
        self.cur = start_time
self.exprs = expr_format.split()
if len(self.exprs) != 5 and len(self.exprs) != 6:
raise ValueError(self.bad_length)
expanded = []
for i, expr in enumerate(self.exprs):
e_list = expr.split(',')
res = []
while len(e_list) > 0:
e = e_list.pop()
t = re.sub(r'^\*(/.+)$', r'%d-%d\1' % (self.RANGES[i][0],
self.RANGES[i][1]),
str(e))
m = search_re.search(t)
if m:
(low, high, step) = m.group(1), m.group(2), m.group(4) or 1
if not any_int_re.search(low):
low = self.ALPHACONV[i][low.lower()]
if not any_int_re.search(high):
high = self.ALPHACONV[i][high.lower()]
                    # 0 is a valid bound (e.g. 'sun'), so only compare the range
                    if (int(low) > int(high)
                            or not only_int_re.search(str(step))):
                        raise ValueError("[%s] is not acceptable" % expr_format)
for j in xrange(int(low), int(high)+1):
if j % int(step) == 0:
e_list.append(j)
else:
if not star_or_int_re.search(t):
t = self.ALPHACONV[i][t.lower()]
try:
t = int(t)
                    except ValueError:
pass
if t in self.LOWMAP[i]:
t = self.LOWMAP[i][t]
if t != '*' and (int(t) < self.RANGES[i][0] or
int(t) > self.RANGES[i][1]):
raise ValueError("[%s] is not acceptable, out of range" % expr_format)
res.append(t)
res.sort()
expanded.append(['*'] if (len(res) == 1 and res[0] == '*') else res)
self.expanded = expanded
def get_next(self, ret_type=float):
return self._get_next(ret_type, is_prev=False)
def get_prev(self, ret_type=float):
return self._get_next(ret_type, is_prev=True)
def _get_next(self, ret_type=float, is_prev=False):
expanded = self.expanded[:]
if ret_type not in (float, datetime):
raise TypeError("Invalid ret_type, only 'float' or 'datetime' " \
"is acceptable.")
if expanded[2][0] != '*' and expanded[4][0] != '*':
bak = expanded[4]
expanded[4] = ['*']
t1 = self._calc(self.cur, expanded, is_prev)
expanded[4] = bak
expanded[2] = ['*']
t2 = self._calc(self.cur, expanded, is_prev)
if not is_prev:
result = t1 if t1 < t2 else t2
else:
result = t1 if t1 > t2 else t2
else:
result = self._calc(self.cur, expanded, is_prev)
self.cur = result
if ret_type == datetime:
result = datetime.fromtimestamp(result)
return result
def _calc(self, now, expanded, is_prev):
if is_prev:
nearest_diff_method = self._get_prev_nearest_diff
sign = -1
else:
nearest_diff_method = self._get_next_nearest_diff
sign = 1
        offset = 1 if len(expanded) == 6 else 60
dst = now = datetime.fromtimestamp(now + sign * offset)
day, month, year = dst.day, dst.month, dst.year
current_year = now.year
DAYS = self.DAYS
def proc_month(d):
if expanded[3][0] != '*':
diff_month = nearest_diff_method(d.month, expanded[3], 12)
days = DAYS[month - 1]
                if month == 2 and self.is_leap(year):
days += 1
reset_day = days if is_prev else 1
if diff_month != None and diff_month != 0:
if is_prev:
d += relativedelta(months=diff_month)
else:
d += relativedelta(months=diff_month, day=reset_day,
hour=0, minute=0, second=0)
return True, d
return False, d
def proc_day_of_month(d):
if expanded[2][0] != '*':
days = DAYS[month - 1]
                if month == 2 and self.is_leap(year):
days += 1
diff_day = nearest_diff_method(d.day, expanded[2], days)
if diff_day != None and diff_day != 0:
if is_prev:
d += relativedelta(days=diff_day)
else:
d += relativedelta(days=diff_day, hour=0, minute=0, second=0)
return True, d
return False, d
def proc_day_of_week(d):
if expanded[4][0] != '*':
diff_day_of_week = nearest_diff_method(d.isoweekday() % 7, expanded[4], 7)
if diff_day_of_week != None and diff_day_of_week != 0:
if is_prev:
d += relativedelta(days=diff_day_of_week)
else:
d += relativedelta(days=diff_day_of_week, hour=0, minute=0, second=0)
return True, d
return False, d
def proc_hour(d):
if expanded[1][0] != '*':
diff_hour = nearest_diff_method(d.hour, expanded[1], 24)
if diff_hour != None and diff_hour != 0:
if is_prev:
d += relativedelta(hours = diff_hour)
else:
d += relativedelta(hours = diff_hour, minute=0, second=0)
return True, d
return False, d
def proc_minute(d):
if expanded[0][0] != '*':
diff_min = nearest_diff_method(d.minute, expanded[0], 60)
if diff_min != None and diff_min != 0:
if is_prev:
d += relativedelta(minutes = diff_min)
else:
d += relativedelta(minutes = diff_min, second=0)
return True, d
return False, d
def proc_second(d):
if len(expanded) == 6:
if expanded[5][0] != '*':
diff_sec = nearest_diff_method(d.second, expanded[5], 60)
if diff_sec != None and diff_sec != 0:
d += relativedelta(seconds = diff_sec)
return True, d
else:
d += relativedelta(second = 0)
return False, d
if is_prev:
procs = [proc_second,
proc_minute,
proc_hour,
proc_day_of_week,
proc_day_of_month,
proc_month]
else:
procs = [proc_month,
proc_day_of_month,
proc_day_of_week,
proc_hour,
proc_minute,
proc_second]
while abs(year - current_year) <= 1:
next = False
for proc in procs:
(changed, dst) = proc(dst)
if changed:
next = True
break
if next:
continue
return mktime(dst.timetuple())
raise "failed to find prev date"
def _get_next_nearest(self, x, to_check):
small = [item for item in to_check if item < x]
large = [item for item in to_check if item >= x]
large.extend(small)
return large[0]
def _get_prev_nearest(self, x, to_check):
small = [item for item in to_check if item <= x]
large = [item for item in to_check if item > x]
small.reverse()
large.reverse()
small.extend(large)
return small[0]
def _get_next_nearest_diff(self, x, to_check, range_val):
for i, d in enumerate(to_check):
if d >= x:
return d - x
return to_check[0] - x + range_val
def _get_prev_nearest_diff(self, x, to_check, range_val):
candidates = to_check[:]
candidates.reverse()
for d in candidates:
if d <= x:
return d - x
return (candidates[0]) - x - range_val
def is_leap(self, year):
if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
return True
else:
return False
if __name__ == '__main__':
base = datetime(2010, 1, 25)
itr = croniter('0 0 1 * *', base)
n1 = itr.get_next(datetime)
print n1
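    # Further illustrative calls, following the cron semantics above:
    #   itr.get_next(datetime)                          # -> datetime(2010, 3, 1, 0, 0)
    #   croniter('0 0 1 * *', base).get_prev(datetime)  # -> datetime(2010, 1, 1, 0, 0)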
| |
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import re
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.raw as raw
from mic.pluginbase import ImagerPlugin
class RawPlugin(ImagerPlugin):
name = 'raw'
@classmethod
@cmdln.option("--compress-disk-image", dest="compress_image", type='choice',
choices=("gz", "bz2"), default=None,
help="Same with --compress-image")
@cmdln.option("--compress-image", dest="compress_image", type='choice',
choices=("gz", "bz2"), default = None,
help="Compress all raw images before package")
@cmdln.option("--generate-bmap", action="store_true", default = None,
help="also generate the block map file")
@cmdln.option("--fstab-entry", dest="fstab_entry", type='choice',
choices=("name", "uuid"), default="uuid",
help="Set fstab entry, 'name' means using device names, "
"'uuid' means using filesystem uuid")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create raw image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == "bootstrap":
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
        # Must run after setting configmgr._ksconf, since creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = raw.RawImageCreator(creatoropts, pkgmgr, opts.compress_image,
opts.generate_bmap, opts.fstab_entry)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
images = ["%s-%s.raw" % (creator.name, disk_name)
for disk_name in creator.get_disk_names()]
self.check_image_exists(creator.destdir,
creator.pack_to,
images,
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.generate_bmap()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def do_chroot(cls, target, cmd=[]):
img = target
imgsize = misc.get_file_size(img) * 1024L * 1024L
partedcmd = fs_related.find_binary_path("parted")
disk = fs_related.SparseLoopbackDisk(img, imgsize)
imgmnt = misc.mkdtemp()
imgloop = PartitionedMount(imgmnt, skipformat = True)
imgloop.add_disk('/dev/sdb', disk)
img_fstype = "ext3"
msger.info("Partition Table:")
partnum = []
for line in runner.outs([partedcmd, "-s", img, "print"]).splitlines():
            # do not strip here; keep the raw line for output
if "Number" in line:
msger.raw(line)
if line.strip() and line.strip()[0].isdigit():
partnum.append(line.strip()[0])
msger.raw(line)
rootpart = None
if len(partnum) > 1:
rootpart = msger.choice("please choose root partition", partnum)
        # Check the partitions from the raw disk.
        # If a root partition was chosen, mark it as mounted.
if rootpart:
root_mounted = True
else:
root_mounted = False
partition_mounts = 0
for line in runner.outs([partedcmd,"-s",img,"unit","B","print"]).splitlines():
line = line.strip()
# Lines that start with number are the partitions,
# because parted can be translated we can't refer to any text lines.
if not line or not line[0].isdigit():
continue
            # Some fields have a trailing ',' as a list separator; drop it.
line = line.replace(",","")
# Example of parted output lines that are handled:
# Number Start End Size Type File system Flags
# 1 512B 3400000511B 3400000000B primary
# 2 3400531968B 3656384511B 255852544B primary linux-swap(v1)
# 3 3656384512B 3720347647B 63963136B primary fat16 boot, lba
partition_info = re.split("\s+",line)
size = partition_info[3].split("B")[0]
if len(partition_info) < 6 or partition_info[5] in ["boot"]:
# No filesystem can be found from partition line. Assuming
# btrfs, because that is the only MeeGo fs that parted does
# not recognize properly.
# TODO: Can we make better assumption?
fstype = "btrfs"
elif partition_info[5] in ["ext2","ext3","ext4","btrfs"]:
fstype = partition_info[5]
elif partition_info[5] in ["fat16","fat32"]:
fstype = "vfat"
elif "swap" in partition_info[5]:
fstype = "swap"
else:
raise errors.CreatorError("Could not recognize partition fs type '%s'." % partition_info[5])
if rootpart and rootpart == line[0]:
mountpoint = '/'
elif not root_mounted and fstype in ["ext2","ext3","ext4","btrfs"]:
# TODO: Check that this is actually the valid root partition from /etc/fstab
mountpoint = "/"
root_mounted = True
elif fstype == "swap":
mountpoint = "swap"
else:
                # TODO: Assign better mount points for the rest of the partitions.
partition_mounts += 1
mountpoint = "/media/partition_%d" % partition_mounts
if "boot" in partition_info:
boot = True
else:
boot = False
msger.verbose("Size: %s Bytes, fstype: %s, mountpoint: %s, boot: %s" % (size, fstype, mountpoint, boot))
# TODO: add_partition should take bytes as size parameter.
            imgloop.add_partition(int(size) / 1024 / 1024, "/dev/sdb", mountpoint, fstype=fstype, boot=boot)
try:
imgloop.mount()
except errors.MountError:
imgloop.cleanup()
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", imgmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(imgmnt, None, cmdline)
        except Exception:
raise errors.CreatorError("Failed to chroot to %s." %img)
finally:
chroot.cleanup_after_chroot("img", imgloop, None, imgmnt)
@classmethod
def do_unpack(cls, srcimg):
srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
srcmnt = misc.mkdtemp("srcmnt")
disk = fs_related.SparseLoopbackDisk(srcimg, srcimgsize)
srcloop = PartitionedMount(srcmnt, skipformat = True)
srcloop.add_disk('/dev/sdb', disk)
srcloop.add_partition(srcimgsize/1024/1024, "/dev/sdb", "/", "ext3", boot=False)
try:
srcloop.mount()
except errors.MountError:
srcloop.cleanup()
raise
image = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), "target.img")
args = ['dd', "if=%s" % srcloop.partitions[0]['device'], "of=%s" % image]
msger.info("`dd` image ...")
rc = runner.show(args)
srcloop.cleanup()
shutil.rmtree(os.path.dirname(srcmnt), ignore_errors = True)
if rc != 0:
raise errors.CreatorError("Failed to dd")
else:
return image
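# Example invocation of this plugin, assuming the standard `mic create` entry
# point (ks file name and options illustrative; see the do_create usage string
# above):
#   mic create raw handset.ks --compress-image=gz --generate-bmap --fstab-entry=name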
| |
#
# Class to manage the test directory and prototypes
"""
This class manages the test library directory and databases, primarily by maintaining a "prototype"
of the databases. Tests that require a bundle to operate on can be sped up by copyin the protoitype
rather than creating a new library and building the bundles in it.
For Sqlite libraries, the prototype is held in the /proto directory and copied to the /sqlite directory when
then lirbary is initalized
For postgres libraries, a prototype database is constructed by appending -proto to the end of the name of the
test database. The proto databse is created and populated, and then flagged for use as a template. When a test
library is created, it is constructed with the proto library as its template.
"""
import logging
import os
import unittest
from ambry.util import ensure_dir_exists, memoize, get_logger
from ambry.library import Library
logger = get_logger(__name__, level=logging.INFO, propagate=False)
DEFAULT_ROOT = '/tmp/ambry-test' # Default root for the library roots ( The library root is one level down )
class ProtoLibrary(object):
"""Manage test libraries. Creates a proto library, with pre-built bundles, that can be
copied quickly into a test library, providing bundles to test against"""
def __init__(self, config_path=None):
"""
:param config_path:
:return:
"""
from ambry.run import load_config, update_config, load_accounts
from ambry.util import parse_url_to_dict, unparse_url_dict
self._root = DEFAULT_ROOT
# TODO: Update root from config.
ensure_dir_exists(self._root)
if config_path is None:
import test.support
config_path = os.path.join(os.path.dirname(test.support.__file__), 'test-config')
self.config = load_config(config_path)
self.config.update(load_accounts())
update_config(self.config, use_environ=False)
assert self.config.loaded[0] == config_path + '/config.yaml'
# Populate library and proto DSNs
if os.environ.get('AMBRY_TEST_DB'):
library_dsn = os.environ['AMBRY_TEST_DB']
else:
# Derive from library.database setting.
dsn = self.config.library.database
if dsn.startswith('post'):
# postgres case.
p = parse_url_to_dict(dsn)
parsed_library = dict(p, path=p['path'] )
elif dsn.startswith('sqlite'):
# sqlite case
p = parse_url_to_dict(dsn)
parsed_library = dict(p, path=p['path'] )
library_dsn = unparse_url_dict(parsed_library)
if library_dsn.startswith('post'):
self._db_type = 'postgres'
p = parse_url_to_dict(library_dsn)
parsed_proto = dict(p, path=p['path'] + '-proto')
proto_dsn = unparse_url_dict(parsed_proto)
elif library_dsn.startswith('sqlite'):
self._db_type = 'sqlite'
p = parse_url_to_dict(library_dsn)
parsed_proto = dict(p, path=p['path'] )
proto_dsn = unparse_url_dict(parsed_proto)
else:
raise Exception('Do not know how to process {} database.'.format(library_dsn))
self.proto_dsn = proto_dsn
self.config.library.database = library_dsn
def __str__(self):
return """
root: {}
dsn: {}
proto-dsn: {}
""".format(self._root, self.config.library.database, self.proto_dsn)
def _ensure_exists(self, dir):
"""Ensure the full path to a directory exists. """
if not os.path.exists(dir):
os.makedirs(dir)
def proto_dir(self, *args):
"""Directory where the prototype library is built, and copied from each run """
base = os.path.join(self._root, 'proto')
self._ensure_exists(base)
return os.path.join(base, *args)
def sqlite_dir(self, create=True, *args):
base = os.path.join(self._root, 'sqlite')
if create:
self._ensure_exists(base)
return os.path.join(base, *args)
def pg_dir(self, *args):
base = os.path.join(self._root, 'pg')
self._ensure_exists(base)
return os.path.join(base, *args)
def _create_database(self, pg_dsn=None):
"""Create the database, if it does not exist"""
def import_bundle(self, l, cache_path):
"""Import a test bundle into a library"""
from test import bundle_tests
orig_source = os.path.join(os.path.dirname(bundle_tests.__file__), cache_path)
imported_bundles = l.import_bundles(orig_source, detach=True, force=True)
b = next(b for b in imported_bundles).cast_to_subclass()
b.clean()
b.sync_in(force=True)
return b
def clean_proto(self):
import shutil
shutil.rmtree(self.proto_dir())
def _proto_config(self):
config = self.config.clone()
self.proto_dir() # Make sure it exists
config.library.filesystem_root = self.proto_dir()
config.library.database = self.proto_dsn
return config
def remove(self, ref):
l = Library(self._proto_config())
l.remove(ref)
def build_proto(self):
"""Builds the prototype library, by building or injesting any bundles that don't
exist in it yet. """
from ambry.orm.exc import NotFoundError
l = Library(self._proto_config())
try:
b = l.bundle('ingest.example.com-headerstypes')
except NotFoundError:
b = self.import_bundle(l, 'ingest.example.com/headerstypes')
b.log('Build to: {}'.format(b.build_fs))
b.ingest()
b.close()
try:
b = l.bundle('ingest.example.com-stages')
except NotFoundError:
b = self.import_bundle(l, 'ingest.example.com/stages')
b.ingest()
b.close()
try:
b = l.bundle('ingest.example.com-basic')
except NotFoundError:
b = self.import_bundle(l, 'ingest.example.com/basic')
b.ingest()
b.close()
try:
b = l.bundle('build.example.com-coverage')
except NotFoundError:
b = self.import_bundle(l, 'build.example.com/coverage')
b.ingest()
b.source_schema()
b.schema()
b.build()
b.finalize()
b.close()
try:
b = l.bundle('build.example.com-generators')
except NotFoundError:
b = self.import_bundle(l, 'build.example.com/generators')
b.run()
b.finalize()
b.close()
try:
b = l.bundle('build.example.com-plot')
except NotFoundError:
b = self.import_bundle(l, 'build.example.com/plot')
b.build()
b.finalize()
b.close()
try:
b = l.bundle('build.example.com-casters')
except NotFoundError:
b = self.import_bundle(l, 'build.example.com/casters')
b.ingest()
b.source_schema()
b.schema()
b.build()
b.finalize()
b.close()
try:
b = l.bundle('build.example.com-sql')
except NotFoundError:
b = self.import_bundle(l, 'build.example.com/sql')
b.build(sources=['integers', 'integers2', 'integers3'])
def init_library(self, use_proto=True):
"""Initialize either the sqlite or pg library, based on the DSN """
if self._db_type == 'sqlite':
return self.init_sqlite(use_proto=use_proto)
else:
return self.init_pg(use_proto=use_proto)
def init_sqlite(self, use_proto=True):
import shutil
shutil.rmtree(self.sqlite_dir())
self.config.library.filesystem_root = self.sqlite_dir(create=False)
if use_proto:
self.build_proto()
shutil.copytree(self.proto_dir(), self.sqlite_dir(create=False))
return Library(self.config)
else:
self.sqlite_dir() # Ensure it exists
l = Library(self.config)
l.create()
return l
def init_pg(self, use_proto=True):
if use_proto:
# self.create_pg_template()
# self.build_proto()
self.create_pg(re_create=True)
else:
self.create_pg(re_create=True, template_name='template1')
l = Library(self.config)
l.create()
return l
@memoize
def pg_engine(self, dsn):
"""Return a Sqlalchemy engine for a database, by dsn. The result is cached. """
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
return create_engine(dsn, poolclass=NullPool)
@property
@memoize
def pg_root_engine(self):
"""Return an engine connected to the postgres database, for executing operations on other databases"""
from ambry.util import set_url_part
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
root_dsn = set_url_part(self.config.library.database, path='postgres')
return create_engine(root_dsn, poolclass=NullPool)
def dispose(self):
self.pg_engine(self.config.library.database).dispose()
self.pg_root_engine.dispose()
@classmethod
def postgres_db_exists(cls, db_name, conn):
""" Returns True if database with given name exists in the postgresql. """
from sqlalchemy.sql.expression import text
result = conn\
.execute(
text('SELECT 1 FROM pg_database WHERE datname=:db_name;'), db_name=db_name)\
.fetchall()
return result == [(1,)]
@classmethod
def postgres_extension_installed(cls, extension, conn):
""" Returns True if extension with given name exists in the postgresql. """
from sqlalchemy.sql.expression import text
result = conn\
.execute(
text('SELECT 1 FROM pg_extension WHERE extname=:extension;'), extension=extension)\
.fetchall()
return result == [(1,)]
def drop_pg(self, database_name):
with self.pg_root_engine.connect() as conn:
conn.execute('COMMIT') # we have to close opened transaction.
if self.postgres_db_exists(database_name, conn):
try:
conn.execute('DROP DATABASE "{}";'.format(database_name))
conn.execute('COMMIT;')
except Exception as e:
logger.warn("Failed to drop database '{}': {}".format(database_name, e))
conn.execute('ROLLBACK;')
raise
finally:
conn.close()
else:
logger.warn('Not dropping {}; does not exist'.format(database_name))
conn.close()
def create_pg_template(self, template_name=None):
"""Create the test template database"""
from ambry.util import select_from_url
if template_name is None:
flag_templ = True
template_name = select_from_url(self.proto_dsn, 'path').strip('/')
else:
flag_templ = False
# Create the database
with self.pg_root_engine.connect() as conn:
if self.postgres_db_exists(template_name, conn):
return
conn.execute('COMMIT;') # we have to close opened transaction.
query = 'CREATE DATABASE "{}" OWNER postgres TEMPLATE template1 encoding \'UTF8\';' \
.format(template_name)
conn.execute(query)
if flag_templ:
conn.execute("UPDATE pg_database SET datistemplate = TRUE WHERE datname = '{}';"
.format(template_name))
conn.execute('COMMIT;')
conn.close()
# Create the extensions, if they aren't already installed
with self.pg_engine(self.proto_dsn).connect() as conn:
conn.execute('CREATE EXTENSION IF NOT EXISTS pg_trgm;')
# Prevents error: operator class "gist_trgm_ops" does not exist for access method "gist"
conn.execute('alter extension pg_trgm set schema pg_catalog;')
conn.execute('CREATE EXTENSION IF NOT EXISTS multicorn;')
conn.execute('COMMIT;')
conn.close()
def create_pg(self, re_create=False, template_name=None):
from ambry.util import select_from_url
database_name = select_from_url(self.config.library.database, 'path').strip('/')
if template_name is None:
template_name = select_from_url(self.proto_dsn, 'path').strip('/')
load_extensions = False # They are already in template
else:
load_extensions = True
username = select_from_url(self.config.library.database, 'username')
if re_create:
self.drop_pg(database_name)
with self.pg_root_engine.connect() as conn:
conn.execute('COMMIT;') # we have to close opened transaction.
query = 'CREATE DATABASE "{}" OWNER "{}" TEMPLATE "{}" encoding \'UTF8\';' \
.format(database_name, username, template_name)
conn.execute(query)
conn.close()
# Create the extensions, if they aren't already installed
if load_extensions:
with self.pg_engine(self.config.library.database).connect() as conn:
conn.execute('CREATE EXTENSION IF NOT EXISTS pg_trgm;')
# Prevents error: operator class "gist_trgm_ops" does not exist for access method "gist"
conn.execute('alter extension pg_trgm set schema pg_catalog;')
conn.execute('CREATE EXTENSION IF NOT EXISTS multicorn;')
conn.execute('COMMIT;')
conn.close()
class TestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._proto = ProtoLibrary()
cls.config = cls._proto.config
cls._db_type = cls._proto._db_type
def setUp(self):
pass
def tearDown(self):
pass
def clean_proto(self):
self._proto.clean_proto()
def import_single_bundle(self, cache_path, clean=True):
from test import bundle_tests
import fs
from fs.opener import fsopendir
l = self.library(use_proto=False)
if clean:
l.clean()
l.create()
orig_source = os.path.join(os.path.dirname(bundle_tests.__file__), cache_path)
l.import_bundles(orig_source, detach=True, force=True)
b = next(b for b in l.bundles).cast_to_subclass()
b.clean()
b.sync_in(force=True)
if os.path.exists(os.path.join(orig_source, 'data')):
source = fsopendir(os.path.join(orig_source, 'data'))
b.source_fs.makedir('data',allow_recreate=True)
dest = b.source_fs.opendir('data')
for d, files in source.walk('/'):
if d.startswith('.'):
continue
dest.makedir(d,recursive=True, allow_recreate=True)
for f in files:
if f.startswith('.'):
continue
path = d+'/'+f
dest.setcontents(path, source.getcontents(path) )
return b
def library(self, use_proto=True):
"""Return a new proto library. """
return self._proto.init_library(use_proto=use_proto)
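# A minimal sketch of a test case built on TestBase; the bundle name is one of
# the proto bundles listed in build_proto above:
#
#     class ExampleTest(TestBase):
#         def test_basic_bundle(self):
#             l = self.library()                       # proto-backed library
#             b = l.bundle('ingest.example.com-basic')
#             self.assertIsNotNone(b)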
| |
"""
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
    y : array, shape = (n_samples)
        Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X = np.asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print "Convergence after ", str(iter_), " iterations"
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
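# The update rules implemented in fit() above are the standard
# evidence-maximization (MacKay-style) iterations, written with the SVD
# X = U S V^T and s_i the singular values:
#     gamma   = sum_i alpha * s_i^2 / (lambda + alpha * s_i^2)      # effective dof
#     lambda <- (gamma + 2*lambda_1) / (||w||^2 + 2*lambda_2)
#     alpha  <- (n - gamma + 2*alpha_1) / (||y - Xw||^2 + 2*alpha_2)
# The lambda_*/alpha_* terms come from the Gamma hyperpriors in the docstring.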
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be Gaussian distributed.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (n_samples)
Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
    threshold_lambda : float, optional
        Threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`sigma_` : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
    -----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True,
normalize=False, copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values
Returns
-------
self : returns an instance of self.
"""
        X = np.asarray(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = linalg.pinv(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += \
1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = (gamma_ + 2. * lambda_1) \
/ ((coef_[keep_lambda]) ** 2 + 2. * lambda_2)
alpha_ = (n_samples - gamma_.sum() + 2. * alpha_1) \
/ (rmse_ + 2. * alpha_2)
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print "Converged after %s iterations" % iter_
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self._set_intercept(X_mean, y_mean, X_std)
return self
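# A quick illustrative sketch (made-up data): when most inputs are irrelevant,
# the ARD updates typically push their precisions lambda_ past
# threshold_lambda, so the corresponding coefficients are pruned to zero.
def _demo_ard_pruning():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    y = np.dot(X[:, :2], np.array([1., -1.]))  # only two features matter
    clf = ARDRegression().fit(X, y)
    print(np.round(clf.coef_, 3))  # near-zero weights on the last 8 features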
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import logging
import os
import re
import subprocess
import sys
import time
extra_cq_trybots = [
{
"mastername": "tryserver.chromium.win",
"buildernames": ["win_optional_gpu_tests_rel"]
},
{
"mastername": "tryserver.chromium.mac",
"buildernames": ["mac_optional_gpu_tests_rel"]
},
{
"mastername": "tryserver.chromium.linux",
"buildernames": ["linux_optional_gpu_tests_rel"]
}
]
extra_fyi_trybots = [
{
"mastername": "tryserver.chromium.win",
"buildernames": ["win_clang_dbg"]
}
]
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
import roll_dep_svn
from gclient import GClientKeywords
from third_party import upload
# Prevent depot_tools/third_party/upload.py from printing verbose messages.
upload.verbosity = 0 # Errors only.
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com/chromium/src.git'
CL_ISSUE_RE = re.compile(r'^Issue number: ([0-9]+) \((.*)\)$')
RIETVELD_URL_RE = re.compile('^https?://(.*)/(.*)')
ROLL_BRANCH_NAME = 'special_angle_roll_branch'
TRYJOB_STATUS_SLEEP_SECONDS = 30
# Use a shell for subcommands on Windows to get a PATH search.
IS_WIN = sys.platform.startswith('win')
ANGLE_PATH = os.path.join('third_party', 'angle')
CommitInfo = collections.namedtuple('CommitInfo', ['git_commit',
'git_repo_url'])
CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'rietveld_server'])
def _PosixPath(path):
"""Convert a possibly-Windows path to a posix-style path."""
(_, path) = os.path.splitdrive(path)
return path.replace(os.sep, '/')
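# For example, on Windows _PosixPath('C:\\src\\third_party\\angle') returns
# '/src/third_party/angle': the drive letter is stripped and the separators
# are normalized to forward slashes.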
def _ParseGitCommitHash(description):
for line in description.splitlines():
if line.startswith('commit '):
return line.split()[1]
logging.error('Failed to parse git commit id from:\n%s\n', description)
sys.exit(-1)
return None
def _ParseDepsFile(filename):
with open(filename, 'rb') as f:
deps_content = f.read()
return _ParseDepsDict(deps_content)
def _ParseDepsDict(deps_content):
local_scope = {}
var = GClientKeywords.VarImpl({}, local_scope)
global_scope = {
'File': GClientKeywords.FileImpl,
'From': GClientKeywords.FromImpl,
'Var': var.Lookup,
'deps_os': {},
}
exec(deps_content, global_scope, local_scope)
return local_scope
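# A minimal sketch of what the parsing above yields. The repository URL and
# commit hash below are made-up placeholders, not real ANGLE values.
def _DemoParseDeps():  # illustrative only; not used by the roll flow
  deps_content = """
vars = {'chromium_git': 'https://chromium.googlesource.com'}
deps = {
  'src/third_party/angle':
    Var('chromium_git') + '/angle/angle.git' + '@' + '0123456789abcdef',
}
"""
  local_scope = _ParseDepsDict(deps_content)
  # -> https://chromium.googlesource.com/angle/angle.git@0123456789abcdef
  print(local_scope['deps']['src/third_party/angle'])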
def _GenerateCLDescriptionCommand(angle_current, angle_new, bugs, tbr):
def GetChangeString(current_hash, new_hash):
    return '%s..%s' % (current_hash[0:7], new_hash[0:7])
def GetChangeLogURL(git_repo_url, change_string):
return '%s/+log/%s' % (git_repo_url, change_string)
def GetBugString(bugs):
bug_str = 'BUG='
for bug in bugs:
bug_str += str(bug) + ','
return bug_str.rstrip(',')
if angle_current.git_commit != angle_new.git_commit:
change_str = GetChangeString(angle_current.git_commit,
angle_new.git_commit)
changelog_url = GetChangeLogURL(angle_current.git_repo_url,
change_str)
def GetExtraCQTrybotString():
s = ''
for t in extra_cq_trybots:
if s:
s += ';'
s += t['mastername'] + ':' + ','.join(t['buildernames'])
return s
def GetTBRString(tbr):
if not tbr:
return ''
return 'TBR=' + tbr
extra_trybot_args = []
if extra_cq_trybots:
extra_trybot_string = GetExtraCQTrybotString()
extra_trybot_args = ['-m', 'CQ_INCLUDE_TRYBOTS=' + extra_trybot_string]
return [
'-m', 'Roll ANGLE ' + change_str,
'-m', '%s' % changelog_url,
'-m', GetBugString(bugs),
'-m', GetTBRString(tbr),
'-m', 'TEST=bots',
] + extra_trybot_args
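# For illustration, with made-up hashes, bug numbers, and reviewer, the
# function above produces arguments along the lines of:
#   ['-m', 'Roll ANGLE 0123456..89abcde',
#    '-m', '<git_repo_url>/+log/0123456..89abcde',
#    '-m', 'BUG=123,456',
#    '-m', 'TBR=someone',
#    '-m', 'TEST=bots',
#    '-m', 'CQ_INCLUDE_TRYBOTS=tryserver.chromium.win:win_optional_gpu_tests_rel;...']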
class AutoRoller(object):
def __init__(self, chromium_src):
self._chromium_src = chromium_src
def _RunCommand(self, command, working_dir=None, ignore_exit_code=False,
extra_env=None):
"""Runs a command and returns the stdout from that command.
If the command fails (exit code != 0), the function will exit the process.
"""
working_dir = working_dir or self._chromium_src
logging.debug('cmd: %s cwd: %s', ' '.join(command), working_dir)
env = os.environ.copy()
if extra_env:
logging.debug('extra env: %s', extra_env)
env.update(extra_env)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=IS_WIN, env=env,
cwd=working_dir, universal_newlines=True)
output = p.stdout.read()
p.wait()
p.stdout.close()
p.stderr.close()
if not ignore_exit_code and p.returncode != 0:
logging.error('Command failed: %s\n%s', str(command), output)
sys.exit(p.returncode)
return output
def _GetCommitInfo(self, path_below_src, git_hash=None, git_repo_url=None):
working_dir = os.path.join(self._chromium_src, path_below_src)
self._RunCommand(['git', 'fetch', 'origin'], working_dir=working_dir)
revision_range = git_hash or 'origin'
ret = self._RunCommand(
['git', '--no-pager', 'log', revision_range, '--pretty=full', '-1'],
working_dir=working_dir)
return CommitInfo(_ParseGitCommitHash(ret), git_repo_url)
def _GetDepsCommitInfo(self, deps_dict, path_below_src):
entry = deps_dict['deps'][_PosixPath('src/%s' % path_below_src)]
at_index = entry.find('@')
git_repo_url = entry[:at_index]
git_hash = entry[at_index + 1:]
return self._GetCommitInfo(path_below_src, git_hash, git_repo_url)
def _GetCLInfo(self):
cl_output = self._RunCommand(['git', 'cl', 'issue'])
m = CL_ISSUE_RE.match(cl_output.strip())
if not m:
logging.error('Cannot find any CL info. Output was:\n%s', cl_output)
sys.exit(-1)
issue_number = int(m.group(1))
url = m.group(2)
# Parse the Rietveld host from the URL.
m = RIETVELD_URL_RE.match(url)
if not m:
logging.error('Cannot parse Rietveld host from URL: %s', url)
sys.exit(-1)
rietveld_server = m.group(1)
return CLInfo(issue_number, url, rietveld_server)
def _GetCurrentBranchName(self):
return self._RunCommand(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']).splitlines()[0]
def _IsTreeClean(self):
lines = self._RunCommand(
['git', 'status', '--porcelain', '-uno']).splitlines()
if len(lines) == 0:
return True
logging.debug('Dirty/unversioned files:\n%s', '\n'.join(lines))
return False
def _GetBugList(self, path_below_src, angle_current, angle_new):
working_dir = os.path.join(self._chromium_src, path_below_src)
lines = self._RunCommand(
['git','log',
'%s..%s' % (angle_current.git_commit, angle_new.git_commit)],
working_dir=working_dir).split('\n')
bugs = set()
for line in lines:
line = line.strip()
bug_prefix = 'BUG='
if line.startswith(bug_prefix):
bugs_strings = line[len(bug_prefix):].split(',')
for bug_string in bugs_strings:
try:
bugs.add(int(bug_string))
          except ValueError:
            # Skip this; it may be a project-specific bug such as
            # "angleproject:X" or an ill-formed BUG= message.
            pass
return bugs
def _UpdateReadmeFile(self, readme_path, new_revision):
readme = open(os.path.join(self._chromium_src, readme_path), 'r+')
txt = readme.read()
    m = re.sub(re.compile(r'.*^Revision: ([0-9]*).*', re.MULTILINE),
               ('Revision: %s' % new_revision), txt)
readme.seek(0)
readme.write(m)
readme.truncate()
def _TriggerExtraTrybots(self, trybots):
for trybot in trybots:
for builder in trybot['buildernames']:
self._RunCommand([
'git', 'cl', 'try',
'-m', trybot['mastername'],
'-b', builder])
def PrepareRoll(self, ignore_checks, tbr, should_commit):
# TODO(kjellander): use os.path.normcase, os.path.join etc for all paths for
# cross platform compatibility.
if not ignore_checks:
if self._GetCurrentBranchName() != 'master':
logging.error('Please checkout the master branch.')
return -1
if not self._IsTreeClean():
logging.error('Please make sure you don\'t have any modified files.')
return -1
# Always clean up any previous roll.
self.Abort()
logging.debug('Pulling latest changes')
if not ignore_checks:
self._RunCommand(['git', 'pull'])
self._RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
# Modify Chromium's DEPS file.
# Parse current hashes.
deps_filename = os.path.join(self._chromium_src, 'DEPS')
deps = _ParseDepsFile(deps_filename)
angle_current = self._GetDepsCommitInfo(deps, ANGLE_PATH)
# Find ToT revisions.
angle_latest = self._GetCommitInfo(ANGLE_PATH)
if IS_WIN:
      # Make sure the roll script doesn't use Windows line endings.
self._RunCommand(['git', 'config', 'core.autocrlf', 'true'])
self._UpdateDep(deps_filename, ANGLE_PATH, angle_latest)
if self._IsTreeClean():
logging.debug('Tree is clean - no changes detected.')
self._DeleteRollBranch()
else:
bugs = self._GetBugList(ANGLE_PATH, angle_current, angle_latest)
description = _GenerateCLDescriptionCommand(
angle_current, angle_latest, bugs, tbr)
logging.debug('Committing changes locally.')
self._RunCommand(['git', 'add', '--update', '.'])
self._RunCommand(['git', 'commit'] + description)
logging.debug('Uploading changes...')
self._RunCommand(['git', 'cl', 'upload'],
extra_env={'EDITOR': 'true'})
# Kick off tryjobs.
base_try_cmd = ['git', 'cl', 'try']
self._RunCommand(base_try_cmd)
if extra_cq_trybots:
# Run additional tryjobs.
# TODO(kbr): this should not be necessary -- the
# CQ_INCLUDE_TRYBOTS directive above should handle it.
# http://crbug.com/585237
self._TriggerExtraTrybots(extra_cq_trybots)
if extra_fyi_trybots:
self._TriggerExtraTrybots(extra_fyi_trybots)
# Mark the CL to be committed if requested
if should_commit:
self._RunCommand(['git', 'cl', 'set-commit'])
cl_info = self._GetCLInfo()
      print('Issue: %d URL: %s' % (cl_info.issue, cl_info.url))
# Checkout master again.
self._RunCommand(['git', 'checkout', 'master'])
    print('Roll branch left as ' + ROLL_BRANCH_NAME)
return 0
def _UpdateDep(self, deps_filename, dep_relative_to_src, commit_info):
dep_name = _PosixPath(os.path.join('src', dep_relative_to_src))
# roll_dep_svn.py relies on cwd being the Chromium checkout, so let's
# temporarily change the working directory and then change back.
cwd = os.getcwd()
os.chdir(os.path.dirname(deps_filename))
roll_dep_svn.update_deps(deps_filename, dep_relative_to_src, dep_name,
commit_info.git_commit, '')
os.chdir(cwd)
def _DeleteRollBranch(self):
self._RunCommand(['git', 'checkout', 'master'])
self._RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
logging.debug('Deleted the local roll branch (%s)', ROLL_BRANCH_NAME)
def _GetBranches(self):
"""Returns a tuple of active,branches.
The 'active' is the name of the currently active branch and 'branches' is a
list of all branches.
"""
lines = self._RunCommand(['git', 'branch']).split('\n')
branches = []
active = ''
for l in lines:
if '*' in l:
# The assumption is that the first char will always be the '*'.
active = l[1:].strip()
branches.append(active)
else:
b = l.strip()
if b:
branches.append(b)
return (active, branches)
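  # For example, `git branch` output of the form
  #     master
  #   * special_angle_roll_branch
  # parses to ('special_angle_roll_branch',
  #            ['master', 'special_angle_roll_branch']).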
def Abort(self):
active_branch, branches = self._GetBranches()
if active_branch == ROLL_BRANCH_NAME:
active_branch = 'master'
if ROLL_BRANCH_NAME in branches:
      print('Aborting pending roll.')
self._RunCommand(['git', 'checkout', ROLL_BRANCH_NAME])
# Ignore an error here in case an issue wasn't created for some reason.
self._RunCommand(['git', 'cl', 'set_close'], ignore_exit_code=True)
self._RunCommand(['git', 'checkout', active_branch])
self._RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
return 0
def main():
parser = argparse.ArgumentParser(
description='Auto-generates a CL containing an ANGLE roll.')
parser.add_argument('--abort',
help=('Aborts a previously prepared roll. '
'Closes any associated issues and deletes the roll branches'),
action='store_true')
parser.add_argument('--ignore-checks', action='store_true', default=False,
help=('Skips checks for being on the master branch, dirty workspaces and '
'the updating of the checkout. Will still delete and create local '
'Git branches.'))
parser.add_argument('--tbr', help='Add a TBR to the commit message.')
parser.add_argument('--commit', action='store_true', default=False,
help='Submit the roll to the CQ after uploading.')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Be extra verbose in printing of log messages.')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
autoroller = AutoRoller(SRC_DIR)
if args.abort:
return autoroller.Abort()
else:
return autoroller.PrepareRoll(args.ignore_checks, args.tbr, args.commit)
if __name__ == '__main__':
sys.exit(main())
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@__init__
@@AddRun
@@AddRunsFromDirectory
@@Reload
@@Runs
@@Scalars
@@Graph
@@Histograms
@@CompressedHistograms
@@Images
"""
def __init__(self,
run_path_map=None,
size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
        `event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators. This maintains the invariant that once the Multiplexer was
activated, all of its accumulators are active.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
    if name is None or name == '':
name = path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(danmane) - Make it impossible to overwrite an old path with
# a new path (just give the new path a distinct name)
logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
is added called "name" and with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
if io_wrapper.Exists(path) and not io_wrapper.IsDirectory(path):
raise ValueError('AddRunsFromDirectory: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
subdirs = [
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
]
for subdir in subdirs:
logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
self._reload_called = True
with self._accumulators_mutex:
loaders = list(self._accumulators.values())
for l in loaders:
l.Reload()
return self
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
The `graph_def` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Graph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Images(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def _GetAccumulator(self, run):
with self._accumulators_mutex:
return self._accumulators[run]
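# A minimal end-to-end sketch (the log directory path is a placeholder):
#
#   multiplexer = EventMultiplexer()
#   multiplexer.AddRunsFromDirectory('/tmp/logdir')  # one subdirectory per run
#   multiplexer.Reload()
#   for run_name, tag_dict in multiplexer.Runs().items():
#     print(run_name, tag_dict)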
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def check_throws(f):
try:
f()
except tvm.error.TVMError:
pass
else:
raise AssertionError("Should have raised an exception but didn't.")
def test_const_fold():
def check(f, *args):
x = f(*[tvm.tir.const(x, "int32") for x in args])
y = f(*args)
if not isinstance(x, (tvm.tir.IntImm,)) or x.value != int(y):
raise ValueError("check error: %s vs %s " % (x, y))
tmod = tvm.tir.truncmod
check(lambda x, y: x + y, 3, 4)
check(lambda x, y: x * y, 3, 12)
check(lambda x, y: x * y - 10, 3, 12)
check(lambda x, y: x - tmod(y, 10), 3, 12)
check(lambda x, y: x // y + 10, 100, 12)
check(lambda x, y: x & y + 10, 112, 128)
check(lambda x, y: x > y, 112, 128)
check(lambda x, y: x < y, 112, 128)
check(lambda x, y: x <= y, 112, 128)
check(lambda x, y: x >= y, 112, 128)
check(lambda x, y: (x | y) ^ 10, 112, 128)
def test_const_fold2():
x = te.var("x")
tmod = tvm.tir.truncmod
tdiv = tvm.tir.truncdiv
assert (x + 0).same_as(x)
assert (0 + x).same_as(x)
assert (x - 0).same_as(x)
assert tmod(x, 1).value == 0
assert (x * 1).same_as(x)
assert (1 * x).same_as(x)
assert isinstance(tdiv(1, x), tvm.tir.Div)
def test_const_fold3():
# Test that using ints with logic operations is forbidden
x = te.var("x")
for val in [0, 1]:
for func in [tvm.tir.all, tvm.tir.any]:
check_throws(lambda: func(tvm.tir.const(val, "uint1"), x))
check_throws(lambda: func(x, tvm.tir.const(val, "uint1")))
# Test const folding when both arguments are const
for tvm_func, py_func in [
(tvm.tir.all, lambda a, b: a and b),
(tvm.tir.any, lambda a, b: a or b),
]:
for v1 in [0, 1]:
for v2 in [0, 1]:
assert tvm.ir.structural_equal(
tvm_func(tvm.tir.const(v1, "uint1"), tvm.tir.const(v2, "uint1")),
tvm.tir.const(py_func(v1, v2), "uint1"),
)
x = te.var("x", "uint1")
true = tvm.tir.const(1, "uint1")
false = tvm.tir.const(0, "uint1")
assert tvm.tir.all(x, true).same_as(x)
assert tvm.tir.all(true, x).same_as(x)
assert tvm.tir.any(x, false).same_as(x)
assert tvm.tir.any(false, x).same_as(x)
assert tvm.tir.all(x, false).same_as(false)
assert tvm.tir.all(false, x).same_as(false)
assert tvm.tir.any(x, true).same_as(true)
assert tvm.tir.any(true, x).same_as(true)
def test_const_fold4():
x1 = tvm.tir.const(4, "int32")
x2 = x1 + 5
tdiv = tvm.tir.truncdiv
assert isinstance(x2, tvm.tir.IntImm) and x2.value == 9
x3 = tdiv(x2, 3)
assert isinstance(x3, tvm.tir.IntImm) and x3.value == 3
x4 = x3 + 0.55
assert isinstance(x4, tvm.tir.FloatImm) and abs(x4.value - 3.55) < 1e-6
x5 = te.ceil(x4)
assert isinstance(x5, tvm.tir.FloatImm) and x5.value == 4
x6 = x5.astype("int")
assert isinstance(x6, tvm.tir.IntImm) and x6.value == 4, "x6={}".format(x6)
y = (te.round((tvm.tir.const(6.5, "float32") - 1) / 1.5) + 2).astype("int")
assert isinstance(y, tvm.tir.IntImm) and y.value == 6
def test_binary_dtype_match():
def verify_general_dtype_support(f, is_conditional=False):
rules = [
[("bool", "int32"), "int32"],
[("int32", "float32"), "float32"],
[("int32", "int64"), "int64"],
[("uint32", "int8"), "uint32"],
[("uint32", "int32"), "uint32"],
]
for (lhs_dtype, rhs_dtype), out_dtype in rules:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
out = f(lhs, rhs)
if not is_conditional:
assert out.dtype == out_dtype
else:
assert out.dtype == "bool"
if hasattr(out, "a"):
assert out.a.dtype == out_dtype
assert out.b.dtype == out_dtype
elif hasattr(out, "args"):
# CallOp
assert out.args[0].dtype == out_dtype
assert out.args[1].dtype == out_dtype
else:
raise ValueError("Unknown binary op format!")
def verify_callop_float_only(f):
for lhs_dtype in ["int32", "float32", "float64"]:
for rhs_dtype in ["int32", "float32", "float64"]:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if "float" not in lhs_dtype and "float" not in rhs_dtype:
check_throws(lambda: f(lhs, rhs))
elif "float" in lhs_dtype:
out = f(lhs, rhs)
# Upcasting for floating point types
dtypes = [lhs_dtype, rhs_dtype]
if "float64" in dtypes:
target_dtype = "float64"
elif "float32" in dtypes:
target_dtype = "float32"
else:
target_dtype = "int32"
assert out.dtype == target_dtype
# Final inputs are the right type
assert out.args[0].dtype == target_dtype
assert out.args[1].dtype == target_dtype
else:
out = f(lhs, rhs)
assert out.dtype == rhs_dtype
assert out.args[0].dtype == rhs_dtype
assert out.args[1].dtype == rhs_dtype
verify_general_dtype_support(lambda a, b: a + b)
verify_general_dtype_support(lambda a, b: a * b)
verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True)
verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True)
verify_callop_float_only(lambda a, b: te.power(a, b))
# verify bool & int32 constant folding
assert tvm.tir.const(1) == tvm.tir.const(True)
assert tvm.tir.const(2) != tvm.tir.const(True)
def test_if_then_else():
cases = [
[(te.var("cond", dtype="bool"), "bool", "int32"), "int32"],
[(True, "int32", "float32"), "float32"],
[(False, "int32", "int64"), "int64"],
[(te.var("cond", dtype="bool"), "uint32", "int32"), "uint32"],
[(te.var("cond", dtype="int32"), "uint32", "int32"), "uint32"],
]
for (cond, lhs_dtype, rhs_dtype), out_dtype in cases:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if cond is True or cond is False:
out = tvm.tir.if_then_else(cond, lhs, rhs)
out2 = tvm.tir.if_then_else(not cond, rhs, lhs)
out3 = tvm.tir.if_then_else(not cond, lhs, rhs)
assert tvm.ir.structural_equal(out, out2) == 1
if cond:
assert tvm.ir.structural_equal(out, lhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, rhs.astype(out_dtype)) == 1
else:
assert tvm.ir.structural_equal(out, rhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, lhs.astype(out_dtype)) == 1
elif cond.dtype == "bool":
out = tvm.tir.if_then_else(cond, lhs, rhs)
assert out.dtype == out_dtype
assert out.args[1].dtype == out_dtype
assert out.args[2].dtype == out_dtype
elif cond.dtype != "bool":
check_throws(lambda: tvm.tir.if_then_else(cond, lhs, rhs))
else:
raise ValueError("Unknown combinations")
if __name__ == "__main__":
test_const_fold()
test_const_fold2()
test_const_fold3()
test_const_fold4()
test_binary_dtype_match()
test_if_then_else()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300M multiply-adds
and 3.4M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
V2_DEF = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(ops.expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(ops.expanded_conv, stride=2, num_outputs=24),
op(ops.expanded_conv, stride=1, num_outputs=24),
op(ops.expanded_conv, stride=2, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=2, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=2, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=320),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
# pyformat: enable
# Mobilenet v2 Definition with group normalization.
V2_DEF_GROUP_NORM = copy.deepcopy(V2_DEF)
V2_DEF_GROUP_NORM['defaults'] = {
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.group_norm, # pylint: disable=C0330
'activation_fn': tf.nn.relu6, # pylint: disable=C0330
}, # pylint: disable=C0330
(ops.expanded_conv,): {
'expansion_size': ops.expand_input_by_factor(6),
'split_expansion': 1,
'normalizer_fn': slim.group_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
}
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
activation_fn=None,
**kwargs):
"""Creates mobilenet V2 network.
Inference mode is created by default. To create training use training_scope
below.
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer.
scope: Scope of the operator
    conv_defs: Allows overriding the default conv defs.
finegrain_classification_mode: When set to True, the model
will keep the last layer large even for small multipliers. Following
https://arxiv.org/abs/1801.04381
suggests that it improves performance for ImageNet-type of problems.
*Note* ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
    divisible_by: If provided, will ensure that the number of channels in all
      layers is divisible by this number.
activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
specified.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn- what prediction function to use.
      reuse- whether to reuse variables (if reuse is set to True, scope
      must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support generic '
                     'multiplier parameter; use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
if activation_fn:
conv_defs = copy.deepcopy(conv_defs)
defaults = conv_defs['defaults']
conv_defaults = (
defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
conv_defaults['activation_fn'] = activation_fn
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Wrappers for mobilenet v2 with depth-multipliers. Note that
# 'finegrain_classification_mode' is set to True, which means the embedding
# layer will not be shrunk when given a depth-multiplier < 1.0.
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
finegrain_classification_mode=True)
mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
finegrain_classification_mode=True)
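# A minimal usage sketch (TF1-style graph mode; the input tensor is made up):
#
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   with slim.arg_scope(training_scope()):
#     logits, endpoints = mobilenet_v2_050(images)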
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
@slim.add_arg_scope
def mobilenet_base_group_norm(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
kwargs['conv_defs'] = V2_DEF_GROUP_NORM
kwargs['conv_defs']['defaults'].update({
(slim.group_norm,): {
'groups': kwargs.pop('groups', 8)
}
})
return mobilenet(
input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
import pp
from pp import components as pc
@pp.autoname
def test_comb(
pad_size=(200, 200),
wire_width=1,
wire_gap=3,
comb_layer=0,
overlap_zigzag_layer=1,
comb_pad_layer=None,
comb_gnd_layer=None,
overlap_pad_layer=None,
):
""" Superconducting heater device from phidl.geometry
Args:
pad_size=(200, 200)
wire_width=1
wire_gap=3
comb_layer=0
overlap_zigzag_layer=1
comb_pad_layer=None
comb_gnd_layer=None
overlap_pad_layer=None
"""
CI = pp.Component()
if comb_pad_layer is None:
comb_pad_layer = comb_layer
if comb_gnd_layer is None:
comb_gnd_layer = comb_layer
if overlap_pad_layer is None:
overlap_pad_layer = overlap_zigzag_layer
wire_spacing = wire_width + wire_gap * 2
# %% pad overlays
overlay_padb = CI.add_ref(
pc.rectangle(
size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=overlap_pad_layer
)
)
overlay_padl = CI.add_ref(
pc.rectangle(
size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_pad_layer
)
)
overlay_padt = CI.add_ref(
pc.rectangle(
size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_pad_layer
)
)
overlay_padr = CI.add_ref(
pc.rectangle(
size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_gnd_layer
)
)
overlay_padl.xmin = 0
overlay_padl.ymin = 0
overlay_padb.ymax = 0
overlay_padb.xmin = overlay_padl.xmax + pad_size[1] / 5
overlay_padr.ymin = overlay_padl.ymin
overlay_padr.xmin = overlay_padb.xmax + pad_size[1] / 5
overlay_padt.xmin = overlay_padl.xmax + pad_size[1] / 5
overlay_padt.ymin = overlay_padl.ymax
# %% pads
padl = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
padt = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
padr = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
padb = CI.add_ref(pc.rectangle(size=pad_size, layer=overlap_zigzag_layer))
padl_nub = CI.add_ref(
pc.rectangle(size=(pad_size[0] / 4, pad_size[1] / 2), layer=comb_layer)
)
padr_nub = CI.add_ref(
pc.rectangle(size=(pad_size[0] / 4, pad_size[1] / 2), layer=comb_layer)
)
padl.xmin = overlay_padl.xmin
padl.center = [padl.center[0], overlay_padl.center[1]]
padt.ymax = overlay_padt.ymax
padt.center = [overlay_padt.center[0], padt.center[1]]
padr.xmax = overlay_padr.xmax
padr.center = [padr.center[0], overlay_padr.center[1]]
padb.ymin = overlay_padb.ymin
padb.center = [overlay_padb.center[0], padb.center[1]]
padl_nub.xmin = padl.xmax
padl_nub.center = [padl_nub.center[0], padl.center[1]]
padr_nub.xmax = padr.xmin
padr_nub.center = [padr_nub.center[0], padr.center[1]]
# %% connected zig
head = CI.add_ref(pc.compass(size=(pad_size[0] / 12, wire_width), layer=comb_layer))
head.xmin = padl_nub.xmax
head.ymax = padl_nub.ymax
connector = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
connector.connect(port="W", destination=head.ports["E"])
old_port = connector.ports["S"]
top = True
obj = connector
while obj.xmax + pad_size[0] / 12 < padr_nub.xmin:
# long zig zag rectangle
obj = CI.add_ref(
pc.compass(
size=(pad_size[1] / 2 - 2 * wire_width, wire_width), layer=comb_layer
)
)
obj.connect(port="W", destination=old_port)
old_port = obj.ports["E"]
if top:
# zig zag edge rectangle
obj = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=comb_layer)
)
obj.connect(port="N", destination=old_port)
top = False
else:
# zig zag edge rectangle
obj = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=comb_layer)
)
obj.connect(port="S", destination=old_port)
top = True
        # comb rectangle
comb = CI.add_ref(
pc.rectangle(
size=(
(padt.ymin - head.ymax)
+ pad_size[1] / 2
- (wire_spacing + wire_width) / 2,
wire_width,
),
layer=comb_layer,
)
)
comb.rotate(90)
comb.ymax = padt.ymin
comb.xmax = obj.xmax - (wire_spacing + wire_width) / 2
old_port = obj.ports["E"]
obj = CI.add_ref(pc.compass(size=(wire_spacing, wire_width), layer=comb_layer))
obj.connect(port="W", destination=old_port)
old_port = obj.ports["E"]
obj = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
obj.connect(port="W", destination=old_port)
if top:
old_port = obj.ports["S"]
else:
old_port = obj.ports["N"]
old_port = obj.ports["E"]
if padr_nub.xmin - obj.xmax > 0:
tail = CI.add_ref(
pc.compass(size=(padr_nub.xmin - obj.xmax, wire_width), layer=comb_layer)
)
else:
tail = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
tail.connect(port="W", destination=old_port)
# %% disconnected zig
dhead = CI.add_ref(
pc.compass(
size=(padr_nub.ymin - padb.ymax - wire_width, wire_width),
layer=overlap_zigzag_layer,
)
)
dhead.rotate(90)
dhead.ymin = padb.ymax
dhead.xmax = tail.xmin - (wire_spacing + wire_width) / 2
connector = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
)
connector.connect(port="S", destination=dhead.ports["E"])
old_port = connector.ports["N"]
right = True
obj = connector
while obj.ymax + wire_spacing + wire_width < head.ymax:
obj = CI.add_ref(
pc.compass(size=(wire_spacing, wire_width), layer=overlap_zigzag_layer)
)
obj.connect(port="W", destination=old_port)
old_port = obj.ports["E"]
if right:
obj = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
)
obj.connect(port="W", destination=old_port)
right = False
else:
obj = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
)
obj.connect(port="E", destination=old_port)
right = True
old_port = obj.ports["N"]
obj = CI.add_ref(
pc.compass(
size=(
dhead.xmin - (head.xmax + head.xmin + wire_width) / 2,
wire_width,
),
layer=overlap_zigzag_layer,
)
)
obj.connect(port="E", destination=old_port)
old_port = obj.ports["W"]
obj = CI.add_ref(
pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
)
obj.connect(port="S", destination=old_port)
if right:
old_port = obj.ports["W"]
else:
old_port = obj.ports["E"]
return CI
if __name__ == "__main__":
c = test_comb()
pp.show(c)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
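# A tiny NumPy sketch (illustrative only) of the row masking performed by
# _copy_one_through above: rows whose sequence has already ended keep the
# zero output, while still-active rows take the freshly computed output.
def _demo_copy_one_through():  # pragma: no cover - illustrative sketch
  import numpy as np
  time = 2
  sequence_length = np.array([1, 3])       # row 0 has ended, row 1 is active
  new_output = np.array([[1.0], [2.0]])
  zero_output = np.zeros_like(new_output)
  copy_cond = time >= sequence_length      # [True, False]
  print(np.where(copy_cond[:, None], zero_output, new_output))  # [[0.], [2.]]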
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
    lengths: A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If `None`, the list is simply reversed.
Returns:
time-reversed sequence
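  Example (illustrative only; `t0..t2` are per-time-step tensors of shape
  `[batch_size=2, depth]`):
    With `input_seq = [t0, t1, t2]` and `lengths = [3, 1]`, batch row 0 is
    reversed over all three valid steps and sees `(t2, t1, t0)`, while batch
    row 1 has only one valid step, so it sees `(t0, t1, t2)` unchanged.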
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
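  A minimal usage sketch (assuming this function is exported as
  `tf.nn.bidirectional_dynamic_rnn`; the cell sizes and placeholder shapes
  below are illustrative):
  ```python
  cell_fw = tf.contrib.rnn.LSTMCell(64)
  cell_bw = tf.contrib.rnn.LSTMCell(64)
  inputs = tf.placeholder(tf.float32, [None, 50, 128])  # [batch, time, depth]
  seq_len = tf.placeholder(tf.int32, [None])
  (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)
  outputs = tf.concat((out_fw, out_bw), 2)  # [batch, time, 2 * 64]
  ```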
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
# pylint: disable=protected-access
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
# pylint: enable=protected-access
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_dim=seq_dim, batch_dim=batch_dim)
else:
return array_ops.reverse(input_, axis=[seq_dim])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
  The parameter `sequence_length` is optional and is used to copy through state
  and zero out outputs once past a batch element's sequence length. It is
  provided for correctness rather than performance, unlike in `rnn()`.
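  A minimal usage sketch (assuming this function is exported as
  `tf.nn.dynamic_rnn`; the LSTMCell and placeholder shapes below are
  illustrative):
  ```python
  cell = tf.contrib.rnn.LSTMCell(num_units=64)
  inputs = tf.placeholder(tf.float32, [None, None, 128])  # [batch, time, depth]
  seq_len = tf.placeholder(tf.int32, [None])
  outputs, state = tf.nn.dynamic_rnn(
      cell, inputs, sequence_length=seq_len, dtype=tf.float32)
  ```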
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
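  # Each input TensorArray now holds one [batch_size, ...] slice of the
  # corresponding input per time step, readable inside the while_loop body.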
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
      `emit_output` during initialization (inputs `time = 0`,
      `cell_output = None`, `cell_state = None`, and `loop_state = None`),
      then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
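    # `zero_emit` mirrors `emit_structure` with all-zero tensors; finished
    # batch rows emit these zeros on subsequent steps (see _copy_some_through
    # in the loop body below).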
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
| |
# Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Icehouse release
Revision ID: 001
Revises: None
Create Date: 2014-04-01 20:46:25.783444
"""
# revision identifiers, used by Alembic.
revision = '001'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sahara.db.sqlalchemy import types as st
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
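# Illustrative note: an initial revision like this is normally applied through
# the Alembic CLI, e.g. `alembic upgrade 001` (Sahara wraps this in its own
# db-manage tooling; the exact entry point depends on the deployment).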
def upgrade():
op.create_table('jobs',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('type', sa.String(length=80), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('node_group_templates',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('flavor_id', sa.String(length=36),
nullable=False),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.Column('plugin_name', sa.String(length=80),
nullable=False),
sa.Column('hadoop_version', sa.String(length=80),
nullable=False),
sa.Column('node_processes', st.JsonEncoded(),
nullable=True),
sa.Column('node_configs', st.JsonEncoded(), nullable=True),
sa.Column('volumes_per_node', sa.Integer(),
nullable=False),
sa.Column('volumes_size', sa.Integer(), nullable=True),
sa.Column('volume_mount_prefix', sa.String(length=80),
nullable=True),
sa.Column('floating_ip_pool', sa.String(length=36),
nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('cluster_templates',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('cluster_configs', st.JsonEncoded(),
nullable=True),
sa.Column('default_image_id', sa.String(length=36),
nullable=True),
sa.Column('anti_affinity', st.JsonEncoded(),
nullable=True),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('neutron_management_network',
sa.String(length=36), nullable=True),
sa.Column('plugin_name', sa.String(length=80),
nullable=False),
sa.Column('hadoop_version', sa.String(length=80),
nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('job_binary_internal',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('data', st.LargeBinary(), nullable=True),
sa.Column('datasize', sa.BIGINT(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('job_binaries',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('url', sa.String(length=256), nullable=False),
sa.Column('extra', st.JsonEncoded(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('data_sources',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('type', sa.String(length=80), nullable=False),
sa.Column('url', sa.String(length=256), nullable=False),
sa.Column('credentials', st.JsonEncoded(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('libs_association',
sa.Column('Job_id', sa.String(length=36), nullable=True),
sa.Column('JobBinary_id', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['JobBinary_id'],
['job_binaries.id'], ),
sa.ForeignKeyConstraint(['Job_id'], ['jobs.id'], ),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
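    # libs_association is a plain many-to-many join table (no surrogate key)
    # linking jobs to the binaries they reference as supporting libraries;
    # mains_association below is the analogous table for main binaries.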
op.create_table('clusters',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('trust_id', sa.String(length=36), nullable=True),
sa.Column('is_transient', sa.Boolean(), nullable=True),
sa.Column('plugin_name', sa.String(length=80),
nullable=False),
sa.Column('hadoop_version', sa.String(length=80),
nullable=False),
sa.Column('cluster_configs', st.JsonEncoded(),
nullable=True),
sa.Column('default_image_id', sa.String(length=36),
nullable=True),
sa.Column('neutron_management_network',
sa.String(length=36), nullable=True),
sa.Column('anti_affinity', st.JsonEncoded(),
nullable=True),
sa.Column('management_private_key', sa.Text(),
nullable=False),
sa.Column('management_public_key', sa.Text(),
nullable=False),
sa.Column('user_keypair_id', sa.String(length=80),
nullable=True),
sa.Column('status', sa.String(length=80), nullable=True),
sa.Column('status_description', sa.String(length=200),
nullable=True),
sa.Column('info', st.JsonEncoded(), nullable=True),
sa.Column('extra', st.JsonEncoded(), nullable=True),
sa.Column('cluster_template_id', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['cluster_template_id'],
['cluster_templates.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'tenant_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('templates_relations',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('flavor_id', sa.String(length=36),
nullable=False),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.Column('node_processes', st.JsonEncoded(),
nullable=True),
sa.Column('node_configs', st.JsonEncoded(), nullable=True),
sa.Column('volumes_per_node', sa.Integer(), nullable=True),
sa.Column('volumes_size', sa.Integer(), nullable=True),
sa.Column('volume_mount_prefix', sa.String(length=80),
nullable=True),
sa.Column('count', sa.Integer(), nullable=False),
sa.Column('cluster_template_id', sa.String(length=36),
nullable=True),
sa.Column('node_group_template_id', sa.String(length=36),
nullable=True),
sa.Column('floating_ip_pool', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['cluster_template_id'],
['cluster_templates.id'], ),
sa.ForeignKeyConstraint(['node_group_template_id'],
['node_group_templates.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('mains_association',
sa.Column('Job_id', sa.String(length=36), nullable=True),
sa.Column('JobBinary_id', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['JobBinary_id'],
['job_binaries.id'], ),
sa.ForeignKeyConstraint(['Job_id'], ['jobs.id'], ),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('job_executions',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('job_id', sa.String(length=36), nullable=True),
sa.Column('input_id', sa.String(length=36), nullable=True),
sa.Column('output_id', sa.String(length=36),
nullable=True),
sa.Column('start_time', sa.DateTime(), nullable=True),
sa.Column('end_time', sa.DateTime(), nullable=True),
sa.Column('cluster_id', sa.String(length=36),
nullable=True),
sa.Column('info', st.JsonEncoded(), nullable=True),
sa.Column('progress', sa.Float(), nullable=True),
sa.Column('oozie_job_id', sa.String(length=100),
nullable=True),
sa.Column('return_code', sa.String(length=80),
nullable=True),
sa.Column('job_configs', st.JsonEncoded(), nullable=True),
sa.Column('extra', st.JsonEncoded(), nullable=True),
sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ),
sa.ForeignKeyConstraint(['input_id'],
['data_sources.id'], ),
sa.ForeignKeyConstraint(['job_id'], ['jobs.id'], ),
sa.ForeignKeyConstraint(['output_id'],
['data_sources.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('node_groups',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('flavor_id', sa.String(length=36),
nullable=False),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.Column('image_username', sa.String(length=36),
nullable=True),
sa.Column('node_processes', st.JsonEncoded(),
nullable=True),
sa.Column('node_configs', st.JsonEncoded(), nullable=True),
sa.Column('volumes_per_node', sa.Integer(), nullable=True),
sa.Column('volumes_size', sa.Integer(), nullable=True),
sa.Column('volume_mount_prefix', sa.String(length=80),
nullable=True),
sa.Column('count', sa.Integer(), nullable=False),
sa.Column('cluster_id', sa.String(length=36),
nullable=True),
sa.Column('node_group_template_id', sa.String(length=36),
nullable=True),
sa.Column('floating_ip_pool', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ),
sa.ForeignKeyConstraint(['node_group_template_id'],
['node_group_templates.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'cluster_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
op.create_table('instances',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=36),
nullable=True),
sa.Column('node_group_id', sa.String(length=36),
nullable=True),
sa.Column('instance_id', sa.String(length=36),
nullable=True),
sa.Column('instance_name', sa.String(length=80),
nullable=False),
sa.Column('internal_ip', sa.String(length=15),
nullable=True),
sa.Column('management_ip', sa.String(length=15),
nullable=True),
sa.Column('volumes', st.JsonEncoded(), nullable=True),
sa.ForeignKeyConstraint(['node_group_id'],
['node_groups.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('instance_id', 'node_group_id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET)
| |
"""
=======================
Network simulation core
=======================
This module contains the main Simulation class.
A simulation object is responsible for creating the network and the process,
and for executing the main loop that runs the process on the network. It also
handles I/O and logging, saving data after each run.
"""
__author__ = "Lukas Ahrenberg (lukas@ahrenberg.se)"
__license__ = "Modified BSD License"
__all__ = ['Simulation']
import numpy
import sys
import imp
import csv
import time
import networkx
import copy
import os
from collections import OrderedDict
import pickle
# Local imports
from nepidemix import process
from nepidemix.exceptions import NepidemiXBaseException
from nepidemix.utilities import NepidemiXConfigParser
from nepidemix.version import full_version
from nepidemix.utilities.dbio import sqlite3io
# Logging
import logging
# Set up Logging
logger = logging.getLogger(__name__)
class Simulation(object):
"""
Captures the functionality of a simulation in a class.
A simulation has three stages: configuration, execution, and data export.
Configuration is made using an ini-like language, and the configure method
takes a Python ConfigParser compatible structure.
    Once the simulation is configured it is started with `execute`; finally,
    call `saveData` to write any simulation data to disk.
What data is saved depends on how the simulation is configured (as outlined
below) and on which process is used. Currently the simulation can be set up
to track the number of nodes in each mean field state defined by the process
and/or the full network (topology and states).
A very short example running a simulation (given a configuration file named
    'myconfig.ini')::
cfParser = nepidemix.utilities.NepidemiXConfigParser()
configFileName = 'myconfig.ini'
with open(configFileName) as f:
cfParser.readfp(f)
S = nepidemix.simulation.Simulation()
S.configure(cfParser)
S.execute()
S.saveData()
Configuration files are written in an ini-like language with sections and
<option> = <value> pairs in each. The Section and Option names are given by
the CFG-prefixed attributes of the Simulation class.
The following table contains explanations of all valid ini-file configuration sections.
+-----------------------+------------------------------------------------+
| List of configuration sections |
+-----------------------+------------------------------------------------+
| Section | Explanation |
+=======================+================================================+
| Simulation | This section contains general simulation |
| | information such as what network generation |
| | and process to use. |
| | See table of Simulation section options below |
| | for specific settings. |
+-----------------------+------------------------------------------------+
| NetworkParameters | This is a special section. All options will be |
| | sent as parameters to the network generation |
| | function (set by the ``network_func`` option |
| | in the ``Simulation`` section). Thus the |
|                       | options in this section are dependent on the  |
| | function chosen, and must match that function |
| | exactly. See |
| |``nepidemix.utilities.networkgeneratorwrappers``|
| | for a list of network generation functions. |
+-----------------------+------------------------------------------------+
| ProcessParameters | This is a special section. All options will be |
| | sent as parameters to the process class (set |
| | by the ``process_class`` option in the |
| | ``Simulation`` section) initialization method. |
|                       | Thus the options in this section are dependent |
| | on the process chosen, and must match that |
| | class exactly. See nepidemix.Process for a |
| | brief introduction, and the tutorial for |
| | examples. |
+-----------------------+------------------------------------------------+
| NodeStateDistribution | This is a special section. The section will be |
| | ignored if ``network_init`` is turned off. All |
| | options will be sent as parameters to the |
| | process class (set by the ``process_class`` |
| | option in the ``Simulation`` section) and used |
| | to distribute the initial node states over the |
| | network. While it is possible for generic |
| | classes to override, the format of this |
|                       | section should be such that the option names   |
|                       | are the names of the network node states       |
| | used by the process and their values should be |
| | the fraction of nodes, alternatively the |
| | number of nodes, that will be assigned to |
| | each state initially. If the sum of the states |
| | add up to the size of the network the exact |
| | number of nodes will be used. If not, a |
| | fraction of the network equal to (network size |
| | * state dist)/(sum of all state dist) will be |
| | used. I.e. normalized. It is recommended to |
| | either use exact numbers or fractions of the |
| | network size here for readability. The state |
| | names must match those specified by the |
| | network process class. If this section is left |
| | out an equal number of nodes are allocated to |
| | each state. |
+-----------------------+------------------------------------------------+
| EdgeStateDistribution | This is a special section, and analogous to |
| | the NodeStateDistribution described above but |
| | for edges. |
+-----------------------+------------------------------------------------+
| Output | This section contains options controlling |
| | simulation output and saving of data. |
| | See table of Output section options below for |
| | specific settings. |
+-----------------------+------------------------------------------------+
| Logging | Contains options on software log output for |
| | nepidemix. |
+-----------------------+------------------------------------------------+
Configuration options
Below are tables listing available options for sections having them.
+-----------------------+------------------------------------------------+
| Simulation section options |
+-----------------------+------------------------------------------------+
| Option key | Explanation |
+=======================+================================================+
| iterations | Run the simulation this many iterations. |
+-----------------------+------------------------------------------------+
| dt | The time step taken each iteration. Should be |
| | a fraction in the range 0,1. |
+-----------------------+------------------------------------------------+
| process_class | This is the name of the process object. |
| | See ``nepidemix.process`` or tutorial for |
| | options. |
+-----------------------+------------------------------------------------+
| process_class_module | This is the python module/package where the |
| | class given in the option process_class |
| | resides. Default: ``nepidemix.process`` for |
| | built-in processes. Use the base name of your |
| | own file if you programmed your own process in |
| | python. See tutorial for examples. |
| | Optional. |
+-----------------------+------------------------------------------------+
| network_func | This is the name of the network generation |
| | function. See |
| |``nepidemix.utilities.networkgeneratorwrappers``|
| | for a list of network generation functions. |
+-----------------------+------------------------------------------------+
| network_func_module | This is the python module where the network |
| | function resides. If you do not write your own |
| | network generation functions this can be left |
| | undefined. Optional. |
+-----------------------+------------------------------------------------+
| network_init | This switch (on/off, true/false, yes/no, 1/0) |
| | is optional (default value true) and tells the |
| | simulation if the network should be |
| | initialized by the current process or not. |
| | Note that not initializing the network may |
| | lead to errors or strange behavior. Only |
| | switch off if network is loaded from disk and |
| | you don't want it to be re-initialized with |
| | new state (thus keeping the states), or if the |
| | network is initialized by some other |
| | mechanism. |
+-----------------------+------------------------------------------------+
| node_init | This switch (on/off, true/false, yes/no, 1/0) |
| | is optional (default value true) and tells the |
| | simulation if the network nodes should be |
| | initialized by the current process or not. |
| | Note: This option is only interpreted if |
| | network_init is set to on (true, yes, 1) and |
| | is ignored otherwise. Optional. Default: true |
+-----------------------+------------------------------------------------+
| edge_init | This switch (on/off, true/false, yes/no, 1/0) |
| | is optional (default value true) and tells the |
| | simulation if the network edges should be |
| | initialized by the current process or not. |
| | Note: This option is only interpreted if |
| | network_init is set to on (true, yes, 1) and |
| | is ignored otherwise. Optional. Default: true. |
+-----------------------+------------------------------------------------+
| module_paths          | This is an optional list (comma-separated) of  |
|                       | directory paths that the simulation will add   |
|                       | to the python path before loading the network  |
|                       | generation and process routines. Useful if you |
|                       | have written your own functions that reside in |
|                       | some directory not on the path. See the        |
|                       | tutorial for examples on how this option is    |
|                       | used.                                          |
+-----------------------+------------------------------------------------+
| include_files         | Optional list (comma-separated) containing     |
|                       | names of additional configuration files to     |
|                       | include. The files will be read in order and   |
|                       | their sections added to the configuration.     |
|                       | This allows large configuration files to be    |
|                       | split into logical sections and stored in      |
|                       | individual files.                              |
+-----------------------+------------------------------------------------+

+----------------------------+-------------------------------------------+
| Output section options                                                 |
+----------------------------+-------------------------------------------+
| Option key                 | Explanation                               |
+============================+===========================================+
| output_dir                 | Output directory where files will be      |
|                            | saved.                                    |
|                            | The directory must exist and be writable. |
+----------------------------+-------------------------------------------+
| base_name                  | This is the base name of all files        |
|                            | generated by the run.                     |
+----------------------------+-------------------------------------------+
| unique                     | Optional (default value true) switch      |
|                            | (on/off, true/false, yes/no, 1/0). If     |
|                            | unique is defined as true, yes, 1, or on, |
|                            | unique file names will be created (time   |
|                            | stamp added).                             |
+----------------------------+-------------------------------------------+
| save_config                | Switch (on/off, true/false, yes/no, 1/0). |
|                            | If this is true, yes, 1, or on, a copy of |
|                            | the full program config, plus an Info     |
|                            | section will be saved.                    |
+----------------------------+-------------------------------------------+
| db_name                    | Optional (default value dependent on      |
|                            | options base_name and unique).            |
|                            | If specified this file name will be used  |
|                            | as the sqlite3 database name for output.  |
|                            | db_name overrides options base_name and   |
|                            | unique. If db_name does not contain an    |
|                            | absolute path output_dir is prefixed.     |
|                            | If the database file does not exist it is |
|                            | created.                                  |
|                            | If the database exists and is compatible  |
|                            | with the current simulation it is         |
|                            | appended to. If not, an error is          |
|                            | generated.                                |
+----------------------------+-------------------------------------------+
| save_state_count           | Optional (default value true) switch      |
|                            | (on/off, true/false, yes/no, 1/0).        |
|                            | If this is true/yes/on, the network node  |
|                            | states will be counted and saved as a csv |
|                            | file.                                     |
|                            | Note: only valid if the current process   |
|                            | supports node updates. If not, nothing    |
|                            | will be saved.                            |
+----------------------------+-------------------------------------------+
| save_state_count_interval  | Optional (default value 1). Count nodes   |
|                            | every <value> iterations. Value should be |
|                            | an integer >= 1. Note, initial and final  |
|                            | node state counts are always saved even   |
|                            | if they are not covered by the interval.  |
+----------------------------+-------------------------------------------+
| save_network_compress_file | Optional (default value true) switch      |
|                            | (on/off, true/false, yes/no, 1/0).        |
|                            | Denotes if the saved network files should |
|                            | be bz2 compressed.                        |
+----------------------------+-------------------------------------------+
| save_state_transition_cnt  | Optional (default value false) switch     |
|                            | (on/off, true/false, yes/no, 1/0).        |
|                            | If set to true, a csv file is saved       |
|                            | containing the count of every possible    |
|                            | transition in every time step.            |
|                            | The file format is time in first column,  |
|                            | old state in second column, and number    |
|                            | of transitions to destination state in    |
|                            | the following columns. Destination states |
|                            | are given by the first row.               |
+----------------------------+-------------------------------------------+
| print_progress_bar         | Optional (default value true) switch      |
|                            | (on/off, true/false, yes/no, 1/0).        |
|                            | If set to true, and the number of         |
|                            | iterations is greater than 100, a         |
|                            | progress indicator is printed while the   |
|                            | simulation is running.                    |
+----------------------------+-------------------------------------------+

+----------------------------+-------------------------------------------+
| Logging section options                                                |
+----------------------------+-------------------------------------------+
| Option key                 | Explanation                               |
+============================+===========================================+
| level                      | Optional (default value DEBUG).           |
|                            | Must be one of DEBUG/INFO/WARN/SILENT.    |
+----------------------------+-------------------------------------------+
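
Example
-------
A minimal configuration illustrating the Output and Logging sections
(section and option names as documented above; the values are only
illustrative)::

    [Output]
    output_dir = ./results
    base_name = sim
    unique = yes
    save_config = yes

    [Logging]
    level = INFO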
"""
# Configuration file constants.
CFG_SECTION_OUTPT = "Output"
CFG_SECTION_SIM = "Simulation"
CFG_SECTION_LOG = "Logging"
CFG_SECTION_MOD = "ProcessParameters"
# This section is used to store information about the sim.
CFG_SECTION_INFO = "Info"
# This section carries the network parameters.
CFG_SECTION_NETWORK = "NetworkParameters"
# This section is used to distribute states among the nodes.
CFG_SECTION_NODE_STATE_DIST = "NodeStateDistribution"
# This section is used to distribute states among the edges.
CFG_SECTION_EDGE_STATE_DIST = "EdgeStateDistribution"
# Parameter names.
# Simulation parameters
CFG_PARAM_mod_path = "module_paths"
CFG_PARAM_outputDir = "output_dir"
CFG_PARAM_baseFileName = "base_name"
CFG_PARAM_uniqueFileName = "unique"
CFG_PARAM_dt = "dt"
CFG_PARAM_iterations = "iterations"
CFG_PARAM_process_name = "process_class"
CFG_PARAM_process_module = "process_class_module"
CFG_PARAM_network_name = "network_func"
CFG_PARAM_network_module = "network_func_module"
CFG_PARAM_save_config = "save_config"
CFG_PARAM_network_init = "network_init"
CFG_PARAM_node_init = "node_init"
CFG_PARAM_edge_init = "edge_init"
CFG_PARAM_include_files = "include_files"
# Info parameters.
CFG_PARAM_execute_time = "sim_exec_time"
CFG_PARAM_avgclust = "avg_clustering"
CFG_PARAM_avgdegree = "avg_degree"
CFG_PARAM_nepidemix_version = "NepidemiX_version"
# Network output parameters
CFG_PARAM_save_network = "save_network"
CFG_PARAM_save_network_interval = "save_network_interval"
CFG_PARAM_save_network_format = "save_network_format"
CFG_PARAM_save_network_compress_file = "save_network_compress_file"
CFG_PARAM_save_state_count = "save_state_count"
CFG_PARAM_save_state_count_interval = "save_state_count_interval"
CFG_PARAM_save_node_rule_transition_count = "save_state_transition_cnt"
CFG_PARAM_print_progress = "print_progress_bar"
CFG_PARAM_db_name = "db_name"
# Names of fields in the network graph dictionary.
TIME_FIELD_NAME = "Time"
STATE_COUNT_FIELD_NAME = "state_count"
def __init__(self):
"""
Initialization method.
"""
self.process = None
self.network = None
self.stateSamples = None
self.save_config = False
self.settings = None
# Set when database is initialized, and simulation table filled out.
self._db_sim_id = None
def execute(self):
"""
Execute simulation.
The simulation must be configured before this method is called.
"""
nwcopytime = 0
startTime = time.time()
logger.info("Running simulation.")
logger.info("Simulation will cover {0} months."\
.format(self.iterations*self.dt))
# If the network is to be saved, then save the initial config.
if self.saveNetwork == True:
self._saveNetwork(number = 0)
# Create state count arrays, count and add the initial states.
self.stateSamples = {}
self.stateSamples[self.STATE_COUNT_FIELD_NAME] = []
# Get database cursor if there is a connection.
db_cur = self._dbConnection.cursor() if self._dbConnection != None else None
# Add entry for time 0.
for k in self.stateSamples:
# Create dictionary.
countDict = {}
# Insert time stamp.
countDict[self.TIME_FIELD_NAME] = self.network.graph.get(self.TIME_FIELD_NAME,0.0)
# Copy data.
countDict.update(dict([ (s,str(v)) for s,v in self.network.\
graph[k].iteritems()]))
self.stateSamples[k].append(countDict)
logger.info("Initial node state count vector: {0}".format(self.stateSamples[self.STATE_COUNT_FIELD_NAME]))
readNetwork = self.network
logger.info("Process will leave topology constant?: {0}".format(self.process.constantTopology))
if self.process.constantTopology == True:
writeNetwork = readNetwork.copy()
else:
writeNetwork = networkx.Graph()
for stk in self.stateSamples:
writeNetwork.graph[stk] = readNetwork.graph[stk].copy()
for it in range(self.iterations):
# Add a node transition count array for this iteration (update timestamp and copy data array).
# Update nodes.
if self.process.constantTopology == False or self.process.runNodeUpdate == True:
# Go over all nodes.
for n in readNetwork.nodes_iter(data = True):
oldstate = self.process.deduceNodeState(n)
nc = (n[0], n[1].copy())
nc = self.process.nodeUpdateRule(nc,
readNetwork,
self.dt)
writeNetwork.add_node(nc[0], nc[1])
newstate = self.process.deduceNodeState(nc)
if newstate != oldstate:
# Debug code (disabled):
# own = int(writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][oldstate])
# nwn = int(writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][newstate])
# orn = int(readNetwork.graph[self.STATE_COUNT_FIELD_NAME][oldstate])
# nrn = int(readNetwork.graph[self.STATE_COUNT_FIELD_NAME][newstate])
# Update count
writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][newstate] += 1
writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][oldstate] -= 1
# Update database
# Check if we have a description of the destination state
# (the source state should be there per definition)
# If not, insert it.
if db_cur != None:
ncks = nc[1].keys()
db_cur.execute("""INSERT OR IGNORE INTO {0}({1}, {2}) VALUES ({3})"""\
.format(sqlite3io.NODE_STATE_TABLE_NAME,
sqlite3io.NODE_STATE_TABLE_ID_COL,
",".join(ncks),
",".join(["?"]*(1+len(nc[1])))),
[hash(newstate)]+
[nc[1][k] for k in ncks])
db_cur.execute("""INSERT INTO {0}({1}, {2},
{3}, {4},
{5},
{6},
{7})
VALUES (?, ?, ?, ?, ?, ?, ?)"""\
.format(sqlite3io.NODE_EVENT_TABLE_NAME,
sqlite3io.NODE_EVENT_TABLE_SRC_STATE_COL,
sqlite3io.NODE_EVENT_TABLE_DST_STATE_COL,
sqlite3io.NODE_EVENT_TABLE_NODE_ID_COL,
sqlite3io.NODE_EVENT_TABLE_SIM_ID_COL,
sqlite3io.NODE_EVENT_TABLE_SIM_TIME_COL,
sqlite3io.NODE_EVENT_TABLE_MAJOR_IT_COL,
sqlite3io.NODE_EVENT_TABLE_MINOR_IT_COL,
),
(hash(oldstate), hash(newstate),
n[0], self._db_sim_id,
readNetwork.graph[self.TIME_FIELD_NAME],
it, n[0]))
# Update edges.
if self.process.constantTopology == False or self.process.runEdgeUpdate == True:
for e in readNetwork.edges_iter(data = True):
oldstate = self.process.deduceEdgeState(e)
ne = (e[0], e[1], e[2].copy())
ne = self.process.edgeUpdateRule(ne,
readNetwork,
self.dt)
writeNetwork.add_edge(ne[0], ne[1], ne[2])
newstate = self.process.deduceEdgeState(ne)
if newstate != oldstate:
# Update count
writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][newstate] += 1
writeNetwork.graph[self.STATE_COUNT_FIELD_NAME][oldstate] -= 1
if self.process.constantTopology == False or self.process.runNetworkUpdate == True:
writeNetwork = self.process.networkUpdateRule(writeNetwork, self.dt)
writeNetwork.graph[self.TIME_FIELD_NAME] = readNetwork.graph[self.TIME_FIELD_NAME] + self.dt
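# Swap buffers: the freshly written network becomes the read network
# for the next iteration, and the old read network is reused as the
# next write buffer.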
self.network = writeNetwork
writeNetwork = readNetwork
readNetwork = self.network
if self.process.constantTopology == False:
writeNetwork.clear()
# Always update the graph data
for k in readNetwork.graph:
writeNetwork.graph[k] = copy.deepcopy(readNetwork.graph[k])
# Check if we should save node state this iteration.
# it +1 is checked as the 0th is always saved before the loop.
# Also always save the last result.
if k in self.stateSamples and self.saveStates[k] and \
((self.saveStatesInterval[k] >0 and (it+1)%(self.saveStatesInterval[k]) == 0)\
or (it == self.iterations -1 )):
# Add the mean field states.
countDict = {}
# Create dictionary.
# Insert time stamp.
countDict[self.TIME_FIELD_NAME] = self.network.graph[self.TIME_FIELD_NAME]
# Copy data.
countDict.update(dict([ (s,str(v)) for s,v in self.network.graph[k].iteritems()]))
# Add to current list of samples.
self.stateSamples[k].append(countDict)
# Check network saving. Same here as for states above:
# look at iteration +1, as it is done after execution of the rules.
if self.saveNetwork == True and ( \
( self.saveNetworkInterval >0 \
and (it+1)%(self.saveNetworkInterval) == 0 )\
or it == (self.iterations-1) ):
self._saveNetwork(number= (it+1))
# Print progress
if self.printProgress:
if it % int(self.iterations * 0.20) == 0:
sys.stdout.write("[{0}%]".format(int(it*100.0/self.iterations)))
sys.stdout.flush()
elif it % int(self.iterations * 0.025) == 0:
sys.stdout.write("=")
sys.stdout.flush()
# Print 100 % when done
if self.printProgress:
sys.stdout.write("[100%]\n")
# Commit changes to database
if self._dbConnection != None:
self._dbConnection.commit()
logger.info("Simulation done.")
endTime = time.time()
logger.info("Total execution time: {0} s.".format(endTime-startTime))
if self.settings != None:
self.settings.set(self.CFG_SECTION_INFO,
self.CFG_PARAM_execute_time,(endTime-startTime))
def configure(self, settings):
"""
Configure simulation.
Parameters
----------
settings : NepidemiXConfigParser, ConfigParser compatible
The settings in a ConfigParser compatible datastructure.
See Also
--------
nepidemix.NepidemiXConfigParser
"""
self.includeFiles = settings.getrange(self.CFG_SECTION_SIM,
self.CFG_PARAM_include_files,
default = [],
add_if_not_existing = False)
if len(self.includeFiles) > 0:
logger.info("Files {0} will be included.".format(", ".join(self.includeFiles)))
for fileName in self.includeFiles:
with open(fileName) as fp:
settings.readfp(fp)
self.settings = settings
if not self.settings.has_section(self.CFG_SECTION_INFO):
self.settings.add_section(self.CFG_SECTION_INFO)
try:
try:
self.outputDir = settings.get(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_outputDir)
logger.info("Output directory set to '{0}'".format(self.outputDir))
self.baseFileName = settings.get(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_baseFileName)
self.save_config = settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_config)
self.iterations = settings.getint(self.CFG_SECTION_SIM,
self.CFG_PARAM_iterations)
logger.info("# iterations = {0}".format(self.iterations))
self.dt = settings.getfloat(self.CFG_SECTION_SIM,
self.CFG_PARAM_dt)
logger.info("dt = {0}".format(self.dt))
except NepidemiXBaseException as err:
logger.error("Missing mandatory config option : {0}".format(err))
sys.exit()
for pth in settings.getrange(self.CFG_SECTION_SIM,
self.CFG_PARAM_mod_path,
default=[]):
abspth = os.path.abspath(pth)
logger.info("Adding '{0}' to python path.".format(abspth))
sys.path.append(abspth)
except NepidemiXBaseException as err:
logger.error("Missing mandatory config section : {0}".format(err))
sys.exit()
# Construct process.
# Make dictionary from the settings.
dparams = settings.evaluateSection(self.CFG_SECTION_MOD)
process_name = settings.get(self.CFG_SECTION_SIM,
self.CFG_PARAM_process_name)
process_module = settings.get(self.CFG_SECTION_SIM,
self.CFG_PARAM_process_module,
default = 'nepidemix.process')
self.process = _import_and_execute(process_name, process_module,dparams)
logger.info("Created '{0}' object"
.format(process_name))
# Set/update version info field.
self.settings.set(self.CFG_SECTION_INFO,
self.CFG_PARAM_nepidemix_version,
full_version)
# Construct and initialize network.
dparams = settings.evaluateSection(self.CFG_SECTION_NETWORK)
nwork_name = settings.get(self.CFG_SECTION_SIM,
self.CFG_PARAM_network_name)
nwork_module = settings.get(self.CFG_SECTION_SIM,
self.CFG_PARAM_network_module,
default = 'nepidemix.utilities.networkgeneratorwrappers')
self.network = _import_and_execute(nwork_name, nwork_module,dparams)
# Change the standard dictionary in the NetworkX graph to an ordered one.
self.network.graph = OrderedDict(self.network.graph)
if self.network.graph.has_key(self.STATE_COUNT_FIELD_NAME) == False:
# Create a dictionary for the state counts.
self.network.graph[self.STATE_COUNT_FIELD_NAME] = OrderedDict()
logger.info("Created '{0}' network with {1} nodes." \
.format(nwork_name, len(self.network)))
# Save the average clustering to info section
if not self.network.is_directed():
self.settings.set(self.CFG_SECTION_INFO,
self.CFG_PARAM_avgclust,
networkx.average_clustering(self.network))
# And the average degree
self.settings.set(self.CFG_SECTION_INFO,
self.CFG_PARAM_avgdegree,
sum(networkx.degree(self.network).values())\
/float(len(self.network)))
# Initialize the states
# Check if init should be performed.
if (not settings.has_option(self.CFG_SECTION_SIM, self.CFG_PARAM_network_init))\
or settings.getboolean(self.CFG_SECTION_SIM, self.CFG_PARAM_network_init):
logger.info("Performing network setup.")
# Add an attribute for time/set time to zero.
# If graph does not already have such a field.
if self.network.graph.has_key(self.TIME_FIELD_NAME) == False:
self.network.graph[self.TIME_FIELD_NAME] = 0.0
logger.debug("Time field after init: {0}".format(self.network.graph[self.TIME_FIELD_NAME]))
# Nodes
if settings.getboolean(self.CFG_SECTION_SIM,
self.CFG_PARAM_node_init, default = True):
if settings.has_section(self.CFG_SECTION_NODE_STATE_DIST):
attDict = settings.evaluateSection(self.CFG_SECTION_NODE_STATE_DIST)
else:
attDict = {}
self.process.initializeNetworkNodes(self.network, **attDict)
else:
logger.info("Skipping node initialization.")
# Edges
if settings.getboolean(self.CFG_SECTION_SIM,
self.CFG_PARAM_edge_init, default = True):
if settings.has_section(self.CFG_SECTION_EDGE_STATE_DIST):
attDict = settings.evaluateSection(self.CFG_SECTION_EDGE_STATE_DIST)
else:
attDict = {}
self.process.initializeNetworkEdges(self.network, **attDict)
else:
logger.info("Skipping edge initialization.")
# The network itself.
# Right now it doesn't have a configuration section.
self.process.initializeNetwork(self.network)
self.saveStatesInterval = {}
self.saveStatesInterval[self.STATE_COUNT_FIELD_NAME] = \
settings.getint(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_state_count_interval,
default=1)
self.saveStates = {}
self.saveStates[self.STATE_COUNT_FIELD_NAME] = \
settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_state_count,
default=True)
self.saveNodeRuleTransitionCount = \
settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_node_rule_transition_count,
default = False)
# If there is no option to set a unique file name take it as true.
# If there is one we have to check if it is set to true.
# Then update name.
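# The resulting name is e.g. "sim_Mon_Jan_1_12-00-00_2024" for
# base_name "sim" (time stamp taken from time.ctime()).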
if (not settings.has_option(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_uniqueFileName)
) or (settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_uniqueFileName)
) == True:
self.baseFileName = self.baseFileName + '_'+\
"-".join(("_".join(time.ctime().split())).split(':'))
logger.info("Base file name set to: '{0}'".format(self.baseFileName))
# Network save options
self.saveNetwork = settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_network,
default=False)
self.saveNetworkInterval = settings.getint(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_network_interval,
default = 0)
self.saveNetworkFormat = settings.get(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_network_format,
default = 'gpickle')
# Inserting a warning here in case the format is GraphML as saving in this
# format was defunct and has been removed.
if self.saveNetworkFormat == 'GraphML':
logger.warning("Saving in GraphML no longer supported. Simulation will run, but no graph data will be saved!")
self.saveNetworkFormatCompress = \
settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_save_network_compress_file,
default = True)
# Print progress bar if turned on and the number of iterations
# are greater than 100.
self.printProgress = \
settings.getboolean(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_print_progress,
default = True) \
and (self.iterations > 100)
# Database name and creation
db_name = settings.get(self.CFG_SECTION_OUTPT,
self.CFG_PARAM_db_name,
default = "{0}.db".format(self.baseFileName))
if not os.path.isabs(db_name):
db_name = os.path.join(self.outputDir,db_name)
self._setupDatabase(db_name)
def saveData(self):
"""
Save any computed data as per configuration.
If execute() has not yet been run (i.e. no data exist) an error message
is printed.
"""
logger.info("Saving data.")
if True in self.saveStates.values():
if self.stateSamples == None:
logger.error("No data to save exists. Run execute() first.")
else:
for sampleName in self.stateSamples:
if self.saveStates[sampleName] == True:
stateDataFName = self.outputDir+"/"+self.baseFileName+"_{0}.csv".format(sampleName)
logger.info("File = '{0}'".format(stateDataFName))
try:
with open(stateDataFName, 'wb') as stateDataFP:
stateDataWriter = csv.writer(stateDataFP)
# Keys are time stamp
keys = [self.TIME_FIELD_NAME]
# and labels
keys.extend(self.network.graph[sampleName].keys())
# Write labels in first row
stateDataWriter.writerow(keys)
# Write data.
for row in self.stateSamples[sampleName]:
stateDataWriter.writerow([row.get(k,0) for k in keys])
except IOError:
logger.error("Could not open file '{0}' for writing!"\
.format(stateDataFName))
if self.save_config == True:
if self.settings == None:
logger.error("No settings to save exists.")
else:
configDataFName = self.outputDir+"/"+self.baseFileName+".ini"
try:
with open(configDataFName, 'wb') as configDataFP:
self.settings.write(configDataFP)
except IOError:
logger.error("Could not open file '{0}' for writing!"\
.format(configDataFName))
logger.info("Saving done")
def _saveNetwork(self, number = -1):
"""
Save network to file.
Currently gpickle (uncompressed or bz2 compressed) is supported.
Parameters
----------
number : int, optional
If >0 this number will be appended (zero padded) to the file name.
Default value -1.
"""
sveBaseName = "{0}/{1}".format(self.outputDir,
self.baseFileName)
if number >= 0:
sveBaseName = sveBaseName +"_{0:010}".format(number)
sveBaseName = sveBaseName + ".{0}".format(self.saveNetworkFormat)
if self.saveNetworkFormatCompress == True:
sveBaseName = sveBaseName + '.bz2'
if self.saveNetworkFormat == 'gpickle':
networkx.readwrite.gpickle.write_gpickle(self.network,
sveBaseName)
else:
logger.error("Unknown file format {0}".format(\
self.saveNetworkFormat))
# logger.info("Wrote initial graph to '{0}'.".format(sveBaseName))
def _setupDatabase(self, db_name):
self._dbConnection = None
self._db_sim_id = None
try:
# Try to get the sqlite3 package
import sqlite3
logger.info("Connecting to database '{0}'".format(db_name))
self._dbConnection = sqlite3.connect(db_name)
cur = self._dbConnection.cursor()
# Check if the tables do not exist, create them.
tbls = sorted([n[0] for n in cur.execute("SELECT name from sqlite_master")])
if not all([d in tbls for d in [sqlite3io.SIMULATION_TABLE_NAME,
sqlite3io.NODE_EVENT_TABLE_NAME,
sqlite3io.NODE_STATE_TABLE_NAME]]):
cur.execute("""CREATE TABLE {0} ({1} INTEGER PRIMARY KEY,
{2} TEXT,
{3} BLOB,
{4} BLOB,
{5} INTEGER,
{6} INTEGER,
{7} DATETIME DEFAULT CURRENT_TIMESTAMP)"""\
.format(sqlite3io.SIMULATION_TABLE_NAME,
sqlite3io.SIMULATION_TABLE_SIM_ID_COL,
sqlite3io.SIMULATION_TABLE_NPX_V_COL,
sqlite3io.SIMULATION_TABLE_GRAPH_COL,
sqlite3io.SIMULATION_TABLE_CONF_COL,
sqlite3io.SIMULATION_TABLE_NUM_NODES_COL,
sqlite3io.SIMULATION_TABLE_NUM_EDGES_COL,
sqlite3io.SIMULATION_TABLE_TIME_COL,
))
cur.execute("""CREATE TABLE {0} ({1} INTEGER, {2} INTEGER,
{3} INTEGER, {4} INTEGER,
{5} FLOAT,
{6} INTEGER,
{7} INTEGER,
PRIMARY KEY ({4},
{6},
{7}))"""\
.format(sqlite3io.NODE_EVENT_TABLE_NAME,
sqlite3io.NODE_EVENT_TABLE_SRC_STATE_COL,
sqlite3io.NODE_EVENT_TABLE_DST_STATE_COL,
sqlite3io.NODE_EVENT_TABLE_NODE_ID_COL,
sqlite3io.NODE_EVENT_TABLE_SIM_ID_COL,
sqlite3io.NODE_EVENT_TABLE_SIM_TIME_COL,
sqlite3io.NODE_EVENT_TABLE_MAJOR_IT_COL,
sqlite3io.NODE_EVENT_TABLE_MINOR_IT_COL,))
# Now create the table describing the node attributes
# Deduce them first. As the state dictionary should be the same for
# all nodes use the first one.
key_types = ["{0} {1}".format(k, {str:"TEXT",
int:"INTEGER",
float:"REAL"}.get(type(v),"BLOB")
)\
for k,v in self.network.nodes(data=True)[0][1].iteritems()]
cur.execute("""CREATE TABLE {0} ({1} INTEGER PRIMARY KEY,
{2})"""\
.format(sqlite3io.NODE_STATE_TABLE_NAME,
sqlite3io.NODE_STATE_TABLE_ID_COL,
",".join(key_types)))
# Create a simulation entry
cur.execute("""INSERT INTO {0} ({1}, {2}, {3}, {4}, {5}) VALUES (?,?,?,?,?)"""\
.format(sqlite3io.SIMULATION_TABLE_NAME,
sqlite3io.SIMULATION_TABLE_NPX_V_COL,
sqlite3io.SIMULATION_TABLE_GRAPH_COL,
sqlite3io.SIMULATION_TABLE_NUM_NODES_COL,
sqlite3io.SIMULATION_TABLE_NUM_EDGES_COL,
sqlite3io.SIMULATION_TABLE_CONF_COL,
),
(full_version,
sqlite3.Binary(pickle.dumps(self.network, protocol=-1)),
self.network.number_of_nodes(),
self.network.number_of_edges(),
sqlite3.Binary(pickle.dumps(self.settings, protocol=-1)),
))
self._dbConnection.commit()
# Get and set the simulation ID.
self._db_sim_id = cur.lastrowid
logger.debug("_db_sim_id = {0}".format(self._db_sim_id))
# Now populate the state database with the initial graph states
for nc in self.network.nodes_iter(data=True):
ncks = nc[1].keys()
cur.execute("""INSERT OR IGNORE INTO {0}({1}, {2}) VALUES ({3})"""\
.format(sqlite3io.NODE_STATE_TABLE_NAME,
sqlite3io.NODE_STATE_TABLE_ID_COL,
",".join(ncks),
",".join(["?"]*(1+len(nc[1])))),
[hash(self.process.deduceNodeState(nc))]+
[nc[1][k] for k in ncks])
self._dbConnection.commit()
except sqlite3.OperationalError as sqlerr:
logger.error("Could not open connection to database '{0}'.\n"\
.format(db_name) + "Sqlite3 Error message: '{0}'."\
.format(sqlerr)+ "\n Will not proceed with database output.")
except ImportError:
# No sqlite 3 found
logger.error("sqlite3 package not found. No database logging supported.")
def _import_and_execute(name, modules, parameters):
"""
Utility function that loads a function or class object from a module
and executes it.
Basically perform a 'from <modules> import <name>'
Then executes name with the parameters.
Finally the name is unloaded from the name space.
Parameters
----------
name : str
String containing the name of the function/class to load.
modules : str
String containing the standard dot separated modules path
parameters : dict
Dictionary of function parameters. Will be sent in with a **kwargs style
call. Thus if your function has a fixed parameter list make sure that the
dictionary keys match the names.
Returns
-------
retval : special
The result of the function call.
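
Example
-------
A hypothetical call (the class name and parameters below are for
illustration only):

>>> process = _import_and_execute('SIRProcess', 'nepidemix.process',
... {'beta': 0.3})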
"""
retval = None
# First have a go at an absolute import.
try:
exec("from {0} import {1} as nm_impexf".format(modules, name))
# Execute.
except ImportError as e:
# If that fails try to import 'dotted'
try:
exec("import {0}.{1} as nm_impexf".format(modules, name))
except ImportError as e:
emsg = "Could not import {0} from {1}, nor {0}.{1}; Check that the module and class names are correct."\
.format(name, modules)
logger.error(emsg)
raise NepidemiXBaseException(emsg)
if parameters == None:
retval = nm_impexf()
else:
retval = nm_impexf(**parameters)
# Remove from name space.
del nm_impexf
# Return
return retval
| |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds unused assets in Chromium code base.
This script is experimental. While it tries to discover known usages, the assets
identified should still be manually verified. It currently only looks for PNG
files.
The script also ignores several directory trees, such as third_party, docs, and
testing directories.
Usage:
Either execute the script from the chromium/src directory or specify a
chromium/src directory via the --src_dir flag.
Example:
$ tools/resources/find_unused_assets.py
To output the list of unused assets to a file, use the --output_unused_files
flag.
Example:
$ tools/resources/find_unused_assets.py \
--output_unused_files=/tmp/unused_files.txt
"""
import argparse
from concurrent import futures
import functools
from html import parser as html_parser
import itertools
import json
import logging
import os
import pprint
import re
import subprocess
import sys
import threading
from typing import Iterable, List, Optional, Sequence, Set, Text, Tuple
from xml.etree import ElementTree as ET
IGNORED_PATHSPECS = [
':!**/docs/**',
':!**/pdfium/**',
':!**/test/**',
':!**/testdata/**',
':!**/testing/**',
':!**/vectorcanvastest/**',
':!**/*unittest/**',
':!docs/**',
':!native_client_sdk/**',
':!testing/**',
':!third_party/**',
':!tools/perf/page_sets/**',
':!tools/stats_viewer/**',
]
logger = logging.getLogger(__name__)
def init_logger():
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--src_dir',
help='Optional chromium/src directory to analyze. If not specified, '
'defaults to current working directory.')
parser.add_argument('--output_unused_files',
help='Filepath to output the list of unused files.')
parser.add_argument('--output_all_png_files',
help='Filepath to output the list of all PNG files.')
return parser.parse_args()
def list_files(pathspecs: Sequence[str]) -> List[str]:
cmd = ['git', 'ls-files'] + list(pathspecs) + IGNORED_PATHSPECS
return subprocess.check_output(cmd).decode('utf-8').strip().split('\n')
def get_all_ext_files(extension: str) -> List[str]:
return list_files([f'**/*.{extension}'])
def find_filepath_usage(fpath: str, segs: int = 1):
"""Finds file usage based on filepath.
Args:
fpath: The relative filepath.
segs: The number of filepath segments to use for checking.
Returns:
True if usage found, False otherwise.
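
For example, with segs=2 the file "foo/bar/baz.png" is grepped for as
"bar", then a path separator, then "baz.png", with the dots escaped.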
"""
pattern = r'[\\/]'.join(fpath.split(os.sep)[-segs:])
pattern = pattern.replace('\\', '\\\\')
pattern = pattern.replace('.', '\\.')
found = grep_pattern(pattern, pathspecs=tuple(IGNORED_PATHSPECS))
if not found:
logger.info('Found unused file: %s', fpath)
return found
@functools.lru_cache()
def grep_pattern(pattern: Text,
pathspecs: Optional[Sequence[Text]] = None,
fixed: bool = False) -> List[Text]:
"""Greps for the specified pattern.
Args:
pattern: Regex pattern to find.
pathspecs: Optional sequence of pathspecs to limit the search.
fixed: Whether the pattern should be treated as a fixed string instead of a
regex pattern.
Returns:
A list of matching filepaths.
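
For example, grep_pattern(r'R\.drawable\.icon') runs
"git grep -I -w -l -e R\.drawable\.icon --" and returns the matching
filepaths (an empty list when git grep exits with status 1).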
"""
pathspecs = pathspecs or []
cmd = ['git', 'grep', '-I', '-w', '-l', '-e', pattern]
if fixed:
cmd.append('-F')
cmd.append('--')
cmd.extend(pathspecs)
try:
return subprocess.check_output(cmd).decode('utf-8').strip().split('\n')
except subprocess.CalledProcessError as ex:
if ex.returncode == 1:
return []
else:
logger.exception('Got error when grepping: %s', pattern)
raise
def find_usage_of_java_res(fpath: Text,
pathspecs: Optional[Iterable[Text]] = None
) -> List[Text]:
"""Finds usage of a Java image resource.
Args:
fpath: The filepath to find.
pathspecs: Optional pathspecs to look for usages.
Returns:
A list of filepaths that use |fpath|.
"""
pathspecs = pathspecs or []
common_pathspecs = list(pathspecs) + [':!**/*.gni', ':!**/*.gn'
] + IGNORED_PATHSPECS
basename = os.path.basename(fpath).rsplit('.')[0]
dirname = os.path.dirname(fpath)
if 'drawable' in dirname:
res_type = 'drawable'
elif 'mipmap' in dirname:
res_type = 'mipmap'
else:
raise ValueError(f'Could not parse resource type from filepath: {fpath}')
pathspec_prefixes = [
'android_webview',
'chrome/android',
'chrome/browser',
'components',
'content/public/android',
'content/shell/android',
'remoting/android',
'tools/android',
'ui/android',
'weblayer/browser',
'weblayer/shell',
]
xml_pattern = f'@{res_type}/{basename}'
found = grep_pattern(
xml_pattern,
pathspecs=tuple([p + '/**/*.xml'
for p in pathspec_prefixes] + common_pathspecs),
fixed=True)
java_pattern = f'R.{res_type}.{basename}'
found = found or grep_pattern(
java_pattern,
pathspecs=tuple([p + '/**/*.java' for p in pathspec_prefixes] + [
'chrome/browser/android/resource_id.h',
'components/resources/android/*.h'
] + common_pathspecs),
fixed=True)
if not found:
logger.info('Found unused file: %s', fpath)
return found
def filter_usages_of_java_res(png_files: Iterable[Text], path_pattern: Text
) -> Tuple[Set[Text], Set[Text]]:
"""Filters and finds used and unused Java image resources.
Args:
png_files: The paths of image files to filter through and find usages of.
path_pattern: The regex pattern to filter filepaths.
Returns:
A tuple of (used, unused) filepaths.
"""
used_png_files = set()
unused_png_files = set()
containing_files = set()
matcher = re.compile(path_pattern)
for png_file in sorted(png_files):
matches = matcher.search(png_file)
if not matches:
continue
found = find_usage_of_java_res(png_file)
if found:
used_png_files.add(png_file)
containing_files |= set(found)
else:
unused_png_files.add(png_file)
logger.info('Found %d PNG files used under %s dirs.', len(used_png_files),
path_pattern)
logger.info('Found %d unused PNG files under %s dirs.', len(unused_png_files),
path_pattern)
logger.debug('Containing files:\n%s',
pprint.pformat(sorted(containing_files)))
return used_png_files, unused_png_files
def find_images_by_filepath_string(filepaths: Iterable[Text],
all_png_files: Set[Text]
) -> Tuple[Set[Text], Set[Text]]:
"""Finds images used and maybe unused by looking at filepath strings.
The maybe unused files are determined by the fact that they're not referenced
by any of the files in |filepaths| that are in their ancestor directories.
Args:
filepaths: The paths of the files that contain filepath strings.
all_png_files: The set of all PNG files, used to filter results and
find unused files.
Returns:
A tuple of (used_files, maybe_unused_files).
"""
used_files = set()
maybe_unused_files = set()
ignored_files = set()
png_filepath_matcher = re.compile(r'"([^/][^"]+\.png)"')
for filepath in filepaths:
rel_dir = os.path.dirname(filepath)
used_by_current_file = set()
with open(filepath, 'rb') as f:
content = f.read().decode('utf-8')
for match in png_filepath_matcher.finditer(content):
img_relpath = match.group(1)
rooted_img_path = os.path.join(rel_dir, img_relpath)
if 'test' in os.path.dirname(img_relpath) or '$' in img_relpath:
logger.debug('Ignoring %s', rooted_img_path)
ignored_files.add(img_relpath)
continue
if rooted_img_path in all_png_files:
used_by_current_file.add(rooted_img_path)
else:
logger.warning('PNG file %s does not exist, reffed in %s as %s',
rooted_img_path, filepath, img_relpath)
used_files |= used_by_current_file
# Consider any images in subdirectories that were not used by this file as
# unused. Nested BUILD.gn files will be handled outside the loop.
maybe_unused_files |= set(
f for f in all_png_files
if f.startswith(rel_dir) and f not in used_by_current_file)
# Handles nested BUILD.gn files.
maybe_unused_files -= used_files
maybe_unused_files -= ignored_files
return used_files, maybe_unused_files
def find_images_used_by_grd(all_png_files: Set[Text]) -> Set[Text]:
"""Finds image files referenced by grd/grdp.
Args:
all_png_files: The set of all PNG files, used to filter results.
Returns:
A set of image filepaths that are referenced by grd/grdp.
"""
used_files = set()
grd_files = set(get_all_ext_files('grd') + get_all_ext_files('grdp'))
for grd_file in grd_files:
grd_dir = os.path.dirname(grd_file)
cur_relpaths = set()
grd_root = ET.parse(grd_file).getroot()
for elem in itertools.chain(grd_root.iter('include'),
grd_root.iter('structure')):
relpath = elem.get('file')
if relpath and relpath.endswith('.png'):
relpath = relpath.replace('\\', '/')
relpath = relpath.replace(
'${input_tools_root_dir}',
'third_party/google_input_tools/src/chrome/os')
if relpath.startswith('${root_src_dir}'):
relpath = relpath[len('${root_src_dir}') + 1:]
if relpath.startswith('/') or '$' in relpath:
logger.error('When processing %s got weird relpath: %s', grd_file,
relpath)
raise ValueError('Unexpected relpath!')
rooted_filepath = os.path.normpath(os.path.join(grd_dir, relpath))
if rooted_filepath in all_png_files:
used_files.add(rooted_filepath)
cur_relpaths.add(relpath)
for relpath in cur_relpaths:
pattern = re.compile(grd_dir + r'/(default_\d+_percent/)?' + relpath)
for png_file in all_png_files:
if pattern.match(png_file):
used_files.add(png_file)
return used_files
def find_images_used_by_html(all_png_files: Set[Text]) -> Set[Text]:
"""Finds images used by HTML img tags.
Args:
all_png_files: The set of all PNG files, used to filter results.
Returns:
A set of image filepaths referenced by HTML img tags.
"""
current_relpaths = []
class ImgHTMLParser(html_parser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag != 'img':
return
for name, value in attrs:
if name != 'src' or not value.endswith('.png'):
continue
current_relpaths.append(value)
used_files = set()
ignored_src_pattern = re.compile(r'^(https?://)')
html_files = set(get_all_ext_files('html'))
for html_file in html_files:
current_relpaths.clear()
parser = ImgHTMLParser()
with open(html_file, 'rb') as f:
try:
parser.feed(f.read().decode('utf-8'))
except UnicodeDecodeError:
logger.error('Failed to decode html file: %s', html_file)
continue
html_dir = os.path.dirname(html_file)
for src_relpath in current_relpaths:
if ignored_src_pattern.match(src_relpath):
continue
rooted_src_path = normalize_resource_url_path(html_dir, src_relpath)
if '{{static}}' in src_relpath:
# I don't know how to handle this yet.
continue
if rooted_src_path not in all_png_files:
logger.debug('PNG file %s does not exist, reffed in %s as %s',
rooted_src_path, html_file, src_relpath)
continue
used_files.add(rooted_src_path)
return used_files
def find_images_used_by_css(all_png_files: Set[Text]) -> Set[Text]:
"""Finds images used by css.
Args:
all_png_files: The set of all PNG files, used to filter results.
Returns:
A set of image filepaths referenced by css.
"""
used_files = set()
css_url_pattern = re.compile(r'\burl\(([^\)]+\.png)\)')
# Both .css and .html files can contain css.
files_with_css = set(get_all_ext_files('css') + get_all_ext_files('html'))
for css_file in files_with_css:
css_dir = os.path.dirname(css_file)
with open(css_file, 'rb') as f:
for match in css_url_pattern.finditer(f.read().decode('utf-8')):
url_relpath = match.group(1)
# TODO(aluh): Figure out if url references in css are relative to the
# css file (current assumption) or the including html file.
rooted_url_path = normalize_resource_url_path(css_dir, url_relpath)
if rooted_url_path not in all_png_files:
logger.debug('PNG file %s does not exist, reffed in %s as %s',
rooted_url_path, css_file, url_relpath)
continue
used_files.add(rooted_url_path)
return used_files
def normalize_resource_url_path(rel_dir: Text, url: Text) -> Text:
"""Joins the relative root directory with the URL path and normalizes it.
It handles the special case of the 'chrome://' web resource URL.
Args:
rel_dir: The relative directory from the source root.
url: The URL path of the asset.
Returns:
The joined and normalized filepath.
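
For example, normalize_resource_url_path('c/d', 'chrome://settings/a.png')
returns 'ui/webui/settings/a.png', while
normalize_resource_url_path('c/d', '../a.png') returns 'c/a.png'.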
"""
if url.startswith('chrome://'):
rooted_path = os.path.join('ui/webui', url[len('chrome://'):])
else:
rooted_path = os.path.join(rel_dir, url)
return os.path.normpath(rooted_path)
def find_images_used_by_markdown(all_png_files: Set[Text]) -> Set[Text]:
"""Finds images used by markdown files.
Args:
all_png_files: The set of all PNG files, used to filter results.
Returns:
A set of image filepaths used by markdown files.
"""
md_url_pattern = re.compile(r'\(([^\s\)]+\.png)[\s\)]')
md_files = list_files(['**/README', '**/*.md'])
used_files = set()
for md_file in md_files:
file_dir = os.path.dirname(md_file)
with open(md_file, 'rb') as f:
for match in md_url_pattern.finditer(f.read().decode('utf-8')):
url_relpath = match.group(1)
if url_relpath.startswith('http'):
continue
if url_relpath.startswith('/'):
rooted_filepath = url_relpath.lstrip('/')
else:
rooted_filepath = os.path.join(file_dir, url_relpath)
rooted_filepath = os.path.normpath(rooted_filepath)
if rooted_filepath in all_png_files:
used_files.add(rooted_filepath)
else:
logger.warning('PNG file %s does not exist, reffed in %s as %s',
rooted_filepath, md_file, url_relpath)
return used_files
def find_images_used_by_ios_imageset(all_png_files: Set[Text]
) -> Tuple[Set[Text], Set[Text]]:
"""Finds images used and unused by ios imagesets and friends.
Args:
all_png_files: The set of all PNG files, used to filter results.
Returns:
A tuple of (used, unused) image filepaths from ios imagesets.
"""
used_files = set()
unused_files = set()
json_files = set(
get_all_ext_files('imageset/Contents.json') +
get_all_ext_files('appiconset/Contents.json'))
for json_file in json_files:
rel_dir = os.path.dirname(json_file)
with open(json_file, 'rb') as f:
j = json.load(f)
for elem in j.get('images', []):
img_relpath = elem.get('filename', '')
if not img_relpath.endswith('.png'):
continue
rooted_img_path = os.path.join(rel_dir, img_relpath)
if rooted_img_path not in all_png_files:
logger.warning('PNG file %s does not exist, reffed in %s as %s',
rooted_img_path, json_file, img_relpath)
else:
used_files.add(rooted_img_path)
imageset_dirs = set(os.path.dirname(f) for f in json_files)
imageset_png_files = set()
for imageset_dir in imageset_dirs:
imageset_png_files |= set(
list_files([imageset_dir + '/*.png', imageset_dir + '/**/*.png']))
unused_files = (imageset_png_files & all_png_files) - used_files
return used_files, unused_files
def main() -> None:
init_logger()
args = parse_args()
src_dir = os.getcwd()
if args.src_dir and os.path.isdir(args.src_dir):
src_dir = os.path.realpath(args.src_dir)
os.chdir(src_dir)
logger.info('Searching directory: %s', src_dir)
# Set of all PNG files. Should not be modified.
all_png_files = set(get_all_ext_files('png'))
logger.info('Found %d PNG files total.', len(all_png_files))
if args.output_all_png_files:
with open(args.output_all_png_files, 'w') as fout:
for fp in sorted(all_png_files):
fout.write(fp)
fout.write('\n')
# Working set of files that still need to be examined.
png_files = set(all_png_files)
# Files that are used.
used_png_files = set()
# Files that are likely to be unused.
likely_unused_png_files = set()
# Exclude files used by grd files.
logger.info('Looking for usages by grd files...')
used_by_grd = find_images_used_by_grd(all_png_files)
logger.info('Found %d PNG files used by grd files.', len(used_by_grd))
used_png_files |= used_by_grd
png_files -= used_png_files
# Exclude files used by HTML files.
logger.info('Looking for usages by html files...')
used_by_html = find_images_used_by_html(all_png_files)
logger.info('Found %d PNG files used by html img tags.', len(used_by_html))
used_png_files |= used_by_html
png_files -= used_png_files
# Exclude files used in CSS.
logger.info('Looking for usages by css files...')
used_by_css = find_images_used_by_css(all_png_files)
logger.info('Found %d PNG files used in css.', len(used_by_css))
used_png_files |= used_by_css
png_files -= used_png_files
# Find files used by markdown.
logger.info('Looking for usages by markdown files...')
used_by_markdown = find_images_used_by_markdown(all_png_files)
logger.info('Found %d PNG files used by markdown.', len(used_by_markdown))
used_png_files |= used_by_markdown
png_files -= used_png_files
# Find images used by ios imagesets.
logger.info('Looking for usages by ios imagesets...')
used_by_imageset, unused_by_imageset = find_images_used_by_ios_imageset(
all_png_files)
logger.info('Found %d PNG files used in json.', len(used_by_imageset))
used_png_files |= used_by_imageset
png_files -= used_png_files
logger.info('Found %d likely unused imageset PNG files.',
len(unused_by_imageset))
likely_unused_png_files |= unused_by_imageset
png_files -= likely_unused_png_files
# Check for usages by filepath string.
manifest_pathspecs = ['**/manifest.json', '**/*.gni', '**/BUILD.gn']
for pathspec in manifest_pathspecs:
logger.info('Looking for usages with pathspec: %s', pathspec)
used_by_pathspec, unused_by_pathspec = find_images_by_filepath_string(
list_files([pathspec]), all_png_files)
used_png_files |= used_by_pathspec
png_files -= used_png_files
likely_unused_png_files |= unused_by_pathspec
png_files -= likely_unused_png_files
logger.info('Found %d used PNG files from pathspec: %s',
len(used_by_pathspec), pathspec)
logger.info('Found %d likely unused PNG files from pathspec: %s',
len(unused_by_pathspec), pathspec)
# Check java resources image files.
logger.info('Looking for usages by java or android resources...')
path_patterns = [r'/res\w*/(drawable|mipmap)']
for path_pattern in path_patterns:
used_java_res_png_files, unused_java_res_png_files = (
filter_usages_of_java_res(all_png_files, path_pattern=path_pattern))
used_png_files |= used_java_res_png_files
png_files -= used_png_files
likely_unused_png_files |= unused_java_res_png_files
png_files -= likely_unused_png_files
logger.info('Still have %d remaining files:', len(png_files))
pprint.pprint(sorted(png_files))
# Check remaining files.
lock = threading.Lock()
remaining_used_files = set()
remaining_unused_files = set()
logger.info('Searching for remaining files...')
def find_filepath_usage_wrapper(fpath: Text):
found = find_filepath_usage(fpath)
with lock:
if found:
remaining_used_files.add(fpath)
else:
remaining_unused_files.add(fpath)
with futures.ThreadPoolExecutor(max_workers=32) as executor:
num_processed = 0
for future in futures.as_completed(
executor.submit(find_filepath_usage_wrapper, fpath)
for fpath in png_files):
num_processed += 1
if num_processed % 20 == 0:
logger.info('Checked %d files so far.', num_processed)
if future.exception():
logger.error('Got exception when finding usage: %s', future.exception())
logger.info('Found %d used misc PNG files.', len(remaining_used_files))
logger.info('Found %d unused misc PNG files.', len(remaining_unused_files))
used_png_files |= remaining_used_files
likely_unused_png_files |= remaining_unused_files
png_files -= used_png_files
# Prune likely unused files.
logger.info('Pruning likely unused files...')
likely_unused_png_files -= used_png_files
# Summarize findings.
likely_unused_png_files = sorted(likely_unused_png_files)
logger.info('Summary:')
logger.info(' Found %d used files out of %d.', len(used_png_files),
len(all_png_files))
logger.info(' Found %d likely unused files.', len(likely_unused_png_files))
pprint.pprint(likely_unused_png_files)
if args.output_unused_files:
with open(os.path.realpath(args.output_unused_files), 'w') as fout:
for fp in likely_unused_png_files:
fout.write(fp)
fout.write('\n')
logger.info('Saved list of likely unused files to: %s',
args.output_unused_files)
if __name__ == '__main__':
sys.exit(main())
| |
# autocalibrator.py
#
# David J. Lampert (djlampert@gmail.com)
#
# Contains the AutoCalibrator class that can be used to calibrate a model.
# The class requires an HSPFModel class, start and end dates, and an output
# location to work in while running simulations. The primary function is
# autocalibrate, and it takes a list of HSPF variables, perturbations (as a
# percentage), optimization parameter, and flag for parallelization as
# keyword arguments. The calibration routine can be summarized as follows:
#
# 1. Set up a series of simulations with a small perturbation to the current
# parameter values for the parameters of interest
# 2. Make copies of the input HSPFModel and adjust the parameter values
# 3. Run the simulations and get the effect of the optimization parameter
# 4. Adjust the baseline parameter values if they improve performance
# 5. Repeat until a maximum is achieved.
#
# The class should be adaptable to other optimization parameters.
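#
# A minimal usage sketch (the file paths, dates, and gage id below are
# illustrative only, not taken from a real project):
#
# calibrator = AutoCalibrator('basemodel', datetime.datetime(2001, 1, 1),
# datetime.datetime(2011, 1, 1), 'calibration',
# gageid = '01594670', hydrology = True)
# calibrator.autocalibrate('calibrated', parallel = True)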
import os, pickle, datetime, time, numpy
from multiprocessing import Pool, cpu_count
from pyhspf.core import HSPFModel, WDMUtil
from .calibratormodel import CalibratorModel
class AutoCalibrator:
"""
A class to use to autocalibrate an HSPF model.
"""
def __init__(self,
hspfmodel,
start,
end,
output,
comid = None,
gageid = None,
atemp = False,
snow = False,
hydrology = False,
submodel = None,
warmup = 30,
parameter_ranges = {'IRC': (0.5, 2),
'LZETP': (0.2, 1.4),
'DEEPFR': (0, 1),
'LZSN': (0.2, 2),
'UZSN': (0.2, 10),
'INFILT': (0.01, 20),
'INTFW': (0.01, 2.5),
'AGWRC': (0.5, 2),
'KVARY': (0, 0.1),
'CCFACT': (1, 10),
'MGMELT': (0, 25),
},
):
self.hspfmodel = hspfmodel
self.submodel = submodel
self.start = start
self.end = end
self.output = output
self.gageid = gageid
self.comid = comid
self.atemp = atemp
self.snow = snow
self.hydrology = hydrology
self.warmup = warmup
self.parameter_ranges = parameter_ranges
def create_submodel(self,
filepath,
name,
overwrite = True,
verbose = True,
):
"""
Creates a submodel of the source model to enhance performance.
"""
if not os.path.isfile(filepath) or overwrite:
if verbose: print('creating a submodel\n')
with open(self.hspfmodel, 'rb') as f: hspfmodel = pickle.load(f)
submodel = CalibratorModel()
submodel.build_submodel(hspfmodel, self.comid, name = name)
with open(filepath, 'wb') as f: pickle.dump(submodel, f)
self.submodel = filepath
def copymodel(self,
name,
verbose = True,
):
"""
Returns a copy of the HSPFModel.
"""
if self.submodel is None: m = self.hspfmodel
else: m = self.submodel
with open(m, 'rb') as f: hspfmodel = pickle.load(f)
hspfmodel.filename = name
return hspfmodel
def adjust(self, model, variable, adjustment):
"""
Adjusts the values of the given parameter for all the PERLNDs in the
watershed by the "adjustment." The adjustments can be defined as
values relative to the default (products) or absolute values (sums).
"""
if variable == 'LZSN':
for p in model.perlnds: p.LZSN *= adjustment
if variable == 'UZSN':
for p in model.perlnds: p.UZSN *= adjustment
if variable == 'LZETP':
for p in model.perlnds: p.LZETP *= adjustment
if variable == 'INFILT':
for p in model.perlnds: p.INFILT *= adjustment
if variable == 'INTFW':
for p in model.perlnds: p.INTFW *= adjustment
if variable == 'IRC':
for p in model.perlnds: p.IRC *= adjustment
if variable == 'AGWRC':
for p in model.perlnds: p.AGWRC *= adjustment
if variable == 'KVARY':
for p in model.perlnds: p.KVARY = max(0, p.KVARY + adjustment)
if variable == 'DEEPFR':
for p in model.perlnds: p.DEEPFR += adjustment
if variable == 'CCFACT':
for o in model.perlnds + model.implnds:
o.CCFACT = min(10, max(1, o.CCFACT + adjustment))
if variable == 'MGMELT':
for o in model.perlnds + model.implnds:
o.MGMELT = min(25, max(0, o.MGMELT + adjustment))
def run(self,
model,
targets = ['reach_outvolume'],
verbose = False,
):
"""
Builds the input files, runs the simulation, and calculates and
returns the value of the optimization parameter.
"""
# build the input files and run
model.build_wdminfile()
if self.submodel is None:
model.build_uci(targets, self.start, self.end, atemp = self.atemp,
snow = self.snow, hydrology = self.hydrology)
else:
model.build_uci(self.comid, self.start, self.end,
atemp = self.atemp, snow = self.snow,
hydrology = self.hydrology)
model.run(verbose = verbose)
# get the regression information using the postprocessor
dates = self.start + datetime.timedelta(days = self.warmup), self.end
# use WDMUtil to get the simulated values
wdm = WDMUtil()
f = '{}_out.wdm'.format(model.filename)
wdm.open(f, 'r')
dsns = wdm.get_datasets(f)
staids = [wdm.get_attribute(f, n, 'STAID') for n in dsns]
data = wdm.get_data(f, dsns[staids.index(self.comid)],
start = dates[0], end = dates[1])
wdm.close(f)
if model.units == 'Metric': conv = 10**6
else: conv = 43560
# the submodel is daily, full model is hourly
if self.submodel is None:
sflows = [sum(data[i:i+24]) * conv / 86400
for i in range(0, len(data) - 23, 24)]
else:
sflows = [d * conv / 86400 for d in data]
stimes = [self.start + i * datetime.timedelta(days = 1)
for i in range(self.warmup, (self.end - self.start).days)]
otimes = self.otimes
oflows = self.oflows
# remove points with missing data from both simulated and observed flows
sflows = [sflows[stimes.index(t)]
for t, f in zip(otimes, oflows)
if t in stimes and f is not None]
oflows = [oflows[otimes.index(t)]
for t, f in zip(otimes, oflows)
if t in stimes and f is not None]
# return the appropriate performance metric
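# Nash-Sutcliffe efficiency of a flow series:
# NS = 1 - sum((s - o)**2) / sum((o - mean(o))**2)
# where s are the simulated and o the observed daily flows; the
# "Product" option multiplies the NS of the flows by the NS of their
# natural logs.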
if self.optimization == 'Nash-Sutcliffe Product':
# daily log flows
log_o = [numpy.log(f) for f in oflows]
log_s = [numpy.log(f) for f in sflows]
logdNS = (1 - sum((numpy.array(log_s) - numpy.array(log_o))**2) /
sum((numpy.array(log_o) - numpy.mean(log_o))**2))
# daily NS
dNS = (1 - sum((numpy.array(sflows) - numpy.array(oflows))**2) /
sum((numpy.array(oflows) - numpy.mean(oflows))**2))
return dNS * logdNS
if self.optimization == 'Nash-Sutcliffe Efficiency':
# daily NS
dNS = (1 - sum((numpy.array(sflows) - numpy.array(oflows))**2) /
sum((numpy.array(oflows) - numpy.mean(oflows))**2))
return dNS
def simulate(self, simulation):
"""
Performs a simulation and returns the optimization value.
"""
name, perturbation, adjustments = simulation
# create a copy of the original model to modify
filename = '{}/{}{:4.3f}'.format(self.output, name, perturbation)
model = self.copymodel(filename)
# adjust the values of the parameters
for variable, adjustment in zip(self.variables, adjustments):
self.adjust(model, variable, adjustment)
# run and pass back the result
print('running', name, 'perturbation')
return self.run(model)
def perturb(self,
parallel,
nprocessors,
timeout = 300,
verbose = True,
):
"""
Performs the perturbation analysis.
"""
if verbose:
st = time.time()
if parallel:
print('perturbing the model in parallel\n')
else:
print('perturbing the model serially\n')
# adjust the parameter values for each variable for each simulation
its = range(len(self.variables)), self.variables, self.perturbations
adjustments = []
for i, v, p in zip(*its):
adjustment = self.values[:]
adjustment[i] += p
adjustments.append(adjustment)
# run a baseline simulation and perturbation simulations for
# each of calibration variables
its = self.variables, self.perturbations, adjustments
simulations = ([['baseline', 0, self.values]] +
[[v, p, a] for v, p, a in zip(*its)])
if parallel:
if nprocessors is None: n = cpu_count()
else: n = nprocessors
try:
# create a pool of workers and try parallel
with Pool(n, maxtasksperchild = 4 * cpu_count()) as p:
results = p.map_async(self.simulate, simulations)
optimizations = results.get(timeout = timeout)
except Exception:
print('error: parallel calibration failed\n')
print('last values of calibration variables:\n')
for i in zip(self.variables, self.values): print(*i)
raise RuntimeError
else:
# run the simulations to get the optimization parameter values
optimizations = [self.simulate(s) for s in simulations]
if verbose:
print('\ncompleted perturbation in ' +
'{:.1f} seconds\n'.format(time.time() - st))
# calculate the sensitivities for the perturbations
sensitivities = [o - optimizations[0] for o in optimizations[1:]]
# save the current value of the optimization parameter
self.value = optimizations[0]
return sensitivities
def get_default(self, variable):
"""Gets the default value of the perturbation for the variable.
The defaults are based on experience with parameter sensitivity."""
if variable == 'LZSN': return 0.05
elif variable == 'UZSN': return 0.10
elif variable == 'LZETP': return 0.02
elif variable == 'INFILT': return 0.02
elif variable == 'INTFW': return 0.01
elif variable == 'IRC': return 0.02
elif variable == 'AGWRC': return 0.005
elif variable == 'KVARY': return 0.002
elif variable == 'DEEPFR': return 0.01
elif variable == 'CCFACT': return 0.2
elif variable == 'MGMELT': return 0.2
else:
print('error: unknown variable specified\n')
raise ValueError('unknown calibration variable: {}'.format(variable))
def check_variables(self):
"""
User-defined check on the values of the variables to ensure
the calibrated values stay within the limits.
"""
for i in range(len(self.variables)):
variable = self.variables[i]
value = self.values[i]
mi, ma = self.parameter_ranges[variable]
if value < mi:
its = variable, value, mi
print('warning: current value of ' +
'{} ({}) is below minimum ({})'.format(*its))
self.values[i] = mi
if value > ma:
its = variable, value, ma
print('warning: current value of ' +
'{} ({}) is above maximum ({})'.format(*its))
self.values[i] = ma
def optimize(self, parallel, nprocessors):
"""
Optimizes the objective function for the parameters.
"""
# set the current value of the optimization parameter
current = self.value - 1
# iterate through positive and negative perturbations of the
# calibration parameters until there is no improvement
t1 = 'increasing {:6s} {:>5.1%} increases {} {:7.4f}'
t2 = 'decreasing {:6s} {:>5.1%} increases {} {:7.4f}'
while current < self.value:
# update the current value of the optimization parameter
current = self.value
# set the current values of the calibration parameters
values = self.values[:]
print('\ncurrent optimization value: {:4.3f}\n'.format(self.value))
# perturb the values positively
positives = self.perturb(parallel, nprocessors)
# perturb the values negatively
self.perturbations = [-p for p in self.perturbations]
negatives = self.perturb(parallel, nprocessors)
# reset the perturbations to positive
self.perturbations = [-p for p in self.perturbations]
# iterate through the calibration variables and update their
# values positively or negatively if they increase the value
# of the optimization parameter
for i in range(len(self.values)):
p = positives[i]
n = negatives[i]
d = self.perturbations[i]
# see if positive change increases optimization
if p > 0 and p > n:
its = self.variables[i], d, self.optimization, p
print(t1.format(*its))
self.values[i] = round(self.values[i] + d, 3)
elif n > 0:
its = self.variables[i], d, self.optimization, n
print(t2.format(*its))
self.values[i] = round(self.values[i] - d, 3)
# make sure variables are within bounds
self.check_variables()
# show progress
print('\ncalibration values relative to default:\n')
for variable, adjustment in zip(self.variables, self.values):
print('{:6s} {:5.3f}'.format(variable, adjustment))
# since the last iteration made the fit worse, reset the values of
# the calibration parameters to the previous iteration
self.values = values[:]
def autocalibrate(self,
output,
variables = {'LZSN': 1.,
'UZSN': 1.,
'LZETP': 1.,
'INFILT': 1.,
'INTFW': 1.,
'IRC': 1.,
'AGWRC': 1.,
},
optimization = 'Nash-Sutcliffe Efficiency',
perturbations = [2, 1, 0.5],
submodel = True,
parallel = True,
nprocessors = None,
):
"""
Autocalibrates the hydrology for the hspfmodel by modifying the
values of the HSPF PERLND parameters contained in the vars list.
"""
# open up the base model
with open(self.hspfmodel, 'rb') as f: hspfmodel = pickle.load(f)
# find the comid of the calibration gage
if self.comid is None and self.gageid is not None:
print('looking up the comid for gage {}\n'.format(self.gageid))
# make a dictionary to use to find the comid for each gage id
d = {v:k
for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
self.comid = d[self.gageid]
elif self.comid is None:
print('error, no calibration gage specified')
raise RuntimeError('no calibration gage specified')
# get the calibration data
s, tstep, data = hspfmodel.flowgages[self.gageid]
# find the indices for the calibration
i = (self.start - s).days + self.warmup
j = (self.end - s).days
n = (self.end - self.start).days - self.warmup
delta = datetime.timedelta(days = 1)
if hspfmodel.units == 'Metric': conv = 0.3048**3
else: conv = 1
self.oflows = [d * conv for d in data[i:j]]
self.otimes = [self.start + (i + self.warmup) * delta for i in range(n)]
# create a submodel to improve performance
if submodel:
filepath = '{}/submodel'.format(self.output)
self.create_submodel(filepath, self.comid)
# set up the current values of the variables, the amount to perturb
# them by in each iteration, and the optimization parameter
self.variables = [v for v in variables]
self.values = [variables[v] for v in variables]
self.optimization = optimization
# current value of the optimization parameter
self.value = -10
# perturb until reaching a maximum (start with large perturbations)
print('attempting to calibrate {}'.format(self.hspfmodel))
for p in perturbations:
self.perturbations = [p * self.get_default(v) for v in variables]
self.optimize(parallel, nprocessors)
print('\noptimization complete, saving model\n')
# set the submodel to None to save the full model
self.submodel = None
model = self.copymodel(output)
# adjust the values of the parameters
for variable, adjustment in zip(self.variables, self.values):
self.adjust(model, variable, adjustment)
# adjust the filename
with open(output, 'wb') as f: pickle.dump(model, f)
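# A minimal usage sketch of the routine above (hedged: the surrounding
# calibrator class and its constructor arguments are not shown here, so the
# name "Calibrator" is purely illustrative):
#
#     calibrator = Calibrator(hspfmodel, start, end, warmup, output, gageid)
#     calibrator.autocalibrate('calibrated.hspf',
#                              optimization='Nash-Sutcliffe Efficiency',
#                              perturbations=[2, 1, 0.5],
#                              parallel=True)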
| |
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements the Ping class, which calls ping on virtual nodes.
#
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyNode import HappyNode
options = {}
options["quiet"] = False
options["source"] = None
options["destination"] = None
options["size"] = None
options["count"] = None
def option():
return options.copy()
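# Usage sketch for the options template above (hedged: assumes a built Happy
# topology containing nodes named "ThreadNode" and "BorderRouter"):
#
#     opts = option()
#     opts["source"] = "ThreadNode"
#     opts["destination"] = "BorderRouter"
#     opts["count"] = "3"
#     ret = Ping(opts).run()   # ReturnMsg: value is % packet loss (0-100)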
class Ping(HappyNode):
"""
Sends pings between virtual nodes. Uses ping for IPv4 and ping6 for IPv6.
happy-ping [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-d --destination (<IP_ADDR>|<NODE_NAME>)]
[-s --size <PING_SIZE>] [-c --count <PING_COUNT>]
-i --id Source node.
-d --destination Destination node, can be either the IP address or the
node name.
-s --size Size of the ping in bytes.
-c --count Number of pings to send.
Example:
$ happy-ping ThreadNode BorderRouter
Sends a ping between the ThreadNode and BorderRouter nodes.
return:
0-100, the percentage of packets lost
"""
def __init__(self, opts=options):
HappyNode.__init__(self)
self.quiet = opts["quiet"]
self.source = opts["source"]
self.destination = opts["destination"]
self.count = opts["count"]
self.size = opts["size"]
def __pre_check(self):
# Check if the name of the new node is given
if not self.source:
emsg = "Missing name of the virtual source node."
self.logger.error("[localhost] Ping: %s" % (emsg))
self.exit()
# Check if the source node exists.
if not self._nodeExists(self.source):
emsg = "virtual source node %s does not exist." % (self.source)
self.logger.error("[%s] Ping: %s" % (self.source, emsg))
self.exit()
# Check if the ping destination is given.
if not self.destination:
emsg = "Missing destination for ping."
self.logger.error("[localhost] Ping: %s" % (emsg))
self.exit()
# Check if the destination node exists.
if not IP.isIpAddress(self.destination) and not self._nodeExists(self.destination):
emsg = "virtual destination node %s does not exist." % (self.destination)
self.logger.error("[%s] Ping: %s" % (self.source, emsg))
self.exit()
if self.count is not None and self.count.isdigit():
self.count = int(self.count)
else:
self.count = 1
def __get_addresses(self):
self.addresses = {}
if IP.isIpAddress(self.destination):
self.addresses[self.destination] = 100
return
if self._nodeExists(self.destination):
node_addresses = self.getNodeAddresses(self.destination)
for addr in node_addresses:
self.addresses[addr] = 100
def __ping_on_address(self, addr):
cmd = ""
if IP.isIpv6(addr):
cmd += "ping6"
else:
cmd += "ping"
cmd += " -c " + str(self.count)
if self.size is not None:
cmd += " -s " + str(self.size)
# multicast destinations currently need no extra ping options
cmd += " " + addr
out, err = self.CallAtNodeForOutput(self.source, cmd)
return (out, err)
def __parse_output(self, addr, out, err):
if out is None:
emsg = "Failed to call ping at node " + self.source + " to " + addr + "."
self.logger.warning("[%s] Ping: %s" % (self.source, emsg))
if err is not None:
self.logger.warning("[%s] Ping: %s" % (self.source, err))
return
for line in out.split("\n"):
if "packet loss" not in line:
continue
l = line.split()
perc_loss = -1
if len(l) > 10 and l[8] == "packet" and l[9] == "loss,":
perc_loss = int(float(l[7][:-1]))  # drop the trailing % char
elif len(l) > 8 and l[6] == "packet" and l[7] == "loss,":
perc_loss = int(float(l[5][:-1]))  # drop the trailing % char
self.addresses[addr] = perc_loss
break
def __post_check(self):
# pick the best result
self.rets = []
for addr in self.addresses.keys():
self.rets.append(self.addresses[addr])
if len(self.rets) > 0:
self.ret = min(self.rets)
else:
self.ret = 100
def run(self):
self.__pre_check()
self.__get_addresses()
if self.addresses == {}:
emsg = "No address to ping at " + str(self.destination) + "."
self.logger.warning("[%s] Ping: %s" % (self.source, emsg))
print(hyellow(emsg))
return ReturnMsg(100, self.addresses)
for addr in self.addresses.keys():
out, err = self.__ping_on_address(addr)
self.__parse_output(addr, out, err)
self.__post_check()
for addr in self.addresses.keys():
if IP.isIpAddress(self.destination):
self.logger.info("ping from " + self.source + " to address " +
addr + " -> " + str(self.addresses[addr]) +
"% packet loss")
else:
self.logger.info("ping from " + self.source + " to " + self.destination +
" on address " + addr + " -> " + str(self.addresses[addr]) +
"% packet loss")
return ReturnMsg(self.ret, self.addresses)
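# For reference, __parse_output above keys off the standard ping summary
# line. With typical output such as
#     "5 packets transmitted, 5 received, 0% packet loss, time 4005ms"
# the split tokens give l[6] == "packet", l[7] == "loss," and l[5] == "0%",
# while the longer variant that also reports "+N errors," shifts the
# percentage to l[7] (the len(l) > 10 branch).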
| |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from testenv import TestCase
from testenv.utils import get_resource as resource
from testenv.utils import deploy_application as deploy
from testenv.utils import execute_workflow
class TestScaleWorkflow(TestCase):
def test_compute_scale_out_compute(self):
expectations = self.deploy('scale1')
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute'})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute(self):
expectations = self.deploy('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_out_and_in_compute_from_0(self):
expectations = self.deploy('scale10')
expectations['compute']['new']['install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute'})
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_out_2_compute(self):
expectations = self.deploy('scale1')
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': 2})
expectations['compute']['new']['install'] = 2
expectations['compute']['existing']['install'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_2_compute(self):
expectations = self.deploy('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -2})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 2
expectations['compute']['removed']['uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_out_compute(self):
expectations = self.deploy('scale2')
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute'})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_compute(self):
expectations = self.deploy('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_out_db(self):
expectations = self.deploy('scale2')
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'db'})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db(self):
expectations = self.deploy('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'db',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_out_db_scale_db(self):
expectations = self.deploy('scale2')
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'node_id': 'db', 'scale_compute': False})
expectations['compute']['existing']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db_scale_db(self):
expectations = self.deploy('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'db',
'delta': -1,
'scale_compute': False})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_out_compute(self):
expectations = self.deploy('scale3')
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute'})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
expectations['db']['existing']['scale_rel_install'] = 2
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_compute(self):
expectations = self.deploy('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 8
expectations['db']['existing']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_and_out_compute_from_0(self):
expectations = self.deploy('scale11')
expectations['compute']['new']['install'] = 0
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': 1})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 0
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 0
expectations['db']['existing']['scale_rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['scale_rel_install'] = 2
expectations['db']['existing']['rel_uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_out_db(self):
expectations = self.deploy('scale3')
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'db'})
expectations['compute']['existing']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_db(self):
expectations = self.deploy('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={'node_id': 'db',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 1
expectations['db']['removed']['uninstall'] = 1
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_compute_scale_out_compute_rollback(self):
fail_operations = [{
'workflow': 'scale',
'node': 'compute',
'operation': 'cloudify.interfaces.lifecycle.start'
}]
expectations = self.deploy('scale7', inputs={'fail': fail_operations})
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
with self.assertRaises(RuntimeError) as e:
self.scale(parameters={'node_id': 'compute'})
self.assertIn('TEST_EXPECTED_FAIL', str(e.exception))
expectations = self.expectations()
expectations['compute']['new']['install'] = 1
expectations['compute']['new']['uninstall'] = 1
expectations['compute']['existing']['install'] = 1
self.deployment_assertions(expectations, rollback=True)
def test_db_contained_in_compute_scale_out_compute_rollback(self):
fail_operations = [{
'workflow': 'scale',
'node': 'db',
'operation': 'cloudify.interfaces.lifecycle.start'
}]
expectations = self.deploy('scale8', inputs={'fail': fail_operations})
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
with self.assertRaises(RuntimeError) as e:
self.scale(parameters={'node_id': 'compute'})
self.assertIn('TEST_EXPECTED_FAIL', str(e.exception))
expectations = self.expectations()
expectations['compute']['new']['install'] = 1
expectations['compute']['new']['uninstall'] = 1
expectations['compute']['existing']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
# This is somewhat of a hack: scale_rel_install only counts establish
# operations, so we reuse it to subtract 2 from the expected establish
# invocations, since start (which precedes establish) is the operation
# that fails here.
expectations['db']['new']['scale_rel_install'] = -2
expectations['db']['new']['uninstall'] = 1
expectations['db']['new']['rel_uninstall'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
self.deployment_assertions(expectations, rollback=True)
def test_db_connected_to_compute_scale_out_compute_rollback(self):
fail_operations = [{
'workflow': 'scale',
'node': 'compute',
'operation': 'cloudify.interfaces.lifecycle.start'
}]
expectations = self.deploy('scale9', inputs={'fail': fail_operations})
expectations['compute']['new']['install'] = 1
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 2
self.deployment_assertions(expectations)
with self.assertRaises(RuntimeError) as e:
self.scale(parameters={'node_id': 'compute'})
self.assertIn('TEST_EXPECTED_FAIL', str(e.exception))
expectations = self.expectations()
expectations['compute']['new']['install'] = 1
expectations['compute']['new']['uninstall'] = 1
expectations['compute']['existing']['install'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 2
expectations['db']['existing']['rel_uninstall'] = 2
self.deployment_assertions(expectations, rollback=True)
def setUp(self):
super(TestScaleWorkflow, self).setUp()
self.previous_ids = []
self.previous_instances = []
def deployment_assertions(self, expected, rollback=False):
def expected_invocations(_expectations, num_instances):
result = {}
install_count = _expectations.get('install') or 0
result.update({
'create': install_count / num_instances,
'configure': install_count / num_instances,
'start': install_count / num_instances
})
uninstall_count = _expectations.get('uninstall') or 0
result.update({
'stop': uninstall_count / num_instances,
'delete': uninstall_count / num_instances,
})
rel_install_count = _expectations.get('rel_install') or 0
scale_rel_install_count = _expectations.get(
'scale_rel_install') or 0
result.update({
'preconfigure': rel_install_count / num_instances,
'postconfigure': rel_install_count / num_instances,
'establish': (rel_install_count + scale_rel_install_count) /
num_instances
})
rel_uninstall_count = _expectations.get('rel_uninstall') or 0
result.update({
'unlink': rel_uninstall_count / num_instances
})
return result
if rollback:
mod = self.client.deployment_modifications.list()[0]
rolledback = [i for i in mod.node_instances.added_and_related if
i.get('modification') == 'added']
else:
rolledback = []
instances = self.client.node_instances.list()
instance_ids = [i.id for i in instances]
calculated_expected = {}
for node_id, expectations in expected.items():
new_expectation = expectations['new']
existing_expectation = expectations['existing']
removed_expectation = expectations['removed']
node_instances = [i for i in instances if i.node_id == node_id]
node_rolledback = [i for i in rolledback if i.node_id == node_id]
if rollback:
new_instances = node_rolledback
else:
new_instances = [i for i in node_instances
if i.id not in self.previous_ids]
existing_instances = [i for i in node_instances
if i.id in self.previous_ids]
removed_instances = [i for i in self.previous_instances
if i.id not in instance_ids and
i.node_id == node_id]
self.assertEqual(len(new_instances),
new_expectation.get('install') or 0,
'new_instances: {0}, install_expectations: {1}'
.format(new_instances,
new_expectation.get('install')))
self.assertEqual(len(existing_instances),
existing_expectation.get('install') or 0,
'existing_instances: {0}, '
'install_expectations: {1}'
.format(existing_instances,
existing_expectation.get('install')))
self.assertEqual(len(removed_instances),
removed_expectation.get('uninstall') or 0,
'removed_instances: {0}, '
'uninstall_expectations: {1}'
.format(removed_instances,
removed_expectation.get('uninstall')))
for new_instance in new_instances:
calculated_expected.update({
new_instance.id: expected_invocations(
new_expectation, len(new_instances))})
for existing_instance in existing_instances:
calculated_expected.update({
existing_instance.id: expected_invocations(
existing_expectation, len(existing_instances))})
for removed_instance in removed_instances:
calculated_expected.update({
removed_instance.id: expected_invocations(
removed_expectation, len(removed_instances))})
invocations = self.get_plugin_data(
'testmockoperations', self.deployment_id
).get('mock_operation_invocation', [])
total_expected_count = 0
for instance_id, operations in calculated_expected.items():
for operation, expected_count in operations.items():
total_expected_count += expected_count
op_invocations = [i for i in invocations
if i['operation'] == operation and
i['id'] == instance_id]
self.assertEqual(expected_count, len(op_invocations),
'expected_count: {0}, op_invocations: {1}'
.format(expected_count, op_invocations))
self.assertEqual(total_expected_count, len(invocations))
# set state for next deployment assertion
self.previous_instances = instances
self.previous_ids = instance_ids
def expectations(self):
return {
'compute': {
'new': {},
'existing': {},
'removed': {}
},
'db': {
'new': {},
'existing': {},
'removed': {}
},
'webserver': {
'new': {},
'existing': {},
'removed': {}
}
}
def deploy(self, resource_name, inputs=None):
deployment, _ = deploy(resource('dsl/{0}.yaml'.format(resource_name)),
inputs=inputs)
self.deployment_id = deployment.id
return self.expectations()
def scale(self, parameters):
execute_workflow('scale', self.deployment_id, parameters=parameters)
return self.expectations()
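# Bookkeeping note (illustration only, not part of the suite): an 'install'
# expectation of N spread over N matching instances translates into exactly
# one 'create', 'configure' and 'start' invocation per instance, e.g.
#
#     expectations['compute']['new']['install'] = 2
#     # -> each of the two new compute instances must record one create,
#     #    one configure and one start in the mock operation log.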
| |
#!/bin/python
import os
from os.path import dirname, join, exists, relpath, splitext
import json
from json_tools import list_field_paths, field_by_path
from shutil import copy
from utils import get_answer
from bisect import insort_left
from codecs import open as copen
from sys import platform
if platform == "win32":
from os.path import normpath as norm
def normpath(path):
return norm(path).replace('\\', '/')
else:
from os.path import normpath
mod_dir = "./mod"
root_dir = "./translations"
def parseFile(filename):
result = []
try:
with copen(filename, "r", 'utf-8') as f:
result = json.load(f)
except Exception as e:
print("Failed to parse: " + filename + " (" + str(e) + ")")
quit(1)
return result
def get_data(field, target_file, original_file):
## Returns json structure from "target_file" and
## index of a field in this structure related to "field" in "original_file"
## original_file - a file path in game assets the requested data related to
## field - path to field of interest inside "original_file" json
## target_file - a framework file, containing the requested data
data = ""
try:
with copen(target_file, "r", 'utf-8') as f:
data = json.load(f)
except Exception:
print("Warning: cannot load file " + target_file)
return None, -1
for i, label in enumerate(data):
if original_file in label["Files"] and (field in label["Files"][original_file]):
return data, i
return None, -1
def replace(target_file, field, newdata, original):
## Tries to merge translation to framework
## if translation exists and conflicts with new one
## asks for user to manual merge
## target - file in framework related to file should be translated
## field - path of field in game assets json file which should be translated
## newdata - translated string
## original - path to file in game assets should be translated
target = join(root_dir, target_file)
data, index = get_data(field, target, original)
if not isinstance(newdata, str):
return
if data is None:
print("Cannot get data: " + newdata)
print("Target file: " + target)
print("Assets file: " + original)
return
changed = False
olddata = ""
if "DeniedAlternatives" not in data[index]:
data[index]["DeniedAlternatives"] = list()
if "Rus" in data[index]["Texts"]:
olddata = data[index]["Texts"]["Rus"]
if olddata == newdata or len(newdata) == 0:
return
elif newdata in data[index]["DeniedAlternatives"]:
return
elif len(olddata) == 0:
changed = True
data[index]["Texts"]["Rus"] = newdata
else:
print("Target: " + target)
print("Origin: " + original)
print("Used in:")
i = 0
for f, fields in data[index]["Files"].items():
if i > 5:
print("...and in " + str(len(data[index]["Files"])-i) + " more files")
break
print(" " + f)
for p in fields:
print(" at " + p)
i += 1
print("Denied variants:")
for d in data[index]["DeniedAlternatives"]:
print(' ' + d)
print("Field: " + field)
print("English text:")
print(' "' + data[index]["Texts"]["Eng"] + '"')
print("Old Russian text:")
print(' "' + data[index]["Texts"]["Rus"] + '"')
print("New Russian text:")
print(" \"" + newdata + '"')
print("What text should be used?")
print(" n - new text")
print(" o - old text")
print(" e - enter manually")
answer = get_answer(["n", "o", "e", "i"])
if answer == "n":
print("Setting to the new data...")
if olddata not in data[index]["DeniedAlternatives"]:
insort_left(data[index]["DeniedAlternatives"], olddata)
if newdata in data[index]["DeniedAlternatives"]:
data[index]["DeniedAlternatives"].remove(newdata)
data[index]["Texts"]["Rus"] = newdata
changed = True
elif answer == "e":
print("Enter new data:")
answer = get_answer(3)
data[index]["Texts"]["Rus"] = answer
changed = True
if newdata not in data[index]["DeniedAlternatives"] and newdata != answer:
insort_left(data[index]["DeniedAlternatives"], newdata)
changed = True
if olddata not in data[index]["DeniedAlternatives"] and olddata != answer:
insort_left(data[index]["DeniedAlternatives"], olddata)
changed = True
if answer in data[index]["DeniedAlternatives"]:
data[index]["DeniedAlternatives"].remove(answer)
print("Written: " + answer)
elif answer == "i":
import code
code.InteractiveConsole(locals=globals()).interact()
else:
print("Keeping old data...")
if newdata not in data[index]["DeniedAlternatives"]:
insort_left(data[index]["DeniedAlternatives"],newdata)
changed = True
if olddata in data[index]["DeniedAlternatives"]:
data[index]["DeniedAlternatives"].remove(olddata)
changed = True
if changed:
with copen(target, "w", 'utf-8') as f:
json.dump(data, f, ensure_ascii = False, indent = 2, sort_keys=True)
def handleGlitch(field, newdata, original_file, original_files):
## field - path to field of interest inside json
## newdata - translated string
## original_files - a dict with pairs {field: path}
emotefield = join(field, "glitchEmote")
textfield = join(field, "glitchEmotedText")
if emotefield not in original_files and textfield not in original_files:
return False # Not a glitch case, return
offset = newdata.find(".")
if offset == -1 or offset + 2 >= len(newdata):
print("Cann't find separator of glitch emote in " + newdata)
print("but this text should contain one! Skipping to avoid database damage...")
return True
emote = newdata[:offset+1]
text = newdata[offset+2:]
while text.startswith(emote): #TODO: Delete after base fix
text = text[len(emote)+1:]
emotepath = original_files[emotefield]
textpath = original_files[textfield]
replace(emotepath, emotefield, emote, original_file)
replace(textpath, textfield, text, original_file)
return True
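# Example of the split performed above (constructed input): for
# newdata == "Statement. I am a robot." the first "." yields
# emote == "Statement." and text == "I am a robot.".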
substitutions = dict()
with copen(join(root_dir, "substitutions.json"), "r", 'utf-8') as f:
substitutions = json.load(f)
specialHandlers = [
## Contains handler-functions for special cases
## function should return True if it can handle supplied data
## otherwise - return False
## function should receive 4 arguments:
## field - path to field of interest inside json
## newdata - translated string
## original_file - path to target file in game assets
## original_files - a dict with pairs {field: path},
## where:
## path - path to target file in framework
## field - path to field of interest inside json
## this dict can contain a special fields, not existent in real game assets and
## related to internal framework special cases
## such a dict usually can be obtained from substitutions global variable or file
handleGlitch
]
def process_replacement(field, newdata, original_file):
## field - path to field of interest inside json
## newdata - translated string
## original_file - target file of replacement in game assets
targetfile = "texts/" + original_file + ".json"
if original_file in substitutions: # We encountered shared field
if field in substitutions[original_file]:
targetfile = substitutions[original_file][field]
else: # Special case like glitchEmote
for handler in specialHandlers:
if handler(field, newdata, original_file, substitutions[original_file]):
return
replace(targetfile, field, newdata, original_file)
others_path = normpath(join(root_dir, "others"))
for subdir, dirs, files in os.walk(mod_dir):
for thefile in files:
if not thefile.endswith(".patch"):
# All non-patch files will be copied to others directory
modpath = join(subdir, thefile) # File path in mod dir
assetspath = normpath(relpath(modpath, mod_dir)) # File path in packed assets
fwpath = normpath(join(others_path,assetspath)) # File path in framework
if exists(fwpath):
print(fwpath + " already exists! Replacing...")
os.makedirs(dirname(fwpath), exist_ok = True)
copy(modpath, fwpath)
continue
filename = join(subdir, thefile) # Patch file path
# File path in packed assets
fname, ext = splitext(filename)
assetspath = normpath(relpath(fname, mod_dir))
replacements = parseFile(filename)
for replacement in replacements:
# We expect that the operation is always "replace".
# If it isn't... Well, something strange will happen.
newdata = replacement["value"] # Imported translated text
jsonpath = replacement["path"]
if type(newdata) is list: # Very special case if value is list
paths = list_field_paths(newdata)
for p in paths: # There we are restoring paths to leafs of structure in newdata
process_replacement(join(jsonpath, p),
field_by_path(newdata,p), assetspath)
else: # All as expected, just perform replacement
process_replacement(jsonpath, newdata, assetspath)
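# For reference, process_replacement above implies substitutions.json has
# roughly this shape (inferred from the lookups, so treat it as a sketch):
#
#     {
#         "<path in game assets>": {
#             "<json field path>": "<target file in framework>",
#             ...
#         }
#     }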
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2017, Jared Crapo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Change the owner, group, and mode of some files with a single command
chogm [OPTIONS] file_spec directory_spec file [file file ...]
-R, --recursive recurse through the directory tree of each file
-v, --verbose show progress
-h, --help display this usage message
file_spec owner:group:perms to set on files
directory_spec owner:group:perms to set on directories
file one or more files to operate on. Use '-' to
process stdin as a list of files.
file_spec tells what owner, group, and permissions should be given to any
files. The three elements are separated by a ':'. If a value is not given
for a particular element, then that element is not changed on the
encountered files.
directory_spec works just like file_spec, but it is applied to
directories. If any element of directory_spec is a comma, the value of
that element will be taken from file_spec.
EXAMPLES
chogm www-data:www-data:644 ,:,:755 /pub/www/*
Change all files in /pub/www to have an owner and group of www-data,
and permissions of -rw-r--r--. Also change all directories in
/pub/www/ to have an owner and group of www-data, but permissions of
-rwxr-xr-x. This is equivalent to the following shell commands:
$ chown www-data:www-group /pub/www/*
$ find /pub/www -maxdepth 1 -type f | xargs chmod 644
$ find /pub/www -maxdepth 1 -type d | tail -n +2 | xargs chmod 755
chogm -R :accounting:g+rw,o= :,:g=rwx,o= /mnt/acct
Change the group of all files in /mnt/acct to be accounting, and
make sure people in that group can read, write, and create files
anywhere in that directory tree. Also make sure that the hoi polloi
can't peek at accounting's files. This is the same as doing:
$ chgrp -R accounting /mnt/acct
$ find /mnt/acct -type f -print | xargs chmod g+rw,o=
$ find /mnt/acct -type d -print | xargs chmod g=rwx,o=
find ~/src -depth 2 -type d -print | grep -v '/.git$' | chogm -R :staff:660 :-:770 -
Assuming your ~/src directory contains a bunch of directories, each
with their own git project, change all those files to have a group
of staff and permissions of -rw-rw---- and all the directories to
also have a group of staff but permissions of -rwxrwx---. While
doing all of that, don't change the permissions of any of the files
inside of .git directories.
REQUIREMENTS
This script uses the operating system commands xargs, chown, chgrp, and
chmod to do its work. It also uses the python multiprocessing module from
the standard library, which was added in python 2.6, so it won't work with
python versions earlier than that. It works in python 2.7 and 3+.
EXIT STATUS
0 everything OK
1 some operations not successful (ie permission denied on a directory)
2 incorrect usage
"""
from __future__ import print_function
import sys
import os
import argparse
import stat
import multiprocessing as mp
import subprocess
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
class Ogm:
"""store an owner, group, and mode"""
def __init__(self):
self.owner = None
self.group = None
self.mode = None
class Worker:
"""Launch an operating system process and feed it data
a worker class that uses python multiprocessing module clone itself, launch an OS
processes, and then catch new work from a multiprocessing.Pipe and send it to the
OS process to get done.
The OS process is xargs, so that we don't have to execute a new OS process for
every file we want to modify. We just send it to standard in, and let xargs take
care of how often it actually need to execute the chmod, chgrp or chmod
"""
def __init__(self, cmd, arg):
self.cmd = cmd
self.arg = arg
# set up a pipe so we can communicate with our multiprocessing.Process.
# From the parent process, we write filenames into the child pipe and read error
# messages from it. From the child process, we read filenames from the parent pipe
# and write error messages into it.
self.pipe_parent, self.pipe_child = mp.Pipe(duplex = True)
self.p = mp.Process(target=self.runner, args=(cmd,arg,))
self.p.start()
###self.pipe_parent.close() # this is the parent so we close the reading end of the pipe
def name(self):
"""return the name of this worker
the command it runs and the first argument for that command, ie 'chown www-data'
"""
return "%s %s" % (self.cmd, self.arg)
def add(self, file):
"""send a filename to the child process via a pipe"""
# this is called by the parent, and writes a filename to the child pipe
self.pipe_child.send(file)
def runner(self, cmd, arg):
"""Start a subprocess and feed it data from a pipe
This function is run in a child process. So we read from the parent
pipe to get work to do, and write to the parent pipe to send error messages
We also fire up an xargs subprocess to actually do the work, and feed stuff
from our parent pipe to stdin of the subprocess.
"""
xargs = subprocess.Popen(["xargs", cmd, arg], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if debug:
print("--worker '%s' started xargs subprocess pid=%i" % (self.name(), xargs.pid), file=sys.stderr)
while True:
try:
# receive work from our parent pipe
filename = self.pipe_parent.recv()
# if we get message that there is None work, then we are done
if filename is None:
if debug:
print("--worker '%s' has no more work to do" % self.name(), file=sys.stderr)
break
# send the file to the stdin of the xargs process
print(filename, file=xargs.stdin)
if debug:
print("--worker '%s' received %s" % (self.name(), filename), file=sys.stderr)
except EOFError:
break
# we have broken out of the loop, so that means we have no more work to do
# gracefully close down the xargs process, save the contents of stderr, and
# write the exit code and the errors into the pipe to our parent
(stdoutdata,stderrdata) = xargs.communicate()
if debug:
print("--worker '%s' xargs pid=%i returncode=%i" % (self.name(), xargs.pid, xargs.returncode), file=sys.stderr)
print("--worker '%s' xargs stderr=%s" % (self.name(), stderrdata), file=sys.stderr)
self.pipe_parent.send( (xargs.returncode, stderrdata.rstrip('\r\n')) )
def gohome(self):
if debug:
print("--worker '%s' joining mp.Process" % self.name(), file=sys.stderr)
(rtncode,errmsgs) = self.pipe_child.recv()
self.p.join()
return (rtncode,errmsgs)
class Manager:
"""Start and manage all of the subprocesses"""
def __init__(self, fogm, dogm, verbose=False):
self.haveError = False
self.fogm = fogm
self.dogm = dogm
self.verbose = verbose
self.fchown = None
self.dchown = None
self.fchgrp = None
self.dchgrp = None
self.fchmod = None
self.dchmod = None
if fogm.owner:
self.fchown = Worker('chown', fogm.owner)
if dogm.owner:
self.dchown = Worker('chown', dogm.owner)
if fogm.group:
self.fchgrp = Worker('chgrp', fogm.group)
if dogm.group:
self.dchgrp = Worker('chgrp', dogm.group)
if fogm.mode:
self.fchmod = Worker('chmod', fogm.mode)
if dogm.mode:
self.dchmod = Worker('chmod', dogm.mode)
def do_file(self, file):
"""pass file to our subprocesses to change its owner, group and mode"""
if self.fchown:
self.fchown.add(file)
if self.fchgrp:
self.fchgrp.add(file)
if self.fchmod:
self.fchmod.add(file)
def do_dir(self, file):
"""pass a directory to our subprocesses to change its owner group and mode"""
if self.dchown:
self.dchown.add(file)
if self.dchgrp:
self.dchgrp.add(file)
if self.dchmod:
self.dchmod.add(file)
def report_information(self,message):
"""report information to stderr if verbose is set"""
if self.verbose:
print(message, file=sys.stderr)
def report_error(self, message):
"""report an error by printing it to stderr"""
self.haveError = True
print(message, file=sys.stderr)
def finish(self):
"""fire all of our workers and return a proper shell return code"""
self.fire(self.fchown)
self.fire(self.dchown)
self.fire(self.fchgrp)
self.fire(self.dchgrp)
self.fire(self.fchmod)
self.fire(self.dchmod)
if self.haveError:
return 1
else:
return 0
def fire(self, worker):
"""tell a worker there is no more work for them and send them home"""
if worker:
# put the "no more work" paper in the inbox
worker.add(None)
# and send the worker home
(rtncode,stderrdata) = worker.gohome()
if rtncode != 0:
self.report_error(stderrdata)
def main(argv=None):
parser = argparse.ArgumentParser(description='Change the owner, group, and mode of some files with a single command')
parser.add_argument('-R', '--recursive', action='store_true', help='recurse through the directory tree of each filespec')
parser.add_argument('-v', '--verbose', action='store_true', help='show progress')
parser.add_argument('file_spec', nargs=1, help='owner:group:perms to set on files')
parser.add_argument('directory_spec', nargs=1, help='owner:group:perms to set on directories')
parser.add_argument('file', nargs='+', help='one or more files to operate on. Use \'-\' to process stdin as a list of files')
args = parser.parse_args()
verbose = args.verbose
recursive = args.recursive
global debug
debug = False
spec = args.file_spec[0].split(':')
if len(spec) != 3:
parser.error('Invalid file_spec')
fileOgm = Ogm()
fileOgm.owner = spec[0]
fileOgm.group = spec[1]
fileOgm.mode = spec[2]
spec = args.directory_spec[0].split(':')
if len(spec) != 3:
parser.error('Invalid directory_spec')
dirOgm = Ogm()
dirOgm.owner = spec[0]
dirOgm.group = spec[1]
dirOgm.mode = spec[2]
# check for ',' which means to clone the argument from the file_spec
if dirOgm.owner == ',':
dirOgm.owner = fileOgm.owner
if dirOgm.group == ',':
dirOgm.group = fileOgm.group
if dirOgm.mode == ',':
dirOgm.mode = fileOgm.mode
# start up the child processes
m = Manager(fileOgm, dirOgm, verbose)
# examine each of the files
for filename in args.file:
if filename == '-':
while True:
onefile = sys.stdin.readline()
if onefile == '': break
examine(m, onefile.rstrip('\r\n'), parser, recursive)
else:
examine(m, filename, parser, recursive)
# and finish up
return m.finish()
def examine(m, thisfile, parser, recursive=False):
"""Recursively process a single file or directory"""
if debug:
print("--examining '%s'" % thisfile, file=sys.stderr)
try:
if os.path.isfile(thisfile):
m.do_file(thisfile)
elif os.path.isdir(thisfile):
m.do_dir(thisfile)
if recursive:
m.report_information("Processing directory %s...." % thisfile)
try:
for eachfile in os.listdir(thisfile):
examine(m, os.path.join(thisfile, eachfile), parser, recursive)
except OSError as e:
# do nicer formatting for common errors
if e.errno == 13:
m.report_error("%s: %s: Permission denied" % (parser.prog, e.filename))
else:
m.report_error("%s: %s" % (parser.prog, e))
else:
m.report_error("%s: cannot access '%s': No such file or directory" % (parser.prog, thisfile))
except OSError as e:
m.report_error("%s: %s" % (parser.prog, e))
if __name__ == "__main__":
sys.exit(main())
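# Minimal programmatic sketch of the Worker/Manager flow above (hedged:
# assumes ./f1 and ./d1 exist and are owned by the caller):
#
#     fogm = Ogm(); fogm.mode = '644'
#     dogm = Ogm(); dogm.mode = '755'
#     m = Manager(fogm, dogm)
#     m.do_file('f1')
#     m.do_dir('d1')
#     sys.exit(m.finish())
#     # note: main() sets the module-level 'debug' flag before doing this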
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import copy
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import RecordError
from libcloud.dns.types import (
ZoneDoesNotExistError,
RecordDoesNotExistError,
ZoneAlreadyExistsError,
RecordAlreadyExistsError,
)
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.common.gandi_live import (
ResourceNotFoundError,
ResourceConflictError,
GandiLiveResponse,
GandiLiveConnection,
BaseGandiLiveDriver,
)
__all__ = [
"GandiLiveDNSDriver",
]
TTL_MIN = 300
TTL_MAX = 2592000 # 30 days
API_BASE = "/api/v5"
class GandiLiveDNSResponse(GandiLiveResponse):
pass
class GandiLiveDNSConnection(GandiLiveConnection):
responseCls = GandiLiveDNSResponse
class GandiLiveDNSDriver(BaseGandiLiveDriver, DNSDriver):
"""
API reference can be found at:
https://doc.livedns.gandi.net/
Please note that the Libcloud paradigm of one zone per domain does not
match exactly with Gandi LiveDNS. For Gandi, a "zone" can apply to
multiple domains. This driver behaves as if the domain is a zone, but be
warned that modifying a domain means modifying the zone. If you have a
zone associated with multiple domains, all of those domains will be
modified as well.
"""
type = Provider.GANDI
name = "Gandi LiveDNS"
website = "http://www.gandi.net/domain"
connectionCls = GandiLiveDNSConnection
# also supports CAA, CDS
RECORD_TYPE_MAP = {
RecordType.A: "A",
RecordType.AAAA: "AAAA",
RecordType.ALIAS: "ALIAS",
RecordType.CNAME: "CNAME",
RecordType.DNAME: "DNAME",
RecordType.DS: "DS",
RecordType.KEY: "KEY",
RecordType.LOC: "LOC",
RecordType.MX: "MX",
RecordType.NS: "NS",
RecordType.PTR: "PTR",
RecordType.SPF: "SPF",
RecordType.SRV: "SRV",
RecordType.SSHFP: "SSHFP",
RecordType.TLSA: "TLSA",
RecordType.TXT: "TXT",
RecordType.WKS: "WKS",
RecordType.CAA: "CAA",
}
def list_zones(self):
zones = self.connection.request(action="%s/domains" % API_BASE, method="GET")
return self._to_zones(zones.object)
def get_zone(self, zone_id):
action = "%s/domains/%s" % (API_BASE, zone_id)
try:
zone = self.connection.request(action=action, method="GET")
except ResourceNotFoundError:
raise ZoneDoesNotExistError(
value="", driver=self.connection.driver, zone_id=zone_id
)
return self._to_zone(zone.object)
"""
:param extra: (optional) Extra attribute ('name'); if not provided, name
is based on domain.
:return: :class:`Zone` with attribute zone_uuid set in extra ``dict``
"""
def create_zone(self, domain, type="master", ttl=None, extra=None):
if extra and "name" in extra:
zone_name = extra["name"]
else:
zone_name = "%s zone" % domain
zone_data = {
"name": zone_name,
}
try:
new_zone = self.connection.request(
action="%s/zones" % API_BASE, method="POST", data=zone_data
)
except ResourceConflictError:
raise ZoneAlreadyExistsError(
value="", driver=self.connection.driver, zone_id=zone_name
)
new_zone_uuid = new_zone.headers["location"].split("/")[-1]
self.ex_switch_domain_gandi_zone(domain, new_zone_uuid)
return self._to_zone({"fqdn": domain, "zone_uuid": new_zone_uuid})
def list_records(self, zone):
action = "%s/domains/%s/records" % (API_BASE, zone.id)
records = self.connection.request(action=action, method="GET")
return self._to_records(records.object, zone)
"""
:return: :class:`Record` with the extra ``dict`` containing attribute
other_values ``list`` of ``str`` for other values; the first
value is returned through Record.data.
"""
def get_record(self, zone_id, record_id):
record_type, name = record_id.split(":", 1)
action = "%s/domains/%s/records/%s/%s" % (API_BASE, zone_id, name, record_type)
try:
record = self.connection.request(action=action, method="GET")
except ResourceNotFoundError:
raise RecordDoesNotExistError(
value="", driver=self.connection.driver, record_id=record_id
)
return self._to_record(record.object, self.get_zone(zone_id))[0]
def create_record(self, name, zone, type, data, extra=None):
self._validate_record(None, name, type, data, extra)
action = "%s/domains/%s/records" % (API_BASE, zone.id)
if type == "MX":
data = "%s %s" % (extra["priority"], data)
record_data = {
"rrset_name": name,
"rrset_type": self.RECORD_TYPE_MAP[type],
"rrset_values": [data],
}
if extra is not None and "ttl" in extra:
record_data["rrset_ttl"] = extra["ttl"]
try:
self.connection.request(action=action, method="POST", data=record_data)
except ResourceConflictError:
raise RecordAlreadyExistsError(
value="",
driver=self.connection.driver,
record_id="%s:%s" % (self.RECORD_TYPE_MAP[type], name),
)
return self._to_record_sub(record_data, zone, data)
"""
Ignores name and type, not allowed in an update call to the service.
The Gandi service requires all values for a record when doing an update.
Not providing all values during an update means the service will interpret
it as replacing all values with the one data value. The easiest way to
accomplish this is to make sure the value of a get_record is used as the
value of the record parameter.
This method will change the value when only one exists. When more than
one exists, it will combine the data parameter value with the extra dict
values contained in the list extra['_other_records']. This method should
only be used to make single value updates.
To change the number of values in the value set or to change several at
once, delete and recreate, potentially using ex_create_multi_value_record.
"""
def update_record(self, record, name, type, data, extra):
self._validate_record(record.id, record.name, record.type, data, extra)
action = "%s/domains/%s/records/%s/%s" % (
API_BASE,
record.zone.id,
record.name,
self.RECORD_TYPE_MAP[record.type],
)
multiple_value_record = record.extra.get("_multi_value", False)
other_records = record.extra.get("_other_records", [])
if record.type == RecordType.MX:
data = "%s %s" % (extra["priority"], data)
if multiple_value_record and len(other_records) > 0:
rvalue = [data]
for other_record in other_records:
if record.type == RecordType.MX:
rvalue.append(
"%s %s"
% (other_record["extra"]["priority"], other_record["data"])
)
else:
rvalue.append(other_record["data"])
else:
rvalue = [data]
record_data = {"rrset_values": rvalue}
if extra is not None and "ttl" in extra:
record_data["rrset_ttl"] = extra["ttl"]
try:
self.connection.request(action=action, method="PUT", data=record_data)
except ResourceNotFoundError:
raise RecordDoesNotExistError(
value="", driver=self.connection.driver, record_id=record.id
)
record_data["rrset_name"] = record.name
record_data["rrset_type"] = self.RECORD_TYPE_MAP[record.type]
return self._to_record(record_data, record.zone)[0]
"""
The Gandi service considers all values for a name-type combination to be
one record. Deleting that name-type record means deleting all values for
it.
"""
def delete_record(self, record):
action = "%s/domains/%s/records/%s/%s" % (
API_BASE,
record.zone.id,
record.name,
self.RECORD_TYPE_MAP[record.type],
)
try:
self.connection.request(action=action, method="DELETE")
except ResourceNotFoundError:
raise RecordDoesNotExistError(
value="", driver=self.connection.driver, record_id=record.id
)
# Originally checked for success here, but it should never reach
# this point with anything other than HTTP 200
return True
def export_zone_to_bind_format(self, zone):
action = "%s/domains/%s/records" % (API_BASE, zone.id)
headers = {"Accept": "text/plain"}
resp = self.connection.request(
action=action, method="GET", headers=headers, raw=True
)
return resp.body
# There is nothing you can update about a domain; you can update zones'
# names and which zone a domain is associated with, but the domain itself
# is basically immutable. Instead, some ex_ methods for dealing with
# Gandi zones.
"""
Update the name of a Gandi zone.
Note that a Gandi zone is not the same as a Libcloud zone. A Gandi zone
is a separate object type from a Gandi domain; a Gandi zone can be reused
by multiple Gandi domains, and the actual records are associated with the
zone directly. This is mostly masked in this driver to make it look like
records are associated with domains. If you need to step out of that
masking, use these extension methods.
:param zone_uuid: Identifier for the Gandi zone.
:type zone_uuid: ``str``
:param name: New name for the Gandi zone.
:type name: ``str``
:return: ``bool``
"""
def ex_update_gandi_zone_name(self, zone_uuid, name):
action = "%s/zones/%s" % (API_BASE, zone_uuid)
data = {
"name": name,
}
self.connection.request(action=action, method="PATCH", data=data)
return True
# There is no concept of deleting domains in this API, not even to
# disassociate a domain from a zone. You can delete a zone, though.
"""
Delete a Gandi zone. This may raise a ResourceConflictError if you
try to delete a zone that has domains still using it.
:param zone_uuid: Identifier for the Gandi zone
:type zone_uuid: ``str``
:return: ``bool``
"""
def ex_delete_gandi_zone(self, zone_uuid):
self.connection.request(
action="%s/zones/%s" % (API_BASE, zone_uuid), method="DELETE"
)
return True
"""
Change the Gandi zone a domain is associated with.
:param domain: Domain name to switch zones.
:type domain: ``str``
:param zone_uuid: Identifier for the new Gandi zone to switch to.
:type zone_uuid: ``str``
:return: ``bool``
"""
def ex_switch_domain_gandi_zone(self, domain, zone_uuid):
domain_data = {
"zone_uuid": zone_uuid,
}
self.connection.request(
action="%s/domains/%s" % (API_BASE, domain),
method="PATCH",
data=domain_data,
)
return True
"""
Create a new record with multiple values.
:param data: Record values (depends on the record type)
:type data: ``list`` (of ``str``)
:return: ``list`` of :class:`Record`s
"""
def ex_create_multi_value_record(self, name, zone, type, data, extra=None):
self._validate_record(None, name, type, data, extra)
action = "%s/domains/%s/records" % (API_BASE, zone.id)
record_data = {
"rrset_name": name,
"rrset_type": self.RECORD_TYPE_MAP[type],
"rrset_values": data,
}
if extra is not None and "ttl" in extra:
record_data["rrset_ttl"] = extra["ttl"]
try:
self.connection.request(action=action, method="POST", data=record_data)
except ResourceConflictError:
raise RecordAlreadyExistsError(
value="",
driver=self.connection.driver,
record_id="%s:%s" % (self.RECORD_TYPE_MAP[type], name),
)
return self._to_record(record_data, zone)
def _to_record(self, data, zone):
records = []
rrset_values = data["rrset_values"]
multiple_value_record = len(rrset_values) > 1
for index, rrset_value in enumerate(rrset_values):
record = self._to_record_sub(data, zone, rrset_value)
record.extra["_multi_value"] = multiple_value_record
if multiple_value_record:
record.extra["_other_records"] = []
records.append(record)
if multiple_value_record:
for index in range(0, len(records)):
record = records[index]
for other_index, other_record in enumerate(records):
if index == other_index:
continue
extra = copy.deepcopy(other_record.extra)
extra.pop("_multi_value")
extra.pop("_other_records")
item = {
"name": other_record.name,
"data": other_record.data,
"type": other_record.type,
"extra": extra,
}
record.extra["_other_records"].append(item)
return records
def _to_record_sub(self, data, zone, value):
extra = {}
ttl = data.get("rrset_ttl", None)
if ttl is not None:
extra["ttl"] = int(ttl)
if data["rrset_type"] == "MX":
priority, value = value.split()
extra["priority"] = priority
return Record(
id="%s:%s" % (data["rrset_type"], data["rrset_name"]),
name=data["rrset_name"],
type=self._string_to_record_type(data["rrset_type"]),
data=value,
zone=zone,
driver=self,
ttl=ttl,
extra=extra,
)
def _to_records(self, data, zone):
records = []
for r in data:
records += self._to_record(r, zone)
return records
def _to_zone(self, zone):
extra = {}
if "zone_uuid" in zone:
extra = {"zone_uuid": zone["zone_uuid"]}
return Zone(
id=str(zone["fqdn"]),
domain=zone["fqdn"],
type="master",
ttl=0,
driver=self,
extra=extra,
)
def _to_zones(self, zones):
ret = []
for z in zones:
ret.append(self._to_zone(z))
return ret
def _validate_record(self, record_id, name, record_type, data, extra):
if len(data) > 1024:
raise RecordError(
"Record data must be <= 1024 characters",
driver=self,
record_id=record_id,
)
if type == "MX" or type == RecordType.MX:
if extra is None or "priority" not in extra:
raise RecordError(
"MX record must have a priority", driver=self, record_id=record_id
)
if extra is not None and "_other_records" in extra:
for other_value in extra.get("_other_records", []):
if len(other_value["data"]) > 1024:
raise RecordError(
"Record data must be <= 1024 characters",
driver=self,
record_id=record_id,
)
if type == "MX" or type == RecordType.MX:
if (
other_value["extra"] is None
or "priority" not in other_value["extra"]
):
raise RecordError(
"MX record must have a priority",
driver=self,
record_id=record_id,
)
if extra is not None and "ttl" in extra:
if extra["ttl"] < TTL_MIN:
raise RecordError(
"TTL must be at least 300 seconds", driver=self, record_id=record_id
)
if extra["ttl"] > TTL_MAX:
raise RecordError(
"TTL must not exceed 30 days", driver=self, record_id=record_id
)
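
# A minimal usage sketch for the multi-value helper above (illustrative only).
# The driver class name and credentials below are assumptions, not part of
# this module's shown API:
#
#   driver = GandiLiveDNSDriver(key='my-api-key')   # assumed class name
#   zone = driver.get_zone('example.com')
#   records = driver.ex_create_multi_value_record(
#       name='www', zone=zone, type=RecordType.A,
#       data=['192.0.2.1', '192.0.2.2'], extra={'ttl': 300})
#   # Each returned Record lists its siblings in extra['_other_records'].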
| |
"""
Some API handling code. Predominantly this is to centralize common
alterations we make to API calls, such as filtering by router ids.
"""
import asyncio
import cellulario
import collections
import collections.abc
import fnmatch
import html
import html.parser
import itertools
import logging
import os
import re
import requests
import shellish
import shelve
import shutil
import syndicate
import syndicate.client
import syndicate.data
import warnings
from syndicate.adapters.requests import RequestsPager
logger = logging.getLogger('ecm.api')
JWT_COOKIE = 'cpAccountsJwt'
LEGACY_COOKIE = 'sessionid'
class HTMLJSONDecoder(syndicate.data.NormalJSONDecoder):
def parse_object(self, data):
data = super().parse_object(data)
for key, value in data.items():
if isinstance(value, str):
data[key] = html.unescape(value)
return data
syndicate.data.serializers['htmljson'] = syndicate.data.Serializer(
'application/json',
syndicate.data.serializers['json'].encode,
HTMLJSONDecoder().decode
)
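
# The 'htmljson' serializer behaves like plain JSON except that string values
# are HTML-unescaped on decode. Roughly:
#
#   >>> HTMLJSONDecoder().decode('{"name": "AT&amp;T &lt;router&gt;"}')
#   {'name': 'AT&T <router>'}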
class AuthFailure(SystemExit):
pass
class Unauthorized(AuthFailure):
""" Either the login is bad or the session is expired. """
pass
class TOSRequired(AuthFailure):
""" The terms of service have not been accepted yet. """
pass
class ECMLogin(object):
sso_url = 'https://accounts.cradlepointecm.com/login'
primer_url = 'https://cradlepointecm.com/api/v1/products/?limit=1'
legacy_url = 'https://cradlepointecm.com/api/v1/login/'
def __init__(self, api):
self._site = api.site
self._session = api.adapter.session
self._login_attempted = None
self.sso = None
def set_creds(self, username, password):
self.session_mode = False
self._login_attempted = False
self._username = username
self._password = password
def set_session(self, legacy_id, jwt):
self.session_mode = True
self.initial_legacy_id = legacy_id
self.initial_jwt = jwt
def reset(self, request):
try:
del request.headers['Cookie']
except KeyError:
pass
def __call__(self, request):
if self.session_mode:
if self.initial_jwt:
self.reset(request)
logger.info("Attempting to use saved session for login...")
self._session.cookies.update({
JWT_COOKIE: self.initial_jwt,
LEGACY_COOKIE: self.initial_legacy_id
})
self.initial_jwt = None
self.initial_legacy_id = None
logger.info("Loaded Session for SSO")
self.sso = True
elif self.initial_legacy_id:
self.reset(request)
self._session.cookies[LEGACY_COOKIE] = self.initial_legacy_id
self.initial_legacy_id = None
logger.info("Loaded Session for Legacy Auth")
self.sso = False
elif not self._login_attempted:
self._login_attempted = True
self.reset(request)
logger.info("Attempting to login with credentials...")
creds = {
"username": self._username,
"password": self._password,
}
resp = requests.post(self.sso_url, data=creds,
allow_redirects=False)
if JWT_COOKIE in resp.cookies:
logger.info("SSO Login Success")
self._session.cookies[JWT_COOKIE] = resp.cookies[JWT_COOKIE]
logger.debug("Priming session...")
self._session.get(self.primer_url)
self.sso = True
else:
logger.info("SSO auth failed, trying legacy auth")
resp = requests.post(self.legacy_url, json=creds)
if resp.status_code not in (200, 201):
raise Unauthorized('Invalid Login')
if LEGACY_COOKIE in resp.cookies:
self._session.cookies[LEGACY_COOKIE] = \
resp.cookies[LEGACY_COOKIE]
self.sso = False
request.prepare_cookies(self._session.cookies)
return request
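
# Note: ECMLogin is installed as the requests auth hook by ECMService.connect
# below (`self.adapter.auth = ECMLogin(self)`), so __call__ runs on every
# outbound request and performs SSO or legacy login lazily on first use.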
class AberrantPager(RequestsPager):
""" The time-series resources in ECM have broken paging. limit and offset
mean different things, next is erroneous and total_count is a lie. """
def __init__(self, getter, path, kwargs):
self._limit = kwargs.pop('limit')
self._offset = kwargs.pop('offset', 0)
self._done = False
super().__init__(getter, path, kwargs)
def __len__(self):
""" Count is not supported but we'd like to support truthy tests
still. """
return 0 if self._done else 1
def _get_next_page(self):
assert not self._done, 'iterator exhausted'
page = self.getter(*self.path, limit=self._limit,
offset=self._offset, **self.kwargs)
size = len(page)
if not size:
self._done = True
raise StopIteration()
self._offset += size
self._limit += size
return page
def __next__(self):
if self._done:
raise StopIteration()
if not self.page:
self.page = self._get_next_page()
return self.page.pop(0)
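
# AberrantPager is consumed like a normal pager. Because count is unsupported,
# __len__ only signals exhaustion (0 or 1), which keeps truthiness tests
# working. A sketch, assuming a sync ECMService instance named `api`:
#
#   for alert in api.get_pager('router_alerts', limit=100):
#       handle(alert)   # `handle` is a hypothetical consumer
#
# get_pager() below routes 'router_alerts' and 'activity_logs' to this class.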
class ECMService(shellish.Eventer, syndicate.Service):
site = 'https://cradlepointecm.com'
api_prefix = '/api/v1'
session_file = os.path.expanduser('~/.ecm_session')
globs = {
'seq': r'\[.*\]',
'wild': r'[*?]',
'set': r'\{.*\}'
}
re_glob_matches = re.compile('|'.join('(?P<%s>%s)' % x
for x in globs.items()))
re_glob_sep = re.compile('(%s)' % '|'.join(globs.values()))
default_remote_concurrency = 20
# Resources that don't page correctly.
aberrant_pager_resources = {
'router_alerts',
'activity_logs',
}
def __init__(self, **kwargs):
super().__init__(uri='nope', urn=self.api_prefix,
serializer='htmljson', **kwargs)
if not self.aio:
a = requests.adapters.HTTPAdapter(max_retries=3)
self.adapter.session.mount('https://', a)
self.adapter.session.mount('http://', a)
self.username = None
self.legacy_id = None
self.jwt = None
self.add_events([
'start_request',
'finish_request',
'reset_auth'
])
self.call_count = itertools.count()
    def clone(self, **variations):
        """ Produce a cloned instance of ourselves, including state. """
        clone = type(self)(**variations)
copy = ('parent_account', 'legacy_id', 'jwt', 'username',
'ident', 'uri', '_events', 'call_count')
for x in copy:
value = getattr(self, x)
if hasattr(value, 'copy'): # containers
value = value.copy()
setattr(clone, x, value)
if clone.jwt is not None:
clone.adapter.set_cookie(JWT_COOKIE, clone.jwt)
else:
clone.adapter.set_cookie(LEGACY_COOKIE, clone.legacy_id)
return clone
@property
def default_page_size(self):
""" Dynamically change the page size to the screen height. """
# Underflow the term height by a few rows to give a bit of context
# for each page. For simple cases the output will pause on each
# page and this gives them a bit of old data or header data to look
# at while the next page is being loaded.
page_size = shutil.get_terminal_size()[1] - 4
return max(20, min(100, page_size))
def connect(self, site=None, username=None, password=None):
if site:
self.site = site
self.parent_account = None
self.uri = self.site
self.adapter.auth = ECMLogin(self)
if username:
self.login(username, password)
elif not self.load_session(try_last=True):
raise Unauthorized('No valid sessions found')
def reset_auth(self):
self.fire_event('reset_auth')
self.adapter.set_cookie(LEGACY_COOKIE, None)
self.adapter.set_cookie(JWT_COOKIE, None)
self.save_session(None)
self.ident = None
def login(self, username=None, password=None):
if not self.load_session(username):
self.set_auth(username, password)
def set_auth(self, username, password=None, legacy_id=None, jwt=None):
if password is not None:
self.adapter.auth.set_creds(username, password)
elif legacy_id or jwt:
self.adapter.auth.set_session(legacy_id, jwt)
else:
raise TypeError("password or legacy_id required")
self.save_last_username(username)
self.username = username
self.ident = self.get('login')
if self.adapter.auth.sso:
self.ident['user']['username'] = self.ident['user']['email']
def get_session(self, username=None, use_last=False):
if use_last:
if username is not None:
raise RuntimeError("use_last and username are exclusive")
elif username is None:
raise TypeError("username required unless use_last=True")
with shelve.open(self.session_file) as s:
try:
site = s[self.uri]
if not username:
username = site['last_username']
return username, site['sessions'][username]
except KeyError:
return None, None
def save_last_username(self, username):
with shelve.open(self.session_file) as s:
site = s.get(self.uri, {})
site['last_username'] = username
s[self.uri] = site # Required to persist; see shelve docs.
def save_session(self, session):
with shelve.open(self.session_file) as s:
site = s.get(self.uri, {})
sessions = site.setdefault('sessions', {})
sessions[self.username] = session
s[self.uri] = site # Required to persist; see shelve docs.
def load_session(self, username=None, try_last=False):
username, session = self.get_session(username, use_last=try_last)
self.legacy_id = session.get('id') if session else None
self.jwt = session.get('jwt') if session else None
if self.legacy_id or self.jwt:
self.set_auth(username, legacy_id=self.legacy_id, jwt=self.jwt)
return True
else:
self.username = None
return False
def check_session(self):
""" ECM sometimes updates the session token. We make sure we are in
sync. """
try:
legacy_id = self.adapter.get_cookie(LEGACY_COOKIE)
except KeyError:
legacy_id = self.legacy_id
try:
jwt = self.adapter.get_cookie(JWT_COOKIE)
except KeyError:
jwt = self.jwt
if legacy_id != self.legacy_id or jwt != self.jwt:
logger.info("Updating Session: ID:%s JWT:%s" % (legacy_id, jwt))
self.save_session({
"id": legacy_id,
"jwt": jwt
})
self.legacy_id = legacy_id
self.jwt = jwt
def finish_do(self, callid, result_func, *args, reraise=True, **kwargs):
try:
result = result_func(*args, **kwargs)
except BaseException as e:
self.fire_event('finish_request', callid, exc=e)
if reraise:
raise e
else:
return
else:
self.fire_event('finish_request', callid, result=result)
return result
def do(self, *args, **kwargs):
""" Wrap some session and error handling around all API actions. """
callid = next(self.call_count)
self.fire_event('start_request', callid, args=args, kwargs=kwargs)
if self.aio:
on_fin = lambda f: self.finish_do(callid, f.result, reraise=False)
future = asyncio.ensure_future(self._do(*args, **kwargs))
future.add_done_callback(on_fin)
return future
else:
return self.finish_do(callid, self._do, *args, **kwargs)
def _do(self, *args, **kwargs):
if self.parent_account is not None:
kwargs['parentAccount'] = self.parent_account
try:
result = super().do(*args, **kwargs)
except syndicate.client.ResponseError as e:
self.handle_error(e)
result = super().do(*args, **kwargs)
except Unauthorized as e:
self.reset_auth()
raise e
self.check_session()
return result
def handle_error(self, error):
""" Pretty print error messages and exit. """
resp = error.response
if resp.get('exception') == 'precondition_failed' and \
resp['message'] == 'must_accept_tos':
raise TOSRequired('Must accept TOS')
err = resp.get('exception') or resp.get('error_code')
if err in ('login_failure', 'unauthorized'):
self.reset_auth()
raise Unauthorized(err)
if resp.get('message'):
err += '\n%s' % resp['message'].strip()
raise SystemExit("Error: %s" % err)
def glob_match(self, string, pattern):
""" Add bash style {a,b?,c*c} set matching to fnmatch. """
sets = []
for x in self.re_glob_matches.finditer(pattern):
match = x.group('set')
if match is not None:
prefix = pattern[:x.start()]
suffix = pattern[x.end():]
for s in match[1:-1].split(','):
sets.append(prefix + s + suffix)
if not sets:
sets = [pattern]
return any(fnmatch.fnmatchcase(string, x) for x in sets)
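
    # Illustration of the {a,b} set expansion above (hedged example; `svc` is
    # an ECMService instance):
    #
    #   svc.glob_match('router-01', 'router-{0?,1?}')  -> True
    #   svc.glob_match('gateway-01', 'router-{0?,1?}') -> False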
def glob_field(self, field, criteria):
""" Convert the criteria into an API filter and test function to
further refine the fetched results. That is, the glob pattern will
often require client side filtering after doing a more open ended
server filter. The client side test function will only be truthy
when a value is in full compliance. The server filters are simply
to reduce high latency overhead. """
filters = {}
try:
start, *globs, end = self.re_glob_sep.split(criteria)
except ValueError:
filters['%s__exact' % field] = criteria
else:
if start:
filters['%s__startswith' % field] = start
if end:
filters['%s__endswith' % field] = end
return filters, lambda x: self.glob_match(x.get(field), criteria)
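
    # For a criteria like 'web*-prod' on field 'name', glob_field returns
    # open-ended server filters plus a client-side test, roughly:
    #
    #   filters == {'name__startswith': 'web', 'name__endswith': '-prod'}
    #   test({'name': 'web01-prod'})  -> True
    #   test({'name': 'web01-stage'}) -> False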
def get_by(self, selectors, resource, criteria, required=True, **options):
if isinstance(selectors, str):
selectors = [selectors]
for field in selectors:
sfilters, test = self.glob_field(field, criteria)
filters = options.copy()
filters.update(sfilters)
for x in self.get_pager(resource, **filters):
if test is None or test(x):
return x
if required:
raise SystemExit("%s not found: %s" % (resource[:-1].capitalize(),
criteria))
def get_by_id_or_name(self, resource, id_or_name, **kwargs):
selectors = ['name']
if id_or_name.isnumeric():
selectors.insert(0, 'id')
return self.get_by(selectors, resource, id_or_name, **kwargs)
def glob_pager(self, *args, **kwargs):
""" Similar to get_pager but use glob filter patterns. If arrays are
given to a filter arg it is converted to the appropriate disjunction
filters. That is, if you ask for field=['foo*', 'bar*'] it will return
entries that start with `foo` OR `bar`. The normal behavior would
produce a paradoxical query saying it had to start with both. """
exclude = {"expand", "limit", "timeout", "_or", "page_size", "urn",
"data", "callback"}
iterable = lambda x: isinstance(x, collections.abc.Iterable) and \
not isinstance(x, str)
glob_tests = []
glob_filters = collections.defaultdict(list)
for fkey, fval in list(kwargs.items()):
if fkey in exclude or '__' in fkey or '.' in fkey:
continue
kwargs.pop(fkey)
fvals = [fval] if not iterable(fval) else fval
gcount = 0
for gval in fvals:
gcount += 1
filters, test = self.glob_field(fkey, gval)
for query, term in filters.items():
glob_filters[query].append(term)
if test:
glob_tests.append(test)
# Scrub out any exclusive queries that will prevent certain client
# side matches from working. Namely if one pattern can match by
# `startswith`, for example, but others can't we must forgo
# inclusion of this server side filter to prevent stripping out
# potentially valid responses for the other more open-ended globs.
for gkey, gvals in list(glob_filters.items()):
if len(gvals) != gcount:
del glob_filters[gkey]
disjunctions = []
disjunct = kwargs.pop('_or', None)
if disjunct is not None:
if isinstance(disjunct, collections.abc.Iterable) and \
not isinstance(disjunct, str):
disjunctions.extend(disjunct)
else:
disjunctions.append(disjunct)
disjunctions.extend('|'.join('%s=%s' % (query, x) for x in terms)
for query, terms in glob_filters.items())
if disjunctions:
kwargs['_or'] = disjunctions
stream = self.get_pager(*args, **kwargs)
if not glob_tests:
return stream
else:
def glob_scrub():
for x in stream:
if any(t(x) for t in glob_tests):
yield x
return glob_scrub()
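
    # Example: glob_pager('routers', name=['lab*', 'qa*']) folds the list into
    # a single disjunction filter, roughly
    #   _or=['name__startswith=lab|name__startswith=qa']
    # and the glob_scrub() generator then drops any rows whose name does not
    # fully match one of the patterns client side.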
def _routers_slice(self, routers, size):
""" Pull a slice of s3 routers out of a generator. """
while True:
page = list(itertools.islice(routers, size))
if not page:
return {}
idmap = dict((x['id'], x) for x in page
if x['product']['series'] == 3)
if idmap:
return idmap
def remote(self, path, **kwargs):
""" Generator for remote data with globing support and smart
paging. """
if '/' in path:
warnings.warn("Use '.' instead of '/' for path argument.")
path_parts = path.split('.')
server_path = []
globs = []
for i, x in enumerate(path_parts):
if self.re_glob_sep.search(x):
globs.extend(path_parts[i:])
break
else:
server_path.append(x)
def expand_globs(base, tests, context=server_path):
if not tests:
yield '.'.join(context), base
return
if isinstance(base, dict):
items = base.items()
elif isinstance(base, list):
items = [(str(i), x) for i, x in enumerate(base)]
else:
return
test = tests[0]
for key, val in items:
if self.glob_match(key, test):
if len(tests) == 1:
yield '.'.join(context + [key]), val
else:
yield from expand_globs(val, tests[1:],
context + [key])
for x in self.fetch_remote(server_path, **kwargs):
if 'data' in x:
x['results'] = [{"path": k, "data": v}
for k, v in expand_globs(x['data'], globs)]
x['_data'] = x['data']
del x['data']
else:
x['results'] = []
yield x
def fetch_remote(self, path, concurrency=None, timeout=None, **query):
cell = cellulario.IOCell(coord='pool')
if concurrency is None:
concurrency = self.default_remote_concurrency
elif concurrency < 1:
raise ValueError("Concurrency less than 1")
page_concurrency = min(4, concurrency)
page_slice = max(10, round((concurrency / page_concurrency) * 1.20))
api = self.clone(aio=True, loop=cell.loop, request_timeout=timeout,
connect_timeout=timeout)
@cell.tier()
async def start(route):
probe = await api.get('routers', limit=1, fields='id',
**query)
for i in range(0, probe.meta['total_count'], page_slice):
await route.emit(i, page_slice)
@cell.tier(pool_size=page_concurrency)
async def get_page(route, offset, limit):
page = await api.get('routers', expand='product',
offset=offset, limit=limit, **query)
for router in page:
if router['product']['series'] != 3:
continue
await route.emit(router)
@cell.tier(pool_size=concurrency)
async def get_remote(route, router):
try:
res = (await api.get('remote', *path, id=router['id']))[0]
except Exception as e:
res = {
"success": False,
"exception": type(e).__name__,
"message": str(e),
"id": int(router['id'])
}
res['router'] = router
await route.emit(res)
@cell.cleaner
async def close():
await api.close()
return cell
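
    # The returned IOCell is consumed by iteration, exactly as remote() does
    # above. A sketch, assuming a connected sync service named `api`:
    #
    #   for res in api.fetch_remote(['status', 'wan'], concurrency=10):
    #       print(res['id'], res.get('success'))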
def get_pager(self, *path, **kwargs):
resource = path[0].split('/', 1)[0] if path else None
if resource in self.aberrant_pager_resources:
assert not self.aio, 'Only sync mode supported for: %s' % \
resource
page_arg = kwargs.pop('page_size', None)
limit_arg = kwargs.pop('limit', None)
kwargs['limit'] = page_arg or limit_arg or self.default_page_size
return AberrantPager(self.get, path, kwargs)
else:
return super().get_pager(*path, **kwargs)
| |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import app
from telemetry.core.backends import browser_backend
from telemetry.core import browser_credentials
from telemetry.core import exceptions
from telemetry.core import extension_dict
from telemetry.core import local_server
from telemetry.core import memory_cache_http_server
from telemetry.core.platform import profiling_controller
from telemetry.core import tab_list
from telemetry import decorators
class Browser(app.App):
"""A running browser instance that can be controlled in a limited way.
To create a browser instance, use browser_finder.FindBrowser.
Be sure to clean up after yourself by calling Close() when you are done with
the browser. Or better yet:
browser_to_create = FindBrowser(options)
with browser_to_create.Create(options) as browser:
... do all your operations on browser here
"""
def __init__(self, backend, platform_backend, credentials_path):
super(Browser, self).__init__(app_backend=backend,
platform_backend=platform_backend)
self._browser_backend = backend
self._platform_backend = platform_backend
self._local_server_controller = local_server.LocalServerController(
platform_backend)
self._tabs = tab_list.TabList(backend.tab_list_backend)
self.credentials = browser_credentials.BrowserCredentials()
self.credentials.credentials_path = credentials_path
self._platform_backend.DidCreateBrowser(self, self._browser_backend)
browser_options = self._browser_backend.browser_options
self.platform.FlushDnsCache()
if browser_options.clear_sytem_cache_for_browser_and_profile_on_start:
if self.platform.CanFlushIndividualFilesFromSystemCache():
self.platform.FlushSystemCacheForDirectory(
self._browser_backend.profile_directory)
self.platform.FlushSystemCacheForDirectory(
self._browser_backend.browser_directory)
else:
self.platform.FlushEntireSystemCache()
self._browser_backend.SetBrowser(self)
self._browser_backend.Start()
self._platform_backend.DidStartBrowser(self, self._browser_backend)
self._profiling_controller = profiling_controller.ProfilingController(
self._browser_backend.profiling_controller_backend)
@property
def profiling_controller(self):
return self._profiling_controller
@property
def browser_type(self):
return self.app_type
@property
def supports_extensions(self):
return self._browser_backend.supports_extensions
@property
def supports_tab_control(self):
return self._browser_backend.supports_tab_control
@property
def tabs(self):
return self._tabs
@property
def foreground_tab(self):
for i in xrange(len(self._tabs)):
# The foreground tab is the first (only) one that isn't hidden.
# This only works through luck on Android, due to crbug.com/322544
# which means that tabs that have never been in the foreground return
# document.hidden as false; however in current code the Android foreground
# tab is always tab 0, which will be the first one that isn't hidden
if self._tabs[i].EvaluateJavaScript('!document.hidden'):
return self._tabs[i]
raise Exception("No foreground tab found")
@property
@decorators.Cache
def extensions(self):
if not self.supports_extensions:
raise browser_backend.ExtensionsNotSupportedException(
'Extensions not supported')
return extension_dict.ExtensionDict(self._browser_backend.extension_backend)
def _GetStatsCommon(self, pid_stats_function):
browser_pid = self._browser_backend.pid
result = {
'Browser': dict(pid_stats_function(browser_pid), **{'ProcessCount': 1}),
'Renderer': {'ProcessCount': 0},
'Gpu': {'ProcessCount': 0},
'Other': {'ProcessCount': 0}
}
process_count = 1
for child_pid in self._platform_backend.GetChildPids(browser_pid):
try:
child_cmd_line = self._platform_backend.GetCommandLine(child_pid)
child_stats = pid_stats_function(child_pid)
except exceptions.ProcessGoneException:
# It is perfectly fine for a process to have gone away between calling
# GetChildPids() and then further examining it.
continue
child_process_name = self._browser_backend.GetProcessName(child_cmd_line)
process_name_type_key_map = {'gpu-process': 'Gpu', 'renderer': 'Renderer'}
if child_process_name in process_name_type_key_map:
child_process_type_key = process_name_type_key_map[child_process_name]
else:
# TODO: identify other process types (zygote, plugin, etc), instead of
# lumping them in a single category.
child_process_type_key = 'Other'
result[child_process_type_key]['ProcessCount'] += 1
for k, v in child_stats.iteritems():
if k in result[child_process_type_key]:
result[child_process_type_key][k] += v
else:
result[child_process_type_key][k] = v
process_count += 1
for v in result.itervalues():
if v['ProcessCount'] > 1:
for k in v.keys():
if k.endswith('Peak'):
del v[k]
del v['ProcessCount']
result['ProcessCount'] = process_count
return result
@property
def memory_stats(self):
"""Returns a dict of memory statistics for the browser:
{ 'Browser': {
'VM': R,
'VMPeak': S,
'WorkingSetSize': T,
'WorkingSetSizePeak': U,
'ProportionalSetSize': V,
'PrivateDirty': W
},
'Gpu': {
'VM': R,
'VMPeak': S,
'WorkingSetSize': T,
'WorkingSetSizePeak': U,
'ProportionalSetSize': V,
'PrivateDirty': W
},
'Renderer': {
'VM': R,
'VMPeak': S,
'WorkingSetSize': T,
'WorkingSetSizePeak': U,
'ProportionalSetSize': V,
'PrivateDirty': W
},
'SystemCommitCharge': X,
'SystemTotalPhysicalMemory': Y,
'ProcessCount': Z,
}
Any of the above keys may be missing on a per-platform basis.
"""
self._platform_backend.PurgeUnpinnedMemory()
result = self._GetStatsCommon(self._platform_backend.GetMemoryStats)
commit_charge = self._platform_backend.GetSystemCommitCharge()
if commit_charge:
result['SystemCommitCharge'] = commit_charge
total = self._platform_backend.GetSystemTotalPhysicalMemory()
if total:
result['SystemTotalPhysicalMemory'] = total
return result
@property
def cpu_stats(self):
"""Returns a dict of cpu statistics for the system.
{ 'Browser': {
'CpuProcessTime': S,
'TotalTime': T
},
'Gpu': {
'CpuProcessTime': S,
'TotalTime': T
},
'Renderer': {
'CpuProcessTime': S,
'TotalTime': T
}
}
Any of the above keys may be missing on a per-platform basis.
"""
result = self._GetStatsCommon(self._platform_backend.GetCpuStats)
del result['ProcessCount']
# We want a single time value, not the sum for all processes.
cpu_timestamp = self._platform_backend.GetCpuTimestamp()
for process_type in result:
# Skip any process_types that are empty
if not len(result[process_type]):
continue
result[process_type].update(cpu_timestamp)
return result
def Close(self):
"""Closes this browser."""
if self._browser_backend.IsBrowserRunning():
self._platform_backend.WillCloseBrowser(self, self._browser_backend)
self._local_server_controller.Close()
self._browser_backend.profiling_controller_backend.WillCloseBrowser()
self._browser_backend.Close()
self.credentials = None
@property
def http_server(self):
return self._local_server_controller.GetRunningServer(
memory_cache_http_server.MemoryCacheHTTPServer, None)
def SetHTTPServerDirectories(self, paths):
"""Returns True if the HTTP server was started, False otherwise."""
if isinstance(paths, basestring):
paths = set([paths])
paths = set(os.path.realpath(p) for p in paths)
# If any path is in a subdirectory of another, remove the subdirectory.
duplicates = set()
for parent_path in paths:
for sub_path in paths:
if parent_path == sub_path:
continue
        # os.path.commonprefix compares characters, not path components, so
        # '/a/bc' would wrongly be treated as inside '/a/b'. Append a
        # separator to force a whole-component match.
        if os.path.commonprefix((parent_path + os.sep, sub_path)) == \
            parent_path + os.sep:
          duplicates.add(sub_path)
paths -= duplicates
if self.http_server:
if paths and self.http_server.paths == paths:
return False
self.http_server.Close()
if not paths:
return False
server = memory_cache_http_server.MemoryCacheHTTPServer(paths)
self.StartLocalServer(server)
return True
def StartLocalServer(self, server):
"""Starts a LocalServer and associates it with this browser.
It will be closed when the browser closes.
"""
self._local_server_controller.StartServer(server)
@property
def local_servers(self):
"""Returns the currently running local servers."""
return self._local_server_controller.local_servers
def GetStandardOutput(self):
return self._browser_backend.GetStandardOutput()
def GetStackTrace(self):
return self._browser_backend.GetStackTrace()
@property
def supports_system_info(self):
return self._browser_backend.supports_system_info
def GetSystemInfo(self):
"""Returns low-level information about the system, if available.
See the documentation of the SystemInfo class for more details."""
return self._browser_backend.GetSystemInfo()
| |
import py, pytest
from _pytest.config import getcfg, get_common_ancestor, determine_setup
from _pytest.main import EXIT_NOTESTSCOLLECTED
class TestParseIni:
def test_getcfg_and_config(self, testdir, tmpdir):
sub = tmpdir.mkdir("sub")
sub.chdir()
tmpdir.join("setup.cfg").write(py.code.Source("""
[pytest]
name = value
"""))
rootdir, inifile, cfg = getcfg([sub], ["setup.cfg"])
assert cfg['name'] == "value"
config = testdir.parseconfigure(sub)
assert config.inicfg['name'] == 'value'
def test_getcfg_empty_path(self, tmpdir):
        getcfg([''], ['setup.cfg'])  # happens on: py.test ""
def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"')
tmpdir.join("setup.cfg").write(py.code.Source("""
[pytest]
addopts = --verbose
"""))
config = testdir.parseconfig(tmpdir)
assert config.option.color == 'no'
assert config.option.reportchars == 's'
assert config.option.tbstyle == 'short'
assert config.option.verbose
def test_tox_ini_wrong_version(self, testdir):
testdir.makefile('.ini', tox="""
[pytest]
minversion=9.0
""")
result = testdir.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines([
"*tox.ini:2*requires*9.0*actual*"
])
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_ini_names(self, testdir, name):
testdir.tmpdir.join(name).write(py.std.textwrap.dedent("""
[pytest]
minversion = 1.0
"""))
config = testdir.parseconfig()
assert config.getini("minversion") == "1.0"
def test_toxini_before_lower_pytestini(self, testdir):
sub = testdir.tmpdir.mkdir("sub")
sub.join("tox.ini").write(py.std.textwrap.dedent("""
[pytest]
minversion = 2.0
"""))
testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
[pytest]
minversion = 1.5
"""))
config = testdir.parseconfigure(sub)
assert config.getini("minversion") == "2.0"
@pytest.mark.xfail(reason="probably not needed")
def test_confcutdir(self, testdir):
sub = testdir.mkdir("sub")
sub.chdir()
testdir.makeini("""
[pytest]
addopts = --qwe
""")
result = testdir.inline_run("--confcutdir=.")
assert result.ret == 0
class TestConfigCmdlineParsing:
def test_parsing_again_fails(self, testdir):
config = testdir.parseconfig()
pytest.raises(AssertionError, lambda: config.parse([]))
def test_explicitly_specified_config_file_is_loaded(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("custom", "")
""")
testdir.makeini("""
[pytest]
custom = 0
""")
testdir.makefile(".cfg", custom = """
[pytest]
custom = 1
""")
config = testdir.parseconfig("-c", "custom.cfg")
assert config.getini("custom") == "1"
class TestConfigAPI:
def test_config_trace(self, testdir):
config = testdir.parseconfig()
l = []
config.trace.root.setwriter(l.append)
config.trace("hello")
assert len(l) == 1
assert l[0] == "hello [config]\n"
def test_config_getoption(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addoption("--hello", "-X", dest="hello")
""")
config = testdir.parseconfig("--hello=this")
for x in ("hello", "--hello", "-X"):
assert config.getoption(x) == "this"
pytest.raises(ValueError, "config.getoption('qweqwe')")
@pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]')
def test_config_getoption_unicode(self, testdir):
testdir.makeconftest("""
from __future__ import unicode_literals
def pytest_addoption(parser):
parser.addoption('--hello', type='string')
""")
config = testdir.parseconfig('--hello=this')
assert config.getoption('hello') == 'this'
def test_config_getvalueorskip(self, testdir):
config = testdir.parseconfig()
pytest.raises(pytest.skip.Exception,
"config.getvalueorskip('hello')")
verbose = config.getvalueorskip("verbose")
assert verbose == config.option.verbose
def test_config_getvalueorskip_None(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addoption("--hello")
""")
config = testdir.parseconfig()
with pytest.raises(pytest.skip.Exception):
config.getvalueorskip('hello')
def test_getoption(self, testdir):
config = testdir.parseconfig()
with pytest.raises(ValueError):
config.getvalue('x')
assert config.getoption("x", 1) == 1
def test_getconftest_pathlist(self, testdir, tmpdir):
somepath = tmpdir.join("x", "y", "z")
p = tmpdir.join("conftest.py")
p.write("pathlist = ['.', %r]" % str(somepath))
config = testdir.parseconfigure(p)
assert config._getconftest_pathlist('notexist', path=tmpdir) is None
pl = config._getconftest_pathlist('pathlist', path=tmpdir)
print(pl)
assert len(pl) == 2
assert pl[0] == tmpdir
assert pl[1] == somepath
def test_addini(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("myname", "my new ini value")
""")
testdir.makeini("""
[pytest]
myname=hello
""")
config = testdir.parseconfig()
val = config.getini("myname")
assert val == "hello"
pytest.raises(ValueError, config.getini, 'other')
def test_addini_pathlist(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")
parser.addini("abc", "abc value")
""")
p = testdir.makeini("""
[pytest]
paths=hello world/sub.py
""")
config = testdir.parseconfig()
l = config.getini("paths")
assert len(l) == 2
assert l[0] == p.dirpath('hello')
assert l[1] == p.dirpath('world/sub.py')
pytest.raises(ValueError, config.getini, 'other')
def test_addini_args(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("args", "new args", type="args")
parser.addini("a2", "", "args", default="1 2 3".split())
""")
testdir.makeini("""
[pytest]
args=123 "123 hello" "this"
""")
config = testdir.parseconfig()
l = config.getini("args")
assert len(l) == 3
assert l == ["123", "123 hello", "this"]
l = config.getini("a2")
assert l == list("123")
def test_addini_linelist(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
parser.addini("a2", "", "linelist")
""")
testdir.makeini("""
[pytest]
xy= 123 345
second line
""")
config = testdir.parseconfig()
l = config.getini("xy")
assert len(l) == 2
assert l == ["123 345", "second line"]
l = config.getini("a2")
assert l == []
def test_addinivalue_line_existing(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
""")
testdir.makeini("""
[pytest]
xy= 123
""")
config = testdir.parseconfig()
l = config.getini("xy")
assert len(l) == 1
assert l == ["123"]
config.addinivalue_line("xy", "456")
l = config.getini("xy")
assert len(l) == 2
assert l == ["123", "456"]
def test_addinivalue_line_new(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
""")
config = testdir.parseconfig()
assert not config.getini("xy")
config.addinivalue_line("xy", "456")
l = config.getini("xy")
assert len(l) == 1
assert l == ["456"]
config.addinivalue_line("xy", "123")
l = config.getini("xy")
assert len(l) == 2
assert l == ["456", "123"]
def test_options_on_small_file_do_not_blow_up(testdir):
def runfiletest(opts):
reprec = testdir.inline_run(*opts)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert skipped == passed == 0
path = testdir.makepyfile("""
def test_f1(): assert 0
def test_f2(): assert 0
""")
for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'],
['--tb=long'], ['--fulltrace'], ['--nomagic'],
['--traceconfig'], ['-v'], ['-v', '-v']):
runfiletest(opts + [path])
def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class EntryPoint:
name = "mytestplugin"
class dist:
pass
def load(self):
class PseudoPlugin:
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
testdir.makeconftest("""
pytest_plugins = "mytestplugin",
""")
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
config = testdir.parseconfig()
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class EntryPoint:
name = "mytestplugin"
def load(self):
assert 0, "should not arrive here"
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
config = testdir.parseconfig("-p", "no:mytestplugin")
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin is None
def test_cmdline_processargs_simple(testdir):
testdir.makeconftest("""
def pytest_cmdline_preparse(args):
args.append("-h")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*pytest*",
"*-h*",
])
def test_invalid_options_show_extra_information(testdir):
"""display extra information when pytest exits due to unrecognized
options in the command-line"""
testdir.makeini("""
[pytest]
addopts = --invalid-option
""")
result = testdir.runpytest()
result.stderr.fnmatch_lines([
"*error: unrecognized arguments: --invalid-option*",
"* inifile: %s*" % testdir.tmpdir.join('tox.ini'),
"* rootdir: %s*" % testdir.tmpdir,
])
@pytest.mark.parametrize('args', [
['dir1', 'dir2', '-v'],
['dir1', '-v', 'dir2'],
['dir2', '-v', 'dir1'],
['-v', 'dir2', 'dir1'],
])
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
"""
Consider all arguments in the command-line for rootdir and inifile
discovery, even if they happen to occur after an option. #949
"""
# replace "dir1" and "dir2" from "args" into their real directory
root = testdir.tmpdir.mkdir('myroot')
d1 = root.mkdir('dir1')
d2 = root.mkdir('dir2')
for i, arg in enumerate(args):
if arg == 'dir1':
args[i] = d1
elif arg == 'dir2':
args[i] = d2
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: '])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_toolongargs_issue224(testdir):
result = testdir.runpytest("-m", "hello" * 500)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_notify_exception(testdir, capfd):
config = testdir.parseconfig()
excinfo = pytest.raises(ValueError, "raise ValueError(1)")
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert "ValueError" in err
class A:
def pytest_internalerror(self, excrepr):
return True
config.pluginmanager.register(A())
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert not err
def test_load_initial_conftest_last_ordering(testdir):
from _pytest.config import get_config
pm = get_config().pluginmanager
class My:
def pytest_load_initial_conftests(self):
pass
m = My()
pm.register(m)
hc = pm.hook.pytest_load_initial_conftests
l = hc._nonwrappers + hc._wrappers
assert l[-1].function.__module__ == "_pytest.capture"
assert l[-2].function == m.pytest_load_initial_conftests
assert l[-3].function.__module__ == "_pytest.config"
class TestWarning:
def test_warn_config(self, testdir):
testdir.makeconftest("""
l = []
def pytest_configure(config):
config.warn("C1", "hello")
def pytest_logwarning(code, message):
if message == "hello" and code == "C1":
l.append(1)
""")
testdir.makepyfile("""
def test_proper(pytestconfig):
import conftest
assert conftest.l == [1]
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_warn_on_test_item_from_request(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def fix(request):
request.node.warn("T1", "hello")
def test_hello(fix):
pass
""")
result = testdir.runpytest()
assert result.parseoutcomes()["warnings"] > 0
assert "hello" not in result.stdout.str()
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines("""
===*warning summary*===
*WT1*test_warn_on_test_item*:5*hello*
""")
class TestRootdir:
def test_simple_noini(self, tmpdir):
assert get_common_ancestor([tmpdir]) == tmpdir
assert get_common_ancestor([tmpdir.mkdir("a"), tmpdir]) == tmpdir
assert get_common_ancestor([tmpdir, tmpdir.join("a")]) == tmpdir
with tmpdir.as_cwd():
assert get_common_ancestor([]) == tmpdir
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_with_ini(self, tmpdir, name):
inifile = tmpdir.join(name)
inifile.write("[pytest]\n")
a = tmpdir.mkdir("a")
b = a.mkdir("b")
for args in ([tmpdir], [a], [b]):
            rootdir, parsed_inifile, inicfg = determine_setup(None, args)
            assert rootdir == tmpdir
            assert parsed_inifile == inifile
        rootdir, parsed_inifile, inicfg = determine_setup(None, [b, a])
        assert rootdir == tmpdir
        assert parsed_inifile == inifile
@pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
    def test_pytestini_overrides_empty_other(self, tmpdir, name):
        inifile = tmpdir.ensure("pytest.ini")
        a = tmpdir.mkdir("a")
        a.ensure(name)
        rootdir, parsed_inifile, inicfg = determine_setup(None, [a])
        assert rootdir == tmpdir
        assert parsed_inifile == inifile
def test_setuppy_fallback(self, tmpdir):
a = tmpdir.mkdir("a")
a.ensure("setup.cfg")
tmpdir.ensure("setup.py")
rootdir, inifile, inicfg = determine_setup(None, [a])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_nothing(self, tmpdir):
rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_with_specific_inifile(self, tmpdir):
inifile = tmpdir.ensure("pytest.ini")
rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
assert rootdir == tmpdir
| |
from profelis import utils
from profelis import xml
import re
import os
from javax.swing import JPanel
from javax.swing import JList
from javax.swing import JLabel
from javax.swing import JTextArea
from javax.swing import JTextField
from javax.swing import JButton
from javax.swing import ListCellRenderer
from javax.swing import ListSelectionModel
from javax.swing import DefaultListModel
from javax.swing import JScrollPane
from javax.swing import Action
from javax.swing import AbstractAction
from javax.swing import Timer
from javax.swing.event import ListSelectionListener
from javax.swing.border import LineBorder
from java.awt import Color
from java.awt import Font
from java.awt import GridBagLayout
from java.awt import GridBagConstraints
from java.awt.event import ActionListener
class NewGeneActionListener(ActionListener):
def __init__(self, panel):
self.panel = panel
def actionPerformed(self, event):
if self.panel.newGeneFrom.text.isdigit() and self.panel.newGeneTo.text.isdigit():
location = [int(self.panel.newGeneFrom.text), int(self.panel.newGeneTo.text)]
if location[0] > 0 and location[1] > 0:
self.panel.outGenes.model.addElement(utils.Iteration(location))
self.panel.newGeneFrom.background = self.panel.newGeneTo.background = Color.white
else:
self.panel.newGeneFrom.background = self.panel.newGeneTo.background = Color.red
else:
self.panel.newGeneFrom.background = self.panel.newGeneTo.background = Color.red
class AddGenesAction(AbstractAction):
def __init__(self, panel):
AbstractAction.__init__(self, "Add Genes")
self.panel = panel
def actionPerformed(self, event):
output = open("profelis.query.fas", "w")
for gene in self.panel.outGenes.model.elements():
print gene.location
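            # Forward-strand genes are stored start < stop; reverse-strand
            # genes are stored stop..start and must be reverse complemented
            # before translation.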
if gene.location[0] < gene.location[1]:
proteins = utils.translate(self.panel.genome[gene.location[0]-1:gene.location[1]])
else:
proteins = utils.translate(utils.reverseComplement(self.panel.genome[gene.location[1]-1:gene.location[0]]))
output.write(">profelis" + ":" + "-".join(map(str, gene.location)) + "\n")
for i in xrange(0, len(proteins), 50):
output.write(proteins[i:min(i+50, len(proteins))] + "\n")
output.close()
self.panel.frame.blastLocation.background = Color.white
self.panel.frame.databaseLocation.background = Color.white
try:
utils.cachedBlast("profelis.query.blastp.xml", self.panel.frame.blastLocation.text, self.panel.frame.databaseLocation.text + "/" + self.panel.database, self.panel.evalue, "profelis.query.fas", self.panel, True)
except OSError:
self.panel.frame.blastLocation.background = Color.red
self.panel.frame.databaseLocation.background = Color.red
return
genes = utils.parseBlast("profelis.query.blastp.xml")[2]
self.panel.outGenes.model.clear()
        for gene in genes[::-1]:
            self.panel.inGenes.model.add(0, gene)
xml.addGenes(self.panel.name + ".blastp.xml", "profelis.query.blastp.xml")
xml.writeHTML(self.panel.name + ".blastp.xml")
self.panel.writeArtemisFile()
class RemoveAction(AbstractAction):
def __init__(self, panel):
AbstractAction.__init__(self, "Remove Marked Genes")
self.panel = panel
def actionPerformed(self, event):
index = 0
removed = []
while index < self.panel.inGenes.model.size():
if self.panel.inGenes.model.get(index).remove:
removed.append(self.panel.inGenes.model.remove(index).location[0])
else:
index += 1
xml.deleteGenes(self.panel.name + ".blastp.xml", removed)
xml.writeHTML(self.panel.name + ".blastp.xml")
self.panel.writeArtemisFile()
class MarkForRemovalListener(ActionListener):
def __init__(self, panel):
self.panel = panel
def actionPerformed(self, event):
self.panel.inGenes.selectedValue.remove = not self.panel.inGenes.selectedValue.remove
self.panel.repaint()
class ProfelisCellRenderer(JTextArea, ListCellRenderer):
def __init__(self):
self.border = LineBorder(Color.black)
self.font = Font("Monospaced", Font.PLAIN, self.font.size)
def getListCellRendererComponent(self, list, value, index, isSelected, cellHasFocus):
self.setText(str(value))
if isSelected:
self.background = list.selectionBackground
self.foreground = list.selectionForeground
else:
self.background = list.background
self.foreground = list.foreground
if value.remove:
self.background = Color.red
return self
class MarkButtonLabeler(ActionListener):
def __init__(self, panel):
self.panel = panel
def actionPerformed(self, event):
if self.panel.inGenes.selectedValue and self.panel.inGenes.selectedValue.remove:
self.panel.markForRemovalButton.text = "Unmark For Removal"
else:
self.panel.markForRemovalButton.text = "Mark For Removal"
class SearchListener(ActionListener):
def __init__(self, panel):
self.panel = panel
def actionPerformed(self, event):
if self.panel.searchField.text != self.panel.searchTerm:
self.panel.searchTerm = self.panel.searchField.text
self.panel.searchIndex = -1
for index in range(self.panel.searchIndex+1, self.panel.inGenes.model.size()):
if str(self.panel.inGenes.model.get(index)).find(self.panel.searchTerm) != -1:
self.panel.inGenes.selectedIndex = self.panel.searchIndex = index
self.panel.inGenes.ensureIndexIsVisible(self.panel.inGenes.selectedIndex)
break
class ProfelisPanel(JPanel):
def __init__(self, frame, name):
self.frame = frame
self.exception = None
self.name = name
self.searchTerm = None
self.searchIndex = -1
self.searchField = JTextField("")
self.searchField.addActionListener(SearchListener(self))
self.newGeneFrom = JTextField("")
self.newGeneTo = JTextField("")
self.newGeneButton = JButton("New Gene")
newGeneActionListener = NewGeneActionListener(self)
self.newGeneFrom.addActionListener(newGeneActionListener)
self.newGeneTo.addActionListener(newGeneActionListener)
self.newGeneButton.addActionListener(newGeneActionListener)
self.markForRemovalButton = JButton("Mark For Removal")
self.markForRemovalButton.addActionListener(MarkForRemovalListener(self))
self.inGenes = JList(DefaultListModel())
self.inGenes.selectionMode = ListSelectionModel.SINGLE_SELECTION
self.inGenes.cellRenderer = ProfelisCellRenderer()
self.markButtonLabelerTimer = Timer(100, MarkButtonLabeler(self))
self.markButtonLabelerTimer.start()
self.loadFile()
self.outGenes = JList(DefaultListModel())
self.outGenes.selectionMode = ListSelectionModel.SINGLE_SELECTION
self.outGenes.cellRenderer = ProfelisCellRenderer()
constraints = GridBagConstraints()
self.layout = GridBagLayout()
constraints.gridx, constraints.gridy = 0, 0
constraints.gridwidth, constraints.gridheight = 1, 1
constraints.fill = GridBagConstraints.NONE
constraints.weightx, constraints.weighty = 0, 0
self.add(JLabel("Genes In Artemis File"), constraints)
constraints.gridx, constraints.gridy = 0, 1
self.add(JButton(RemoveAction(self)), constraints)
constraints.gridx, constraints.gridy = 1, 1
self.add(self.markForRemovalButton, constraints)
constraints.gridx, constraints.gridy = 2, 1
self.add(JLabel("Search"), constraints)
constraints.gridx, constraints.gridy = 3, 1
constraints.fill = GridBagConstraints.HORIZONTAL
self.add(self.searchField, constraints)
constraints.gridx, constraints.gridy = 0, 2
constraints.gridwidth, constraints.gridheight = 4, 2
constraints.fill = GridBagConstraints.BOTH
constraints.weightx, constraints.weighty = 1, 1
self.add(JScrollPane(self.inGenes), constraints)
constraints.gridx, constraints.gridy = 4, 0
constraints.gridwidth, constraints.gridheight = 1, 1
constraints.fill = GridBagConstraints.NONE
constraints.weightx, constraints.weighty = 0, 0
self.add(JLabel("Genes To Add To Artemis File"), constraints)
constraints.gridx, constraints.gridy = 4, 1
self.add(self.newGeneButton, constraints)
constraints.weightx = 1
constraints.fill = GridBagConstraints.BOTH
constraints.gridx, constraints.gridy = 5, 1
self.add(self.newGeneFrom, constraints)
constraints.weightx = 0
constraints.fill = GridBagConstraints.NONE
constraints.gridx, constraints.gridy = 6, 1
self.add(JLabel("To"), constraints)
constraints.weightx = 1
constraints.fill = GridBagConstraints.BOTH
constraints.gridx, constraints.gridy = 7, 1
self.add(self.newGeneTo, constraints)
constraints.weightx = 0
constraints.fill = GridBagConstraints.NONE
constraints.gridx, constraints.gridy = 4, 2
self.add(JButton(AddGenesAction(self)), constraints)
constraints.gridx, constraints.gridy = 4, 3
constraints.gridwidth, constraints.gridheight = 4, 1
constraints.fill = GridBagConstraints.BOTH
constraints.weightx, constraints.weighty = 1, 1
self.add(JScrollPane(self.outGenes), constraints)
def loadFile(self):
self.inGenes.model.clear()
self.database, self.evalue, genes = utils.parseBlast(self.name + ".blastp.xml")
        for gene in genes:
            self.inGenes.model.addElement(gene)
artemisInput = open(self.name + ".art", "r")
lines = artemisInput.readlines()
artemisInput.close()
        # Keep these as two separate lists; binding both names to one list
        # would let header lines leak into the genome when no ORIGIN section
        # is present.
        self.restOfFile = []
        self.genome = []
while lines:
if re.match("\s+CDS\s+(complement\()?\d+\.\.\d+\)?\n", lines[0]):
lines = lines[4:]
elif lines[0].find("ORIGIN") == 0:
self.genome = map(lambda x: re.sub("\s+", "", x), lines[1:])
lines = []
else:
if lines[0].strip():
self.restOfFile.append(lines[0])
lines = lines[1:]
self.genome = "".join(self.genome)
self.restOfFile = "".join(self.restOfFile)
def writeArtemisFile(self):
output = open(self.name + ".art", "w")
output.write(self.restOfFile)
for element in self.inGenes.model.elements():
output.write(element.toArtemis())
output.write("\nORIGIN\n\n")
for i in range(0, len(self.genome), 50):
output.write(self.genome[i:min(i+50, len(self.genome))] + "\n")
output.close()
| |
import unittest
import redisco
from redisco import containers as cont
class SetTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_common_operations(self):
fruits = cont.Set(key='fruits')
fruits.add('apples')
fruits.add('oranges')
fruits.add('bananas', 'tomatoes')
fruits.add(['strawberries', 'blackberries'])
self.assertEqual(set(['apples', 'oranges', 'bananas', 'tomatoes', 'strawberries', 'blackberries']), fruits.all())
# remove
fruits.remove('apples')
fruits.remove('bananas', 'blackberries')
fruits.remove(['tomatoes', 'strawberries'])
self.assertEqual(set(['oranges']), fruits.all())
# in
self.assertTrue('oranges' in fruits)
self.assertTrue('apples' not in fruits)
# len
self.assertEqual(1, len(fruits))
# pop
self.assertEqual('oranges', fruits.pop())
# copy
fruits.add('apples')
fruits.add('oranges')
basket = fruits.copy('basket')
self.assertEqual(set(['apples', 'oranges']), basket.all())
# update
o = cont.Set('o', self.client)
o.add('kiwis')
fruits.update(o)
self.assertEqual(set(['kiwis', 'apples', 'oranges']),
fruits.all())
def test_comparisons(self):
all_pls = cont.Set(key='ProgrammingLanguages')
my_pls = cont.Set(key='MyPLs')
o_pls = cont.Set(key='OPLs')
all_pls.add('Python')
all_pls.add('Ruby')
all_pls.add('PHP')
all_pls.add('Lua')
all_pls.add('Java')
all_pls.add('Pascal')
all_pls.add('C')
all_pls.add('C++')
all_pls.add('Haskell')
all_pls.add('C#')
all_pls.add('Go')
my_pls.add('Ruby')
my_pls.add('Python')
my_pls.add('Lua')
my_pls.add('Haskell')
o_pls.add('Ruby')
o_pls.add('Python')
o_pls.add('Lua')
o_pls.add('Haskell')
# equality
self.assertNotEqual(my_pls, all_pls)
self.assertEqual(o_pls, my_pls)
fruits = cont.Set(key='fruits')
fruits.add('apples')
fruits.add('oranges')
# disjoint
self.assertTrue(fruits.isdisjoint(o_pls))
self.assertFalse(all_pls.isdisjoint(o_pls))
# subset
self.assertTrue(my_pls < all_pls)
self.assertTrue(all_pls > my_pls)
self.assertTrue(o_pls >= my_pls)
self.assertTrue(o_pls <= my_pls)
self.assertTrue(my_pls.issubset(all_pls))
self.assertTrue(my_pls.issubset(o_pls))
self.assertTrue(o_pls.issubset(my_pls))
# union
s = fruits.union("fruits|mypls", my_pls)
self.assertEqual(set(['Ruby', 'Python', 'Lua', 'Haskell', 'apples', 'oranges']), s.members)
# intersection
inter = fruits.intersection('fruits&mypls', my_pls)
self.assertEqual(set([]), inter.members)
# difference
s = fruits.difference('fruits-my_pls', my_pls)
self.assertEqual(set(['apples', 'oranges']), s.members)
def test_operations_with_updates(self):
abc = cont.Set('abc', self.client)
for c in 'abc':
abc.add(c)
def_ = cont.Set('def', self.client)
for c in 'def':
def_.add(c)
# __ior__
abc |= def_
self.assertEqual(set(['a', 'b', 'c', 'd', 'e', 'f']), abc.all())
abc &= def_
self.assertEqual(set(['d', 'e', 'f']), abc.all())
for c in 'abc':
abc.add(c)
abc -= def_
self.assertEqual(set(['a', 'b', 'c']), abc.all())
def test_methods_that_should_return_new_sets(self):
abc = cont.Set('abc', self.client)
for c in 'abc':
abc.add(c)
def_ = cont.Set('def', self.client)
for c in 'def':
def_.add(c)
# new_key as a set should raise error
# only strings are allowed as keys
new_set = cont.Set('new_set')
self.assertRaises(ValueError, abc.union, new_set, def_)
self.assertRaises(ValueError, abc.difference, new_set, def_)
self.assertRaises(ValueError, abc.intersection, new_set, def_)
self.assert_(isinstance(abc.union('new_set', def_), cont.Set))
self.assert_(isinstance(abc.intersection('new_set', def_), cont.Set))
self.assert_(isinstance(abc.difference('new_set', def_), cont.Set))
def test_access_redis_methods(self):
s = cont.Set('new_set')
s.sadd('a')
s.sadd('b')
s.srem('b')
self.assertEqual('a', s.spop())
s.sadd('a')
self.assert_('a' in s.members)
s.sadd('b')
self.assertEqual(2, s.scard())
self.assert_(s.sismember('a'))
self.client.sadd('other_set', 'a')
self.client.sadd('other_set', 'b')
self.client.sadd('other_set', 'c')
self.assert_(s.srandmember() in set(['a', 'b']))
def test_sinter(self):
abc = cont.Set("abc")
def_ = cont.Set("def")
abc.add('a')
abc.add('b')
abc.add('c')
def_.add('d')
def_.add('e')
def_.add('f')
self.assertEqual(set([]), abc.sinter(def_))
def_.add('b')
def_.add('c')
self.assertEqual(set(['b', 'c']), abc.sinter(def_))
def test_sunion(self):
abc = cont.Set("abc")
def_ = cont.Set("def")
abc.add('a')
abc.add('b')
abc.add('c')
def_.add('d')
def_.add('e')
def_.add('f')
self.assertEqual(set(['a', 'b', 'c', 'd', 'e', 'f']), abc.sunion(def_))
    def test_sdiff(self):
abc = cont.Set("abc")
def_ = cont.Set("def")
abc.add('a')
abc.add('b')
abc.add('c')
def_.add('c')
def_.add('b')
def_.add('f')
self.assertEqual(set(['a']), abc.sdiff(def_))
class ListTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_common_operations(self):
alpha = cont.List('alpha', self.client)
# append
alpha.append('a')
alpha.append('b')
alpha.append('c', 'd')
alpha.append(['e', 'f'])
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f'], alpha.all())
# len
self.assertEqual(6, len(alpha))
num = cont.List('num', self.client)
num.append('1')
num.append('2')
# extend and iter
alpha.extend(num)
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', '1', '2'], alpha.all())
alpha.extend(['3', '4'])
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4'], alpha.all())
# contains
self.assertTrue('b' in alpha)
self.assertTrue('2' in alpha)
self.assertTrue('5' not in alpha)
# shift and unshift
num.unshift('0')
self.assertEqual(['0', '1', '2'], num.members)
self.assertEqual('0', num.shift())
self.assertEqual(['1', '2'], num.members)
# push and pop
num.push('4')
num.push('a', 'b')
num.push(['c', 'd'])
self.assertEqual('d', num.pop())
self.assertEqual('c', num.pop())
self.assertEqual(['1', '2', '4', 'a', 'b'], num.members)
# trim
alpha.trim(0, 1)
self.assertEqual(['a', 'b'], alpha.all())
# remove
alpha.remove('b')
self.assertEqual(['a'], alpha.all())
# setitem
alpha[0] = 'A'
self.assertEqual(['A'], alpha.all())
# iter
alpha.push('B')
for e, a in zip(alpha, ['A', 'B']):
self.assertEqual(a, e)
self.assertEqual(['A', 'B'], list(alpha))
# slice
alpha.extend(['C', 'D', 'E'])
self.assertEqual(['A', 'B', 'C', 'D', 'E'], alpha[:])
self.assertEqual(['B', 'C'], alpha[1:3])
alpha.reverse()
self.assertEqual(['E', 'D', 'C', 'B', 'A'], list(alpha))
def test_pop_onto(self):
a = cont.List('alpha')
b = cont.List('beta')
a.extend(range(10))
# test pop_onto
a_snap = list(a.members)
while True:
v = a.pop_onto(b.key)
if not v:
break
else:
self.assertTrue(v not in a.members)
self.assertTrue(v in b.members)
self.assertEqual(a_snap, b.members)
# test rpoplpush
b_snap = list(b.members)
while True:
v = b.rpoplpush(a.key)
if not v:
break
else:
self.assertTrue(v in a.members)
self.assertTrue(v not in b.members)
self.assertEqual(b_snap, a.members)
def test_native_methods(self):
l = cont.List('mylist')
self.assertEqual([], l.lrange(0, -1))
l.rpush('b')
l.rpush('c')
l.lpush('a')
self.assertEqual(['a', 'b', 'c'], l.lrange(0, -1))
self.assertEqual(3, l.llen())
l.ltrim(1, 2)
self.assertEqual(['b', 'c'], l.lrange(0, -1))
self.assertEqual('c', l.lindex(1))
self.assertEqual(1, l.lset(0, 'a'))
self.assertEqual(1, l.lset(1, 'b'))
self.assertEqual(['a', 'b'], l.lrange(0, -1))
self.assertEqual('a', l.lpop())
self.assertEqual('b', l.rpop())
class TypedListTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_basic_types(self):
alpha = cont.TypedList('alpha', unicode, type_args=('UTF-8',))
monies = u'\u0024\u00a2\u00a3\u00a5'
alpha.append(monies)
val = alpha[-1]
self.assertEquals(monies, val)
beta = cont.TypedList('beta', int)
for i in xrange(1000):
beta.append(i)
for i, x in enumerate(beta):
self.assertEquals(i, x)
charlie = cont.TypedList('charlie', float)
for i in xrange(100):
val = 1 * pow(10, i*-1)
charlie.append(val)
for i, x in enumerate(charlie):
val = 1 * pow(10, i*-1)
self.assertEquals(x, val)
def test_model_type(self):
from redisco import models
class Person(models.Model):
name = models.Attribute()
friend = models.ReferenceField('Person')
iamteam = Person.objects.create(name='iamteam')
clayg = Person.objects.create(name='clayg', friend=iamteam)
l = cont.TypedList('friends', 'Person')
l.extend(Person.objects.all())
for person in l:
if person.name == 'clayg':
                self.assertEqual(iamteam, clayg.friend)
            else:
                # this assertion is failing for some reason ???
                #self.assertEquals(person.friend, clayg)
                pass
class SortedSetTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_everything(self):
zorted = cont.SortedSet("Person:age")
zorted.add("1", 29)
zorted.add("2", 39)
zorted.add({"3": '15', "4": 35})
zorted.add({"5": 98, "6": 5})
self.assertEqual(6, len(zorted))
self.assertEqual(35, zorted.score("4"))
self.assertEqual(0, zorted.rank("6"))
self.assertEqual(5, zorted.revrank("6"))
self.assertEqual(3, zorted.rank("4"))
self.assertEqual(["6", "3", "1", "4"], zorted.le(35))
zorted.add("7", 35)
self.assertEqual(["4", "7"], zorted.eq(35))
self.assertEqual(["6", "3", "1"], zorted.lt(30))
self.assertEqual(["4", "7", "2", "5"], zorted.gt(30))
def test_delegateable_methods(self):
zset = cont.SortedSet("Person:all")
zset.zadd("1", 1)
zset.zadd("2", 2)
zset.zadd("3", 3)
zset.zadd("4", 4)
self.assertEqual(4, zset.zcard())
self.assertEqual(4, zset.zscore('4'))
self.assertEqual(['1', '2', '3', '4'], list(zset))
self.assertEqual(zset.zrange(0, -1), list(zset))
self.assertEqual(['4', '3', '2', '1'], zset.zrevrange(0, -1))
self.assertEqual(list(reversed(zset)), zset.zrevrange(0, -1))
self.assertEqual(list(reversed(zset)), list(zset.__reversed__()))
class HashTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushdb()
def tearDown(self):
self.client.flushdb()
def test_basic(self):
h = cont.Hash('hkey')
self.assertEqual(0, len(h))
h['name'] = "Richard Cypher"
h['real_name'] = "Richard Rahl"
pulled = self.client.hgetall('hkey')
self.assertEqual({'name': "Richard Cypher", 'real_name': "Richard Rahl"}, pulled)
self.assertEqual({'name': "Richard Cypher", 'real_name': "Richard Rahl"}, h.dict)
self.assertEqual(['name', 'real_name'], h.keys())
self.assertEqual(["Richard Cypher", "Richard Rahl"],
h.values())
del h['name']
pulled = self.client.hgetall('hkey')
self.assertEqual({'real_name': "Richard Rahl"}, pulled)
self.assert_('real_name' in h)
h.dict = {"new_hash": "YEY"}
self.assertEqual({"new_hash": "YEY"}, h.dict)
def test_delegateable_methods(self):
h = cont.Hash('my_hash')
h.hincrby('Red', 1)
h.hincrby('Red', 1)
h.hincrby('Red', 2)
self.assertEqual(4, int(h.hget('Red')))
h.hmset({'Blue': 100, 'Green': 19, 'Yellow': 1024})
self.assertEqual(['100', '19'], h.hmget(['Blue', 'Green']))
if __name__ == "__main__":
import sys
unittest.main(argv=sys.argv)
# -*- coding: utf-8 -*-
"""Submissions models"""
import tempfile
import os
from datetime import datetime
from flask_babel import gettext
from pysistem import db, app, cache
from pysistem.submissions.const import RESULT_UNKNOWN, RESULT_OK, STATUS_DONE, STR_STATUS
from pysistem.submissions.const import STATUS_WAIT, STATUS_COMPILEFAIL, STATUS_ACT, STR_RESULT
from pysistem.submissions.const import STATUS_COMPILING, STATUS_CHECKING, STATUS_CWAIT
from pysistem.users.model import User
from pysistem.compilers.model import Compiler
from pysistem.problems.model import Problem
from pysistem.checkers.model import Checker
class Submission(db.Model):
"""An attempt to solve a problem
Fields:
id -- unique submission identifier
source -- submission's source code
status -- submission's status
result -- submission's verdict
        compile_log -- submission's compilation log
        score -- submission's score
submitted -- submission datetime
Relationships:
        user, user_id -- the user who made this submission
        compiler, compiler_id -- the compiler this submission was sent via
        problem, problem_id -- the problem this submission attempts to solve
submission_logs -- logs linked with tests in problem
"""
id = db.Column(db.Integer, primary_key=True)
source = db.Column(db.Text)
status = db.Column(db.Integer)
result = db.Column(db.Integer)
compile_log = db.Column(db.Text)
score = db.Column(db.Integer)
submitted = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
compiler_id = db.Column(db.Integer, db.ForeignKey('compiler.id'))
problem_id = db.Column(db.Integer, db.ForeignKey('problem.id'))
submission_logs = db.relationship('SubmissionLog', cascade="all,delete",
backref="submission", lazy="dynamic")
current_test_id = db.Column(db.Integer)
def __init__(self, source=None, user=None, compiler=None, problem=None):
self.source = source
self.status = STATUS_CWAIT
self.result = RESULT_UNKNOWN
self.current_test_id = 0
if isinstance(user, int): # pragma: no cover
user = User.query.get(user)
if isinstance(compiler, int): # pragma: no cover
compiler = Compiler.query.get(compiler)
if isinstance(problem, int): # pragma: no cover
problem = Problem.query.get(problem)
self.user = user
self.compiler = compiler
self.problem = problem
self.score = 0
self.submitted = datetime.now()
def __repr__(self):
return '<Submission #%s>' % str(self.id)
def get_exe_path(self):
"""Get submission's executable path"""
if not self.id:
db.session.commit()
storage_dir = app.config['STORAGE']
return storage_dir + '/submissions_bin/' + str(self.id)
def get_source_path(self):
"""Get submission's source path"""
if not self.id:
db.session.commit()
if os.path.exists('/SANDBOX'):
return '/SANDBOX/pysistem_submission_' + str(self.id) + '.' + self.compiler.lang
else: # pragma: no cover
return tempfile.gettempdir() + '/pysistem_submission_' \
+ str(self.id) + '.' + self.compiler.lang
def compile(self):
"""Compile submission
Returns:
Tuple: (Success?, compiler log)
"""
self.status = STATUS_COMPILING
db.session.commit()
source_path = self.get_source_path()
        with open(source_path, 'w') as source_file:
            source_file.write(self.source)
        try:
            os.remove(self.get_exe_path())
        except OSError:  # pragma: no cover
            pass
result, output = self.compiler.compile(source_path, self.get_exe_path())
if result:
self.status = STATUS_WAIT
else:
self.status = STATUS_COMPILEFAIL
self.compile_log = output
db.session.commit()
        try:
            os.remove(source_path)
        except OSError:  # pragma: no cover
            pass
return result, output
def run(self, stdin='', time_limit=1000, memory_limit=65536, commit_waiting=True):
"""Run submission in sandbox
Arguments:
stdin -- string to pass to submission as stdin
time_limit -- maximum execution time of program in milliseconds
memory_limit -- maximum memory usage of program in KiB
commit_waiting -- commit 'Waiting state' to database?
Returns:
Tuple: (Exit code: see runsbox(1), Program's stdout, Program's stderr: b'')
"""
self.status = STATUS_CHECKING
db.session.commit()
source_path = self.get_source_path()
        with open(source_path, 'w') as source_file:
            source_file.write(self.source)
result, stdout, stderr = self.compiler.run(self.get_exe_path(), source_path, \
time_limit, memory_limit, stdin)
os.remove(source_path)
self.result = result
if commit_waiting:
self.status = STATUS_WAIT
db.session.commit()
return result, stdout, stderr
def done(self):
"""Set status to done"""
self.status = STATUS_DONE
def get_str_result(self, color=False, score=True, only_color=False, result=None, status=None):
"""Get formatted verdict string
Arguments:
color -- enable coloring, will produce HTML markup
score -- show score
        only_color -- return only the Bootstrap coloring class: 'success', 'danger', etc.
result -- overriding self.result
status -- overriding self.status
"""
result = result if result is not None else self.result
status = status if status is not None else self.status
if status in [STATUS_DONE, STATUS_ACT]:
res = STR_RESULT[result]
if score:
res += ' (%d %s %d)' % (self.score, gettext('common.outof'), self.problem.get_max_score())
if color or only_color:
                if result == RESULT_OK:
if only_color:
return 'success'
res = '<span class="text-success">' + res + '</span>'
else:
if only_color:
return 'danger'
res = '<span class="text-danger">' + res + '</span>'
return res
else:
res = STR_STATUS[status]
if color or only_color:
if only_color:
return 'warning'
res = '<span class="text-warning">' + res + '</span>'
return res
def is_compile_failed(self):
"""Check if compilation error occurred"""
return self.status == STATUS_COMPILEFAIL
def check(self, session=None):
"""Start sync checking of submission"""
session = session or db.session
if not self.id:
session.commit()
        try:
            cache.delete("/submission/view/%d/%r" % (self.id, True))
            cache.delete("/submission/view/%d/%r" % (self.id, False))
        except Exception:
            pass
checker = self.problem.checkers.filter(Checker.status == STATUS_ACT).first()
if checker is None:
return -1
self.current_test_id = 0
return checker.check(self, session)
class SubmissionLog(db.Model):
"""A submission <-> test pair log
Fields:
result -- pysistem.submissions.const result
log -- checker's log
stdout -- submission's output
Relationships:
submission, submission_id -- submission
test_pair, test_pair_id -- test_pair
"""
result = db.Column(db.Integer)
log = db.Column(db.Text)
stdout = db.Column(db.Text)
submission_id = db.Column(db.Integer, db.ForeignKey('submission.id'), primary_key=True)
test_pair_id = db.Column(db.Integer, db.ForeignKey('test_pair.id'), primary_key=True)
def __init__(self, result=None, log=None, stdout=None, submission=None, test_pair=None):
self.result = result
self.log = log
self.stdout = stdout
self.submission = submission
self.test_pair = test_pair
def __repr__(self):
if self.submission and self.test_pair:
return '<SubmissionLog Submission=%s TestPair=%s>' % (self.submission, self.test_pair)
else:
return '<SubmissionLog Unknown>'
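# Illustrative usage (not part of the original module): a minimal sketch of the
# submission lifecycle, assuming an application context and already-persisted
# User, Compiler and Problem rows. The helper name and sample source are
# hypothetical.
def _example_submission_flow(user, compiler, problem):
    submission = Submission(source='print(42)', user=user,
                            compiler=compiler, problem=problem)
    db.session.add(submission)
    db.session.commit()
    success, log = submission.compile()   # -> (Success?, compiler log)
    if success:
        submission.check()                # sync check against the active checker
    return submission.get_str_result(color=False, score=True)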
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
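# Illustrative only (not produced by AutoRest): a minimal sketch of the GET
# request the builder above assembles; the resource names are placeholders.
def _example_list_request() -> HttpRequest:
    return build_list_request(
        resource_group_name="example-rg",
        account_name="examplestorage",
        subscription_id="00000000-0000-0000-0000-000000000000",
    )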
def build_get_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_put_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
private_endpoint_connection_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> Iterable["_models.PrivateEndpointConnectionListResult"]:
"""List all the private endpoint connections associated with the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_02_01.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Gets the specified private endpoint connection associated with the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace
def put(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
properties: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Update the state of specified private endpoint connection associated with the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource.
:type private_endpoint_connection_name: str
:param properties: The private endpoint connection properties.
:type properties: ~azure.mgmt.storage.v2021_02_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(properties, 'PrivateEndpointConnection')
request = build_put_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
private_endpoint_connection_name=private_endpoint_connection_name,
content_type=content_type,
json=_json,
template_url=self.put.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> None:
"""Deletes the specified private endpoint connection associated with the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
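# Illustrative usage (not part of the generated client): a minimal sketch,
# assuming azure-identity is installed and that the versioned client exposes
# this operation group as `private_endpoint_connections`; all resource names
# are placeholders.
def _example_usage():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.storage import StorageManagementClient

    client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for connection in client.private_endpoint_connections.list(
            "<resource-group>", "<storage-account>"):
        print(connection.name)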
#!/usr/local/bin/python
from __future__ import with_statement
__license__ = """
Copyright (c) 2014, IDPF, Will Manis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import zipfile
import posixpath
import urllib
import uuid
import time
import xmlom
import xmlElement
__author__ = 'wmanis'
componentDirectory = 'components'
componentNamespace = 'component: http://www.idpf.org/vocab/component/#'
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
class EPUBSpineItem:
def __init__(self, zipfile, path):
self.zipfile_ = zipfile
self.path_ = path
self.spineXML_ = xmlom.XMLOM(self.zipfile_.getBytes(path))
def insert(self, elementID, src):
def walk(node, elementid):
attributes = xmlElement.getAttributes(node)
for attr in attributes:
if attr == 'id' and attributes[attr] == elementid:
return node
children = xmlElement.getChildElements(node)
            for child in children:
                found = walk(child, elementid)
                if found is not None:
                    return found
return None
        node = walk(self.spineXML_.getRootElement(), elementID)
        if node is not None:
            xmlElement.setAttribute(node, 'src', src)
            return
        raise ValueError("no element with that id")
def tostring(self):
lines = self.spineXML_.toPrettyXML()
lines = lines.split('\n')
trimmedlines = []
for line in lines:
line = line.rstrip()
if len(line) != 0:
trimmedlines.append(line)
print "\n\nPatched html file\n==============================================="
print "==============================================="
print '\n'.join(trimmedlines)
print "==============================================="
print "===============================================\n\n"
return '\n'.join(trimmedlines)
def update(self):
self.zipfile_.putfile(self.path_, self.tostring())
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
class PackageDom(xmlom.XMLOM):
def getPackageElement(self):
return self.getRootElement()
# ---------------------------------------------------------------------------
# get the manifest
def getManifest(self):
return self.findChildrenByTagName(self.getRootElement(), 'manifest')[0]
# ---------------------------------------------------------------------------
    # get the manifest items
def getManifestItems(self):
items = []
for item in self.getManifest().childNodes:
if xmlElement.isElement(item):
items.append(item)
return items
# ---------------------------------------------------------------------------
# get the metadata
def getMetadata(self):
return self.findAllByTagName('metadata')[0]
# ---------------------------------------------------------------------------
    # get the metadata items
def getMetadataItems(self):
return self.findChildrenByTagName(self.getMetadata(), 'meta')
# ---------------------------------------------------------------------------
# get the metadata with attr & value
def getOpfMetadataItemsByAttr(self, name, value=None):
items = []
metadataitems = self.getMetadataItems()
for item in metadataitems:
            itemValue = xmlElement.getAttributeValue(item, name)
            if itemValue is None:
                # no attribute with that name - continue
                continue
            elif value is None:
                # we don't care about the value, so this is a match
                items.append(item)
            elif itemValue == value:
                # the value matches, so this is a match
                items.append(item)
            # otherwise the value differs and this is not a match
return items
#---------------------------------------------------------------------------
# get the spine
def getSpine(self):
return self.findAllByTagName('spine')[0]
#---------------------------------------------------------------------------
# get the spine items
def getSpineItems(self):
return self.findChildrenByTagName(self.getSpine(), 'itemref')
#---------------------------------------------------------------------------
# get the collections
def getCollections(self):
return self.findChildrenByTagName(self.getRootElement(), 'collection')
#---------------------------------------------------------------------------
# get the component collections
    def getComponentCollections(self):
        # filter into a new list; removing from the list while iterating
        # over it would skip elements
        collections = self.getCollections()
        return [collection for collection in collections
                if xmlElement.getAttributeValue(collection, 'role') == 'component:component']
#---------------------------------------------------------------------------
# get a component collection
def getComponentCollection(self, vendor, componentName):
collections = self.getCollections()
found = 0
for collection in collections:
metadata = self.findChildrenByTagName(collection, 'metadata')
if len(metadata) == 1:
found = 0
metas = self.findChildrenByTagName(metadata[0], 'meta')
for meta in metas:
#<meta property="component:creator">Acme</meta>
#<meta property="component:name">Gallery_example</meta>
propval = xmlElement.getAttributeValue(meta, 'property')
if propval == "component:creator" and xmlElement.getText(meta) == vendor:
found += 1
elif propval == "component:name" and xmlElement.getText(meta) == componentName:
found += 1
if found == 2:
return collection
return None
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
class EPUBZipContainer:
def __init__(self, name, opt='r', debug=False):
self.name_ = name
self.zipfile_ = zipfile.ZipFile(os.path.normpath(name), opt)
self.__unzip()
self.opfpath_ = None
if len(self.names_) == 0:
# this is a blank epub, need to create an opf file and meta-inf
self.createMetaInf()
self.createPackageFile()
self.getOpfPath()
self.packagedom_ = PackageDom(self.contents_[self.opfpath_])
self.debug_ = debug
# ------------------------------------------------------------------------------
# interface definition
def createMetaInf(self):
pass
# ------------------------------------------------------------------------------
# interface definition
def createPackageFile(self):
pass
# ---------------------------------------------------------------------------
# get file name
def get_filename(self):
return self.name_
# ---------------------------------------------------------------------------
    # unzip the archive contents into memory
def __unzip(self):
self.names_ = self.zipfile_.namelist()
self.contents_ = {}
for name in self.names_:
self.contents_[name] = self.zipfile_.read(name)
# ---------------------------------------------------------------------------
# update original
def close(self, outputFilename):
        if outputFilename is None:
outputFilename = posixpath.normpath(posixpath.join(posixpath.splitext(self.name_)[0] + ".merged.epub"))
else:
outputFilename = posixpath.normpath(outputFilename)
if self.name_ == outputFilename:
self.zipfile_.close()
if os.path.exists(os.path.normpath(outputFilename)):
os.remove(os.path.normpath(outputFilename))
newzipfile = zipfile.ZipFile(os.path.normpath(outputFilename), 'a')
newzipfile.writestr('mimetype', self.contents_['mimetype'])
self.contents_.pop('mimetype')
for name in self.contents_:
newzipfile.writestr(name, self.contents_[name])
newzipfile.close()
if self.name_ != outputFilename:
self.zipfile_.close()
# ---------------------------------------------------------------------------
    # get file bytes by name
def getBytes(self, name):
return self.contents_[name]
# ---------------------------------------------------------------------------
# get the path to the opf file from container.xml
def getOpfPath(self):
        if self.opfpath_ is None:
xmlDom = xmlom.XMLOM(self.contents_['META-INF/container.xml'])
root = xmlDom.findAllByTagName('rootfile')[0]
self.opfpath_ = xmlDom.getAttribute(root, 'full-path')
return self.opfpath_
def getOpfDirectory(self):
path = self.getOpfPath()
return posixpath.dirname(path)
# ---------------------------------------------------------------------------
# get the package xmldom
def getOpfDom(self):
        if self.packagedom_ is None:
path = self.getOpfPath()
opfXML = self.contents_[path]
self.packagedom_ = xmlom.XMLOM(opfXML)
return self.packagedom_
# ---------------------------------------------------------------------------
    # get a file's contents from the container
def getfile(self, path):
return self.contents_[path]
# ---------------------------------------------------------------------------
    # put a file's contents into the container
def putfile(self, path, text):
self.contents_[path] = text
#---------------------------------------------------------------------------
# get the manifest
def getOpfManifest(self):
return self.packagedom_.getManifest()
#---------------------------------------------------------------------------
# get the manifest
def getOpfManifestItems(self):
return self.packagedom_.getManifestItems()
#---------------------------------------------------------------------------
# get the metadata
def getOpfMetadata(self):
return self.packagedom_.getMetadata()
#---------------------------------------------------------------------------
# get the spine
def getOpfSpine(self):
return self.packagedom_.getSpine()
#---------------------------------------------------------------------------
# get the spine items
def getOpfSpineItems(self):
return self.packagedom_.getSpineItems()
#---------------------------------------------------------------------------
def getSpineItemPath(self, xmlid):
manifest = self.getOpfManifestItems()
for item in manifest:
if xmlElement.getAttributeValue(item, 'id') == xmlid:
return xmlElement.getAttributeValue(item, 'href')
return None
#---------------------------------------------------------------------------
# get spineitem files
def getOpfSpineItemFiles(self):
spinefiles = []
spineitems = self.getOpfSpineItems()
for itemref in spineitems:
idref = xmlElement.getAttributeValue(itemref, 'idref')
spinefiles.append(self.getSpineItemPath(idref))
return spinefiles
#---------------------------------------------------------------------------
    # get the collections
def getOpfCollections(self):
return self.packagedom_.getCollections()
#---------------------------------------------------------------------------
# debug - print out opf
def printOPF(self):
path = self.getOpfPath()
opfXML = self.contents_[path]
print opfXML
#---------------------------------------------------------------------------
def getComponentRelativePath(self, componentDir):
dstOPFPath = self.getOpfPath()
        return posixpath.relpath(componentDir, posixpath.dirname(dstOPFPath))
#---------------------------------------------------------------------------
# transfer component assets and update the destination opf file
def transferItems(self, srcComponent, dstDir):
# get items from manifest for transfer
itemList = srcComponent.getOpfManifestItems()
srcOpfDir = posixpath.dirname(srcComponent.getOpfPath())
for item in itemList:
if xmlElement.getAttributeValue(item, 'properties') == 'nav':
#do not copy nav doc
continue
href = xmlElement.getAttributeValue(item, 'href')
srcPath = posixpath.normpath(posixpath.join(srcOpfDir, href))
dstPath = posixpath.normpath(posixpath.join(dstDir, href.split('../').pop()))
# copy the bytes over
srcbytes = srcComponent.getfile(srcPath)
self.putfile(dstPath, srcbytes)
#---------------------------------------------------------------------------
# build the collection containing the component
def buildCollection(self, dstComponentDir, items, srcMetadata, idref, vendorName, componentName):
rootElement = self.getOpfDom().getPackageElement()
# add comment
commentString = ' start of component ' + vendorName + ' - ' + componentName + ' transfer time ' + time.asctime(
time.gmtime()) + ' UTC '
xmlElement.addComment(rootElement, commentString)
# add collection
collection = xmlElement.addChildElement(rootElement, 'collection', {'role': 'component:component'})
# add metadata to collection
metadata = xmlElement.addChildElement(collection, 'metadata')
for datum in srcMetadata:
meta = xmlElement.addChildElement(metadata, 'meta')
xmlElement.setAttribute(meta, 'property', datum['property'])
xmlElement.addTextNode(meta, datum['value'])
# add manifest collection to collection
collectionManifest = xmlElement.addChildElement(collection, 'collection', {'role': 'manifest'})
component = None
for item in items:
link = xmlElement.addChildElement(collectionManifest, "link")
if idref == xmlElement.getAttributeValue(item, 'id'):
component = item
href = xmlElement.getAttributeValue(item, 'href')
dstPath = posixpath.normpath(posixpath.join(dstComponentDir, href))
xmlElement.setAttribute(link, 'href', dstPath)
# add the html of the component
link = xmlElement.addChildElement(collection, 'link')
href = posixpath.normpath(posixpath.join(dstComponentDir, xmlElement.getAttributeValue(component, 'href')))
xmlElement.setAttribute(link, 'href', href)
#---------------------------------------------------------------------------
# add the component items to the manifest
def addManifestItems(self, dstComponentDir, items, vendorName, componentName):
# get the manifest element of the package file
dstManifest = self.getOpfManifest()
# add comment to indicate start component items
xmlElement.addComment(dstManifest,
' start of component manifest items ' + vendorName + ' - ' + componentName + ' ')
idprefix = EPUBComponentZipContainer.getIDPrefix(vendorName, componentName)
for item in items:
newitem = xmlElement.addChildElement(dstManifest, item.localName)
attributes = xmlElement.getAttributes(item)
for attr in attributes:
value = attributes[attr]
if attr == 'href':
href = posixpath.normpath(posixpath.join(dstComponentDir, value))
xmlElement.setAttribute(newitem, 'href', href)
elif attr != 'id':
xmlElement.setAttribute(newitem, attr, value)
else:
xmlElement.setAttribute(newitem, attr, idprefix + value)
# add comment to indicate end of component items
xmlElement.addComment(dstManifest,
' end of component manifest items ' + vendorName + ' - ' + componentName + ' ')
#---------------------------------------------------------------------------
    # transfer data into the destination package file
def transferMetadata(self, srcComponent, dstComponentDir, vendorName, componentName):
# get the component items, ignoring the nav doc
items = srcComponent.getOpfManifestItems()
for item in items:
if xmlElement.getAttributeValue(item, 'properties') == 'nav':
items.remove(item)
break
# get the idref of the component base html doc
srcSpineItems = srcComponent.getOpfSpineItems()
assert (len(srcSpineItems) == 1)
idref = xmlElement.getAttributeValue(srcSpineItems[0], 'idref')
# create component collection
self.buildCollection(dstComponentDir, items, srcComponent.getComponentMetadata(), idref, vendorName,
componentName)
# copy over component items into manifest
self.addManifestItems(dstComponentDir, items, vendorName, componentName)
# ensure component vocab is present
package = self.getOpfDom().getPackageElement()
prefix = xmlElement.getAttributeValue(package, 'prefix')
        if prefix is None:
xmlElement.setAttribute(package, 'prefix', componentNamespace)
elif prefix.find(componentNamespace) < 0:
xmlElement.setAttribute(package, 'prefix', prefix + ' ' + componentNamespace)
if self.debug_:
print "\n\nIntegrated package file\n==============================================="
print "==============================================="
print self.getOpfDom().toPrettyXML()
print "==============================================="
print "===============================================\n\n"
# write out the updated manifest
self.putfile(self.getOpfPath(), self.getOpfDom().toPrettyXML())
def testComponentExistance(self, creator, name):
collections = self.getOpfDom().getComponentCollections()
for collection in collections:
creatorname = EPUBComponentZipContainer.getCollectionCreatorAndName(collection)
if creatorname['creator'] == creator and creatorname['name'] == name:
return True
return False
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class ComponentZipContainer(EPUBZipContainer):
prefix = 'component'
namespace = 'component:http://www.idpf.org/vocab/component/#'
#------------------------------------------------------------------------------
def __init__(self, name, creator=None, componentName=None, debug=True):
self.creator_ = creator
self.componentName_ = componentName
        if self.creator_ is None:
opt = 'r'
else:
opt = 'a'
EPUBZipContainer.__init__(self, name, opt, debug)
#------------------------------------------------------------------------------
# create the metainf for this epub
def createMetaInf(self):
# TODO verify this is correct
opfpath = '"' + posixpath.normpath(posixpath.join(self.componentName_, 'content.opf')) + '"'
blank_metainf = """<?xml version="1.0" encoding="UTF-8"?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile full-path=""" + opfpath + """ media-type="application/oebps-package+xml"/>
</rootfiles>
</container>"""
self.contents_['META-INF/container.xml'] = blank_metainf
#------------------------------------------------------------------------------
    # create a boilerplate package file, to be filled in with real data
def createPackageFile(self):
blank_packagefile = """<?xml version="1.0" encoding="UTF-8"?>
<package xmlns="http://www.idpf.org/2007/opf" version="3.0" xml:lang="en" unique-identifier="uid"
prefix="component: http://www.idpf.org/vocab/component/#">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/">
</metadata>
<manifest>
</manifest>
<spine>
</spine>
</package>"""
self.contents_[posixpath.normpath(posixpath.join(self.componentName_, 'content.opf'))] = blank_packagefile
#------------------------------------------------------------------------------
# create mimetype
def createMimeType(self):
self.contents_['mimetype'] = "application/epub+zip"
#------------------------------------------------------------------------------
    # extract the component from the source epub
def extract(self, srcEpub):
print "Extract: ", self.creator_, self.componentName_
collection = srcEpub.getOpfDom().getComponentCollection(self.creator_, self.componentName_)
manifest = srcEpub.getOpfDom().getManifestItems()
manifestDict = {}
for item in manifest:
manifestDict[xmlElement.getAttributeValue(item, 'href')] = item
        if collection is None:
print "No component from:", self.creator_, self.componentName_
return False
self.createMimeType()
self.buildOpf(manifestDict, collection)
self.transferInItems(srcEpub, collection)
return True
#---------------------------------------------------------------------------
def buildNavDoc(self, linkref):
navDoc1_ = """<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" xml:lang='en-us' lang='en-us'>
<head>
<title>TOC</title>
<meta charset="utf-8" />
</head>
<body>
<nav epub:type="toc" id="toc">
<ol>
<li><a href='"""
navDoc2_ = """'>Gallery</a>
</li>
</ol>
</nav>
</body>
</html>
"""
navDoc = navDoc1_ + linkref + navDoc2_
print navDoc
self.contents_[posixpath.join(self.getOpfDirectory(), 'nav.xhtml')] = navDoc
#---------------------------------------------------------------------------
# transfer component assets and update the destination opf file
def transferInItems(self, srcEpub, collection):
# get items from manifest for transfer
srcManifest = xmlom.findChildrenByTagName(collection, 'collection')
        srcItems = xmlom.findChildrenByTagName(srcManifest[0], 'link')
for item in srcItems:
href = xmlElement.getAttributeValue(item, 'href')
parts = href.split(self.componentName_ + '/')
newhref = parts.pop()
srcPath = posixpath.normpath(posixpath.join(srcEpub.getOpfDirectory(), href))
dstPath = posixpath.normpath(posixpath.join(self.getOpfDirectory(), newhref))
# copy the bytes over
srcbytes = srcEpub.getfile(srcPath)
self.putfile(dstPath, srcbytes)
#------------------------------------------------------------------------------
def buildOpf(self, manifestDict, collection):
self.copyMetadata(collection)
collectionSpine = xmlom.findChildrenByTagName(collection, 'link')
self.buildManifest(manifestDict, collection, xmlElement.getAttributeValue(collectionSpine[0], 'href'))
print self.getOpfDom().toPrettyXML()
# write out the updated manifest
self.putfile(self.getOpfPath(), self.getOpfDom().toPrettyXML())
#------------------------------------------------------------------------------
# copy over meta data
def copyMetadata(self, collection):
srcMetadata = xmlElement.findFirstChildElement(collection, 'metadata')
srcMetadatas = xmlElement.getChildElements(srcMetadata)
dstMetadata = self.getOpfDom().getMetadata()
for meta in srcMetadatas:
newmeta = xmlElement.addChildElement(dstMetadata, meta.localName, xmlElement.getAttributes(meta))
xmlElement.addTextNode(newmeta, xmlElement.getText(meta))
# <dc:type>scriptable-component</dc:type>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:type')
xmlElement.addTextNode(newmeta, 'scriptable-component')
# <dc:creator>Acme</dc:creator>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:creator')
xmlElement.addTextNode(newmeta, self.creator_)
# <dc:title id="title">Gallery</dc:title>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:title', {'id': 'title'})
xmlElement.addTextNode(newmeta, self.componentName_)
# <dc:description>Gallery_example</dc:description>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:description')
xmlElement.addTextNode(newmeta, 'Extracted component')
# <dc:identifier id="uid">1234567</dc:identifier>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:identifier', {'id': 'uid'})
xmlElement.addTextNode(newmeta, str(uuid.uuid4()))
# <dc:language>en-US</dc:language>
newmeta = xmlElement.addChildElement(dstMetadata, 'dc:language')
xmlElement.addTextNode(newmeta, 'en-us')
#print xmlElement.toPrettyXML(dstMetadata)
#------------------------------------------------------------------------------
    # build the manifest for the extracted component
def buildManifest(self, manifestDict, collection, linkref):
collectionManifest = xmlElement.findFirstChildElement(collection, 'collection', {'role': 'manifest'})
manifestitems = xmlElement.getChildElements(collectionManifest)
# <item id="nav" href="nav.xhtml" media-type="application/xhtml+xml" properties="nav"/>
manifestitems.append(xmlElement.addChildElement(collectionManifest, 'item',
{'id': 'nav', 'href': 'nav.xhtml',
'media-type': 'application/xhtml+xml', 'properties': 'nav'}))
dstManifest = self.getOpfDom().getManifest()
idprefix = EPUBComponentZipContainer.getIDPrefix(self.creator_, self.componentName_)
newManifestDict = {}
for item in manifestitems:
newitem = xmlElement.addChildElement(dstManifest, 'item', xmlElement.getAttributes(item))
# TODO make more robust
href = xmlElement.getAttributeValue(newitem, 'href')
parts = href.split(self.componentName_ + '/')
newhref = posixpath.normpath(parts.pop())
xmlElement.setAttribute(newitem, 'href', newhref)
attributes = xmlElement.getAttributes(manifestDict[href])
for attr in attributes:
if attr == 'id':
idvalue = xmlElement.getAttributeValue(manifestDict[href], 'id')
idvalue = idvalue.split(idprefix).pop()
xmlElement.setAttribute(newitem, 'id', idvalue)
elif attr == 'href':
                    continue
else:
xmlElement.setAttribute(newitem, attr, attributes[attr])
if href == linkref:
self.buildSpine(idvalue, newhref)
#print xmlElement.toPrettyXML(dstManifest)
#------------------------------------------------------------------------------
    # build the spine and nav doc for the extracted component
def buildSpine(self, idvalue, linkref):
spine = self.getOpfDom().getSpine()
xmlElement.addChildElement(spine, 'itemref', {'idref': idvalue})
#xmlElement.addChildElement(spine, 'itemref', {'idref' : 'nav'})
self.buildNavDoc(linkref)
#------------------------------------------------------------------------------
# get the component metadata from the opf
def getComponentMetadata(self):
componentMetadatum = []
metadataItems = self.getOpfDom().getMetadataItems()
for meta in metadataItems:
prop = xmlElement.getAttributeValue(meta, 'property')
if prop != None:
componentMetadatum.append({'property': prop, 'value': xmlElement.getText(meta)})
return componentMetadatum
#------------------------------------------------------------------------------
    # get the component manifest from the opf
def getComponentManifest(self):
componentManifest = []
manifestItems = self.getOpfManifestItems()
for item in manifestItems:
componentManifest.append({'property': item.get('property'), 'value': item.text})
return componentManifest
#------------------------------------------------------------------------------
# get component base html
def getComponentHTML(self):
return self.getOpfSpineItemFiles()[0]
#---------------------------------------------------------------------------
# get the component creator and name from the meta properties
def getComponentCreatorAndName(self):
creatorProp = self.prefix + ":creator"
nameProp = self.prefix + ":name"
metadata = self.getComponentMetadata()
for meta in metadata:
if meta['property'] == creatorProp:
self.creator_ = meta['value']
if meta['property'] == nameProp:
self.componentName_ = meta['value']
        if self.creator_ is not None and self.componentName_ is not None:
return {'creator': urllib.quote(self.creator_), 'name': urllib.quote(self.componentName_)}
return {'creator': None, 'name': None}
#---------------------------------------------------------------------------
    # set the component creator and name in the meta properties
def setComponentCreatorAndName(self, creator, name):
creatorProp = self.prefix + ":creator"
nameProp = self.prefix + ":name"
metadata = self.getComponentMetadata()
for meta in metadata:
if meta['property'] == creatorProp:
meta['value'] = creator
if meta['property'] == nameProp:
meta['value'] = name
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class EPUBComponentZipContainer(EPUBZipContainer):
@staticmethod
def getCollectionCreatorAndName(collection):
creatorName = {'creator': None, 'name': None}
metadata = xmlom.findChildrenByTagName(collection, 'metadata')
if len(metadata) == 1:
metas = xmlom.findChildrenByTagName(metadata[0], 'meta')
for meta in metas:
propval = xmlElement.getAttributeValue(meta, 'property')
if propval == "component:creator":
creatorName['creator'] = xmlElement.getText(meta)
elif propval == "component:name":
creatorName['name'] = xmlElement.getText(meta)
return creatorName
#---------------------------------------------------------------------------
@staticmethod
def getComponentDir(creator, name):
path = posixpath.normpath(posixpath.join(componentDirectory, creator, name))
return path
#---------------------------------------------------------------------------
@staticmethod
def getIDPrefix(creator, name):
return creator + '_' + name + '_'
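#------------------------------------------------------------------------------
# Illustrative only (not part of the original tool): a minimal sketch of the
# extraction flow, assuming 'book.epub' holds a component published by the
# named creator; the file and component names are placeholders.
def _example_extract():
    src = EPUBComponentZipContainer('book.epub')
    component = ComponentZipContainer('gallery.epub', creator='Acme',
                                      componentName='Gallery_example')
    if component.extract(src):
        component.close('gallery.epub')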
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, reverse_lazy, resolve, get_callable,
get_resolver, NoReverseMatch, Resolver404, ResolverMatch, RegexURLResolver,
RegexURLPattern)
from django.http import HttpRequest, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from . import urlconf_outer, middleware, views
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    # Unnamed args are dropped if you have *any* kwargs in a pattern (see the
    # sketch below this data tuple)
('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views will be resolved to the function/class name
('/unnamed/normal/42/37/', 'urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', 'urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42', '37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}),
)
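def _resolve_mixed_args_example():
    """Illustrative sketch (not exercised by the test suite): when a pattern
    captures both positional and named groups, resolve() drops the unnamed
    args, as the 'mixed-args' entries above expect."""
    match = resolve('/mixed_args/42/37/')
    return match.args, match.kwargs  # -> (), {'arg2': '37'}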
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/%2B%5C%24%2A/', [r'+\$*'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('kwargs_view', '/arg_view/', [], {}),
('kwargs_view', '/arg_view/10/', [], {'arg1': 10}),
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1': 10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
urls = 'urlpatterns_reverse.no_urls'
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', self.urls)
self.assertRaisesMessage(ImproperlyConfigured,
"The included urlconf urlpatterns_reverse.no_urls "
"doesn't have any patterns in it", getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
urls = 'urlpatterns_reverse.urls'
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
def test_prefix_braces(self):
self.assertEqual('/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include', prefix='/{{invalid}}/'))
def test_prefix_parenthesis(self):
self.assertEqual('/bogus%29/includes/non_path_include/',
reverse('non_path_include', prefix='/bogus)/'))
def test_prefix_format_char(self):
self.assertEqual('/bump%2520map/includes/non_path_include/',
reverse('non_path_include', prefix='/bump%20map/'))
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022
self.assertEqual('/%7Eme/places/1/',
reverse('places', args=[1], prefix='/~me/'))
def test_patterns_reported(self):
# Regression for #17076
try:
# this url exists, but requires an argument
reverse("people", args=[])
except NoReverseMatch as e:
pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
self.assertIn(pattern_description, str(e))
else:
# we can't use .assertRaises, since we want to inspect the
# exception
self.fail("Expected a NoReverseMatch, but none occurred.")
class ResolverTests(unittest.TestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced urlconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
try:
resolver.resolve(proxy_url)
except TypeError:
self.fail('Failed to coerce lazy object to text')
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
            self.assertTrue('tried' in e.args[0])
            tried = e.args[0]['tried']
            self.assertEqual(len(tried), len(url_types_names),
                'Wrong number of tried URLs returned. Expected %s, got %s.' % (
                    len(url_types_names), len(tried)))
            for tried_urls, expected_urls in zip(tried, url_types_names):
                for t, expected in zip(tried_urls, expected_urls):
                    self.assertIsInstance(t, expected['type'],
                        '%s is not an instance of %s' % (t, expected['type']))
                    if 'name' in expected:
                        if not expected['name']:
                            self.assertTrue(t.name is None,
                                'Expected no URL name but found %s.' % t.name)
                        else:
                            self.assertEqual(t.name, expected['name'],
                                'Wrong URL name. Expected "%s", got "%s".' % (
                                    expected['name'], t.name))
class ReverseLazyTest(TestCase):
urls = 'urlpatterns_reverse.reverse_lazy_urls'
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=301)
def test_user_permission_with_lazy_reverse(self):
User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
urls = 'urlpatterns_reverse.urls'
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
urls = 'urlpatterns_reverse.namespace_urls'
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
    def test_ambiguous_urlpattern(self):
        "Names deployed via namespaced urlpatterns can't be resolved without the namespace"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
    def test_namespace_pattern_with_variable_prefix(self):
        "Namespaced include()s work even when a regex variable captures a prefix in front of them"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
    def test_app_lookup_object_with_default(self):
        "A default application namespace that is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
    def test_app_lookup_object_without_default(self):
        "An application namespace without a default uses the 'current' app for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(TestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,'
b'inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.ChangeURLconfMiddleware' % middleware.__name__,
)
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
)
)
def test_urlconf_overridden_with_null(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
)
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
)
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
)
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=(
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
)
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(TestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve400(), handler)
self.assertEqual(self.resolver.resolve404(), handler)
self.assertEqual(self.resolver.resolve500(), handler)
    def test_callable_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve400(), handler)
self.assertEqual(self.callable_resolver.resolve404(), handler)
self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
urls = 'urlpatterns_reverse.urls_without_full_import'
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
urls = None
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
urls = 'urlpatterns_reverse.namespace_urls'
def test_urlpattern_resolve(self):
for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, name)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.func, func)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
class ErroneousViewTests(TestCase):
urls = 'urlpatterns_reverse.erroneous_urls'
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')
def test_erroneous_reverse(self):
"""
Ensure that a useful exception is raised when a regex is invalid in the
URLConf.
Refs #6170.
"""
# The regex error will be hit before NoReverseMatch can be raised
self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(TestCase):
def test_view_loading(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*",
get_callable,
'urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
self.assertRaises(AttributeError, get_callable,
'urlpatterns_reverse.views_broken.i_am_broken')
| |
"""Support for a Genius Hub system."""
from datetime import timedelta
import logging
from typing import Any, Dict, Optional
import aiohttp
from geniushubclient import GeniusHub
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_HOST,
CONF_MAC,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import verify_domain_control
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "geniushub"
# temperature is repeated here, as it gives access to high-precision temps
GH_ZONE_ATTRS = ["mode", "temperature", "type", "occupied", "override"]
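# maps raw hub attribute names to the snake_case keys exposed via
# device_state_attributes (see GeniusDevice.device_state_attributes below)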
GH_DEVICE_ATTRS = {
"luminance": "luminance",
"measuredTemperature": "measured_temperature",
"occupancyTrigger": "occupancy_trigger",
"setback": "setback",
"setTemperature": "set_temperature",
"wakeupInterval": "wakeup_interval",
}
SCAN_INTERVAL = timedelta(seconds=60)
MAC_ADDRESS_REGEXP = r"^([0-9A-F]{2}:){5}([0-9A-F]{2})$"
V1_API_SCHEMA = vol.Schema(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_MAC): vol.Match(MAC_ADDRESS_REGEXP),
}
)
V3_API_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MAC): vol.Match(MAC_ADDRESS_REGEXP),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Any(V3_API_SCHEMA, V1_API_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
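# Illustrative configuration shapes accepted by CONFIG_SCHEMA above (all
# values are placeholders, not real credentials):
#
#   geniushub:                      # v1 API (cloud, via token)
#     token: "GENIUS_HUB_TOKEN"
#     mac: "AA:BB:CC:DD:EE:FF"
#
#   geniushub:                      # v3 API (local hub)
#     host: "192.168.1.10"
#     username: "admin"
#     password: "changeme"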
ATTR_ZONE_MODE = "mode"
ATTR_DURATION = "duration"
SVC_SET_ZONE_MODE = "set_zone_mode"
SVC_SET_ZONE_OVERRIDE = "set_zone_override"
SET_ZONE_MODE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_ZONE_MODE): vol.In(["off", "timer", "footprint"]),
}
)
SET_ZONE_OVERRIDE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=4, max=28)
),
vol.Optional(ATTR_DURATION): vol.All(
cv.time_period, vol.Range(min=timedelta(minutes=5), max=timedelta(days=1)),
),
}
)
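# A minimal sketch of service call data satisfying the schemas above (entity
# id and values are hypothetical):
#
#   {"entity_id": "climate.lounge", "mode": "footprint"}          # set_zone_mode
#   {"entity_id": "climate.lounge", "temperature": 21.0,
#    "duration": {"minutes": 60}}                                 # set_zone_override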
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Create a Genius Hub system."""
hass.data[DOMAIN] = {}
kwargs = dict(config[DOMAIN])
if CONF_HOST in kwargs:
args = (kwargs.pop(CONF_HOST),)
else:
args = (kwargs.pop(CONF_TOKEN),)
hub_uid = kwargs.pop(CONF_MAC, None)
client = GeniusHub(*args, **kwargs, session=async_get_clientsession(hass))
broker = hass.data[DOMAIN]["broker"] = GeniusBroker(hass, client, hub_uid)
try:
await client.update()
except aiohttp.ClientResponseError as err:
_LOGGER.error("Setup failed, check your configuration, %s", err)
return False
broker.make_debug_log_entries()
async_track_time_interval(hass, broker.async_update, SCAN_INTERVAL)
for platform in ["climate", "water_heater", "sensor", "binary_sensor", "switch"]:
hass.async_create_task(async_load_platform(hass, platform, DOMAIN, {}, config))
setup_service_functions(hass, broker)
return True
@callback
def setup_service_functions(hass: HomeAssistantType, broker):
"""Set up the service functions."""
@verify_domain_control(hass, DOMAIN)
async def set_zone_mode(call) -> None:
"""Set the system mode."""
entity_id = call.data[ATTR_ENTITY_ID]
registry = await hass.helpers.entity_registry.async_get_registry()
registry_entry = registry.async_get(entity_id)
if registry_entry is None or registry_entry.platform != DOMAIN:
raise ValueError(f"'{entity_id}' is not a known {DOMAIN} entity")
if registry_entry.domain != "climate":
raise ValueError(f"'{entity_id}' is not an {DOMAIN} zone")
payload = {
"unique_id": registry_entry.unique_id,
"service": call.service,
"data": call.data,
}
async_dispatcher_send(hass, DOMAIN, payload)
hass.services.async_register(
DOMAIN, SVC_SET_ZONE_MODE, set_zone_mode, schema=SET_ZONE_MODE_SCHEMA
)
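    # NB: set_zone_mode also backs the override service below; the handler only
    # packages call.service/call.data into the dispatcher payload, and
    # GeniusZone._refresh branches on payload["service"].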
hass.services.async_register(
DOMAIN, SVC_SET_ZONE_OVERRIDE, set_zone_mode, schema=SET_ZONE_OVERRIDE_SCHEMA
)
class GeniusBroker:
"""Container for geniushub client and data."""
def __init__(self, hass, client, hub_uid) -> None:
"""Initialize the geniushub client."""
self.hass = hass
self.client = client
self._hub_uid = hub_uid
self._connect_error = False
@property
    def hub_uid(self) -> str:
"""Return the Hub UID (MAC address)."""
# pylint: disable=no-member
return self._hub_uid if self._hub_uid is not None else self.client.uid
async def async_update(self, now, **kwargs) -> None:
"""Update the geniushub client's data."""
try:
await self.client.update()
if self._connect_error:
self._connect_error = False
_LOGGER.info("Connection to geniushub re-established")
except (
aiohttp.ClientResponseError,
aiohttp.client_exceptions.ClientConnectorError,
) as err:
if not self._connect_error:
self._connect_error = True
_LOGGER.error(
"Connection to geniushub failed (unable to update), message is: %s",
err,
)
return
self.make_debug_log_entries()
async_dispatcher_send(self.hass, DOMAIN)
def make_debug_log_entries(self) -> None:
"""Make any useful debug log entries."""
# pylint: disable=protected-access
_LOGGER.debug(
"Raw JSON: \n\nclient._zones = %s \n\nclient._devices = %s",
self.client._zones,
self.client._devices,
)
class GeniusEntity(Entity):
"""Base for all Genius Hub entities."""
def __init__(self) -> None:
"""Initialize the entity."""
self._unique_id = self._name = None
async def async_added_to_hass(self) -> None:
"""Set up a listener when this entity is added to HA."""
self.async_on_remove(async_dispatcher_connect(self.hass, DOMAIN, self._refresh))
async def _refresh(self, payload: Optional[dict] = None) -> None:
"""Process any signals."""
self.async_schedule_update_ha_state(force_refresh=True)
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the geniushub entity."""
return self._name
@property
def should_poll(self) -> bool:
"""Return False as geniushub entities should not be polled."""
return False
class GeniusDevice(GeniusEntity):
"""Base for all Genius Hub devices."""
def __init__(self, broker, device) -> None:
"""Initialize the Device."""
super().__init__()
self._device = device
self._unique_id = f"{broker.hub_uid}_device_{device.id}"
self._last_comms = self._state_attr = None
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
attrs = {}
attrs["assigned_zone"] = self._device.data["assignedZones"][0]["name"]
if self._last_comms:
attrs["last_comms"] = self._last_comms.isoformat()
state = dict(self._device.data["state"])
if "_state" in self._device.data: # only via v3 API
state.update(self._device.data["_state"])
attrs["state"] = {
GH_DEVICE_ATTRS[k]: v for k, v in state.items() if k in GH_DEVICE_ATTRS
}
return attrs
async def async_update(self) -> None:
"""Update an entity's state data."""
if "_state" in self._device.data: # only via v3 API
self._last_comms = dt_util.utc_from_timestamp(
self._device.data["_state"]["lastComms"]
)
class GeniusZone(GeniusEntity):
"""Base for all Genius Hub zones."""
def __init__(self, broker, zone) -> None:
"""Initialize the Zone."""
super().__init__()
self._zone = zone
self._unique_id = f"{broker.hub_uid}_zone_{zone.id}"
async def _refresh(self, payload: Optional[dict] = None) -> None:
"""Process any signals."""
if payload is None:
self.async_schedule_update_ha_state(force_refresh=True)
return
if payload["unique_id"] != self._unique_id:
return
if payload["service"] == SVC_SET_ZONE_OVERRIDE:
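            # round the requested setpoint to one decimal place (0.1 degree steps)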
temperature = round(payload["data"][ATTR_TEMPERATURE] * 10) / 10
duration = payload["data"].get(ATTR_DURATION, timedelta(hours=1))
await self._zone.set_override(temperature, int(duration.total_seconds()))
return
mode = payload["data"][ATTR_ZONE_MODE]
# pylint: disable=protected-access
if mode == "footprint" and not self._zone._has_pir:
raise TypeError(
f"'{self.entity_id}' can not support footprint mode (it has no PIR)"
)
await self._zone.set_mode(mode)
@property
def name(self) -> str:
"""Return the name of the climate device."""
return self._zone.name
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
status = {k: v for k, v in self._zone.data.items() if k in GH_ZONE_ATTRS}
return {"status": status}
class GeniusHeatingZone(GeniusZone):
"""Base for Genius Heating Zones."""
def __init__(self, broker, zone) -> None:
"""Initialize the Zone."""
super().__init__(broker, zone)
self._max_temp = self._min_temp = self._supported_features = None
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._zone.data.get("temperature")
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._zone.data["setpoint"]
@property
    def min_temp(self) -> float:
        """Return min valid temperature that can be set."""
return self._min_temp
@property
def max_temp(self) -> float:
"""Return max valid temperature that can be set."""
return self._max_temp
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the bitmask of supported features."""
return self._supported_features
async def async_set_temperature(self, **kwargs) -> None:
"""Set a new target temperature for this zone."""
await self._zone.set_override(
kwargs[ATTR_TEMPERATURE], kwargs.get(ATTR_DURATION, 3600)
)
| |
import matplotlib.pyplot as plt
import matplotlib as mpl
import subprocess as sub
import pandas as pd
import numpy as np
import sys
import re
import os
import scipy.stats as st
import random as rnd
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import ConnectionPatch
from matplotlib.patches import Rectangle
import scipy.interpolate as interpol
#
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
# rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# #
#
mpl.rcParams['text.latex.preamble'] = [
r'\usepackage{textcomp}', # i need upright \micro symbols, but you need...
# r'\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
r'\usepackage{helvet}', # set the normal font here
r'\usepackage{sansmath}', # load up the sansmath so that math -> helvet
r'\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
#
#
font = {#'family' : 'sans-serif',
#'weight' : 'bold',
'size' :9}
rc('font', **font)
# # data loading ...
# constants ...
# ALPHABET=20
# amino acis alphabet ...
# aacids = data.columns.values[:ALPHABET]
aacids = sorted('CMFILVWYAGTSNQDEHRKP')
#############
#############################################################################
kelly_colors_hex = [
u'#FFB300', # Vivid Yellow
u'#803E75', # Strong Purple
u'#FF6800', # Vivid Orange
u'#A6BDD7', # Very Light Blue
u'#C10020', # Vivid Red
u'#CEA262', # Grayish Yellow
u'#817066', # Medium Gray
u'#007D34', # Vivid Green
u'#F6768E', # Strong Purplish Pink
u'#00538A', # Strong Blue
u'#FF7A5C', # Strong Yellowish Pink
u'#53377A', # Strong Violet
u'#FF8E00', # Vivid Orange Yellow
u'#B32851', # Strong Purplish Red
u'#F4C800', # Vivid Greenish Yellow
u'#7F180D', # Strong Reddish Brown
u'#93AA00', # Vivid Yellowish Green
u'#593315', # Deep Yellowish Brown
u'#F13A13', # Vivid Reddish Orange
u'#232C16' # Dark Olive Green
]
#############################################################################
cols = kelly_colors_hex
#############
# getting file name based on criteria combination ...
def get_exp_fname(cds_criteria,org_criteria,kingdom):
if kingdom in ['arch','archaea']:
return "exp_MTAD_%s_%s.arch.summary"%(cds_criteria,org_criteria)
elif kingdom in ['bact','bacteria']:
return "exp_MTAD_%s_%s.bact.summary"%(cds_criteria,org_criteria)
else:
raise TypeError('only archaeal and bacterial kingdoms are supported!')
#######################################################################################################
# TODO: GENERATE APPROPRIATE FILES HERE ...
if len(sys.argv) <= 3:
    raise TypeError("Use command line arguments to specify CDS criteria, organismal criteria and kingdom!")
else:
the_combination = {'cds_criteria':sys.argv[1],'org_criteria':sys.argv[2]}
the_kingdom = sys.argv[3]
#
exp_fname = get_exp_fname(the_combination['cds_criteria'],the_combination['org_criteria'],the_kingdom)
#
data_exp = pd.read_csv(exp_fname,index_col=0)
# loaded ...
# give em names, as I figured, indexes and column names are awesome things ...
exp_T = data_exp['exp_T']
exp_M = data_exp['exp_M']
exp_A = data_exp['exp_A']
exp_D = data_exp['exp_D']
################################################
# WHERE RESULTS (PICTURES) SHOULD GO ...
################################################
results_path = '.'
################################################
#####################################################
# SIMULATION DATA ...
# PATH to simulations data ...
# protein_design/the_simulations
simul_path = os.path.join(os.path.expanduser('~'),
"Dropbox (UMASS MED - BIB)",
"protein_design",
"the_simulations",
"Correct_MJ99_Argentina_PUBLICATION")
# data files and their names ...
shuffle_fname = os.path.join(simul_path,"shuffled.dat")
shuffle_slopes_fname = os.path.join(simul_path,"shuffled_slopes.dat")
# loading simul data ...
data_sorted_D = pd.read_csv(shuffle_slopes_fname)
data_sorted_A = pd.read_csv(shuffle_fname)
# loaded ...
########################################################
##################################
### COST VECTORS LOADING ###
##################################
# cost vectors loading ...
cost_vec_path = '.'
akashi = os.path.join(cost_vec_path,'akashi-cost.d')
argentina = os.path.join(cost_vec_path,'argentina-cost.d')
##################
akashi_cost = pd.read_csv(akashi,header=None,sep=' ')
argentina_cost = pd.read_csv(argentina,header=None,sep=' ')
##################
akashi_cost.set_index(0,inplace=True)
argentina_cost.set_index(0,inplace=True)
# loaded ...
#######################################################################################################
######################################################################################
######################################################################################
######################################################################################
######################################################################################
plt.clf()
x_fig_size = 7.3
v_coeff = 0.45
fig = plt.figure(figsize=(x_fig_size,v_coeff*x_fig_size))
# between axes ...
hor_space = 0.07
# axes info ...
left = 0.06
bottom = 0.12
width = 0.5*(0.9 - left - hor_space)
height = 0.98 - bottom
# bottom axes
ax_left = plt.axes([left, bottom, width, height])
left += (width + hor_space)
# top axes
ax_right = plt.axes([left, bottom, width, height])
#
#
#
###############################################################
###############################################################
###############################################################
###############################################################
# shuff histo ...
t_regime = 'A'
mod = 64
# wcf = 0.02
data_sorted_D = data_sorted_D[data_sorted_D.Temp<1.6]
# #
# ##
# ###
# #####
# #######
# #######
rmin,rmax,num = -1.0,1.0,25
bins = np.linspace(rmin,rmax,num=num)
#########
for wcf,df_w in data_sorted_D.groupby('W_coeff'):
corrs_w = []
for shuf,df in df_w.groupby('Shuff'):
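        # OLS slope of each amino-acid fraction vs temperature: cov(x, T) / var(T)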
sim_D = df[aacids].apply(lambda x: x.cov(df.Temp)/df.Temp.var())
corrs_w.append(exp_D.corr(sim_D))
if len(corrs_w)!= 100:
print "something went wrong: number of shuffles isn't 100!"
sys.exit(1)
# get p value ...
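    # empirical p-value: corrs_w[0] is the unshuffled (predicted) correlation,
    # the rest are shuffled controls, so p = fraction of controls that beat it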
false_cases_num = sum(1 for cw in corrs_w[1:] if cw>corrs_w[0])
pvalue = false_cases_num*1.0/len(corrs_w)
#
################
# do plotting ...
################
if wcf == 0.06:
print "plotting right plot: slopes"
ax_right.hist(corrs_w, bins=bins,ec='none',normed=False, label='shuffled')
width = (rmax-rmin)/(num-1)
wt_bin_num = int((corrs_w[0]-rmin)/width)
ax_right.bar([wt_bin_num*width+rmin,],[1.0,],width=width,color='red',edgecolor='None',label='predicted')
# ax_right.bar([corrs_w[0]-0.5*width,],[1.0,],width=width,color='red',edgecolor='None',label='predicted')
ax_right.set_xlim((rmin,rmax))
ax_right.yaxis.set_ticks_position('left')
ax_right.xaxis.set_ticks_position('bottom')
ax_right.set_xlabel(r"$R_D$, slopes correlation")
f_title = lambda pval: (r"p=%.3f"%pval) if (pval>=0.001) else r"p\textless0.001"
leg_right = ax_right.legend(loc='upper right',frameon=False, title=f_title(pvalue))
# ax_right.set_title('$w$=%.2f'%wcf)
# ax_right.text(0.1,0.87,r'$w=%.2f$'%wcf,transform=ax_right.transAxes)
#
for legend_item in leg_right.get_patches():
legend_item.set_edgecolor('none')
# plt.title("Hist_mod%d_wcf%.3f_T%.1f.new_shuff2.pdf"%(mod,wcf,3.00))
# fig.savefig(os.path.join(results_path,"Hist_%s_mod%d_wcf%.3f.new_shuff2.pdf"%(t_regime,mod,wcf)))
else:
pass
for wcf,df in data_sorted_A.groupby('W_coeff'):
# shuff_at_wcf = data_sorted[data_sorted.W_coeff==wcf]
data_shuff_T = df[aacids].transpose()
# data_shuff_T_AA = data_shuff_T[:20]
cors = data_shuff_T.convert_objects(convert_numeric=True).corrwith(exp_A)
##################
# get p value ...
false_cases_num = sum(1 for cw in cors.values[1:] if cw>cors.values[0])
pvalue = false_cases_num*1.0/len(cors.values)
#
##################
# print wcf
if (cors.shape[0]>1)and(wcf==0.06):
print "plotting left plot: composition"
ax_left.hist(cors.values, bins=bins,ec='none',normed=False,label='shuffled')
ax_left.set_xlim((rmin,rmax))
ax_left.yaxis.set_ticks_position('left')
ax_left.xaxis.set_ticks_position('bottom')
#
width = (rmax-rmin)/(num-1)
wt_bin_num = int((cors.values[0]-rmin)/width)
ax_left.bar([wt_bin_num*width+rmin,],[1.0,],width=width,color='red',edgecolor='None',label='predicted')
f_title = lambda pval: (r"p=%.3f"%pval) if (pval>=0.001) else r"p\textless0.001"
leg_left = ax_left.legend(loc='upper right',frameon=False,title=f_title(pvalue))
ax_left.set_xlabel(r"$R_A$, composition correlation")
# ax_left.text(0.1,0.87,r'$w=%.2f$'%wcf,transform=ax_left.transAxes)
#
# set the same y limits ...
counts_max_right = ax_right.get_ylim()[1]
counts_max_left = ax_left.get_ylim()[1]
ax_left.set_ylim( ( 0,max(counts_max_right,counts_max_left) ) )
ax_right.set_ylim( ( 0,max(counts_max_right,counts_max_left) ) )
# #
#
for legend_item in leg_left.get_patches():
legend_item.set_edgecolor('none')
#
#
ax_left.set_ylabel('histogram counts')
#
#
ax_right.yaxis.set_tick_params(labelleft='off')
ax_left.yaxis.set_tick_params(labelright='off')
#
#
#
# ax_left.text(0.05,0.87,'A',fontweight='bold',fontsize=18,transform=ax_left.transAxes)
# ax_right.text(0.05,0.87,'B',fontweight='bold',fontsize=18,transform=ax_right.transAxes)
#
#
#
ax_left.tick_params(axis='x',which='both',top='off',bottom='on',pad=3)
ax_left.tick_params(axis='y',which='both',left='on',right='off',pad=3)
#
ax_right.tick_params(axis='x',which='both',top='off',bottom='on',pad=3)
ax_right.tick_params(axis='y',which='both',left='on',right='off',pad=3)
#
# # plt.legend(loc='best')
# # plt.title("Hist_mod%d_wcf%.3f_T%.1f.new_shuff2.pdf"%(mod,wcf,3.00))
# fig.savefig(os.path.join(results_path,"T_Hist_%s_mod%d_wcf%.3f.new_shuff2.pdf"%(t_regime,mod,wcf)))
else:
pass
fig.savefig(os.path.join(results_path,'%s_MainFig6.png'%exp_fname),dpi=300)
##############################
ra_wcf_df = {}
for wcf,df in data_sorted_A.groupby('W_coeff'):
data_shuff_T = df[aacids].transpose()
cors = data_shuff_T.convert_objects(convert_numeric=True).corrwith(exp_A)
##################
cors.reset_index(drop=True,inplace=True)
ra_wcf_df[wcf] = cors
######################
ra_wcf_df = pd.DataFrame(ra_wcf_df)
ra_wcf_df = ra_wcf_df.transpose()
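# after the transpose: rows are W_coeff values, one column per shuffle
# (column 0 being the unshuffled prediction, as above)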
#######################
#######################
plt.clf()
x_fig_size = 7.3
v_coeff = 0.45
fig = plt.figure(figsize=(x_fig_size,v_coeff*x_fig_size))
# between axes ...
hor_space = 0.09
# axes info ...
left = 0.1
bottom = 0.12
width = 0.5*(0.98 - left - hor_space)
height = 0.98 - bottom
# bottom axes
ax_left = plt.axes([left, bottom, width, height])
left += (width + hor_space)
# top axes
ax_right = plt.axes([left, bottom, width, height])
#######################
#######################
########
####
col_ff_log = lambda x,xmin,xmax: pd.np.log(x/xmin)/pd.np.log(xmax/xmin)
col_ff_lin = lambda x,xmin,xmax: (x-xmin)/(xmax-xmin)+0.1
# col_ff_sq = lambda x,xmin,xmax: (x**2-xmin**2)/(xmax**2-xmin**2)
col_ff_sq = lambda x,xmin,xmax: pd.np.sqrt(x-xmin)/pd.np.sqrt(xmax-xmin)
col_ff_pow = lambda x,xmin,xmax: pd.np.power((x-xmin),0.35)/pd.np.power((xmax-xmin),0.3)
col_norm = mpl.colors.Normalize(vmin=-0.03,vmax=0.12)
##
# left panel,
ramin,ramax = (lambda x: (x.max().min(),x.max().max()))(ra_wcf_df)
shuffs = range(100)
rnd.shuffle(shuffs)
for shuff in shuffs:
ra_max = ra_wcf_df[shuff].max()
wcf_max = ra_wcf_df[shuff].argmax()
ax_left.plot(ra_wcf_df.index,ra_wcf_df[shuff],'o-',color=plt.cm.hot_r(col_norm(wcf_max)),mew=0)
#
#
ax_left.set_ylim((-1,1))
ax_left.set_xlim((-0.005,0.125))
ax_left.set_ylabel(r'$R_A$, correlation coefficient')
ax_left.set_xlabel(r'$w$, cost adjustment parameter')
#
#
#
# right panel,
# w_range = (lambda x: (x.min(),x.max()) )(ra_wcf_df.index)
# bins = pd.np.linspace(w_range[0],w_range[1],ra_wcf_df.index.size)
bins = pd.np.arange(0,0.14,0.02)-0.01
#
ax_right.hist(ra_wcf_df.apply(lambda x: x.argmax()),bins=bins,edgecolor='deepskyblue',color='dodgerblue')
ax_right.set_xlim((bins[0],bins[-1]))
ax_right.set_ylabel('histogram counts')
ax_right.set_xlabel(r'$w^*$, optimal parameter')
ax_right.set_ylim((0,43))
ax_right.yaxis.set_major_locator( MaxNLocator(nbins = 5) )
cax = fig.add_axes([0.11,0.26,0.3,0.04])
cmap = mpl.cm.hot_r
# norm = mpl.colors.Normalize(vmin=5, vmax=10)
cbar = mpl.colorbar.ColorbarBase(cax, cmap=mpl.cm.hot_r, norm=col_norm, boundaries = [0,0.02,0.04,0.06,0.08,0.1,0.12,0.14],orientation='horizontal')
cbar.set_ticks(pd.np.asarray([0.0,0.02,0.04,0.06,0.08,0.1,0.12])+0.01)
cbar.set_ticklabels(['0.0','0.02','0.04','0.06','0.08','0.1','0.12'])
cbar.set_label(r'$w^*$, optimal parameter',labelpad=1)
# cbar.set_clim(vmin=0.02,vmax=0.12)
# ax_right.yaxis.set_tick_params(labelleft='off')
# ax_left.yaxis.set_tick_params(labelright='off')
# cax.xaxis.set_tick_params
cax.tick_params(axis='x',which='both',top='on',bottom='on',length=2.5,labelsize=7.5)
ax_left.tick_params(axis='x',which='both',top='off',bottom='on')
ax_left.tick_params(axis='y',which='both',right='off',left='on')
#
ax_right.tick_params(axis='x',which='both',top='off',bottom='on')
ax_right.tick_params(axis='y',which='both',right='off',left='on')
# ax_left.text(0.031,0.925,'A',transform=ax_left.transAxes,fontsize=14,fontweight='bold')
# ax_right.text(0.031,0.925,'B',transform=ax_right.transAxes,fontsize=14,fontweight='bold')
fig.savefig(os.path.join(results_path,'%s_SuppFig3.png'%exp_fname),dpi=300)
# ######################################################################################
# ######################################################################################
# ######################################################################################
# ######################################################################################
# # SHUFFLED RESULTS FOR SUPPLEMENT ...
# ######################################################################################
# ######################################################################################
# ######################################################################################
# ######################################################################################
# plt.clf()
# x_fig_size = 7.3
# v_coeff = 0.45
# fig = plt.figure(figsize=(x_fig_size,v_coeff*x_fig_size))
# # between axes ...
# hor_space = 0.07
# # axes info ...
# left = 0.06
# bottom = 0.12
# width = 0.5*(0.9 - left - hor_space)
# height = 0.98 - bottom
# # bottom axes
# ax_left = plt.axes([left, bottom, width, height])
# left += (width + hor_space)
# # top axes
# ax_right = plt.axes([left, bottom, width, height])
# #
# #
# #
# ###############################################################
# ###############################################################
# ###############################################################
# ###############################################################
# # shuff histo ...
# t_regime = 'A'
# mod = 64
# #
# #
# #
# #
# # wcf = 0.02
# #
# #
# #
# #
# for wcf,df in data_sorted_A.groupby('W_coeff'):
# # shuff_at_wcf = data_sorted[data_sorted.W_coeff==wcf]
# data_shuff_T = df[aacids].transpose()
# # data_shuff_T_AA = data_shuff_T[:20]
# cors = data_shuff_T.convert_objects(convert_numeric=True).corrwith(exp_T)
# ##################
# ##################
# # print wcf
# #
# #
# #
# #
# #
# #
# if (cors.shape[0]>1)and(wcf==0.06):
# print "plotting left plot: composition"
# ax_left.hist(cors.values, bins=bins,ec='none',normed=False,label='shuffled')
# ax_left.set_xlim((rmin,rmax))
# ax_left.yaxis.set_ticks_position('left')
# ax_left.xaxis.set_ticks_position('bottom')
# #
# width = (rmax-rmin)/(num-1)
# wt_bin_num = int((cors.values[0]-rmin)/width)
# ax_left.bar([wt_bin_num*width+rmin,],[1.0,],width=width,color='red',edgecolor='None',label='predicted')
# leg_left = ax_left.legend(loc='best',frameon=False)
# ax_left.set_xlabel("$R_{A}$, composition correlation")
# ax_left.text(0.13,0.87,'($w$=%.2f)'%wcf,transform=ax_left.transAxes)
# #
# # set the same y limits ...
# counts_max_right = ax_right.get_ylim()[1]
# counts_max_left = ax_left.get_ylim()[1]
# ax_left.set_ylim( ( 0,max(counts_max_right,counts_max_left) ) )
# ax_right.set_ylim( ( 0,max(counts_max_right,counts_max_left) ) )
# # #
# #
# for legend_item in leg_left.get_patches():
# legend_item.set_edgecolor('none')
# #
# #
# ax_left.set_ylabel('histogram counts')
# #
# #
# ax_right.yaxis.set_tick_params(labelleft='off')
# ax_left.yaxis.set_tick_params(labelright='off')
# #
# #
# #
# ax_left.text(0.05,0.87,'A',fontweight='bold',fontsize=18,transform=ax_left.transAxes)
# ax_right.text(0.05,0.87,'B',fontweight='bold',fontsize=18,transform=ax_right.transAxes)
# #
# #
# #
# ax_left.tick_params(axis='x',which='both',top='off',bottom='on',pad=3)
# ax_left.tick_params(axis='y',which='both',left='on',right='off',pad=3)
# #
# ax_right.tick_params(axis='x',which='both',top='off',bottom='on',pad=3)
# ax_right.tick_params(axis='y',which='both',left='on',right='off',pad=3)
# #
# # # plt.legend(loc='best')
# # # plt.title("Hist_mod%d_wcf%.3f_T%.1f.new_shuff2.pdf"%(mod,wcf,3.00))
# # fig.savefig(os.path.join(results_path,"T_Hist_%s_mod%d_wcf%.3f.new_shuff2.pdf"%(t_regime,mod,wcf)))
# else:
# pass
# fig.savefig(os.path.join(results_path,'validation.pdf'))
| |
import requests
import re
import urlparse
import traceback
import feedparser
import time
import urllib2
import httplib
from socket import error as SocketError
from boto.s3.key import Key
from django.conf import settings
from django.utils.text import compress_string
from utils import log as logging
from apps.rss_feeds.models import MFeedPage
from utils.feed_functions import timelimit
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
# from utils.feed_functions import mail_feed_error_to_admin
BROKEN_PAGES = [
'tag:',
'info:',
'uuid:',
'urn:',
'[]',
]
# Also change in reader_utils.js.
BROKEN_PAGE_URLS = [
'nytimes.com',
'github.com',
'washingtonpost.com',
'stackoverflow.com',
'stackexchange.com',
'twitter.com',
'rankexploits',
]
class PageImporter(object):
def __init__(self, feed):
self.feed = feed
@property
def headers(self):
return {
'User-Agent': 'NewsBlur Page Fetcher - %s subscriber%s - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
'Safari/534.48.3)' % (
self.feed.num_subscribers,
's' if self.feed.num_subscribers != 1 else '',
self.feed.permalink,
),
}
@timelimit(15)
def fetch_page(self, urllib_fallback=False, requests_exception=None):
html = None
feed_link = self.feed.feed_link
if not feed_link:
self.save_no_page()
return
if feed_link.startswith('www'):
self.feed.feed_link = 'http://' + feed_link
try:
if any(feed_link.startswith(s) for s in BROKEN_PAGES):
self.save_no_page()
return
elif any(s in feed_link.lower() for s in BROKEN_PAGE_URLS):
self.save_no_page()
return
elif feed_link.startswith('http'):
if urllib_fallback:
request = urllib2.Request(feed_link, headers=self.headers)
response = urllib2.urlopen(request)
time.sleep(0.01) # Grrr, GIL.
data = response.read()
else:
try:
response = requests.get(feed_link, headers=self.headers)
response.connection.close()
except requests.exceptions.TooManyRedirects:
response = requests.get(feed_link)
except (AttributeError, SocketError, OpenSSLError, PyAsn1Error), e:
logging.debug(' ***> [%-30s] Page fetch failed using requests: %s' % (self.feed, e))
self.save_no_page()
return
try:
data = response.text
except (LookupError, TypeError):
data = response.content
if response.encoding and response.encoding != 'utf-8':
try:
data = data.encode(response.encoding)
except LookupError:
pass
else:
try:
data = open(feed_link, 'r').read()
except IOError:
self.feed.feed_link = 'http://' + feed_link
self.fetch_page(urllib_fallback=True)
return
if data:
html = self.rewrite_page(data)
self.save_page(html)
else:
self.save_no_page()
return
except (ValueError, urllib2.URLError, httplib.BadStatusLine, httplib.InvalidURL,
requests.exceptions.ConnectionError), e:
self.feed.save_page_history(401, "Bad URL", e)
fp = feedparser.parse(self.feed.feed_address)
feed_link = fp.feed.get('link', "")
self.feed.save()
except (urllib2.HTTPError), e:
self.feed.save_page_history(e.code, e.msg, e.fp.read())
except (httplib.IncompleteRead), e:
self.feed.save_page_history(500, "IncompleteRead", e)
except (requests.exceptions.RequestException,
requests.packages.urllib3.exceptions.HTTPError), e:
logging.debug(' ***> [%-30s] Page fetch failed using requests: %s' % (self.feed, e))
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
return self.fetch_page(urllib_fallback=True, requests_exception=e)
except Exception, e:
logging.debug('[%d] ! -------------------------' % (self.feed.id,))
tb = traceback.format_exc()
logging.debug(tb)
logging.debug('[%d] ! -------------------------' % (self.feed.id,))
self.feed.save_page_history(500, "Error", tb)
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT') and
settings.RAVEN_CLIENT):
settings.RAVEN_CLIENT.captureException()
if not urllib_fallback:
self.fetch_page(urllib_fallback=True)
else:
self.feed.save_page_history(200, "OK")
return html
def save_no_page(self):
logging.debug(' ---> [%-30s] ~FYNo original page: %s' % (self.feed, self.feed.feed_link))
self.feed.has_page = False
self.feed.save()
self.feed.save_page_history(404, "Feed has no original page.")
def rewrite_page(self, response):
BASE_RE = re.compile(r'<head(.*?\>)', re.I)
base_code = u'<base href="%s" />' % (self.feed.feed_link,)
try:
html = BASE_RE.sub(r'<head\1 '+base_code, response)
except:
response = response.decode('latin1').encode('utf-8')
html = BASE_RE.sub(r'<head\1 '+base_code, response)
if '<base href' not in html:
html = "%s %s" % (base_code, html)
# html = self.fix_urls(html)
return html.strip()
def fix_urls(self, document):
# BEWARE: This will rewrite URLs inside of <script> tags. You know, like
# Google Analytics. Ugh.
FIND_RE = re.compile(r'\b(href|src)\s*=\s*("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
ret = []
last_end = 0
for match in FIND_RE.finditer(document):
url = match.group(2)
if url[0] in "\"'":
url = url.strip(url[0])
parsed = urlparse.urlparse(url)
if parsed.scheme == parsed.netloc == '': #relative to domain
url = urlparse.urljoin(self.feed.feed_link, url)
ret.append(document[last_end:match.start(2)])
ret.append('"%s"' % (url,))
last_end = match.end(2)
ret.append(document[last_end:])
return ''.join(ret)
def save_page(self, html):
saved = False
if not html or len(html) < 100:
return
if settings.BACKED_BY_AWS.get('pages_on_node'):
saved = self.save_page_node(html)
if saved and self.feed.s3_page and settings.BACKED_BY_AWS.get('pages_on_s3'):
self.delete_page_s3()
if settings.BACKED_BY_AWS.get('pages_on_s3') and not saved:
saved = self.save_page_s3(html)
if not saved:
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
feed_page.page_data = html
feed_page.save()
except MFeedPage.DoesNotExist:
feed_page = MFeedPage.objects.create(feed_id=self.feed.pk, page_data=html)
return feed_page
def save_page_node(self, html):
url = "http://%s/original_page/%s" % (
settings.ORIGINAL_PAGE_SERVER,
self.feed.pk,
)
response = requests.post(url, files={
'original_page': compress_string(html),
})
if response.status_code == 200:
return True
def save_page_s3(self, html):
k = Key(settings.S3_PAGES_BUCKET)
k.key = self.feed.s3_pages_key
k.set_metadata('Content-Encoding', 'gzip')
k.set_metadata('Content-Type', 'text/html')
k.set_metadata('Access-Control-Allow-Origin', '*')
k.set_contents_from_string(compress_string(html))
k.set_acl('public-read')
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
feed_page.delete()
logging.debug(' ---> [%-30s] ~FYTransfering page data to S3...' % (self.feed))
except MFeedPage.DoesNotExist:
pass
if not self.feed.s3_page:
self.feed.s3_page = True
self.feed.save()
return True
def delete_page_s3(self):
k = Key(settings.S3_PAGES_BUCKET)
k.key = self.feed.s3_pages_key
k.delete()
self.feed.s3_page = False
self.feed.save()
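# ---------------------------------------------------------------------------
# Minimal sketch (not used in production) of the two rewriting steps above.
# The sample HTML and feed URL are hypothetical; only the regex and urljoin
# logic mirror rewrite_page() and fix_urls().
def _demo_page_rewriting():
    base_re = re.compile(r'<head(.*?\>)', re.I)
    feed_link = 'http://example.com/blog/'
    base_code = u'<base href="%s" />' % feed_link
    # rewrite_page() injects a <base> tag right after the opening <head>:
    html = base_re.sub(r'<head\1 ' + base_code,
                       '<head><title>t</title></head><body>x</body>')
    assert '<base href="http://example.com/blog/"' in html
    # fix_urls() resolves scheme-less, host-less references against the feed:
    resolved = urlparse.urljoin(feed_link, 'img/logo.png')
    assert resolved == 'http://example.com/blog/img/logo.png'
    return html, resolved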
| |
# (c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Anton Goloborodko (golobor@mit.edu)
'''
This module contains the functions that map raw DNA sequences obtained in
the Hi-C experiment to a supplied genome.
The three main methods of this module are iterative_mapping, parse_sam and
fill_rsites.
The first, iterative_mapping(), applies the bowtie2 read alignment software to
the raw reads from the sequencer. The second, parse_sam(), parses
the bowtie2 output, combines individual reads into pairs and converts the data
into the internal format that may be fed to the downstream functions. Finally,
fill_rsites() maps the sequences onto the restriction fragments.
-------------------------------------------------------------------------------
API Documentation
-----------------
'''
import os
import re
import glob
import subprocess
import tempfile
import logging
import warnings
import numpy as np
import Bio.Seq
import Bio.Restriction
import pysam
import time
import gc
import mirnylib.h5dict
import mirnylib.genome
from mirnylib.systemutils import commandExists, gzipWriter
# #TODO: write some autodetection of chromosome lengths based on the genome folder
# #TODO: throw an exception if no chromosomes found in chromosome folder
# #TODO: fix #-to-ID correspondence for other species.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
MIN_MAPQ = 31
def readIsUnmapped(read):
if (read.mapq < MIN_MAPQ):
return True
# Skip non-uniquely aligned.
for tag in read.tags:
if tag[0] == 'XS':
return True
return False
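# Minimal sketch (the BAM path is hypothetical) of using readIsUnmapped() to
# count the uniquely mapped reads in an alignment file; this is the same
# filter the pipeline below applies between iterations.
def _count_unique_reads(bam_path):
    samfile = pysam.Samfile(bam_path)  # @UndefinedVariable
    return sum(1 for read in samfile if not readIsUnmapped(read))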
print "hello from new mapping"
def sleep():
"""sleep for a second, run garbage collector, sleep again.
Sleep is split in small pieces to allow some callbacks to
possibly terminate in between (I don't know if it makes sense, but
it definitely does not hurt)"""
for _ in range(3):
time.sleep(0.1)
gc.collect()
for _ in range(3):
time.sleep(0.1)
def splitSRA(filename, outFile="auto", splitBy=4000000, FASTQ_BINARY="./fastq-dump", FASTQ_ARGS=[]):
inFile = os.path.abspath(filename)
if outFile == "auto":
outFile = filename.replace(".sra", "") + "_{0}_side{1}.fastq.gz"
pread = subprocess.Popen([FASTQ_BINARY, inFile, "-Z", "--split-files"] + FASTQ_ARGS ,
stdout=subprocess.PIPE, bufsize=-1)
inStream = pread.stdout
halted = False
for counter in xrange(1000000):
outProc1 = gzipWriter(outFile.format(counter, 1))
outProc2 = gzipWriter(outFile.format(counter, 2))
outStream1 = outProc1.stdin
outStream2 = outProc2.stdin
for _ in xrange(splitBy):
line = inStream.readline()
try:
assert line[0] == "@"
except AssertionError:
print 'Not fastq'
raise IOError("File is not fastq: {0}".format(filename))
except IndexError:
halted = True
break
fastq_entry = (line, inStream.readline(),
inStream.readline(), inStream.readline())
outStream1.writelines(fastq_entry)
outStream2.writelines((inStream.readline(), inStream.readline(),
inStream.readline(), inStream.readline()))
outProc1.communicate()
outProc2.communicate()
print "finished block number", counter
if halted:
return
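# Minimal sketch of the 4-line FASTQ record convention that splitSRA() above
# and _filter_fastq() below rely on: header ('@...'), sequence, separator
# ('+...'), qualities. The stream argument is any file-like object.
def _read_fastq_record(stream):
    header = stream.readline()
    if not header:
        return None  # end of stream
    assert header[0] == '@', 'not a FASTQ header: %r' % header
    return (header, stream.readline(), stream.readline(), stream.readline())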
def _detect_quality_coding_scheme(in_fastq, num_entries=10000):
in_file = open(in_fastq)
max_ord = 0
min_ord = 256
i = 0
while True:
line = in_file.readline()
if not line or i > num_entries:
break
if not line.startswith('@'):
            raise Exception('%s does not comply with the FASTQ standards.' % in_fastq)
fastq_entry = [line, in_file.readline(),
in_file.readline(), in_file.readline()]
min_ord = min(min_ord, min(ord(j) for j in fastq_entry[3].strip()))
max_ord = max(max_ord, max(ord(j) for j in fastq_entry[3].strip()))
i += 1
return min_ord, max_ord
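# A common interpretation of the (min_ord, max_ord) pair returned above; this
# heuristic is an illustration, not part of the original module. Phred+33
# qualities start at ord('!') == 33, while Phred+64 (and Solexa+64) qualities
# never go below ord(';') == 59, so a minimum under 59 indicates Phred+33.
def _guess_quality_offset(min_ord, max_ord):
    return 33 if min_ord < 59 else 64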
def _line_count(path):
'''Count the number of lines in a file. The function was posted by
Mikola Kharechko on Stackoverflow.
'''
f = open(path)
lines = 0
buf_size = 1024 * 1024
read_f = f.read # loop optimization
buf = read_f(buf_size)
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
def _filter_fastq(ids, inStream, out_fastq, in_filename="none"):
'''Filter FASTQ sequences by their IDs.
    Read entries from **inStream** and store in **out_fastq** only those
    whose IDs are in **ids**.
'''
writingProcess = gzipWriter(out_fastq)
num_filtered = 0
num_total = 0
while True:
line = inStream.readline()
try:
assert line[0] == '@'
except AssertionError:
print 'Not fastq'
except IndexError:
break
# raise Exception('{0} does not comply with the FASTQ standards.'.format(in_filename))
fastq_entry = (line, inStream.readline(),
inStream.readline(), inStream.readline())
read_id = line.split()[0][1:]
if read_id in ids:
writingProcess.stdin.writelines(fastq_entry)
num_filtered += 1
num_total += 1
sleep()
writingProcess.communicate()
if writingProcess.returncode != 0:
raise RuntimeError("Writing process return code {0}".format(writingProcess.returncode))
return num_total, num_filtered
def _filter_unmapped_fastq(in_stream, in_sam, nonunique_fastq, in_filename="none"):
    '''Read raw sequences from **in_stream** and alignments from
    **in_sam**, and save the non-uniquely aligned and unmapped sequences
    to **nonunique_fastq**.
'''
samfile = pysam.Samfile(in_sam) # @UndefinedVariable
nonunique_ids = set()
for read in samfile:
if readIsUnmapped(read):
nonunique_ids.add(read.qname)
num_total, num_filtered = _filter_fastq(
nonunique_ids, in_stream, nonunique_fastq, in_filename=in_filename)
sleep()
return num_total, num_filtered
def iterative_mapping(bowtie_path, bowtie_index_path, fastq_path, out_sam_path,
min_seq_len, len_step, **kwargs):
'''Map raw HiC reads iteratively with bowtie2.
http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml
Iterative mapping accounts for the modification of fragments' sequences
due to ligation.
The algorithm of iterative correction:
1. Truncate the sequences to the first N = **min_seq_len** base pairs,
starting at the **seq_start** position.
2. Map the sequences using bowtie2.
3. Store the uniquely mapped sequences in a SAM file at **out_sam_path**.
4. Go to the step 1, increase the truncation length N by **len_step** base
pairs, and map the non-mapped and non-uniquely mapped sequences,
...
Stop when the 3' end of the truncated sequence reaches the **seq_end**
position.
Parameters
----------
bowtie_path : str
The path to the bowtie2 executable.
bowtie_index_path : str
The path to the bowtie2 genome index. Since the index consists of
        several files with different suffixes (e.g., hg18.1.bt2,
        hg18.2.bt2), provide only the common part (hg18).
fastq_path : str
The path to the input FASTQ or gzipped FASTQ file.
out_sam_path : str
The path to the output SAM file. If ends with .bam, then the output
is converted to BAM.
min_seq_len : int
The truncation length at the first iteration of mapping.
len_step : int
The increase in truncation length at each iteration.
seq_start, seq_end : int, optional
        Slice the FASTQ sequences at [seq_start:seq_end]. Default is [0:None].
nthreads : int, optional
        The number of Bowtie2 threads. Default is 4.
bowtie_flags : str, optional
Extra command-line flags for Bowtie2. Default is ''.
temp_dir : str, optional
The path to the temporary folder. If not specified, this path is
supplied by the OS.
bash_reader : str, optional
        A bash application to convert the input to the FASTQ format. The
        application has to write its output to stdout.
        The default value is None, in which case the app is autodetected from
        the extension (i.e. cat for .fastq, gunzip for .gz).
drop_sequences : bool, optional
        If True, then drop the columns with sequences and PHRED qualities
from bowtie2 .sam and .bam outputs. Use to save disk space.
True by default.
'''
bowtie_path = os.path.abspath(os.path.expanduser(bowtie_path))
if not os.path.isfile(bowtie_path):
raise Exception(
'The bowtie binary is not found '
'at the specified path: {0}.'.format(bowtie_path))
bowtie_index_path = os.path.abspath(os.path.expanduser(bowtie_index_path))
fastq_path = os.path.abspath(os.path.expanduser(fastq_path))
if not os.path.isfile(fastq_path):
raise Exception(
'The fastq file is not found '
'at the specified path: {0}.'.format(fastq_path))
already_mapped = kwargs.get('already_mapped', [])
out_sam_path = os.path.abspath(os.path.expanduser(out_sam_path))
seq_start = kwargs.get('seq_start', 0)
seq_end = kwargs.get('seq_end', None)
nthreads = kwargs.get('nthreads', 4)
bowtie_flags = kwargs.get('bowtie_flags', '')
if subprocess.call(['which', 'samtools']) != 0:
        raise Exception('samtools is not installed!')
# Check for a typo from the publication.
assert re.search('--score-min(\s+)-L', bowtie_flags) is None, (
'The flag --score-min -L 0.6,0.2 in the original publication was a typo. '
'The correct notation is --score-min L ... . Please fix the supplied '
'flags.')
temp_dir = os.path.abspath(os.path.expanduser(
kwargs.get('temp_dir', tempfile.gettempdir())))
if not os.path.isdir(temp_dir):
os.makedirs(temp_dir)
bash_reader = kwargs.get('bash_reader', None)
if bash_reader is None:
extension = fastq_path.split('.')[-1].lower()
if extension == 'gz':
if commandExists("pigz"):
bash_reader = "pigz -dc"
else:
bash_reader = 'gunzip -c'
else:
bash_reader = 'cat'
else:
if not commandExists(bash_reader):
bash_reader = os.path.abspath(os.path.expanduser(bash_reader))
if not os.path.isfile(bash_reader.split()[0]):
raise Exception(
'The bash reader is not found '
'at the specified location {0}.'.format(bash_reader))
reading_command = bash_reader.split() + [fastq_path, ]
# If bash reader is not 'cat', convert file to FASTQ first and
# run iterative_mapping recursively on the converted file.
if kwargs.get('drop_sequences', True):
drop_seqs_command = ['awk',
"""{OFS="\\t"; if ($1 ~ !/^@/) { $10="A"; $11="g"; if ($3 ~ /\\*/) $6="*"; else $6="1M"; } print}"""]
else:
drop_seqs_command = []
output_is_bam = (out_sam_path.split('.')[-1].lower() == 'bam')
bamming_command = ['samtools', 'view', '-bS', '-'] if output_is_bam else []
# Split input files if required and apply iterative mapping to each
# segment separately.
# Convert input relative arguments to the absolute length scale.
reading_process = subprocess.Popen(reading_command,
stdout=subprocess.PIPE)
reading_process.stdout.readline()
raw_seq_len = len(reading_process.stdout.readline().strip())
log.info('The length of whole sequences in the file: %d', raw_seq_len)
reading_process.terminate()
sleep()
if kwargs.get('first_iteration', True):
has_old_files = False
for path in sorted(glob.glob(out_sam_path + '.*')):
try:
mapped_len = int(path[len(out_sam_path) + 1:])
if ((mapped_len - min_seq_len) % len_step != 0) and (mapped_len != raw_seq_len):
has_old_files = True
            except ValueError:
pass
if has_old_files:
raise Exception(
'The output folder contains a SAM file mapped '
'to a different length range. '
'Most likely, this is an artifact of previous mappings.')
if (seq_start < 0
or seq_start > raw_seq_len
or (seq_end and seq_end > raw_seq_len)):
raise Exception('An incorrect trimming region is supplied: [%d, %d), '
'the raw sequence length is %d' % (
seq_start, seq_end, raw_seq_len))
local_seq_end = min(raw_seq_len, seq_end) if seq_end else raw_seq_len
if min_seq_len <= local_seq_end - seq_start:
trim_5 = seq_start
trim_3 = raw_seq_len - seq_start - min_seq_len
local_out_sam = out_sam_path + '.' + str(min_seq_len)
mapping_command = [
bowtie_path, '-x', bowtie_index_path, '-q', '-',
'-5', str(trim_5), '-3', str(trim_3), '-p', str(nthreads)
] + bowtie_flags.split()
pipeline = []
try:
log.info('Reading command: %s', ' '.join(reading_command))
pipeline.append(
subprocess.Popen(reading_command, stdout=subprocess.PIPE, bufsize=-1))
log.info('Mapping command: %s', ' '.join(mapping_command))
pipeline.append(
subprocess.Popen(mapping_command,
stdin=pipeline[-1].stdout,
stdout=subprocess.PIPE if (bamming_command or drop_seqs_command) else open(local_out_sam, 'w'),
bufsize=-1))
if drop_seqs_command:
log.info('Output editing command: %s', ' '.join(drop_seqs_command))
pipeline.append(
subprocess.Popen(drop_seqs_command,
stdin=pipeline[-1].stdout,
stdout=subprocess.PIPE if bamming_command else open(local_out_sam, 'w'),
bufsize=-1))
if bamming_command:
log.info('Output formatting command: %s', ' '.join(bamming_command))
pipeline.append(
subprocess.Popen(bamming_command,
stdin=pipeline[-1].stdout,
stdout=open(local_out_sam, 'w'),
bufsize=-1))
pipeline[-1].wait()
finally:
sleep()
for process in pipeline:
if process.poll() is None:
process.terminate()
# Check if the next iteration is required.
if (len_step <= 0) or (min_seq_len + len_step > local_seq_end - seq_start):
if kwargs.get("first_iteration", True) == False:
print "Deleting previous file", fastq_path
os.remove(fastq_path)
return
# Recursively go to the next iteration.
log.info('Save the unique aligments and send the '
'non-unique ones to the next iteration')
reading_process = subprocess.Popen(reading_command,
stdout=subprocess.PIPE,
bufsize=-1)
unmapped_fastq_path = os.path.join(
temp_dir, os.path.split(fastq_path)[1] + '.%d' % min_seq_len + ".fastq.gz")
num_total, num_filtered = _filter_unmapped_fastq(
reading_process.stdout, local_out_sam, unmapped_fastq_path, in_filename=fastq_path)
reading_process.communicate()
sleep()
    log.info(('{0} non-unique reads out of '
              '{1} are sent to the next iteration.').format(num_filtered, num_total))
if kwargs.get("first_iteration", True) == False:
print "Deleting previous file", fastq_path
os.remove(fastq_path)
kwargs['first_iteration'] = False
if commandExists("pigz"):
kwargs["bash_reader"] = "pigz -dc"
else:
kwargs["bash_reader"] = 'gunzip -c'
iterative_mapping(bowtie_path, bowtie_index_path, unmapped_fastq_path,
out_sam_path,
min_seq_len=min_seq_len + len_step,
len_step=len_step, **kwargs)
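# Minimal usage sketch for iterative_mapping(); every path below is
# hypothetical. Reads are first truncated to 25 bp, then the non-uniquely
# mapped ones are retried at 30, 35, ... bp until the full length is reached.
def _example_iterative_mapping():
    iterative_mapping(
        bowtie_path='/usr/local/bin/bowtie2',
        bowtie_index_path='/data/hg19/hg19',  # common part of the .bt2 files
        fastq_path='/data/run1_side1.fastq.gz',
        out_sam_path='/data/run1_side1.bam',  # .bam triggers samtools view -bS
        min_seq_len=25,
        len_step=5,
        nthreads=4,
        bowtie_flags='--very-sensitive')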
def _find_rfrags_inplace(lib, genome, min_frag_size, side):
'''Private: assign mapped reads to restriction fragments by
their 5' end position.
'''
assert isinstance(genome, mirnylib.genome.Genome) # make Pydev happy
side = str(side)
chrms = lib['chrms' + side]
# setting to zero chromosomes that are over the limit of the genome
removeMask = chrms >= genome.chrmCount
chrms[removeMask] = -1
lib['chrms' + side] = chrms
cuts = lib['cuts' + side]
cuts[removeMask] = -1
lib['cuts' + side] = cuts
rfragIdxs = np.zeros(len(chrms), dtype=np.int64)
uprsites = np.zeros(len(chrms), dtype=np.int64)
rsites = np.zeros(len(chrms), dtype=np.int64)
downrsites = np.zeros(len(chrms), dtype=np.int64)
# If the fragment was not mapped.
rfragIdxs[chrms == -1] = -1
rsites[chrms == -1] = -1
uprsites[chrms == -1] = -1
downrsites[chrms == -1] = -1
badCuts = np.nonzero(cuts >= genome.chrmLens[chrms])[0]
if len(badCuts) > 0:
maxDev = np.max(cuts[badCuts] - genome.chrmLens[chrms[badCuts]])
        warnings.warn(
            ('\nDetected {0} reads that map past the end of a chromosome!'
             '\n Maximum deviation is {1} bp ').format(len(badCuts), maxDev))
if maxDev > 50:
raise StandardError("Deviation is too large. Probably, genome mismatch.")
cuts[badCuts] = np.array(genome.chrmLens[np.array(chrms[badCuts], dtype=int)] - 1, dtype=cuts.dtype)
if len(badCuts) > 10000:
raise StandardError("Determined too many (%s) reads that map after "
"the end of chromosome!" % len(badCuts))
strands = lib['strands' + side]
for chrm_idx in xrange(genome.chrmCount):
all_rsites = np.r_[0, genome.rsites[chrm_idx]]
idxs = (chrms == chrm_idx)
# Find the indexes of the restriction fragment...
rfragIdxs[idxs] = np.searchsorted(all_rsites, cuts[idxs]) - 1
uprsites[idxs] = all_rsites[rfragIdxs[idxs]]
downrsites[idxs] = all_rsites[rfragIdxs[idxs] + 1]
rsites[idxs] = np.where(
strands[idxs], downrsites[idxs], uprsites[idxs])
too_close = (np.abs(rsites[idxs] - cuts[idxs]) <= min_frag_size)
too_close_idxs = np.where(idxs)[0][too_close]
rfragIdxs[too_close_idxs] += strands[too_close_idxs] * 2 - 1
uprsites[too_close_idxs] = all_rsites[rfragIdxs[too_close_idxs]]
downrsites[too_close_idxs] = all_rsites[rfragIdxs[too_close_idxs] + 1]
rsites[too_close_idxs] = np.where(
strands[too_close_idxs],
downrsites[too_close_idxs],
uprsites[too_close_idxs])
lib['rfragIdxs' + side] = rfragIdxs
lib['uprsites' + side] = uprsites
lib['downrsites' + side] = downrsites
lib['rsites' + side] = rsites
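# Tiny self-contained illustration (synthetic data) of the searchsorted trick
# used above: with restriction sites at 0, 100 and 250, cuts at 50, 120 and
# 240 fall into fragments 0, 1 and 1 respectively.
def _demo_rfrag_lookup():
    all_rsites = np.array([0, 100, 250])
    cuts = np.array([50, 120, 240])
    rfragIdxs = np.searchsorted(all_rsites, cuts) - 1
    assert list(rfragIdxs) == [0, 1, 1]
    uprsites = all_rsites[rfragIdxs]        # upstream site of each fragment
    downrsites = all_rsites[rfragIdxs + 1]  # downstream site of each fragment
    assert list(uprsites) == [0, 100, 100]
    assert list(downrsites) == [100, 250, 250]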
def _parse_ss_sams(sam_basename, out_dict, genome_db,
max_seq_len=-1, reverse_complement=False, save_seqs=False, maxReads=None, IDLen=None):
"""Parse SAM files with single-sided reads.
"""
def _for_each_unique_read(sam_basename, genome_db, action):
sam_paths = glob.glob(sam_basename + '.*')
if not sam_paths:
raise Exception('No SAM/BAM files with \'%s\' basename are found.' % sam_basename)
for sam_path in sam_paths:
samfile = pysam.Samfile(sam_path) # @UndefinedVariable
# Make Bowtie's chromosome tids -> genome_db indices dictionary.
tid2idx = {}
for i in xrange(len(samfile.lengths)):
chrm_rname = samfile.getrname(i)
chrm_label = genome_db._extractChrmLabel(chrm_rname)
if chrm_label in genome_db.label2idx:
tid2idx[i] = genome_db.label2idx[chrm_label]
for read in samfile:
if readIsUnmapped(read):
continue
# Convert Bowtie's chromosome tids to genome_db indices.
# Skip chromosomes that are not in the genome.
if read.tid in tid2idx:
read.tid = tid2idx[read.tid]
action(read)
# Calculate reads statistics if we don't know anything about mapping parameters.
if (maxReads is None) or (IDLen is None):
def _count_stats(read):
            # A function is an object in Python, so we can attach attributes
            # to it; the counters below keep the stats between calls.
_count_stats.id_len = max(_count_stats.id_len,
len(read.qname))
_count_stats.seq_len = max(_count_stats.seq_len,
len(read.seq))
_count_stats.num_reads += 1
_count_stats.id_len = 0
_count_stats.seq_len = 0
_count_stats.num_reads = 0
_for_each_unique_read(sam_basename, genome_db, _count_stats)
sam_stats = {'id_len': _count_stats.id_len,
'seq_len': _count_stats.seq_len,
'num_reads': _count_stats.num_reads}
log.info(
'Parsing SAM files with basename {0}, # of reads: {1}'.format(
sam_basename, sam_stats['num_reads']))
if max_seq_len > 0:
sam_stats['seq_len'] = min(max_seq_len, sam_stats['seq_len'])
if sam_stats['num_reads'] == 0:
out_dict.update(
{'chrms': [], 'strands': [], 'cuts': [], 'seqs': [], 'ids': []})
return out_dict
else:
print "not counting stats"
# Read and save each type of data separately.
def _write_to_array(read, array, value):
array[_write_to_array.i] = value
def inc(function):
function.i += 1
# ...chromosome ids
if maxReads is None:
numReads = sam_stats['num_reads']
else:
numReads = maxReads
chrmBuf = np.zeros((numReads,), dtype=np.int8)
strandBuf = np.zeros((numReads,), dtype=np.bool)
cutBuf = np.zeros((numReads,), dtype=np.int64)
if (maxReads is None) or (IDLen is None):
idArrayLen = sam_stats['id_len']
else:
idArrayLen = IDLen
idBuf = np.zeros((numReads,), dtype='|S%d' % idArrayLen)
_write_to_array.i = 0
if save_seqs:
seqBuf = np.zeros(
(sam_stats['num_reads'],), dtype='|S%d' % sam_stats['seq_len'])
_for_each_unique_read(sam_basename, genome_db,
action=lambda read: (_write_to_array(read, chrmBuf, read.tid),
_write_to_array(read, strandBuf, not read.is_reverse),
_write_to_array(read, cutBuf, read.pos + (len(read.seq) if read.is_reverse else 0)),
_write_to_array(read, idBuf, read.qname[:-2] if read.qname.endswith('/1') or read.qname.endswith('/2') else read.qname),
_write_to_array(read, seqBuf, Bio.Seq.reverse_complement(read.seq) if read.is_reverse and reverse_complement else read.seq),
inc(_write_to_array)))
if (maxReads is not None) and (IDLen is not None):
totReads = _write_to_array.i
seqBuf = seqBuf[:totReads]
out_dict['seqs'] = seqBuf
else:
print "In a recent update by default we're not saving sequences!!!"
print "use parse_sams(save_seqs=True) to save sequences"
warnings.warn(RuntimeWarning("Since 14-01-20 we're not saving sequences by default"))
_for_each_unique_read(sam_basename, genome_db,
action=lambda read: (_write_to_array(read, chrmBuf, read.tid),
_write_to_array(read, strandBuf, not read.is_reverse),
_write_to_array(read, cutBuf, read.pos + (len(read.seq) if read.is_reverse else 0)),
_write_to_array(read, idBuf, read.qname[:-2] if read.qname.endswith('/1') or read.qname.endswith('/2') else read.qname),
inc(_write_to_array)))
if (maxReads is not None) and (IDLen is not None):
totReads = _write_to_array.i
chrmBuf = chrmBuf[:totReads]
strandBuf = strandBuf[:totReads]
cutBuf = cutBuf[:totReads]
idBuf = idBuf[:totReads]
out_dict['chrms'] = chrmBuf
out_dict["strands"] = strandBuf
out_dict["cuts"] = cutBuf
out_dict["ids"] = idBuf
return out_dict
def parse_sam(sam_basename1, sam_basename2, out_dict, genome_db, save_seqs=False, **kwargs):
'''Parse SAM/BAM files with HiC reads.
Parameters
----------
sam_basename1 : str
A basename of SAM files with the mapped sequences of the first
side of Hi-C molecules.
sam_basename2 : str
A basename of SAM files with the mapped sequences of the second
side of Hi-C molecules.
out_dict : dict-like
A dict-like structure to store the library of matched HiC reads.
    genome_db : str or mirnylib.genome.Genome
A path to a folder with FASTA files or a genome object. It is used
to convert Bowtie chromosome indices to internal indices.
max_seq_len : int, optional
The length the sequences are truncated to before saving
into the library. The default value is -1, i.e. the sequences are
not truncated.
reverse_complement : bool, optional
If True then the sequences of reads on the reversed strand will be
reverse complemented. False by default.
keep_ids : bool, optional
If True then the IDs of reads are stored. False by default.
enzyme_name : str, optional
If specified, assign the reads to the restriction fragments with
the fill_rsites() function.
The name of the restriction enzyme. The full list of possible names
can be found in Bio.Restriction.AllEnzymes. If 'auto' and genome_db
has an enzyme set then use this enzyme.
min_frag_size : int, optional
The minimal distance between a cut site and a restriction site.
Used only if enzyme_name is specified.
        If the actual distance is less than or equal to the minimum, then the
        ultra-sonic fragment is assigned to the next restriction fragment in
        the direction of the read. Default is None, which means half of the
        length of the restriction motif.
'''
max_seq_len = kwargs.get('max_seq_len', -1)
reverse_complement = kwargs.get('reverse_complement', False)
keep_ids = kwargs.get('keep_ids', False)
enzyme_name = kwargs.get('enzyme_name', None)
min_frag_size = kwargs.get('min_frag_size', None)
maxReads = kwargs.get("maxReads", None)
IDLen = kwargs.get("IDLen", 50)
if isinstance(genome_db, str):
genome_db = mirnylib.genome.Genome(genome_db)
assert isinstance(genome_db, mirnylib.genome.Genome)
# Parse the single-sided reads.
ss_lib = {}
ss_lib[1] = mirnylib.h5dict.h5dict()
ss_lib[2] = mirnylib.h5dict.h5dict()
log.info('Parse the first side of the reads from %s' % sam_basename1)
_parse_ss_sams(sam_basename1, ss_lib[1], genome_db,
1 if not max_seq_len else max_seq_len, reverse_complement, save_seqs=save_seqs,
maxReads=maxReads, IDLen=IDLen)
log.info('Parse the second side of the reads from %s' % sam_basename2)
_parse_ss_sams(sam_basename2, ss_lib[2], genome_db,
1 if not max_seq_len else max_seq_len, reverse_complement, save_seqs=save_seqs,
maxReads=maxReads, IDLen=IDLen)
# Determine the number of double-sided reads.
all_ids = np.unique(np.concatenate((ss_lib[1]['ids'], ss_lib[2]['ids'])))
tot_num_reads = all_ids.shape[0]
if tot_num_reads == 0:
log.warning(
'The SAM files %s and %s do not contain unique double sided reads' %
(sam_basename1, sam_basename2))
# Pair single-sided reads and write into the output.
for i in [1, 2]:
sorting = np.searchsorted(all_ids, ss_lib[i]['ids'])
for key in ss_lib[i].keys():
# Create empty arrays if input is empty.
if tot_num_reads == 0:
out_dict[key + str(i)] = []
continue
# Don't save ids and seqs if not requested.
if key == 'ids' and not keep_ids:
continue
if key == 'seq' and not max_seq_len:
continue
# The default value is -1 for an undefined cut site and chromosome
# and 0 for other data.
if key == 'cuts' or key == 'chrms':
buf = -1 * np.ones(shape=tot_num_reads,
dtype=ss_lib[i].value_dtype(key))
else:
buf = np.zeros(shape=tot_num_reads,
dtype=ss_lib[i].value_dtype(key))
buf[sorting] = ss_lib[i][key]
out_dict[key + str(i)] = buf
del buf
misc_dict = {}
misc_dict['genome'] = {}
misc_dict['genome']['idx2label'] = dict(genome_db.idx2label)
misc_dict['genome']['label2idx'] = dict(genome_db.label2idx)
out_dict['misc'] = misc_dict
if not (enzyme_name is None):
fill_rsites(out_dict, genome_db, enzyme_name, min_frag_size)
return out_dict
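# Minimal usage sketch for parse_sam(); the paths and the enzyme below are
# hypothetical. The two mapped sides produced by iterative_mapping() are
# paired by read ID and stored into an h5dict, and enzyme_name triggers
# fill_rsites() on the result.
def _example_parse_sam():
    mapped_reads = mirnylib.h5dict.h5dict('/data/run1_mapped.hdf5')
    parse_sam(
        sam_basename1='/data/run1_side1.bam',
        sam_basename2='/data/run1_side2.bam',
        out_dict=mapped_reads,
        genome_db='/data/hg19',  # folder with FASTA files
        enzyme_name='HindIII')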
def fill_rsites(lib, genome_db, enzyme_name='auto', min_frag_size=None):
'''Assign the mapped reads to the restriction fragments.
Parameters
----------
lib : dict
A library of mapped Hi-C molecules. Gets modified by the function.
    genome_db : str or mirnylib.genome.Genome
A path to the folder with genome sequences in FASTA format or
a mirnylib.genome.genome object.
enzyme_name : str
A name of the restriction enzyme. The full list of possible names
can be found in Bio.Restriction.AllEnzymes.
min_frag_size : int
The minimal distance between a cut site and a restriction site.
If the actual distance is less than minimal then the ultra-sonic
fragment is assigned to the next restriction fragment in the direction
of the read.
'''
if isinstance(genome_db, str):
genome_db = mirnylib.genome.Genome(genome_db)
assert isinstance(genome_db, mirnylib.genome.Genome)
if len(lib['chrms1']) == 0:
return lib
if enzyme_name == 'auto':
if not genome_db.hasEnzyme():
raise Exception('Set a restriction enzyme in the genome object or '
'supply its name')
else:
if enzyme_name not in Bio.Restriction.AllEnzymes:
raise Exception('Enzyme is not found in the library: %s' %
(enzyme_name,))
genome_db.setEnzyme(enzyme_name)
    rsite_size = len(getattr(Bio.Restriction, genome_db.enzymeName).site)
if min_frag_size is None:
_min_frag_size = rsite_size / 2.0
else:
_min_frag_size = min_frag_size
_find_rfrags_inplace(lib, genome_db, _min_frag_size, 1)
_find_rfrags_inplace(lib, genome_db, _min_frag_size, 2)
return lib
| |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import itertools
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, requires_one_event_of, CPU, MissingTraceEventError
from lisa.datautils import series_integrate, df_refit_index, series_refit_index, series_deduplicate, df_add_delta, series_mean, df_window
class FrequencyAnalysis(TraceAnalysisBase):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'frequency'
@requires_one_event_of('cpu_frequency', 'userspace@cpu_frequency_devlib')
def df_cpus_frequency(self, signals_init=True):
"""
Similar to ``trace.df_event('cpu_frequency')``, with
``userspace@cpu_frequency_devlib`` support.
        :param signals_init: If ``True``, an initial value for the signals will
            be provided. This includes an initial value taken outside the
            window boundaries and devlib-provided events.
The ``userspace@cpu_frequency_devlib`` user event is merged in the dataframe if
it provides earlier values for a CPU.
"""
def rename(df):
return df.rename(
{
'cpu_id': 'cpu',
'state': 'frequency',
},
axis=1,
)
def check_empty(df, excep):
if df.empty:
raise excep
else:
return df
try:
df = self.trace.df_event('cpu_frequency', signals_init=signals_init)
except MissingTraceEventError as e:
excep = e
df = pd.DataFrame(columns=['cpu', 'frequency'])
else:
excep = None
df = rename(df)
if not signals_init:
return check_empty(df, excep)
try:
devlib_df = self.trace.df_event('userspace@cpu_frequency_devlib')
except MissingTraceEventError as e:
return check_empty(df, e)
else:
devlib_df = rename(devlib_df)
def groupby_cpu(df):
return df.groupby('cpu', observed=True, sort=False)
# Get the initial values for each CPU
def init_freq(df, from_devlib):
df = groupby_cpu(df).head(1).copy()
df['from_devlib'] = from_devlib
return df
init_df = init_freq(df, False)
init_devlib_df = init_freq(devlib_df, True)
# Get the first frequency for each CPU as given by devlib and cpufreq.
init_df = pd.concat([init_df, init_devlib_df])
init_df.sort_index(inplace=True)
# Get the first value for each CPU
first_df = groupby_cpu(init_df).head(1)
# Only keep the ones coming from devlib, as the other ones are already
# in the cpufreq df
first_df = first_df[first_df['from_devlib'] == True]
del first_df['from_devlib']
df = pd.concat([first_df, df])
df.sort_index(inplace=True)
df.index.name = 'Time'
return check_empty(df, None)
@df_cpus_frequency.used_events
def df_cpu_frequency(self, cpu, **kwargs):
"""
Same as :meth:`df_cpus_frequency` but for a single CPU.
:param cpu: CPU ID to get the frequency of.
:type cpu: int
:Variable keyword arguments: Forwarded to :meth:`df_cpus_frequency`.
"""
df = self.df_cpus_frequency(**kwargs)
return df[df['cpu'] == cpu]
@df_cpus_frequency.used_events
def _check_freq_domain_coherency(self, cpus=None):
"""
Check that all CPUs of a given frequency domain have the same frequency
transitions.
:param cpus: CPUs to take into account. All other CPUs are ignored.
If `None`, all CPUs will be checked.
:type cpus: list(int) or None
"""
domains = self.trace.plat_info['freq-domains']
if cpus is None:
cpus = list(itertools.chain.from_iterable(domains))
if len(cpus) < 2:
return
df = self.df_cpus_frequency()
for domain in domains:
            # restrict the domain to the CPUs we care about. Other CPUs may
            # have garbage data, but the caller is not going to look at it
            # anyway.
domain = set(domain) & set(cpus)
if len(domain) < 2:
continue
# Get the frequency column for each CPU in the domain
freq_columns = [
# drop the index since we only care about the transitions, and
# not when they happened
df[df['cpu'] == cpu]['frequency'].reset_index(drop=True)
for cpu in domain
]
# Check that all columns are equal. If they are not, that means that
# at least one CPU has a frequency transition that is different
# from another one in the same domain, which is highly suspicious
ref = freq_columns[0]
for col in freq_columns:
# If the trace started in the middle of a group of transitions,
# ignore that transition by shifting and re-test
if not (ref.equals(col) or ref[:-1].equals(col.shift()[1:])):
raise ValueError(f'Frequencies of CPUs in the freq domain {cpus} are not coherent')
@TraceAnalysisBase.cache
@df_cpus_frequency.used_events
@requires_events('cpu_idle')
def _get_frequency_residency(self, cpus):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cpus: A tuple of CPU IDs
:type cpus: tuple(int)
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
freq_df = self.df_cpus_frequency()
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU.
self._check_freq_domain_coherency(cpus)
cluster_freqs = freq_df[freq_df.cpu == cpus[0]]
# Compute TOTAL Time
cluster_freqs = df_add_delta(cluster_freqs, col="total_time", window=self.trace.window)
time_df = cluster_freqs[["total_time", "frequency"]].groupby('frequency', observed=True, sort=False).sum()
# Compute ACTIVE Time
cluster_active = self.trace.analysis.idle.signal_cluster_active(cpus)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
cluster_freqs = cluster_freqs.join(
cluster_active.to_frame(name='active'), how='outer')
cluster_freqs.fillna(method='ffill', inplace=True)
# Compute total time by integrating the square wave
time_df['active_time'] = pd.Series({
freq: series_integrate(
cluster_freqs['active'] * (cluster_freqs['frequency'] == freq)
)
for freq in cluster_freqs['frequency'].unique()
})
return time_df
@_get_frequency_residency.used_events
def df_cpu_frequency_residency(self, cpu):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
if not isinstance(cpu, int):
raise TypeError('Input CPU parameter must be an integer')
return self._get_frequency_residency((cpu,))
@_get_frequency_residency.used_events
def df_domain_frequency_residency(self, cpu):
"""
        Get per-frequency-domain frequency residency, i.e. the amount of time
        each domain spent at each frequency.
:param cpu: Any CPU of the domain to analyse
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
domains = [
domain
for domain in self.trace.plat_info['freq-domains']
if cpu in domain
]
if not domains:
raise ValueError(f'The given CPU "{cpu}" does not belong to any domain')
else:
domain, = domains
return self._get_frequency_residency(tuple(domain))
@TraceAnalysisBase.cache
@df_cpu_frequency.used_events
def df_cpu_frequency_transitions(self, cpu):
"""
Compute number of frequency transitions of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions)
"""
freq_df = self.df_cpu_frequency(cpu, signals_init=False)
# Since we want to count the number of events appearing inside the
# window, make sure we don't get anything outside it
freq_df = df_window(
freq_df,
window=self.trace.window,
method='exclusive',
clip_window=False,
)
cpu_freqs = freq_df['frequency']
# Remove possible duplicates (example: when devlib sets trace markers
# a cpu_frequency event is triggered that can generate a duplicate)
cpu_freqs = series_deduplicate(cpu_freqs, keep='first', consecutives=True)
transitions = cpu_freqs.value_counts()
transitions.name = "transitions"
transitions.sort_index(inplace=True)
return pd.DataFrame(transitions)
@TraceAnalysisBase.cache
@df_cpu_frequency_transitions.used_events
def df_cpu_frequency_transition_rate(self, cpu):
"""
Compute frequency transition rate of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions per second)
"""
transitions = self.df_cpu_frequency_transitions(cpu)['transitions']
return pd.DataFrame(dict(
transitions=transitions / self.trace.time_range,
))
@df_cpu_frequency.used_events
def get_average_cpu_frequency(self, cpu):
"""
Get the average frequency for a given CPU
:param cpu: The CPU to analyse
:type cpu: int
"""
df = self.df_cpu_frequency(cpu)
freq = series_refit_index(df['frequency'], window=self.trace.window)
return series_mean(freq)
@TraceAnalysisBase.cache
@requires_events('clock_set_rate', 'clock_enable', 'clock_disable')
def df_peripheral_clock_effective_rate(self, clk_name):
rate_df = self.trace.df_event('clock_set_rate')
enable_df = self.trace.df_event('clock_enable')
disable_df = self.trace.df_event('clock_disable')
freq = rate_df[rate_df.clk_name == clk_name]
enables = enable_df[enable_df.clk_name == clk_name]
disables = disable_df[disable_df.clk_name == clk_name]
freq = pd.concat([freq, enables, disables], sort=False).sort_index()
freq['start'] = freq.index
freq['len'] = (freq.start - freq.start.shift()).fillna(0).shift(-1)
# The last value will be NaN, fix to be appropriate length
freq.loc[freq.index[-1], 'len'] = self.trace.end - freq.index[-1]
freq.ffill(inplace=True)
        freq['effective_rate'] = np.where(
            freq['state'] == 0, 0,
            np.where(freq['state'] == 1, freq['rate'], float('nan'))
        )
return freq
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method(return_axis=True)
@df_peripheral_clock_effective_rate.used_events
def plot_peripheral_clock(self, clk, axis=None, **kwargs):
"""
Plot the frequency of a particular peripheral clock
:param clk: The clk name to chart
:type clk: str
"""
logger = self.get_logger()
window = self.trace.window
start, end = window
def plotter(axis, local_fig):
freq_axis, state_axis = axis
freq_axis.get_figure().suptitle('Peripheral frequency', y=.97, fontsize=16, horizontalalignment='center')
freq = self.df_peripheral_clock_effective_rate(clk)
freq = df_refit_index(freq, window=window)
# Plot frequency information (set rate)
freq_axis.set_title("Clock frequency for " + clk)
            set_rate = freq['rate'].dropna()
rate_axis_lib = 0
if len(set_rate) > 0:
rate_axis_lib = set_rate.max()
set_rate.plot(style=['b--'], ax=freq_axis, drawstyle='steps-post', alpha=0.4, label="clock_set_rate value")
freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], end, linestyle='--', color='b', alpha=0.4)
else:
logger.warning('No clock_set_rate events to plot')
# Plot frequency information (effective rate)
eff_rate = freq['effective_rate'].dropna()
eff_rate = series_refit_index(eff_rate, window=window)
if len(eff_rate) > 0 and eff_rate.max() > 0:
rate_axis_lib = max(rate_axis_lib, eff_rate.max())
eff_rate.plot(style=['b-'], ax=freq_axis, drawstyle='steps-post', alpha=1.0, label="Effective rate (with on/off)")
freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], end, linestyle='-', color='b', alpha=1.0)
else:
logger.warning('No effective frequency events to plot')
freq_axis.set_ylim(0, rate_axis_lib * 1.1)
freq_axis.set_xlabel('')
freq_axis.grid(True)
freq_axis.legend()
def mhz(x, pos):
return '{:1.2f} MHz'.format(x * 1e-6)
freq_axis.get_yaxis().set_major_formatter(FuncFormatter(mhz))
on = freq[freq.state == 1]
state_axis.hlines([0] * len(on),
on['start'], on['start'] + on['len'],
linewidth=10.0, label='clock on', color='green')
off = freq[freq.state == 0]
state_axis.hlines([0] * len(off),
off['start'], off['start'] + off['len'],
linewidth=10.0, label='clock off', color='red')
# Plot time period that the clock state was unknown from the trace
indeterminate = pd.concat([on, off]).sort_index()
if indeterminate.empty:
indet_range_max = end
else:
indet_range_max = indeterminate.index[0]
state_axis.hlines(0, 0, indet_range_max, linewidth=1.0, label='indeterminate clock state', linestyle='--')
state_axis.legend(bbox_to_anchor=(0., 1.02, 1., 0.102), loc=3, ncol=3, mode='expand')
state_axis.set_yticks([])
state_axis.set_xlabel('seconds')
state_axis.set_xlim(start, end)
return self.do_plot(plotter, height=8, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency.used_events
def plot_cpu_frequencies(self, cpu: CPU, axis, local_fig, average: bool=True):
"""
Plot frequency for the specified CPU
:param cpu: The CPU for which to plot frequencies
        :type cpu: int
:param average: If ``True``, add a horizontal line which is the
frequency average.
:type average: bool
If ``sched_overutilized`` events are available, the plots will also
show the intervals of time where the system was overutilized.
"""
logger = self.get_logger()
df = self.df_cpu_frequency(cpu)
if "freqs" in self.trace.plat_info:
frequencies = self.trace.plat_info['freqs'][cpu]
else:
logger.info(f"Estimating CPU{cpu} frequencies from trace")
frequencies = sorted(list(df.frequency.unique()))
logger.debug(f"Estimated frequencies: {frequencies}")
avg = self.get_average_cpu_frequency(cpu)
logger.info(
"Average frequency for CPU{} : {:.3f} GHz".format(cpu, avg / 1e6))
df = df_refit_index(df, window=self.trace.window)
df['frequency'].plot(ax=axis, drawstyle='steps-post')
if average and avg > 0:
axis.axhline(avg, color=self.get_next_color(axis), linestyle='--',
label="average")
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_ylabel('Frequency (Hz)')
axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1)
axis.legend()
if local_fig:
axis.set_xlabel('Time')
axis.set_title(f'Frequency of CPU{cpu}')
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequencies.used_events
def plot_domain_frequencies(self, axis=None, **kwargs):
"""
Plot frequency trend for all frequency domains.
If ``sched_overutilized`` events are available, the plots will also show
the intervals of time where the cluster was overutilized.
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
axis = axes[idx] if len(domains) > 1 else axes
self.plot_cpu_frequencies(domain[0], axis=axis)
axis.set_title(f'Frequencies of CPUS {domain}')
return self.do_plot(plotter, nrows=len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@df_cpu_frequency_residency.used_events
def plot_cpu_frequency_residency(self, cpu: CPU, pct: bool=False, axis=None, **kwargs):
"""
Plot per-CPU frequency residency.
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot residencies in percentage
:type pct: bool
"""
residency_df = self.df_cpu_frequency_residency(cpu)
total_df = residency_df.total_time
active_df = residency_df.active_time
if pct:
total_df = total_df * 100 / total_df.sum()
active_df = active_df * 100 / active_df.sum()
def plotter(axes, local_fig):
total_df.plot.barh(ax=axes[0])
axes[0].set_title(f"CPU{cpu} total frequency residency")
active_df.plot.barh(ax=axes[1])
axes[1].set_title(f"CPU{cpu} active frequency residency")
for axis in axes:
if pct:
axis.set_xlabel("Time share (%)")
else:
axis.set_xlabel("Time (s)")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
return self.do_plot(plotter, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_residency.used_events
def plot_domain_frequency_residency(self, pct: bool=False, axis=None, **kwargs):
"""
Plot the frequency residency for all frequency domains.
:param pct: Plot residencies in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
local_axes = axes[2 * idx: 2 * (idx + 1)]
self.plot_cpu_frequency_residency(domain[0],
pct=pct,
axis=local_axes,
)
for axis in local_axes:
title = axis.get_title()
axis.set_title(title.replace(f'CPU{domain[0]}', f"CPUs {domain}"))
return self.do_plot(plotter, nrows=2 * len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency_transitions.used_events
def plot_cpu_frequency_transitions(self, cpu: CPU, axis, local_fig, pct: bool=False):
"""
Plot frequency transitions count of the specified CPU
        :param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
df = self.df_cpu_frequency_transitions(cpu)
if pct:
df = df * 100 / df.sum()
if not df.empty:
df["transitions"].plot.barh(ax=axis)
axis.set_title(f'Frequency transitions of CPU{cpu}')
if pct:
axis.set_xlabel("Transitions share (%)")
else:
axis.set_xlabel("Transition count")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_transitions.used_events
def plot_domain_frequency_transitions(self, pct: bool=False, axis=None, **kwargs):
"""
Plot frequency transitions count for all frequency domains
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for domain, axis in zip(domains, axes):
self.plot_cpu_frequency_transitions(
cpu=domain[0],
pct=pct,
axis=axis,
)
title = axis.get_title()
axis.set_title(title.replace(f'CPU{domain[0]}', f"CPUs {domain}"))
return self.do_plot(plotter, nrows=len(domains), axis=axis, **kwargs)
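# Minimal usage sketch; the trace file and event list below are illustrative.
# A FrequencyAnalysis instance is reached through ``trace.analysis.frequency``.
def _example_frequency_analysis():
    from lisa.trace import Trace
    trace = Trace('trace.dat', events=['cpu_frequency', 'cpu_idle'])
    analysis = trace.analysis.frequency
    # Average frequency of CPU0, converted as in the logging above (kHz -> GHz)
    avg_ghz = analysis.get_average_cpu_frequency(0) / 1e6
    # Per-frequency residency (total and active time) for CPU0
    residency = analysis.df_cpu_frequency_residency(0)
    return avg_ghz, residency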
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| |
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import sys
import unittest
import SCons.Errors
import SCons.Variables
import TestCmd
import TestUnit
class PathVariableTestCase(unittest.TestCase):
def test_PathVariable(self):
"""Test PathVariable creation"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path'))
o = opts.options[0]
assert o.key == 'test', o.key
assert o.help == 'test option help ( /path/to/test )', repr(o.help)
assert o.default == '/default/path', o.default
assert o.validator is not None, o.validator
assert o.converter is None, o.converter
def test_PathExists(self):
"""Test the PathExists validator"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path',
SCons.Variables.PathVariable.PathExists))
test = TestCmd.TestCmd(workdir='')
test.write('exists', 'exists\n')
o = opts.options[0]
o.validator('X', test.workpath('exists'), {})
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
except SCons.Errors.UserError, e:
assert str(e) == 'Path for option X does not exist: %s' % dne, e
        else:
raise Exception("did not catch expected UserError")
def test_PathIsDir(self):
"""Test the PathIsDir validator"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path',
SCons.Variables.PathVariable.PathIsDir))
test = TestCmd.TestCmd(workdir='')
test.subdir('dir')
test.write('file', "file\n")
o = opts.options[0]
o.validator('X', test.workpath('dir'), {})
f = test.workpath('file')
try:
o.validator('X', f, {})
except SCons.Errors.UserError, e:
assert str(e) == 'Directory path for option X is a file: %s' % f, e
        else:
raise Exception("did not catch expected UserError")
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
except SCons.Errors.UserError, e:
assert str(e) == 'Directory path for option X does not exist: %s' % dne, e
        else:
raise Exception("did not catch expected UserError")
def test_PathIsDirCreate(self):
"""Test the PathIsDirCreate validator"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path',
SCons.Variables.PathVariable.PathIsDirCreate))
test = TestCmd.TestCmd(workdir='')
test.write('file', "file\n")
o = opts.options[0]
d = test.workpath('dir')
o.validator('X', d, {})
assert os.path.isdir(d)
f = test.workpath('file')
try:
o.validator('X', f, {})
except SCons.Errors.UserError, e:
assert str(e) == 'Path for option X is a file, not a directory: %s' % f, e
        else:
raise Exception("did not catch expected UserError")
def test_PathIsFile(self):
"""Test the PathIsFile validator"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path',
SCons.Variables.PathVariable.PathIsFile))
test = TestCmd.TestCmd(workdir='')
test.subdir('dir')
test.write('file', "file\n")
o = opts.options[0]
o.validator('X', test.workpath('file'), {})
d = test.workpath('d')
try:
o.validator('X', d, {})
except SCons.Errors.UserError, e:
assert str(e) == 'File path for option X does not exist: %s' % d, e
        else:
raise Exception("did not catch expected UserError")
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
except SCons.Errors.UserError, e:
assert str(e) == 'File path for option X does not exist: %s' % dne, e
        else:
raise Exception("did not catch expected UserError")
def test_PathAccept(self):
"""Test the PathAccept validator"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path',
SCons.Variables.PathVariable.PathAccept))
test = TestCmd.TestCmd(workdir='')
test.subdir('dir')
test.write('file', "file\n")
o = opts.options[0]
o.validator('X', test.workpath('file'), {})
d = test.workpath('d')
o.validator('X', d, {})
dne = test.workpath('does_not_exist')
o.validator('X', dne, {})
def test_validator(self):
"""Test the PathVariable validator argument"""
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test',
'test option help',
'/default/path'))
test = TestCmd.TestCmd(workdir='')
test.write('exists', 'exists\n')
o = opts.options[0]
o.validator('X', test.workpath('exists'), {})
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
except SCons.Errors.UserError, e:
expect = 'Path for option X does not exist: %s' % dne
assert str(e) == expect, e
else:
raise Exception("did not catch expected UserError")
def my_validator(key, val, env):
raise Exception("my_validator() got called for %s, %s!" % (key, val))
opts = SCons.Variables.Variables()
opts.Add(SCons.Variables.PathVariable('test2',
'more help',
'/default/path/again',
my_validator))
o = opts.options[0]
try:
o.validator('Y', 'value', {})
except Exception, e:
assert str(e) == 'my_validator() got called for Y, value!', e
else:
raise Exception("did not catch expected exception from my_validator()")
if __name__ == "__main__":
suite = unittest.makeSuite(PathVariableTestCase, 'test_')
TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
#!/usr/bin/env python
#
# Generated Wed Jun 30 10:34:05 2004 by generateDS.py.
#
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
s1 = inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('"', '&quot;')
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
#
# Data representation classes.
#
class xml_behavior:
subclass = None
def __init__(self, base_impl_url='', behaviors=None):
self.base_impl_url = base_impl_url
self.behaviors = behaviors
def factory(*args_, **kwargs_):
if xml_behavior.subclass:
return xml_behavior.subclass(*args_, **kwargs_)
else:
return xml_behavior(*args_, **kwargs_)
factory = staticmethod(factory)
def getBase_impl_url(self): return self.base_impl_url
def setBase_impl_url(self, base_impl_url): self.base_impl_url = base_impl_url
def getBehaviors(self): return self.behaviors
def setBehaviors(self, behaviors): self.behaviors = behaviors
def export(self, outfile, level, name_='xml-behavior'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
showIndent(outfile, level)
outfile.write('<base-impl-url>%s</base-impl-url>\n' % quote_xml(self.getBase_impl_url()))
if self.behaviors:
self.behaviors.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='xml-behavior'):
level += 1
showIndent(outfile, level)
outfile.write('base_impl_url=%s,\n' % quote_python(self.getBase_impl_url()))
if self.behaviors:
showIndent(outfile, level)
outfile.write('behaviors=behaviors(\n')
self.behaviors.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'base-impl-url':
base_impl_url = ''
for text_ in child.childNodes:
base_impl_url += text_.nodeValue
self.base_impl_url = base_impl_url
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'behaviors':
obj = behaviors.factory()
obj.build(child)
self.setBehaviors(obj)
# end class xml_behavior
class behaviors:
subclass = None
def __init__(self, behavior=None):
if behavior is None:
self.behavior = []
else:
self.behavior = behavior
def factory(*args_, **kwargs_):
if behaviors.subclass:
return behaviors.subclass(*args_, **kwargs_)
else:
return behaviors(*args_, **kwargs_)
factory = staticmethod(factory)
def getBehavior(self): return self.behavior
def addBehavior(self, value): self.behavior.append(value)
def setBehavior(self, index, value): self.behavior[index] = value
def export(self, outfile, level, name_='behaviors'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
for behavior in self.behavior:
behavior.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='behaviors'):
level += 1
showIndent(outfile, level)
outfile.write('behavior=[\n')
level += 1
for behavior in self.behavior:
showIndent(outfile, level)
outfile.write('behavior(\n')
behavior.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'behavior':
obj = behavior.factory()
obj.build(child)
self.behavior.append(obj)
# end class behaviors
class behavior:
subclass = None
def __init__(self, klass='', name='', return_type='', args=None, impl_url='', ancillaries=None):
self.klass = klass
self.name = name
self.return_type = return_type
self.args = args
self.impl_url = impl_url
self.ancillaries = ancillaries
def factory(*args_, **kwargs_):
if behavior.subclass:
return behavior.subclass(*args_, **kwargs_)
else:
return behavior(*args_, **kwargs_)
factory = staticmethod(factory)
def getClass(self): return self.klass
def setClass(self, klass): self.klass = klass
def getName(self): return self.name
def setName(self, name): self.name = name
def getReturn_type(self): return self.return_type
def setReturn_type(self, return_type): self.return_type = return_type
def getArgs(self): return self.args
def setArgs(self, args): self.args = args
def getImpl_url(self): return self.impl_url
def setImpl_url(self, impl_url): self.impl_url = impl_url
def getAncillaries(self): return self.ancillaries
def setAncillaries(self, ancillaries): self.ancillaries = ancillaries
def export(self, outfile, level, name_='behavior'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
showIndent(outfile, level)
        outfile.write('<class>%s</class>\n' % quote_xml(self.getClass()))
showIndent(outfile, level)
outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
showIndent(outfile, level)
outfile.write('<return-type>%s</return-type>\n' % quote_xml(self.getReturn_type()))
if self.args:
self.args.export(outfile, level)
showIndent(outfile, level)
outfile.write('<impl-url>%s</impl-url>\n' % quote_xml(self.getImpl_url()))
if self.ancillaries:
self.ancillaries.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='behavior'):
level += 1
showIndent(outfile, level)
        outfile.write('klass=%s,\n' % quote_python(self.getClass()))
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.getName()))
showIndent(outfile, level)
outfile.write('return_type=%s,\n' % quote_python(self.getReturn_type()))
if self.args:
showIndent(outfile, level)
outfile.write('args=args(\n')
self.args.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('impl_url=%s,\n' % quote_python(self.getImpl_url()))
if self.ancillaries:
showIndent(outfile, level)
outfile.write('ancillaries=ancillaries(\n')
self.ancillaries.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'class':
klass = ''
for text_ in child.childNodes:
klass += text_.nodeValue
self.klass = klass
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name = ''
for text_ in child.childNodes:
name += text_.nodeValue
self.name = name
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'return-type':
return_type = ''
for text_ in child.childNodes:
return_type += text_.nodeValue
self.return_type = return_type
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'args':
obj = args.factory()
obj.build(child)
self.setArgs(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'impl-url':
impl_url = ''
for text_ in child.childNodes:
impl_url += text_.nodeValue
self.impl_url = impl_url
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ancillaries':
obj = ancillaries.factory()
obj.build(child)
self.setAncillaries(obj)
# end class behavior
class args:
subclass = None
def __init__(self, arg=None):
if arg is None:
self.arg = []
else:
self.arg = arg
def factory(*args_, **kwargs_):
if args.subclass:
return args.subclass(*args_, **kwargs_)
else:
return args(*args_, **kwargs_)
factory = staticmethod(factory)
def getArg(self): return self.arg
def addArg(self, value): self.arg.append(value)
def setArg(self, index, value): self.arg[index] = value
def export(self, outfile, level, name_='args'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
for arg in self.arg:
arg.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='args'):
level += 1
showIndent(outfile, level)
outfile.write('arg=[\n')
level += 1
for arg in self.arg:
showIndent(outfile, level)
outfile.write('arg(\n')
arg.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'arg':
obj = arg.factory()
obj.build(child)
self.arg.append(obj)
# end class args
class arg:
subclass = None
def __init__(self, name='', data_type=''):
self.name = name
self.data_type = data_type
def factory(*args_, **kwargs_):
if arg.subclass:
return arg.subclass(*args_, **kwargs_)
else:
return arg(*args_, **kwargs_)
factory = staticmethod(factory)
def getName(self): return self.name
def setName(self, name): self.name = name
def getData_type(self): return self.data_type
def setData_type(self, data_type): self.data_type = data_type
def export(self, outfile, level, name_='arg'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
showIndent(outfile, level)
outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
showIndent(outfile, level)
outfile.write('<data-type>%s</data-type>\n' % quote_xml(self.getData_type()))
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='arg'):
level += 1
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.getName()))
showIndent(outfile, level)
outfile.write('data_type=%s,\n' % quote_python(self.getData_type()))
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name = ''
for text_ in child.childNodes:
name += text_.nodeValue
self.name = name
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'data-type':
data_type = ''
for text_ in child.childNodes:
data_type += text_.nodeValue
self.data_type = data_type
# end class arg
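# Hedged sketch of the `subclass` hook shared by the generated classes above:
# assigning a subclass makes every factory() call hand back customized
# instances without editing the generated code. MyArg is purely illustrative.
#
#     >>> class MyArg(arg):
#     ...     def label(self):
#     ...         return '%s: %s' % (self.getName(), self.getData_type())
#     >>> arg.subclass = MyArg
#     >>> isinstance(arg.factory(name='x', data_type='int'), MyArg)
#     True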
class ancillaries:
subclass = None
def __init__(self, ancillary=None):
if ancillary is None:
self.ancillary = []
else:
self.ancillary = ancillary
def factory(*args_, **kwargs_):
if ancillaries.subclass:
return ancillaries.subclass(*args_, **kwargs_)
else:
return ancillaries(*args_, **kwargs_)
factory = staticmethod(factory)
def getAncillary(self): return self.ancillary
def addAncillary(self, value): self.ancillary.append(value)
def setAncillary(self, index, value): self.ancillary[index] = value
def export(self, outfile, level, name_='ancillaries'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
for ancillary in self.ancillary:
ancillary.export(outfile, level, name_='ancillary')
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='ancillaries'):
level += 1
showIndent(outfile, level)
outfile.write('ancillary=[\n')
level += 1
for ancillary in self.ancillary:
showIndent(outfile, level)
            outfile.write('ancillary(\n')
ancillary.exportLiteral(outfile, level, name_='ancillary')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ancillary':
obj = ancillary.factory()
obj.build(child)
self.ancillary.append(obj)
# end class ancillaries
class ancillary:
subclass = None
def __init__(self, klass='', role='', return_type='', name='', args=None, impl_url=''):
self.klass = klass
self.role = role
self.return_type = return_type
self.name = name
self.args = args
self.impl_url = impl_url
def factory(*args_, **kwargs_):
if ancillary.subclass:
return ancillary.subclass(*args_, **kwargs_)
else:
return ancillary(*args_, **kwargs_)
factory = staticmethod(factory)
def getClass(self): return self.klass
def setClass(self, klass): self.klass = klass
def getRole(self): return self.role
def setRole(self, role): self.role = role
def getReturn_type(self): return self.return_type
def setReturn_type(self, return_type): self.return_type = return_type
def getName(self): return self.name
def setName(self, name): self.name = name
def getArgs(self): return self.args
def setArgs(self, args): self.args = args
def getImpl_url(self): return self.impl_url
def setImpl_url(self, impl_url): self.impl_url = impl_url
def export(self, outfile, level, name_='ancillary'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
level += 1
showIndent(outfile, level)
        outfile.write('<class>%s</class>\n' % quote_xml(self.getClass()))
showIndent(outfile, level)
outfile.write('<role>%s</role>\n' % quote_xml(self.getRole()))
showIndent(outfile, level)
outfile.write('<return-type>%s</return-type>\n' % quote_xml(self.getReturn_type()))
showIndent(outfile, level)
outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
if self.args:
self.args.export(outfile, level)
showIndent(outfile, level)
outfile.write('<impl-url>%s</impl-url>\n' % quote_xml(self.getImpl_url()))
level -= 1
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportLiteral(self, outfile, level, name_='ancillary'):
level += 1
showIndent(outfile, level)
        outfile.write('klass=%s,\n' % quote_python(self.getClass()))
showIndent(outfile, level)
outfile.write('role=%s,\n' % quote_python(self.getRole()))
showIndent(outfile, level)
outfile.write('return_type=%s,\n' % quote_python(self.getReturn_type()))
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.getName()))
if self.args:
showIndent(outfile, level)
outfile.write('args=args(\n')
self.args.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('impl_url=%s,\n' % quote_python(self.getImpl_url()))
level -= 1
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
nodeName_ = child.nodeName.split(':')[-1]
if child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'class':
klass = ''
for text_ in child.childNodes:
klass += text_.nodeValue
self.klass = klass
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'role':
role = ''
for text_ in child.childNodes:
role += text_.nodeValue
self.role = role
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'return-type':
return_type = ''
for text_ in child.childNodes:
return_type += text_.nodeValue
self.return_type = return_type
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name = ''
for text_ in child.childNodes:
name += text_.nodeValue
self.name = name
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'args':
obj = args.factory()
obj.build(child)
self.setArgs(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'impl-url':
impl_url = ''
for text_ in child.childNodes:
impl_url += text_.nodeValue
self.impl_url = impl_url
# end class ancillary
from xml.sax import handler, make_parser
class SaxStackElement:
def __init__(self, name='', obj=None):
self.name = name
self.obj = obj
self.content = ''
#
# SAX handler
#
class SaxXml_behaviorHandler(handler.ContentHandler):
def __init__(self):
self.stack = []
self.root = None
def getRoot(self):
return self.root
def setDocumentLocator(self, locator):
self.locator = locator
def showError(self, msg):
        print('*** (showError): %s' % msg)
sys.exit(-1)
def startElement(self, name, attrs):
done = 0
if name == 'xml-behavior':
            obj = xml_behavior.factory()
stackObj = SaxStackElement('xml-behavior', obj)
self.stack.append(stackObj)
done = 1
elif name == 'base-impl-url':
stackObj = SaxStackElement('base_impl_url', None)
self.stack.append(stackObj)
done = 1
elif name == 'behaviors':
obj = behaviors.factory()
stackObj = SaxStackElement('behaviors', obj)
self.stack.append(stackObj)
done = 1
elif name == 'behavior':
obj = behavior.factory()
stackObj = SaxStackElement('behavior', obj)
self.stack.append(stackObj)
done = 1
elif name == 'class':
stackObj = SaxStackElement('klass', None)
self.stack.append(stackObj)
done = 1
elif name == 'name':
stackObj = SaxStackElement('name', None)
self.stack.append(stackObj)
done = 1
elif name == 'return-type':
stackObj = SaxStackElement('return_type', None)
self.stack.append(stackObj)
done = 1
elif name == 'args':
obj = args.factory()
stackObj = SaxStackElement('args', obj)
self.stack.append(stackObj)
done = 1
elif name == 'impl-url':
stackObj = SaxStackElement('impl_url', None)
self.stack.append(stackObj)
done = 1
elif name == 'ancillaries':
obj = ancillaries.factory()
stackObj = SaxStackElement('ancillaries', obj)
self.stack.append(stackObj)
done = 1
elif name == 'arg':
obj = arg.factory()
stackObj = SaxStackElement('arg', obj)
self.stack.append(stackObj)
done = 1
elif name == 'data-type':
stackObj = SaxStackElement('data_type', None)
self.stack.append(stackObj)
done = 1
elif name == 'ancillary':
            obj = ancillary.factory()
stackObj = SaxStackElement('ancillary', obj)
self.stack.append(stackObj)
done = 1
elif name == 'role':
stackObj = SaxStackElement('role', None)
self.stack.append(stackObj)
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
def endElement(self, name):
done = 0
if name == 'xml-behavior':
if len(self.stack) == 1:
self.root = self.stack[-1].obj
self.stack.pop()
done = 1
elif name == 'base-impl-url':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setBase_impl_url(content)
self.stack.pop()
done = 1
elif name == 'behaviors':
if len(self.stack) >= 2:
self.stack[-2].obj.setBehaviors(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'behavior':
if len(self.stack) >= 2:
self.stack[-2].obj.addBehavior(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'class':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setClass(content)
self.stack.pop()
done = 1
elif name == 'name':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setName(content)
self.stack.pop()
done = 1
elif name == 'return-type':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setReturn_type(content)
self.stack.pop()
done = 1
elif name == 'args':
if len(self.stack) >= 2:
self.stack[-2].obj.setArgs(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'impl-url':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setImpl_url(content)
self.stack.pop()
done = 1
elif name == 'ancillaries':
if len(self.stack) >= 2:
self.stack[-2].obj.setAncillaries(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'arg':
if len(self.stack) >= 2:
self.stack[-2].obj.addArg(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'data-type':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setData_type(content)
self.stack.pop()
done = 1
elif name == 'ancillary':
if len(self.stack) >= 2:
self.stack[-2].obj.addAncillary(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'role':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setRole(content)
self.stack.pop()
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
    def characters(self, chrs):
        if len(self.stack) > 0:
            self.stack[-1].content += chrs
def reportError(self, mesg):
locator = self.locator
sys.stderr.write('Doc: %s Line: %d Column: %d\n' % \
(locator.getSystemId(), locator.getLineNumber(),
locator.getColumnNumber() + 1))
sys.stderr.write(mesg)
sys.stderr.write('\n')
sys.exit(-1)
#raise RuntimeError
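# How the handler above assembles the object tree (a summary of the code, not
# of any external spec): startElement() pushes one SaxStackElement per tag,
# characters() accumulates text on the top of the stack, and endElement()
# pops, then either stores the accumulated text on the parent via a set*()
# call or attaches the popped child object via the parent's add*()/set*().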
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""
def usage():
    print(USAGE_TEXT)
sys.exit(-1)
def saxParse(inFileName):
parser = make_parser()
documentHandler = SaxXml_behaviorHandler()
    parser.setContentHandler(documentHandler)
parser.parse('file:%s' % inFileName)
root = documentHandler.getRoot()
sys.stdout.write('<?xml version="1.0" ?>\n')
root.export(sys.stdout, 0)
return root
def saxParseString(inString):
parser = make_parser()
documentHandler = SaxXml_behaviorHandler()
    parser.setContentHandler(documentHandler)
parser.feed(inString)
parser.close()
rootObj = documentHandler.getRoot()
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
return rootObj
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.childNodes[0]
rootObj = xml_behavior.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0)
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.childNodes[0]
rootObj = xml_behavior.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0)
return rootObj
def parseLiteral(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.childNodes[0]
rootObj = xml_behavior.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('from xmlbehavior import *\n\n')
sys.stdout.write('rootObj = xml_behavior(\n')
rootObj.exportLiteral(sys.stdout, 0)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 2 and args[0] == '-s':
saxParse(args[1])
elif len(args) == 1:
parseLiteral(args[0])
else:
usage()
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
import os
import boto.ec2
from mock import Mock, PropertyMock, patch
from moto import mock_ec2
from tornado.testing import AsyncTestCase, gen_test
from loadsbroker.tests.util import (clear_boto_context, load_boto_context,
create_image)
here_dir = os.path.dirname(os.path.abspath(__file__))
ec2_mocker = mock_ec2()
_OLD_CONTEXT = []
def setUp():
_OLD_CONTEXT[:] = list(clear_boto_context())
ec2_mocker.start()
create_image()
def tearDown():
ec2_mocker.stop()
load_boto_context(*_OLD_CONTEXT)
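# The module-level setUp/tearDown above clear any real boto credentials and
# start moto's in-process EC2 mock, so the broker tests below never talk to
# real AWS; tearDown stops the mock and restores the original context.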
class Test_broker(AsyncTestCase):
db_uri = "sqlite:////tmp/loads_test.db"
def _createFUT(self):
from loadsbroker.broker import Broker
return Broker("1234", self.io_loop, self.db_uri, None,
aws_use_filters=False, initial_db=None)
def test_broker_creation(self):
broker = self._createFUT()
self.assertNotEqual(broker, None)
broker.shutdown()
def test_broker_run_plan(self):
from tornado.concurrent import Future
# Setup all the mocks
mock_future = Mock(spec=Future)
# Setup the mock RunManager instance, and properties needed
mock_rm_inst = Mock()
mock_run = Mock()
type(mock_run).uuid = PropertyMock(return_value="asdf")
type(mock_rm_inst).run = PropertyMock(return_value=mock_run)
with patch('loadsbroker.broker.RunManager',
new_callable=Mock) as mock_rm:
broker = self._createFUT()
mock_rm.new_run.return_value = (mock_rm_inst, mock_future)
uuid = broker.run_plan("bleh", owner='tarek')
self.assertEqual(uuid, "asdf")
file_name = "/tmp/loads_test.db"
db_uri = "sqlite:///" + file_name
class Test_run_manager(AsyncTestCase):
def setUp(self):
super().setUp()
from loadsbroker.db import Database
from loadsbroker.db import setup_database
self.db = Database(db_uri, echo=True)
self.db_session = self.db.session()
setup_database(self.db_session, os.path.join(here_dir, "testdb.json"))
def tearDown(self):
super().tearDown()
import loadsbroker.aws
loadsbroker.aws.AWS_AMI_IDS = {k: {} for k in
loadsbroker.aws.AWS_REGIONS}
self.helpers = None
self.db = None
self.db_session = None
if os.path.exists(file_name):
os.remove(file_name)
async def _createFUT(self, plan_uuid=None, run_uuid=None):
from loadsbroker.broker import RunManager, RunHelpers
from loadsbroker.extensions import (
Docker, DNSMasq, InfluxDB, SSH, Telegraf, Watcher)
from loadsbroker.aws import EC2Pool
from loadsbroker.db import Plan, Run
if not plan_uuid:
plan_uuid = self.db_session.query(Plan).limit(1).one().uuid
region = "us-west-2"
# Setup the AMI we need available to make instances
conn = boto.ec2.connect_to_region(region)
reservation = conn.run_instances('ami-1234abcd',
instance_type='m1.small')
instance = reservation.instances[0]
conn.create_image(instance.id, "CoreOS stable")
kwargs = {}
kwargs["io_loop"] = self.io_loop
kwargs["use_filters"] = False
pool = EC2Pool("broker_1234", **kwargs)
await pool.ready
helpers = RunHelpers()
helpers.docker = Mock(spec=Docker)
helpers.dns = Mock(spec=DNSMasq)
helpers.influxdb = Mock(spec=InfluxDB)
helpers.telegraf = Mock(spec=Telegraf)
helpers.ssh = Mock(spec=SSH)
helpers.watcher = Mock(spec=Watcher)
async def return_none(*args, **kwargs):
return None
helpers.docker.setup_collection = return_none
helpers.docker.wait = return_none
helpers.docker.load_containers = return_none
self.helpers = helpers
run = Run.new_run(self.db_session, plan_uuid)
self.db_session.add(run)
self.db_session.commit()
rmg = RunManager(helpers, self.db_session, pool, self.io_loop, run)
return rmg
@gen_test(timeout=10)
async def test_create(self):
rm = await self._createFUT()
assert rm is not None
@gen_test(timeout=10)
async def test_initialize(self):
from loadsbroker.db import RUNNING, INITIALIZING
rm = await self._createFUT()
self.assertEqual(rm.state, INITIALIZING)
await rm._initialize()
self.assertEqual(rm.state, RUNNING)
@gen_test(timeout=10)
async def test_run(self):
from loadsbroker.db import (
RUNNING, INITIALIZING, TERMINATING, COMPLETED
)
rm = await self._createFUT()
self.assertEqual(rm.state, INITIALIZING)
await rm._initialize()
self.assertEqual(rm.state, RUNNING)
rm.sleep_time = 0.5
run_j = rm.run.json()
self.assertEqual(run_j['plan_id'], 1)
self.assertEqual(run_j['plan_name'], 'Single Server')
# Zero out extra calls
async def zero_out(*args, **kwargs):
return None
self.helpers.ssh.reload_sysctl = zero_out
self.helpers.dns.start = zero_out
self.helpers.watcher.start = zero_out
self.helpers.influxdb.start = zero_out
self.helpers.telegraf.start = zero_out
self.helpers.docker.run_containers = zero_out
self.helpers.docker.stop_containers = zero_out
self.helpers.dns.stop = zero_out
self.helpers.watcher.stop = zero_out
self.helpers.influxdb.stop = zero_out
self.helpers.telegraf.stop = zero_out
# Ensure instances all report as done after everything
# has been started
async def return_true(*args, **kwargs):
return not all([s.ec2_collection.started for s in rm._set_links])
self.helpers.docker.is_running = return_true
result = await rm._run()
self.assertEqual(rm.state, TERMINATING)
result = await rm._shutdown()
self.assertEqual(rm.state, COMPLETED)
self.assertEqual(result, None)
@gen_test(timeout=20)
async def test_abort(self):
from loadsbroker.db import (
RUNNING, INITIALIZING, TERMINATING
)
rm = await self._createFUT()
self.assertEqual(rm.state, INITIALIZING)
await rm._initialize()
self.assertEqual(rm.state, RUNNING)
rm.sleep_time = 0.5
# Zero out extra calls
async def zero_out(*args, **kwargs):
return None
self.helpers.ssh.reload_sysctl = zero_out
self.helpers.dns.start = zero_out
self.helpers.watcher.start = zero_out
self.helpers.influxdb.start = zero_out
self.helpers.telegraf.start = zero_out
self.helpers.docker.run_containers = zero_out
self.helpers.docker.stop_containers = zero_out
self.helpers.dns.stop = zero_out
self.helpers.watcher.stop = zero_out
self.helpers.influxdb.stop = zero_out
self.helpers.telegraf.stop = zero_out
# Ensure instances all report as done after everything
# has been started
async def return_true(*args, **kwargs):
all_started = all([s.ec2_collection.started
for s in rm._set_links])
if all_started:
rm.abort = True
return True
self.helpers.docker.is_running = return_true
result = await rm._run()
self.assertEqual(rm.state, TERMINATING)
self.assertEqual(result, None)
self.assertEqual([s.ec2_collection.finished for s in rm._set_links],
[False, False, False])
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
import testscenarios
from oslo import messaging
from oslo.messaging.notify import dispatcher
from oslo_config import cfg
from oslo_messaging.tests import utils as test_utils
from six.moves import mock
load_tests = testscenarios.load_tests_apply_scenarios
class RestartableServerThread(object):
def __init__(self, server):
self.server = server
self.thread = None
def start(self):
if self.thread is None:
self.thread = threading.Thread(target=self.server.start)
self.thread.daemon = True
self.thread.start()
def stop(self):
if self.thread is not None:
# Check start() does nothing with a running listener
self.server.start()
self.server.stop()
self.server.wait()
self.thread.join(timeout=15)
ret = self.thread.isAlive()
self.thread = None
return ret
return True
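# Note on stop() above: it returns True when no thread was running, and
# otherwise returns isAlive() after join(), so assertFalse(thread.stop()) in
# the tests below asserts both that a listener thread existed and that it
# shut down within the 15 second join timeout.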
class ListenerSetupMixin(object):
class ThreadTracker(object):
def __init__(self):
self._received_msgs = 0
self.threads = []
self.lock = threading.Lock()
def info(self, ctxt, publisher_id, event_type, payload, metadata):
            # NOTE(sileht): this runs in another thread
with self.lock:
self._received_msgs += 1
def wait_for_messages(self, expect_messages):
while self._received_msgs < expect_messages:
time.sleep(0.01)
def stop(self):
for thread in self.threads:
thread.stop()
self.threads = []
def start(self, thread):
self.threads.append(thread)
thread.start()
def setUp(self):
self.trackers = {}
self.addCleanup(self._stop_trackers)
def _stop_trackers(self):
for pool in self.trackers:
self.trackers[pool].stop()
self.trackers = {}
def _setup_listener(self, transport, endpoints,
targets=None, pool=None):
if pool is None:
tracker_name = '__default__'
else:
tracker_name = pool
if targets is None:
targets = [messaging.Target(topic='testtopic')]
tracker = self.trackers.setdefault(
tracker_name, self.ThreadTracker())
listener = messaging.get_notification_listener(
transport, targets=targets, endpoints=[tracker] + endpoints,
allow_requeue=True, pool=pool)
thread = RestartableServerThread(listener)
tracker.start(thread)
return thread
def wait_for_messages(self, expect_messages, tracker_name='__default__'):
self.trackers[tracker_name].wait_for_messages(expect_messages)
def _setup_notifier(self, transport, topic='testtopic',
publisher_id='testpublisher'):
return messaging.Notifier(transport, topic=topic,
driver='messaging',
publisher_id=publisher_id)
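# ListenerSetupMixin wires a ThreadTracker in front of the real endpoints so
# each test can block in wait_for_messages() until the listener thread has
# actually consumed the expected number of notifications before asserting.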
class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin):
def __init__(self, *args):
super(TestNotifyListener, self).__init__(*args)
ListenerSetupMixin.__init__(self)
def setUp(self):
super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts())
ListenerSetupMixin.setUp(self)
def test_constructor(self):
transport = messaging.get_transport(self.conf, url='fake:')
target = messaging.Target(topic='foo')
endpoints = [object()]
listener = messaging.get_notification_listener(transport, [target],
endpoints)
self.assertIs(listener.conf, self.conf)
self.assertIs(listener.transport, transport)
self.assertIsInstance(listener.dispatcher,
dispatcher.NotificationDispatcher)
self.assertIs(listener.dispatcher.endpoints, endpoints)
self.assertEqual('blocking', listener.executor)
def test_no_target_topic(self):
transport = messaging.get_transport(self.conf, url='fake:')
listener = messaging.get_notification_listener(transport,
[messaging.Target()],
[mock.Mock()])
try:
listener.start()
except Exception as ex:
self.assertIsInstance(ex, messaging.InvalidTarget, ex)
else:
            self.fail('InvalidTarget not raised')
def test_unknown_executor(self):
transport = messaging.get_transport(self.conf, url='fake:')
try:
messaging.get_notification_listener(transport, [], [],
executor='foo')
except Exception as ex:
self.assertIsInstance(ex, messaging.ExecutorLoadFailure)
self.assertEqual('foo', ex.executor)
else:
            self.fail('ExecutorLoadFailure not raised')
def test_one_topic(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint = mock.Mock()
endpoint.info.return_value = None
listener_thread = self._setup_listener(transport, [endpoint])
notifier = self._setup_notifier(transport)
notifier.info({}, 'an_event.start', 'test message')
self.wait_for_messages(1)
self.assertFalse(listener_thread.stop())
endpoint.info.assert_called_once_with(
{}, 'testpublisher', 'an_event.start', 'test message',
{'message_id': mock.ANY, 'timestamp': mock.ANY})
def test_two_topics(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint = mock.Mock()
endpoint.info.return_value = None
targets = [messaging.Target(topic="topic1"),
messaging.Target(topic="topic2")]
listener_thread = self._setup_listener(transport, [endpoint],
targets=targets)
notifier = self._setup_notifier(transport, topic='topic1')
notifier.info({'ctxt': '1'}, 'an_event.start1', 'test')
notifier = self._setup_notifier(transport, topic='topic2')
notifier.info({'ctxt': '2'}, 'an_event.start2', 'test')
self.wait_for_messages(2)
self.assertFalse(listener_thread.stop())
endpoint.info.assert_has_calls([
mock.call({'ctxt': '1'}, 'testpublisher',
'an_event.start1', 'test',
{'timestamp': mock.ANY, 'message_id': mock.ANY}),
mock.call({'ctxt': '2'}, 'testpublisher',
'an_event.start2', 'test',
{'timestamp': mock.ANY, 'message_id': mock.ANY})],
any_order=True)
def test_two_exchanges(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint = mock.Mock()
endpoint.info.return_value = None
targets = [messaging.Target(topic="topic",
exchange="exchange1"),
messaging.Target(topic="topic",
exchange="exchange2")]
listener_thread = self._setup_listener(transport, [endpoint],
targets=targets)
notifier = self._setup_notifier(transport, topic="topic")
def mock_notifier_exchange(name):
def side_effect(target, ctxt, message, version, retry):
target.exchange = name
return transport._driver.send_notification(target, ctxt,
message, version,
retry=retry)
transport._send_notification = mock.MagicMock(
side_effect=side_effect)
notifier.info({'ctxt': '0'},
'an_event.start', 'test message default exchange')
mock_notifier_exchange('exchange1')
notifier.info({'ctxt': '1'},
'an_event.start', 'test message exchange1')
mock_notifier_exchange('exchange2')
notifier.info({'ctxt': '2'},
'an_event.start', 'test message exchange2')
self.wait_for_messages(2)
self.assertFalse(listener_thread.stop())
endpoint.info.assert_has_calls([
mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start',
'test message exchange1',
{'timestamp': mock.ANY, 'message_id': mock.ANY}),
mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start',
'test message exchange2',
{'timestamp': mock.ANY, 'message_id': mock.ANY})],
any_order=True)
def test_two_endpoints(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint1 = mock.Mock()
endpoint1.info.return_value = None
endpoint2 = mock.Mock()
endpoint2.info.return_value = messaging.NotificationResult.HANDLED
listener_thread = self._setup_listener(transport,
[endpoint1, endpoint2])
notifier = self._setup_notifier(transport)
notifier.info({}, 'an_event.start', 'test')
self.wait_for_messages(1)
self.assertFalse(listener_thread.stop())
endpoint1.info.assert_called_once_with(
{}, 'testpublisher', 'an_event.start', 'test', {
'timestamp': mock.ANY,
'message_id': mock.ANY})
endpoint2.info.assert_called_once_with(
{}, 'testpublisher', 'an_event.start', 'test', {
'timestamp': mock.ANY,
'message_id': mock.ANY})
def test_requeue(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint = mock.Mock()
endpoint.info = mock.Mock()
def side_effect_requeue(*args, **kwargs):
if endpoint.info.call_count == 1:
return messaging.NotificationResult.REQUEUE
return messaging.NotificationResult.HANDLED
endpoint.info.side_effect = side_effect_requeue
listener_thread = self._setup_listener(transport, [endpoint])
notifier = self._setup_notifier(transport)
notifier.info({}, 'an_event.start', 'test')
self.wait_for_messages(2)
self.assertFalse(listener_thread.stop())
endpoint.info.assert_has_calls([
mock.call({}, 'testpublisher', 'an_event.start', 'test',
{'timestamp': mock.ANY, 'message_id': mock.ANY}),
mock.call({}, 'testpublisher', 'an_event.start', 'test',
{'timestamp': mock.ANY, 'message_id': mock.ANY})])
def test_two_pools(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint1 = mock.Mock()
endpoint1.info.return_value = None
endpoint2 = mock.Mock()
endpoint2.info.return_value = None
targets = [messaging.Target(topic="topic")]
listener1_thread = self._setup_listener(transport, [endpoint1],
targets=targets, pool="pool1")
listener2_thread = self._setup_listener(transport, [endpoint2],
targets=targets, pool="pool2")
notifier = self._setup_notifier(transport, topic="topic")
notifier.info({'ctxt': '0'}, 'an_event.start', 'test message0')
notifier.info({'ctxt': '1'}, 'an_event.start', 'test message1')
self.wait_for_messages(2, "pool1")
self.wait_for_messages(2, "pool2")
self.assertFalse(listener2_thread.stop())
self.assertFalse(listener1_thread.stop())
def mocked_endpoint_call(i):
return mock.call({'ctxt': '%d' % i}, 'testpublisher',
'an_event.start', 'test message%d' % i,
{'timestamp': mock.ANY, 'message_id': mock.ANY})
endpoint1.info.assert_has_calls([mocked_endpoint_call(0),
mocked_endpoint_call(1)])
endpoint2.info.assert_has_calls([mocked_endpoint_call(0),
mocked_endpoint_call(1)])
def test_two_pools_three_listener(self):
transport = messaging.get_transport(self.conf, url='fake:')
endpoint1 = mock.Mock()
endpoint1.info.return_value = None
endpoint2 = mock.Mock()
endpoint2.info.return_value = None
endpoint3 = mock.Mock()
endpoint3.info.return_value = None
targets = [messaging.Target(topic="topic")]
listener1_thread = self._setup_listener(transport, [endpoint1],
targets=targets, pool="pool1")
listener2_thread = self._setup_listener(transport, [endpoint2],
targets=targets, pool="pool2")
listener3_thread = self._setup_listener(transport, [endpoint3],
targets=targets, pool="pool2")
def mocked_endpoint_call(i):
return mock.call({'ctxt': '%d' % i}, 'testpublisher',
'an_event.start', 'test message%d' % i,
{'timestamp': mock.ANY, 'message_id': mock.ANY})
notifier = self._setup_notifier(transport, topic="topic")
mocked_endpoint1_calls = []
for i in range(0, 25):
notifier.info({'ctxt': '%d' % i}, 'an_event.start',
'test message%d' % i)
mocked_endpoint1_calls.append(mocked_endpoint_call(i))
self.wait_for_messages(25, 'pool2')
listener2_thread.stop()
for i in range(0, 25):
notifier.info({'ctxt': '%d' % i}, 'an_event.start',
'test message%d' % i)
mocked_endpoint1_calls.append(mocked_endpoint_call(i))
self.wait_for_messages(50, 'pool2')
listener2_thread.start()
listener3_thread.stop()
for i in range(0, 25):
notifier.info({'ctxt': '%d' % i}, 'an_event.start',
'test message%d' % i)
mocked_endpoint1_calls.append(mocked_endpoint_call(i))
self.wait_for_messages(75, 'pool2')
listener3_thread.start()
for i in range(0, 25):
notifier.info({'ctxt': '%d' % i}, 'an_event.start',
'test message%d' % i)
mocked_endpoint1_calls.append(mocked_endpoint_call(i))
self.wait_for_messages(100, 'pool1')
self.wait_for_messages(100, 'pool2')
self.assertFalse(listener3_thread.stop())
self.assertFalse(listener2_thread.stop())
self.assertFalse(listener1_thread.stop())
self.assertEqual(100, endpoint1.info.call_count)
endpoint1.info.assert_has_calls(mocked_endpoint1_calls)
self.assertLessEqual(25, endpoint2.info.call_count)
self.assertLessEqual(25, endpoint3.info.call_count)
self.assertEqual(100, endpoint2.info.call_count +
endpoint3.info.call_count)
for call in mocked_endpoint1_calls:
self.assertIn(call, endpoint2.info.mock_calls +
endpoint3.info.mock_calls)
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Layer1 of DynamoDB v2
"""
import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.layer1 import DynamoDBConnection
class DynamoDBv2Layer1Test(unittest.TestCase):
dynamodb = True
def setUp(self):
self.dynamodb = DynamoDBConnection()
self.table_name = 'test-%d' % int(time.time())
self.hash_key_name = 'username'
self.hash_key_type = 'S'
self.range_key_name = 'date_joined'
self.range_key_type = 'N'
self.read_units = 5
self.write_units = 5
self.attributes = [
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
{
'AttributeName': self.range_key_name,
'AttributeType': self.range_key_type,
}
]
self.schema = [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
]
self.provisioned_throughput = {
'ReadCapacityUnits': self.read_units,
'WriteCapacityUnits': self.write_units,
}
self.lsi = [
{
'IndexName': 'MostRecentIndex',
'KeySchema': [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
]
def create_table(self, table_name, attributes, schema,
provisioned_throughput, lsi=None, wait=True):
        # Note: create_table takes its arguments in a slightly
        # counter-intuitive order (attributes before table_name).
result = self.dynamodb.create_table(
attributes,
table_name,
schema,
provisioned_throughput,
local_secondary_indexes=lsi
)
self.addCleanup(self.dynamodb.delete_table, table_name)
if wait:
while True:
description = self.dynamodb.describe_table(table_name)
if description['Table']['TableStatus'].lower() == 'active':
return result
else:
time.sleep(5)
else:
return result
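    # (The wait loop above polls describe_table until TableStatus reaches
    # ACTIVE: DynamoDB table creation is asynchronous, and requests issued
    # against a table that is still CREATING would fail.)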
def test_integrated(self):
result = self.create_table(
self.table_name,
self.attributes,
self.schema,
self.provisioned_throughput,
self.lsi
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Get the data.
record_1 = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
}, consistent_read=True)
self.assertEqual(record_1['Item']['username']['S'], 'johndoe')
self.assertEqual(record_1['Item']['first_name']['S'], 'John')
self.assertEqual(record_1['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
# Now in a batch.
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'jane'},
'first_name': {'S': 'Jane'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056789'},
'friend_count': {'N': '1'},
'friends': {'SS': ['johndoe']},
},
},
},
]
})
# Now a query.
lsi_results = self.dynamodb.query(
self.table_name,
index_name='MostRecentIndex',
key_conditions={
'username': {
'AttributeValueList': [
{'S': 'johndoe'},
],
'ComparisonOperator': 'EQ',
},
},
consistent_read=True
)
self.assertEqual(lsi_results['Count'], 1)
results = self.dynamodb.query(self.table_name, key_conditions={
'username': {
'AttributeValueList': [
{'S': 'jane'},
],
'ComparisonOperator': 'EQ',
},
'date_joined': {
'AttributeValueList': [
{'N': '1366050000'}
],
'ComparisonOperator': 'GT',
}
}, consistent_read=True)
self.assertEqual(results['Count'], 1)
# Now a scan.
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 2)
s_items = sorted([res['username']['S'] for res in results['Items']])
self.assertEqual(s_items, ['jane', 'johndoe'])
self.dynamodb.delete_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
})
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 1)
# Parallel scan (minus client-side threading).
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'johndoe'},
'first_name': {'S': 'Johann'},
'last_name': {'S': 'Does'},
'date_joined': {'N': '1366058000'},
'friend_count': {'N': '1'},
'friends': {'SS': ['jane']},
},
                    },
                },
                {
                    'PutRequest': {
'Item': {
'username': {'S': 'alice'},
'first_name': {'S': 'Alice'},
'last_name': {'S': 'Expert'},
'date_joined': {'N': '1366056800'},
'friend_count': {'N': '2'},
'friends': {'SS': ['johndoe', 'jane']},
},
},
},
]
})
time.sleep(20)
results = self.dynamodb.scan(self.table_name, segment=0, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
results = self.dynamodb.scan(self.table_name, segment=1, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
def test_without_range_key(self):
result = self.create_table(
self.table_name,
[
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
],
[
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
],
self.provisioned_throughput
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Now try a range-less get.
johndoe = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
}, consistent_read=True)
self.assertEqual(johndoe['Item']['username']['S'], 'johndoe')
self.assertEqual(johndoe['Item']['first_name']['S'], 'John')
self.assertEqual(johndoe['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import hashlib
import httplib
import logging
import socket
import StringIO
import struct
import urlparse
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
import OpenSSL
from glanceclient import exc
from glanceclient.common import utils
from glanceclient.openstack.common import strutils
try:
from eventlet import patcher
# Handle case where we are running in a monkey patched environment
if patcher.is_monkey_patched('socket'):
from eventlet.green.httplib import HTTPSConnection
from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
from eventlet.greenio import GreenSocket
# TODO(mclaren): A getsockopt workaround: see 'getsockopt' doc string
GreenSocket.getsockopt = utils.getsockopt
else:
raise ImportError
except ImportError:
from httplib import HTTPSConnection
from OpenSSL.SSL import Connection as Connection
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-glanceclient'
CHUNKSIZE = 1024 * 64 # 64kB
class HTTPClient(object):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
endpoint_parts = self.parse_endpoint(self.endpoint)
self.endpoint_scheme = endpoint_parts.scheme
self.endpoint_hostname = endpoint_parts.hostname
self.endpoint_port = endpoint_parts.port
self.endpoint_path = endpoint_parts.path
self.connection_class = self.get_connection_class(self.endpoint_scheme)
self.connection_kwargs = self.get_connection_kwargs(
self.endpoint_scheme, **kwargs)
self.identity_headers = kwargs.get('identity_headers')
self.auth_token = kwargs.get('token')
if self.identity_headers:
if self.identity_headers.get('X-Auth-Token'):
self.auth_token = self.identity_headers.get('X-Auth-Token')
del self.identity_headers['X-Auth-Token']
@staticmethod
def parse_endpoint(endpoint):
return urlparse.urlparse(endpoint)
@staticmethod
def get_connection_class(scheme):
if scheme == 'https':
return VerifiedHTTPSConnection
else:
return httplib.HTTPConnection
@staticmethod
def get_connection_kwargs(scheme, **kwargs):
_kwargs = {'timeout': float(kwargs.get('timeout', 600))}
if scheme == 'https':
_kwargs['cacert'] = kwargs.get('cacert', None)
_kwargs['cert_file'] = kwargs.get('cert_file', None)
_kwargs['key_file'] = kwargs.get('key_file', None)
_kwargs['insecure'] = kwargs.get('insecure', False)
_kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
return _kwargs
def get_connection(self):
_class = self.connection_class
try:
return _class(self.endpoint_hostname, self.endpoint_port,
**self.connection_kwargs)
except httplib.InvalidURL:
raise exc.InvalidEndpoint()
def log_curl_request(self, method, url, kwargs):
curl = ['curl -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % (key, value)
curl.append(header)
conn_params_fmt = [
('key_file', '--key %s'),
('cert_file', '--cert %s'),
('cacert', '--cacert %s'),
]
for (key, fmt) in conn_params_fmt:
value = self.connection_kwargs.get(key)
if value:
curl.append(fmt % value)
if self.connection_kwargs.get('insecure'):
curl.append('-k')
if kwargs.get('body') is not None:
curl.append('-d \'%s\'' % kwargs['body'])
curl.append('%s%s' % (self.endpoint, url))
LOG.debug(strutils.safe_encode(' '.join(curl)))
@staticmethod
def log_http_response(resp, body=None):
status = (resp.version / 10.0, resp.status, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.getheaders()])
dump.append('')
if body:
dump.extend([body, ''])
LOG.debug(strutils.safe_encode('\n'.join(dump)))
@staticmethod
def encode_headers(headers):
"""Encodes headers.
Note: This should be used right before
sending anything out.
:param headers: Headers to encode
:returns: Dictionary with encoded headers'
names and values
"""
to_str = strutils.safe_encode
return dict([(to_str(h), to_str(v)) for h, v in headers.iteritems()])
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
if self.identity_headers:
for k, v in self.identity_headers.iteritems():
kwargs['headers'].setdefault(k, v)
self.log_curl_request(method, url, kwargs)
conn = self.get_connection()
# Note(flaper87): Before letting headers / url fly,
# they should be encoded otherwise httplib will
# complain. If we decide to rely on python-request
# this wont be necessary anymore.
kwargs['headers'] = self.encode_headers(kwargs['headers'])
try:
if self.endpoint_path:
url = urlparse.urljoin(self.endpoint_path, url)
conn_url = urlparse.urlsplit(url).geturl()
# Note(flaper87): Ditto, headers / url
# encoding to make httplib happy.
conn_url = strutils.safe_encode(conn_url)
if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
conn.putrequest(method, conn_url)
for header, value in kwargs['headers'].items():
conn.putheader(header, value)
conn.endheaders()
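                # Stream the body as HTTP/1.1 chunks: each chunk is framed
                # as "<hex length>\r\n<data>\r\n" and the stream ends with
                # the zero-length terminating chunk "0\r\n\r\n".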
chunk = kwargs['body'].read(CHUNKSIZE)
# Chunk it, baby...
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = kwargs['body'].read(CHUNKSIZE)
conn.send('0\r\n\r\n')
else:
conn.request(method, conn_url, **kwargs)
resp = conn.getresponse()
except socket.gaierror as e:
message = "Error finding address for %s: %s" % (
self.endpoint_hostname, e)
raise exc.InvalidEndpoint(message=message)
except (socket.error, socket.timeout) as e:
endpoint = self.endpoint
message = "Error communicating with %(endpoint)s %(e)s" % locals()
raise exc.CommunicationError(message=message)
body_iter = ResponseBodyIterator(resp)
# Read body into string if it isn't obviously image data
if resp.getheader('content-type', None) != 'application/octet-stream':
body_str = ''.join([chunk for chunk in body_iter])
self.log_http_response(resp, body_str)
body_iter = StringIO.StringIO(body_str)
        else:
            body_str = None
            self.log_http_response(resp)
if 400 <= resp.status < 600:
LOG.error("Request returned failure status.")
raise exc.from_response(resp, body_str)
elif resp.status in (301, 302, 305):
# Redirected. Reissue the request to the new location.
            return self._http_request(resp.getheader('location'), method,
                                      **kwargs)
elif resp.status == 300:
raise exc.from_response(resp)
return resp, body_iter
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
if 'body' in kwargs:
kwargs['body'] = json.dumps(kwargs['body'])
resp, body_iter = self._http_request(url, method, **kwargs)
        if 'application/json' in resp.getheader('content-type', ''):
body = ''.join([chunk for chunk in body_iter])
try:
body = json.loads(body)
except ValueError:
LOG.error('Could not decode response body as JSON')
else:
body = None
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
if 'body' in kwargs:
if (hasattr(kwargs['body'], 'read')
and method.lower() in ('post', 'put')):
# We use 'Transfer-Encoding: chunked' because
# body size may not always be known in advance.
kwargs['headers']['Transfer-Encoding'] = 'chunked'
return self._http_request(url, method, **kwargs)
class OpenSSLConnectionDelegator(object):
"""
An OpenSSL.SSL.Connection delegator.
Supplies an additional 'makefile' method which httplib requires
and is not present in OpenSSL.SSL.Connection.
Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
self.connection = Connection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)
def makefile(self, *args, **kwargs):
# Making sure socket is closed when this file is closed
# since we now avoid closing socket on connection close
# see new close method under VerifiedHTTPSConnection
kwargs['close'] = True
return socket._fileobject(self.connection, *args, **kwargs)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Extended HTTPSConnection which uses the OpenSSL library
for enhanced SSL support.
Note: Much of this functionality can eventually be replaced
with native Python 3.3 code.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
cacert=None, timeout=None, insecure=False,
ssl_compression=True):
HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
self.timeout = timeout
self.insecure = insecure
self.ssl_compression = ssl_compression
self.cacert = cacert
self.setcontext()
@staticmethod
def host_matches_cert(host, x509):
"""
        Verify that the x509 certificate we have received
        from 'host' correctly identifies the server we are
        connecting to, i.e. that the certificate's Common Name
        or a Subject Alternative Name matches 'host'.
"""
common_name = x509.get_subject().commonName
# First see if we can match the CN
if common_name == host:
return True
# Support single wildcard matching
if common_name.startswith('*.') and host.find('.') > 0:
if common_name[2:] == host.split('.', 1)[1]:
return True
# Also try Subject Alternative Names for a match
san_list = None
for i in xrange(x509.get_extension_count()):
ext = x509.get_extension(i)
if ext.get_short_name() == 'subjectAltName':
san_list = str(ext)
for san in ''.join(san_list.split()).split(','):
if san == "DNS:%s" % host:
return True
# Server certificate does not match host
msg = ('Host "%s" does not match x509 certificate contents: '
'CommonName "%s"' % (host, common_name))
if san_list is not None:
msg = msg + ', subjectAltName "%s"' % san_list
raise exc.SSLCertificateError(msg)
def verify_callback(self, connection, x509, errnum,
depth, preverify_ok):
# NOTE(leaman): preverify_ok may be a non-boolean type
preverify_ok = bool(preverify_ok)
if x509.has_expired():
msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
raise exc.SSLCertificateError(msg)
if depth == 0 and preverify_ok:
# We verify that the host matches against the last
# certificate in the chain
return self.host_matches_cert(self.host, x509)
else:
# Pass through OpenSSL's default result
return preverify_ok
def setcontext(self):
"""
Set up the OpenSSL context.
"""
self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
if self.ssl_compression is False:
self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION
if self.insecure is not True:
self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
self.verify_callback)
else:
self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
lambda *args: True)
if self.cert_file:
try:
self.context.use_certificate_file(self.cert_file)
except Exception as e:
msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
raise exc.SSLConfigurationError(msg)
if self.key_file is None:
# We support having key and cert in same file
try:
self.context.use_privatekey_file(self.cert_file)
except Exception as e:
msg = ('No key file specified and unable to load key '
'from "%s" %s' % (self.cert_file, e))
raise exc.SSLConfigurationError(msg)
if self.key_file:
try:
self.context.use_privatekey_file(self.key_file)
except Exception as e:
msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
raise exc.SSLConfigurationError(msg)
if self.cacert:
try:
self.context.load_verify_locations(self.cacert)
except Exception as e:
                msg = 'Unable to load CA from "%s" %s' % (self.cacert, e)
raise exc.SSLConfigurationError(msg)
else:
self.context.set_default_verify_paths()
def connect(self):
"""
Connect to an SSL port using the OpenSSL library and apply
per-connection parameters.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
            # struct timeval: timeout seconds, 0 microseconds
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
struct.pack('fL', self.timeout, 0))
self.sock = OpenSSLConnectionDelegator(self.context, sock)
self.sock.connect((self.host, self.port))
def close(self):
if self.sock:
# Removing reference to socket but don't close it yet.
# Response close will close both socket and associated
# file. Closing socket too soon will cause response
# reads to fail with socket IO error 'Bad file descriptor'.
self.sock = None
# Calling close on HTTPConnection to continue doing that cleanup.
HTTPSConnection.close(self)
class ResponseBodyIterator(object):
"""
A class that acts as an iterator over an HTTP response.
This class will also check response body integrity when iterating over
the instance and if a checksum was supplied using `set_checksum` method,
else by default the class will not do any integrity check.
"""
def __init__(self, resp):
self._resp = resp
self._checksum = None
self._size = int(resp.getheader('content-length', 0))
self._end_reached = False
def set_checksum(self, checksum):
"""
Set checksum to check against when iterating over this instance.
:raise: AttributeError if iterator is already consumed.
"""
if self._end_reached:
raise AttributeError("Can't set checksum for an already consumed"
" iterator")
self._checksum = checksum
def __len__(self):
return int(self._size)
def __iter__(self):
md5sum = hashlib.md5()
while True:
try:
chunk = self.next()
except StopIteration:
self._end_reached = True
# NOTE(mouad): Check image integrity when the end of response
# body is reached.
md5sum = md5sum.hexdigest()
if self._checksum is not None and md5sum != self._checksum:
raise IOError(errno.EPIPE,
'Corrupted image. Checksum was %s '
'expected %s' % (md5sum, self._checksum))
raise
else:
yield chunk
md5sum.update(chunk)
def next(self):
chunk = self._resp.read(CHUNKSIZE)
if chunk:
return chunk
else:
raise StopIteration()
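# A hedged usage sketch (not part of the original module): streaming a
# response through ResponseBodyIterator while verifying its integrity.
# The client call and checksum value below are illustrative placeholders.
#
#     resp, _ = client.raw_request('GET', '/v1/images/IMAGE_ID')
#     body = ResponseBodyIterator(resp)
#     body.set_checksum('2f1f...')       # expected md5 hex digest
#     for chunk in body:                 # IOError(errno.EPIPE) is raised
#         image_file.write(chunk)        # on mismatch at end of body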
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
import testtools
from webob import exc
import webtest
from neutron.api import extensions
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
_get_path = test_base._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(TARGET_PLUGIN)
cfg.CONF.set_override(
'quota_items',
['network', 'subnet', 'port', 'extra1'],
group='QUOTAS')
quota.QUOTAS = quota.QuotaEngine()
quota.register_resources_from_config()
self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ['quotas']
# QUOTAS will register the items in conf when starting
# extra1 here is added later, so have to do it manually
quota.QUOTAS.register_resource_by_name('extra1')
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
app = config.load_paste_app('extensions_test_app')
ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.api = webtest.TestApp(ext_middleware)
def tearDown(self):
self.api = None
self.plugin = None
super(QuotaExtensionTestCase, self).tearDown()
def _test_quota_default_values(self, expected_values):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
for resource, expected_value in expected_values.items():
self.assertEqual(expected_value,
quota['quota'][resource])
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.db.quota_db.DbQuotaDriver',
group='QUOTAS')
super(QuotaExtensionDbTestCase, self).setUp()
def test_quotas_loaded_right(self):
res = self.api.get(_get_path('quotas', fmt=self.fmt))
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
self.assertEqual(200, res.status_int)
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
cfg.CONF.set_override(
'quota_network', -10, group='QUOTAS')
cfg.CONF.set_override(
'quota_subnet', -50, group='QUOTAS')
self._test_quota_default_values(
{'network': -1,
'subnet': -1,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_show_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_list_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
def test_list_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_with_non_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_negative_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_out_of_range_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_non_support_resource_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
    def test_quotas_loaded_bad_returns_404(self):
        res = self.api.get(_get_path('quotas'), expect_errors=True)
        self.assertEqual(404, res.status_int)
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_limit_check_with_not_registered_resource_fails(self):
tenant_id = 'tenant_id1'
self.assertRaises(exceptions.QuotaResourceUnknown,
quota.QUOTAS.limit_check,
context.get_admin_context(load_admin_roles=False),
tenant_id,
foobar=1)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.quota.ConfDriver',
group='QUOTAS')
super(QuotaExtensionCfgTestCase, self).setUp()
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
def test_show_quotas_without_admin_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quotas_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
class TestDbQuotaDriver(base.BaseTestCase):
"""Test for neutron.db.quota_db.DbQuotaDriver."""
def test_get_tenant_quotas_arg(self):
"""Call neutron.db.quota_db.DbQuotaDriver._get_quotas."""
driver = quota_db.DbQuotaDriver()
ctx = context.Context('', 'bar')
foo_quotas = {'network': 5}
default_quotas = {'network': 10}
target_tenant = 'foo'
with mock.patch.object(quota_db.DbQuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
target_tenant,
default_quotas)
self.assertEqual(quotas, foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
class TestQuotaDriverLoad(base.BaseTestCase):
def setUp(self):
super(TestQuotaDriverLoad, self).setUp()
# Make sure QuotaEngine is reinitialized in each test.
quota.QUOTAS._driver = None
def _test_quota_driver(self, cfg_driver, loaded_driver,
with_quota_db_module=True):
cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
with mock.patch.dict(sys.modules, {}):
if (not with_quota_db_module and
'neutron.db.quota_db' in sys.modules):
del sys.modules['neutron.db.quota_db']
driver = quota.QUOTAS.get_driver()
self.assertEqual(loaded_driver, driver.__class__.__name__)
def test_quota_db_driver_with_quotas_table(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'DbQuotaDriver', True)
def test_quota_db_driver_fallback_conf_driver(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'ConfDriver', False)
def test_quota_conf_driver(self):
self._test_quota_driver('neutron.quota.ConfDriver',
'ConfDriver', True)
| |
__author__ = 'stephanie, denver'
import pandas as pd
import numpy as np
from collections import namedtuple
import logging
import datetime
from dateutil.parser import parse
from src.handlers.csvHandler import CSVReader
from src.controllers.Database import Database
logger = logging.getLogger('SDL_logger')
class Mapper():
'''
Class representing a mapped pandas dataframe object.
Attributes:
rawData : pandas.core.frame.DataFrame - Raw CSV data from the
given file.
        tables : list of (column, DataFrame) tuples - The finished
            tables to write to the database.
mapping : dict - The YAML configuration mapping.
dbWriter : Database - The database object that will be used.
'''
def __init__(self, configDict):
self.tables = [] # Empty
self.mapping = configDict
self.rawData = pd.DataFrame # Empty
self.dbWriter = Database()
self.performDuplicateValueChecks = False
def isTimeToRun(self):
"""
        isTimeToRun is a public method that checks whether this
        configuration is due to run, based on its schedule.
"""
startDate = parse(str(self.mapping['Schedule']['Beginning']))
lastUpdate = str(self.mapping['Schedule']['LastUpdate'])
if str(lastUpdate) == '--':
lastUpdate = parse(str(self.mapping['Schedule']['Beginning']))
else:
lastUpdate = parse(str(self.mapping['Schedule']['LastUpdate']))
print "last updated:", lastUpdate
period = self.mapping['Schedule']['Time']
periodUnit = self.mapping['Schedule']['Frequency']
# Get period in terms of seconds.
if periodUnit == 'Week':
period = period * 604800
elif periodUnit == 'Day':
period = period * 86400
elif periodUnit == 'Hour':
period = period * 3600
elif periodUnit == 'Minute':
period = period * 60
elif periodUnit == 'Second':
period = period * 1
else:
logger.error("Unknown frequency: %s" % periodUnit)
return False
now = parse(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'))
print "current time:", now
delta = lastUpdate + datetime.timedelta(seconds=period)
print "Due:", delta
if startDate > now:
logger.info("Not beginning until %s" % startDate)
return False
if delta <= now:
self.recordLastUpdate(now)
return True
else:
logger.info("Not running until %s" % delta)
return False
def recordLastUpdate(self, time):
"""
Updates the "LastUpdate" atribute of the config file.
"""
self.mapping['Schedule']['LastUpdate'] = str(time)
def getDatabase(self):
'''
getDatabase is a public method that should be called before
interacting with the database. It snatches up the database
credentials from the YAML file object (mapping) and returns
a named-tuple called Credentials.
'''
        Credentials = namedtuple(
            'Credentials', 'engine host db_name uid pwd')
return self.dbWriter.createConnection(Credentials(\
self.mapping['Database']['Engine'],
self.mapping['Database']['Address'],
self.mapping['Database']['DatabaseName'],
self.mapping['Database']['UserName'],
self.mapping['Database']['Password']))
def map(self):
'''
This public method begins the process of mapping the data
into something that we can write to the database. First,
the file is read from the config file (mapping), if that
works, we begin to build a new Pandas dataframe.
'''
#if self._readFile(self.mapping['Settings']['FileLocation']):
try:
if self._readFile(self.mapping['Settings']['FileLocation']):
self._buildTables()
#self._buildTables()
except KeyError as e:
logger.error("Unable to create mapping. Check your data & configuration files. %s" %e)
return False
return True
def _readFile(self, path):
'''
        _readFile gathers the raw data (rawData) from the given CSV
        file (path). Returns False if rawData ends up empty,
        True otherwise.
'''
reader = CSVReader()
# Collect the smallest 'LastByteRead' in the file.
byte = self._getStartByte()
logger.debug('Last successful byte read: %s' % byte)
logger.info("Data file location: '%s'" % path)
self.rawData = reader.byteReader(path,
start_byte=byte,
datecol=self.mapping['Settings']['DateTimeColumnName'],
sep=self.mapping['Settings']['Delimiter'],
header=self.mapping['Settings']['HeaderRowPosition'],
dataBegin=self.mapping['Settings']['DataRowPosition'])
if self.rawData.empty:
return False
else:
return True
def _getStartByte(self):
'''
getStartByte is a private method which determines the smallest
byte number of the given columns.
'''
m = [value['LastByteRead'] for key,value in \
self.mapping['Mappings'].items()]
# If all of the bytes are the same, don't do a
# duplicate values check when writing to database.
        self.performDuplicateValueChecks = len(set(m)) > 1
# Starting byte will be the lowest value.
startByte = int(min(m))
# But if it's less than 0 (AKA running in restart mode)
# then just set it to 0.
if startByte < 0:
startByte = 0
return startByte
def _buildTables(self):
'''
buildTables creates a brand new Pandas dataframe (tables),
gathering the needed columns from the YAML file (mapping)
and raw data (rawData).
'''
for col, series in self.mapping['Mappings'].iteritems():
logger.info("*********doing column: %s" % col)
df = self.rawData[col].reset_index()
df.columns = ['ValueDateTime', 'DataValue']
if series['CalculateAggInterval'] not in [False, 'False', 'false']:
df['TimeAggregationInterval'] = 0
df['TimeAggregationIntervalUnitsID'] = 102
else:
df['TimeAggregationInterval'] = series['IntendedTimeSpacing']
df['TimeAggregationIntervalUnitsID'] = series['IntendedTimeSpacingUnitID']
# FIXME change to none when released.
# SDL Test Data is just for our testing purposes.
df['QualityCodeCV'] = 'None'
# df['QualityCodeCV'] = 'SDL Test Data'
df['CensorCodeCV'] = 'Not censored'
df['ResultID'] = series['ResultID']
df['ValueDateTimeUTCOffset'] = self.mapping['Settings']['UTCOffset']
noDataValue = self._getNoDataValue(df['ResultID'][0])
df = df.replace(to_replace=[np.nan, '-INF'],
value=[noDataValue, noDataValue],
regex=True)
replaced = df.isin([noDataValue]).sum().sum()
#print "BEFORE",df.ValueDateTime
df.ValueDateTime = pd.to_datetime(\
pd.Series(df.ValueDateTime))
#print "AFTER",df.ValueDateTime
self.tables.append((col, df))
logger.info('Result: %s Total Values: %d, Non-data Values: %d' % (col, len(df), replaced))
def _getNoDataValue(self, resultID):
'''
_getNoDataValue is a wrapper method to the database object's
getNoDataValue method.
'''
#print "-------resultID", resultID
return self.dbWriter.getNoDataValue(resultID)
def getTables(self):
'''
getTables is a public method that returns a Pandas dataframe.
It should be called after a mapping has been made.
'''
#logger.info("TABLES: %s" % self.tables)
return self.tables
def save(self, data):
'''
save is a public method that wraps the Database.write method.
'''
return self.dbWriter.write(data,
self.performDuplicateValueChecks)
def updateDateTime(self, seriesId, dateTime):
'''
updateDateTime is a public method that wraps the
Database.updateDateTime method. It's part of this module
        because the database connection is only accessible through
here.
'''
return self.dbWriter.updateDateTime(seriesId, dateTime)
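# A minimal configuration sketch (assumed shape, inferred from the keys
# read by the methods above; all field values are illustrative only):
#
# EXAMPLE_CONFIG = {
#     'Schedule': {'Beginning': '2015-01-01 00:00:00',
#                  'LastUpdate': '--',      # '--' means never run yet
#                  'Time': 1, 'Frequency': 'Hour'},
#     'Database': {'Engine': 'mssql', 'Address': 'localhost',
#                  'DatabaseName': 'ODM2', 'UserName': 'user',
#                  'Password': 'secret'},
#     'Settings': {'FileLocation': '/data/logger.csv',
#                  'DateTimeColumnName': 'DateTime', 'Delimiter': ',',
#                  'HeaderRowPosition': 0, 'DataRowPosition': 1,
#                  'UTCOffset': -7},
#     'Mappings': {'Temp_C': {'ResultID': 1, 'LastByteRead': 0,
#                             'CalculateAggInterval': False,
#                             'IntendedTimeSpacing': 30,
#                             'IntendedTimeSpacingUnitID': 102}},
# }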
| |
# encoding: utf-8
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import os.path
import argparse
import json
import logging
import props
from Singleton import Singleton
from imgfac.Version import VERSION as VERSION
from urlgrabber import urlopen
class ApplicationConfiguration(Singleton):
configuration = props.prop("_configuration", "The configuration property.")
def _singleton_init(self, configuration = None):
super(ApplicationConfiguration, self)._singleton_init()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.jeos_images = { }
if configuration:
if not isinstance(configuration, dict):
raise Exception("ApplicationConfiguration configuration argument must be a dict")
self.log.debug("ApplicationConfiguration passed a dictionary - ignoring any local config files including JEOS configs")
self.configuration = configuration
else:
self.configuration = self.__parse_arguments()
self.__parse_jeos_images()
if not 'debug' in self.configuration:
# This most likely means we are being used as a module/library and are not running CLI or daemon
self.configuration['debug'] = False
if not 'secondary' in self.configuration:
# We use this in the non-daemon context so it needs to be set
# TODO: Something cleaner?
self.configuration['secondary'] = False
def __init__(self, configuration = None):
pass
def __new_argument_parser(self, appname):
main_description = """Image Factory is an application for creating system images for use on public and private clouds."""
argparser = argparse.ArgumentParser(description=main_description, prog=appname)
argparser.add_argument('--version', action='version', default=argparse.SUPPRESS, version=VERSION, help='Show the version number and exit')
argparser.add_argument('--verbose', action='store_true', default=False, help='Set verbose logging.')
argparser.add_argument('--config', default='/etc/imagefactory/imagefactory.conf', help='Configuration file to use. (default: %(default)s)')
argparser.add_argument('--imgdir', default='/tmp', help='Build image files in location specified. (default: %(default)s)')
argparser.add_argument('--timeout', type=int, default=3600, help='Set the timeout period for image building in seconds. (default: %(default)s)')
argparser.add_argument('--tmpdir', default='/tmp', help='Use the specified location for temporary files. (default: %(default)s)')
argparser.add_argument('--plugins', default='/etc/imagefactory/plugins.d', help='Plugin directory. (default: %(default)s)')
group_ec2 = argparser.add_argument_group(title='EC2 settings')
group_ec2.add_argument('--ec2-32bit-util', default = 'm1.small', help='Instance type to use when launching a 32 bit utility instance')
group_ec2.add_argument('--ec2-64bit-util', default = 'm1.large', help='Instance type to use when launching a 64 bit utility instance')
if(appname == 'imagefactoryd'):
debug_group = argparser.add_mutually_exclusive_group()
debug_group.add_argument('--debug', action='store_true', default=False, help='Set really verbose logging for debugging.')
debug_group.add_argument('--nodebug', dest='debug', action='store_false', help='Turn off the default verbose CLI logging')
argparser.add_argument('--foreground', action='store_true', default=False, help='Stay in the foreground and avoid launching a daemon. (default: %(default)s)')
group_rest = argparser.add_argument_group(title='REST service options')
group_rest.add_argument('--port', type=int, default=8075, help='Port to attach the RESTful http interface to. (default: %(default)s)')
group_rest.add_argument('--address', default='0.0.0.0', help='Interface address to listen to. (default: %(default)s)')
group_rest.add_argument('--no_ssl', action='store_true', default=False, help='Turn off SSL. (default: %(default)s)')
group_rest.add_argument('--ssl_pem', default='*', help='PEM certificate file to use for HTTPS access to the REST interface. (default: A transient certificate is generated at runtime.)')
group_rest.add_argument('--no_oauth', action='store_true', default=False, help='Use 2 legged OAuth to protect the REST interface. (default: %(default)s)')
group_rest.add_argument('--secondary', action='store_true', default=False, help='Operate as a secondary/helper factory. (default: %(default)s)')
elif(appname == 'imagefactory'):
debug_group = argparser.add_mutually_exclusive_group()
debug_group.add_argument('--debug', action='store_true', default=True, help='Set really verbose logging for debugging.')
debug_group.add_argument('--nodebug', dest='debug', action='store_false', help='Turn off the default verbose CLI logging')
argparser.add_argument('--output', choices=('log', 'json'), default='log', help='Choose between log or json output. (default: %(default)s)')
argparser.add_argument('--raw', action='store_true', default=False, help='Turn off pretty printing.')
subparsers = argparser.add_subparsers(title='commands', dest='command')
template_help = 'A file containing the image template or component outline, compatible with the TDL schema (http://imgfac.org/documentation/tdl).'
cmd_base = subparsers.add_parser('base_image', help='Build a generic image.')
cmd_base.add_argument('template', type=argparse.FileType(), help=template_help)
self.__add_param_arguments(cmd_base)
cmd_target = subparsers.add_parser('target_image', help='Customize an image for a given cloud.')
cmd_target.add_argument('target', help='The name of the target cloud for which to customize the image.')
target_group = cmd_target.add_mutually_exclusive_group(required=True)
target_group.add_argument('--id', help='The uuid of the BaseImage to customize.')
target_group.add_argument('--template', type=argparse.FileType(), help=template_help)
self.__add_param_arguments(cmd_target)
cmd_provider = subparsers.add_parser('provider_image', help='Push an image to a cloud provider.')
cmd_provider.add_argument('target', help='The target type of the given cloud provider')
cmd_provider.add_argument('provider', help="A file containing the cloud provider description or a string literal starting with '@' such as '@ec2-us-east-1'.")
cmd_provider.add_argument('credentials', type=argparse.FileType(), help='A file containing the cloud provider credentials')
provider_group = cmd_provider.add_mutually_exclusive_group(required=True)
provider_group.add_argument('--id', help='The uuid of the TargetImage to push.')
provider_group.add_argument('--template', type=argparse.FileType(), help=template_help)
self.__add_param_arguments(cmd_provider)
cmd_provider.add_argument('--snapshot', action='store_true', default=False, help='Use snapshot style building. (default: %(default)s)')
cmd_import = subparsers.add_parser('import_base_image', help='Import a base image from a local disk image file')
cmd_import.add_argument('image_file', help='A disk image file to import as a base image')
self.__add_param_arguments(cmd_import)
cmd_list = subparsers.add_parser('images', help='List images of a given type or get details of an image.')
cmd_list.add_argument('fetch_spec', help='JSON formatted string of key/value pairs')
cmd_delete = subparsers.add_parser('delete', help='Delete an image.')
cmd_delete.add_argument('id', help='UUID of the image to delete')
cmd_delete.add_argument('--provider', help="A file containing the cloud provider description or a string literal starting with '@' such as '@ec2-us-east-1'.")
cmd_delete.add_argument('--credentials', type=argparse.FileType(), help='A file containing the cloud provider credentials')
cmd_delete.add_argument('--target', help='The name of the target cloud for which to customize the image.')
self.__add_param_arguments(cmd_delete)
cmd_plugins = subparsers.add_parser('plugins', help='List active plugins or get details of a specific plugin.')
cmd_plugins.add_argument('--id')
return argparser
def __add_param_arguments(self, parser):
# We do this for all three image types so lets make it a util function
parameters_help = 'An optional JSON file containing additional parameters to pass to the builders.'
parser.add_argument('--parameters', type=argparse.FileType(), help=parameters_help)
parser.add_argument('--parameter', nargs=2, action='append', help='A parameter name and the literal value to assign it. Can be used more than once.')
parser.add_argument('--file-parameter', nargs=2, action='append', help='A parameter name and a file to insert into it. Can be used more than once.')
def __parse_arguments(self):
appname = sys.argv[0].rpartition('/')[2]
argparser = self.__new_argument_parser(appname)
if((appname == 'imagefactory') and (len(sys.argv) == 1)):
argparser.print_help()
sys.exit()
configuration = argparser.parse_args()
if (os.path.isfile(configuration.config)):
try:
def dencode(a_dict, encoding='ascii'):
new_dict = {}
for k,v in a_dict.items():
ek = k.encode(encoding)
if(isinstance(v, unicode)):
new_dict[ek] = v.encode(encoding)
elif(isinstance(v, dict)):
new_dict[ek] = dencode(v)
else:
new_dict[ek] = v
return new_dict
config_file = open(configuration.config)
uconfig = json.load(config_file)
config_file.close()
defaults = dencode(uconfig)
argparser.set_defaults(**defaults)
configuration = argparser.parse_args()
except Exception, e:
self.log.exception(e)
return configuration.__dict__
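    # A hedged example of the JSON config file loaded above (keys mirror
    # the CLI option names and are applied as argparse defaults; values
    # are illustrative):
    #
    #   {
    #       "imgdir": "/var/lib/imagefactory/images",
    #       "timeout": 3600,
    #       "plugins": "/etc/imagefactory/plugins.d",
    #       "jeos_config": ["/etc/imagefactory/jeos_images/"]
    #   }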
def __add_jeos_image(self, image_detail):
log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
# our multi-dimensional-dict has the following keys
# target - provider - os - version - arch - provider_image_id - user - cmd_prefix
for i in range(8):
try:
image_detail[i] = image_detail[i].strip()
except IndexError:
image_detail.append(None)
(target, provider, os, version, arch, provider_image_id, user, cmd_prefix) = image_detail
if not (target in self.jeos_images):
self.jeos_images[target] = {}
if not (provider in self.jeos_images[target]):
self.jeos_images[target][provider] = {}
if not (os in self.jeos_images[target][provider]):
self.jeos_images[target][provider][os] = {}
if not (version in self.jeos_images[target][provider][os]):
self.jeos_images[target][provider][os][version] = {}
if arch in self.jeos_images[target][provider][os][version]:
log.warning("JEOS image defined more than once for %s - %s - %s - %s - %s" % (target, provider, os, version, arch))
log.warning("Replacing (%s) with (%s)" % (self.jeos_images[target][provider][os][version][arch], provider_image_id))
self.jeos_images[target][provider][os][version][arch] = {'img_id':provider_image_id,
'user':user,
'cmd_prefix':cmd_prefix}
def __parse_jeos_images(self):
log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
config_urls = self.configuration['jeos_config']
# Expand directories from the config and url-ify files
# Read inlist - replace directories with their contents
nextlist = []
for path in config_urls:
if os.path.isdir(path):
for filename in os.listdir(path):
fullname = os.path.join(path, filename)
if os.path.isfile(fullname):
nextlist.append(fullname)
else:
nextlist.append(path)
# Read nextlist - replace files with file:// URLs
finalist = []
for path in nextlist:
if os.path.isfile(path):
finalist.append("file://" + path)
else:
finalist.append(path)
for url in finalist:
try:
filehandle = urlopen(str(url))
line = filehandle.readline().strip()
except:
log.warning("Failed to open JEOS URL (%s)" % url)
continue
line_number = 1
while line:
# Lines that start with '#' are a comment
if line[0] == "#":
pass
# Lines that are zero length are whitespace
elif len(line.split()) == 0:
pass
else:
image_detail = line.split(":")
if len(image_detail) >= 6:
self.__add_jeos_image(image_detail)
else:
log.warning("Failed to parse line %d in JEOS config (%s):\n%s" % (line_number, url, line))
line = filehandle.readline()
line_number += 1
filehandle.close()
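# A hedged sketch of the JEOS config line format parsed above (the image
# id and values are illustrative only, not real AMIs): colon-separated
# fields, at least six per line, in the order
#   target:provider:os:version:arch:provider_image_id[:user[:cmd_prefix]]
# with '#' starting a comment line, e.g.:
#
#   ec2:ec2-us-east-1:Fedora:16:x86_64:ami-00000000:ec2-user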
| |
import asyncio
import logging
import os
import signal
import sys
import traceback
from gunicorn.workers import base # type: ignore
from sanic.compat import UVLOOP_INSTALLED
from sanic.log import logger
from sanic.server import HttpProtocol, Signal, serve, try_use_uvloop
from sanic.server.protocols.websocket_protocol import WebSocketProtocol
try:
import ssl # type: ignore
except ImportError: # no cov
ssl = None # type: ignore
if UVLOOP_INSTALLED: # no cov
try_use_uvloop()
class GunicornWorker(base.Worker):
http_protocol = HttpProtocol
websocket_protocol = WebSocketProtocol
def __init__(self, *args, **kw): # pragma: no cover
super().__init__(*args, **kw)
cfg = self.cfg
if cfg.is_ssl:
self.ssl_context = self._create_ssl_context(cfg)
else:
self.ssl_context = None
self.servers = {}
self.connections = set()
self.exit_code = 0
self.signal = Signal()
def init_process(self):
# create new event_loop after fork
asyncio.get_event_loop().close()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self):
is_debug = self.log.loglevel == logging.DEBUG
protocol = (
self.websocket_protocol
if self.app.callable.websocket_enabled
else self.http_protocol
)
self._server_settings = self.app.callable._helper(
loop=self.loop,
debug=is_debug,
protocol=protocol,
ssl=self.ssl_context,
run_async=True,
)
self._server_settings["signal"] = self.signal
self._server_settings.pop("sock")
self._await(self.app.callable._startup())
self._await(
self.app.callable._server_event("init", "before", loop=self.loop)
)
main_start = self._server_settings.pop("main_start", None)
main_stop = self._server_settings.pop("main_stop", None)
if main_start or main_stop: # noqa
logger.warning(
"Listener events for the main process are not available "
"with GunicornWorker"
)
try:
self._await(self._run())
self.app.callable.is_running = True
self._await(
self.app.callable._server_event(
"init", "after", loop=self.loop
)
)
self.loop.run_until_complete(self._check_alive())
self._await(
self.app.callable._server_event(
"shutdown", "before", loop=self.loop
)
)
self.loop.run_until_complete(self.close())
except BaseException:
traceback.print_exc()
finally:
try:
self._await(
self.app.callable._server_event(
"shutdown", "after", loop=self.loop
)
)
except BaseException:
traceback.print_exc()
finally:
self.loop.close()
sys.exit(self.exit_code)
async def close(self):
if self.servers:
# stop accepting connections
self.log.info(
"Stopping server: %s, connections: %s",
self.pid,
len(self.connections),
)
for server in self.servers:
server.close()
await server.wait_closed()
self.servers.clear()
# prepare connections for closing
self.signal.stopped = True
for conn in self.connections:
conn.close_if_idle()
# gracefully shutdown timeout
start_shutdown = 0
graceful_shutdown_timeout = self.cfg.graceful_timeout
while self.connections and (
start_shutdown < graceful_shutdown_timeout
):
await asyncio.sleep(0.1)
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
for conn in self.connections:
if hasattr(conn, "websocket") and conn.websocket:
conn.websocket.fail_connection(code=1001)
else:
conn.abort()
async def _run(self):
for sock in self.sockets:
state = dict(requests_count=0)
self._server_settings["host"] = None
self._server_settings["port"] = None
server = await serve(
sock=sock,
connections=self.connections,
state=state,
**self._server_settings
)
self.servers[server] = state
async def _check_alive(self):
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive:
self.notify()
req_count = sum(
self.servers[srv]["requests_count"] for srv in self.servers
)
if self.max_requests and req_count > self.max_requests:
self.alive = False
self.log.info(
"Max requests exceeded, shutting down: %s", self
)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
                    # The explicit loop argument was removed in
                    # Python 3.10; sleep() uses the running loop.
                    await asyncio.sleep(1.0)
        except BaseException:
pass
@staticmethod
def _create_ssl_context(cfg):
"""Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def init_signals(self):
# Set up signals through the event loop API.
self.loop.add_signal_handler(
signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None
)
self.loop.add_signal_handler(
signal.SIGTERM, self.handle_exit, signal.SIGTERM, None
)
self.loop.add_signal_handler(
signal.SIGINT, self.handle_quit, signal.SIGINT, None
)
self.loop.add_signal_handler(
signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None
)
self.loop.add_signal_handler(
signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None
)
self.loop.add_signal_handler(
signal.SIGABRT, self.handle_abort, signal.SIGABRT, None
)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
def handle_quit(self, sig, frame):
self.alive = False
self.app.callable.is_running = False
self.cfg.worker_int(self)
def handle_abort(self, sig, frame):
self.alive = False
self.exit_code = 1
self.cfg.worker_abort(self)
sys.exit(1)
def _await(self, coro):
fut = asyncio.ensure_future(coro, loop=self.loop)
self.loop.run_until_complete(fut)
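# A usage sketch (assuming this module is importable as sanic.worker,
# as in Sanic releases that shipped it): point gunicorn's
# --worker-class at GunicornWorker for an app object 'app' in 'myapp':
#
#   gunicorn myapp:app --bind 0.0.0.0:8000 \
#       --worker-class sanic.worker.GunicornWorker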
| |
"""
Quadrat statistics for planar point patterns
TODO
- use patch in matplotlib to plot rectangles and hexagons
- plot chi2 statistics in each cell
- delete those cells that do not intersect with the window (study area)
"""
__author__ = 'Serge Rey, Wei Kang, Hu Shao'
__all__ = ['RectangleM', 'HexagonM', 'QStatistic']
import numpy as np
from matplotlib import pyplot as plt
import math
import scipy.stats
class RectangleM:
"""
Rectangle grid structure for quadrat-based method.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
count_column : integer
Number of rectangles in the horizontal
direction. Use in pair with count_row to
fully specify a rectangle. Incompatible with
rectangle_width and rectangle_height.
count_row : integer
Number of rectangles in the vertical
direction. Use in pair with count_column to
fully specify a rectangle. Incompatible with
rectangle_width and rectangle_height.
rectangle_width : float
Rectangle width. Use in pair with
rectangle_height to fully specify a rectangle.
Incompatible with count_column & count_row.
rectangle_height : float
Rectangle height. Use in pair with
rectangle_width to fully specify a rectangle.
Incompatible with count_column & count_row.
Attributes
----------
pp : :class:`.PointPattern`
Point Pattern instance.
mbb : array
Minimum bounding box for the point pattern.
points : array
x,y coordinates of the point points.
count_column : integer
Number of columns.
count_row : integer
Number of rows.
num : integer
Number of rectangular quadrats.
"""
def __init__(self, pp, count_column = 3, count_row = 3,
rectangle_width = 0, rectangle_height = 0):
self.mbb = pp.mbb
self.pp = pp
self.points = np.asarray(pp.points)
x_range = self.mbb[2]-self.mbb[0]
y_range = self.mbb[3]-self.mbb[1]
        # 'and' rather than bitwise '&': the arguments may be floats.
        if rectangle_width and rectangle_height:
self.rectangle_width = rectangle_width
self.rectangle_height = rectangle_height
# calculate column count and row count
self.count_column = int(math.ceil(x_range / rectangle_width))
self.count_row = int(math.ceil(y_range / rectangle_height))
else:
self.count_column = count_column
self.count_row = count_row
# calculate the actual width and height of cell
self.rectangle_width = x_range/float(count_column)
self.rectangle_height = y_range/float(count_row)
self.num = self.count_column * self.count_row
def point_location_sta(self):
"""
Count the point events in each cell.
Returns
-------
dict_id_count : dict
keys: rectangle id, values: number of point
events in each cell.
"""
dict_id_count = {}
for i in range(self.count_row):
for j in range(self.count_column):
dict_id_count[j+i*self.count_column] = 0
for point in self.points:
index_x = (point[0]-self.mbb[0]) // self.rectangle_width
index_y = (point[1]-self.mbb[1]) // self.rectangle_height
if index_x == self.count_column:
index_x -= 1
if index_y == self.count_row:
index_y -= 1
id = index_y * self.count_column + index_x
dict_id_count[id] += 1
return dict_id_count
def plot(self, title="Quadrat Count"):
'''
Plot rectangle tessellation as well as the number of points falling in each rectangle.
Parameters
----------
title: str, optional
Title of the plot. Default is "Quadrat Count".
'''
line_width_cell = 1
line_color_cell = 'red'
x_min = self.mbb[0]
y_min = self.mbb[1]
# draw the point pattern along with its window
ax = self.pp.plot(window=True, title=title,
get_ax=True)
# draw cells and counts
x_start_end = [x_min,
x_min + self.count_column*self.rectangle_width]
for row in range(self.count_row + 1):
y = y_min + row*self.rectangle_height
ax.plot(x_start_end, [y, y], lw = line_width_cell,
color=line_color_cell)
y_start_end = [y_min,
y_min + self.count_row*self.rectangle_height]
for column in range(self.count_column + 1):
x = x_min + column*self.rectangle_width
ax.plot([x, x], y_start_end, lw = line_width_cell,
color=line_color_cell)
dict_id_count = self.point_location_sta()
for x in range(self.count_column):
for y in range(self.count_row):
cell_id = x + y*self.count_column
count = dict_id_count[cell_id]
position_x = x_min + self.rectangle_width*(x+0.5)
position_y = y_min + self.rectangle_height*(y+0.5)
ax.text(position_x, position_y, str(count))
plt.show()
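# A brief usage sketch (not part of the module; 'pp' stands for a
# PointPattern instance and the numbers are illustrative):
#
#   rm = RectangleM(pp, count_column=4, count_row=4)
#   counts = rm.point_location_sta()   # {cell_id: point count}
#   rm.plot()                          # grid overlaid with counts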
class HexagonM:
"""
Hexagon grid structure for quadrat-based method.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
    lh : float
                         Hexagon side length.
Attributes
----------
pp : :class:`.PointPattern`
Point Pattern instance.
    h_length : float
                         Hexagon side length.
mbb : array
Minimum bounding box for the point pattern.
points : array
x,y coordinates of the point points.
count_row_even : integer
Number of even rows.
count_row_odd : integer
Number of odd rows.
count_column : integer
Number of columns.
num : integer
Number of hexagonal quadrats.
"""
def __init__(self, pp, lh):
self.points = np.asarray(pp.points)
self.pp = pp
self.h_length = lh
self.mbb = pp.mbb
range_x = self.mbb[2] - self.mbb[0]
range_y = self.mbb[3] - self.mbb[1]
# calculate column count
self.count_column = 1
if self.h_length/2.0 < range_x:
temp = math.ceil((range_x - self.h_length/2) / (
1.5 * self.h_length))
self.count_column += int(temp)
# calculate row count for the even columns
self.semi_height = self.h_length * math.cos(math.pi/6)
self.count_row_even = 1
if self.semi_height < range_y:
temp = math.ceil((range_y-self.semi_height)/(
self.semi_height*2))
self.count_row_even += int(temp)
# for the odd columns
self.count_row_odd = int(math.ceil(range_y/(self.semi_height*2)))
# quadrat number
self.num = self.count_row_odd * ((self.count_column // 2) +
self.count_column % 2) + \
self.count_row_even * (self.count_column // 2)
def point_location_sta(self):
"""
Count the point events in each hexagon cell.
Returns
-------
dict_id_count : dict
keys: rectangle id, values: number of point
events in each hexagon cell.
"""
semi_cell_length = self.h_length / 2.0
dict_id_count = {}
# even row may be equal with odd row or 1 more than odd row
for i in range(self.count_row_even):
for j in range(self.count_column):
if self.count_row_even != self.count_row_odd and i ==\
self.count_row_even-1:
if j % 2 == 1:
continue
dict_id_count[j+i*self.count_column] = 0
x_min = self.mbb[0]
y_min = self.mbb[1]
x_max = self.mbb[2]
y_max = self.mbb[3]
points = np.array(self.points)
for point in points:
# find the possible x index
intercept_degree_x = ((point[0]-x_min)//semi_cell_length)
# find the possible y index
possible_y_index_even = int((point[1]+ self.semi_height -
y_min)/ (self.semi_height * 2))
possible_y_index_odd = int((point[1] - y_min) / (
self.semi_height * 2))
if intercept_degree_x % 3 != 1:
center_index_x = (intercept_degree_x+1) // 3
center_index_y = possible_y_index_odd
if center_index_x % 2 == 0:
center_index_y = possible_y_index_even
dict_id_count[center_index_x + center_index_y * self.count_column] += 1
else: # two columns of cells can be possible
center_index_x = intercept_degree_x//3
center_x = center_index_x*semi_cell_length*3 + x_min
center_index_y = possible_y_index_odd
center_y = (center_index_y*2+1)*self.semi_height + y_min
if center_index_x % 2 == 0:
center_index_y = possible_y_index_even
center_y = center_index_y*self.semi_height*2 + y_min
if point[1] > center_y: # compare the upper bound
x0 = center_x+self.h_length
y0 = center_y
x1 = center_x+semi_cell_length
y1 = center_y+self.semi_height
indicator = -(point[1] - ((y0-y1)/(x0-x1)*point[
0] + (x0*y1-x1*y0)/(x0-x1)))
else: #compare the lower bound
x0 = center_x+semi_cell_length
y0 = center_y-self.semi_height
x1 = center_x+self.h_length
y1 = center_y
indicator = point[1] - ((y0-y1)/(x0-x1)*point[0]
+ (x0*y1-x1*y0)/(x0-x1))
if indicator <= 0:
# we select right hexagon instead of the left
center_index_x += 1
center_index_y = possible_y_index_odd
if center_index_x % 2 == 0:
center_index_y = possible_y_index_even
dict_id_count[center_index_x + center_index_y
* self.count_column] += 1
return dict_id_count
def plot(self, title="Quadrat Count"):
'''
Plot hexagon quadrats as well as the number of points falling in each quadrat.
Parameters
----------
title: str, optional
Title of the plot. Default is "Quadrat Count".
'''
line_width_cell = 1
line_color_cell = 'red'
# draw the point pattern along with its window
ax = self.pp.plot(window=True, title= title,
get_ax=True)
x_min = self.mbb[0]
y_min = self.mbb[1]
# draw cells and counts
dict_id_count = self.point_location_sta()
for id in dict_id_count.keys():
index_x = id % self.count_column
index_y = id // self.count_column
center_x = index_x*self.h_length/2.0*3.0 + x_min
center_y = index_y*self.semi_height*2.0 + y_min
if index_x % 2 == 1: # for the odd columns
center_y = (index_y*2.0+1)*self.semi_height + y_min
list_points_cell = []
list_points_cell.append([center_x + self.h_length,
center_y])
list_points_cell.append([center_x
+ self.h_length/2,
center_y + self.semi_height])
list_points_cell.append([center_x
- self.h_length/2,
center_y+self.semi_height])
list_points_cell.append([center_x - self.h_length,
center_y])
list_points_cell.append([center_x
- self.h_length/2,
center_y-self.semi_height])
list_points_cell.append([center_x
+ self.h_length/2,
center_y-self.semi_height])
list_points_cell.append([center_x + self.h_length,
center_y])
ax.plot(np.array( list_points_cell)[:,0],np.array(
list_points_cell)[:,1], lw =line_width_cell,
color=line_color_cell)
ax.text(center_x, center_y, str(dict_id_count[id]))
plt.show()
class QStatistic:
"""
Quadrat analysis of point pattern.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
shape : string
Grid structure. Either "rectangle" or "hexagon".
Default is "rectangle".
nx : integer
Number of rectangles in the horizontal
direction. Only when shape is specified as
"rectangle" will nx be considered.
ny : integer
Number of rectangles in the vertical direction.
Only when shape is specified as "rectangle"
will ny be considered.
lh : float
Hexagon length (hexagon). Only when shape is
specified as "hexagon" will lh be considered.
Incompatible with nx & ny.
realizations : :class:`PointProcess`
Point process instance with more than 1 point
pattern realizations which would be used for
simulation based inference. Default is 0
where no simulation based inference is
performed.
Attributes
----------
pp : :class:`.PointPattern`
Point Pattern instance.
mr : :class:`.RectangleM` or :class:`.HexagonM`
RectangleM or HexagonM instance.
chi2 : float
Chi-squared test statistic for the observed
point pattern pp.
df : integer
Degree of freedom.
chi2_pvalue : float
p-value based on analytical chi-squared
distribution.
chi2_r_pvalue : float
p-value based on simulated sampling
distribution. Only available when
realizations is correctly specified.
chi2_realizations : array
Chi-squared test statistics calculated for
all of the simulated csr point patterns.
"""
def __init__(self, pp, shape= "rectangle",nx = 3, ny = 3,
lh = 10, realizations = 0):
self.pp = pp
if shape == "rectangle":
self.mr = RectangleM(pp, count_column = nx,
count_row = ny)
elif shape == "hexagon":
self.mr = HexagonM(pp,lh)
# calculate chi2 test statisitc for the observed point pattern
dict_id_count = self.mr.point_location_sta()
self.chi2,self.chi2_pvalue = scipy.stats.chisquare(
list(dict_id_count.values()))
self.df = self.mr.num - 1
# when realizations is specified, perform simulation based
# inference.
if realizations:
reals = realizations.realizations
sim_n = realizations.samples
            chi2_realizations = []  # test statistics for all simulations
for i in range(sim_n):
if shape == "rectangle":
mr_temp = RectangleM(reals[i],
count_column=nx,
count_row=ny)
elif shape == "hexagon":
mr_temp = HexagonM(reals[i],lh)
id_count_temp = mr_temp.point_location_sta().values()
#calculate test statistics for simulated point patterns
chi2_sim,p = scipy.stats.chisquare(list(id_count_temp))
chi2_realizations.append(chi2_sim)
self.chi2_realizations = np.array(chi2_realizations)
#calculate pseudo pvalue
above_chi2 = self.chi2_realizations >= self.chi2
larger_chi2 = sum(above_chi2)
self.chi2_r_pvalue = (larger_chi2 + 1.)/(sim_n+ 1.)
def plot(self, title = "Quadrat Count"):
'''
Plot quadrats as well as the number of points falling in each quadrat.
Parameters
----------
title: str, optional
Title of the plot. Default is "Quadrat Count".
'''
self.mr.plot(title = title)
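# A short usage sketch (illustrative; 'pp' is a PointPattern and
# 'csr_process' a PointProcess carrying simulated realizations):
#
#   q = QStatistic(pp, shape="rectangle", nx=3, ny=3)
#   q.chi2, q.chi2_pvalue          # analytical chi-squared test
#   q_sim = QStatistic(pp, realizations=csr_process)
#   q_sim.chi2_r_pvalue            # pseudo p-value from simulation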
| |
import binascii
import errno
import logging
import sys
import usb.core
import usb.util
from scapy.layers.bluetooth import *
from scapy.supersocket import SuperSocket
# See BT 4.2 Spec, Vol 4, Part B, "USB Transport Layer".
# Used for "Single Function Primary Controller" devices:
USB_DEVICE_CLASS_WIRELESS_CONTROLLER = 0xE0
USB_DEVICE_SUB_CLASS_RF_CONTROLLER = 0x01
USB_DEVICE_PROTOCOL_BLUETOOTH = 0x01
# Used for composite devices:
USB_DEVICE_CLASS_MISCELLANEOUS = 0xEF
USB_DEVICE_SUB_CLASS_COMMON_CLASS = 0x02
USB_DEVICE_PROTOCOL_IAD = 0x01
USB_ENDPOINT_HCI_CMD = 0x00
USB_ENDPOINT_HCI_EVT = 0x81
USB_HCI_CMD_REQUEST_PARAMS = {
"bmRequestType": 0x20, "bRequest": 0x00, "wValue": 0x00, "wIndex": 0x00
}
LOG = logging.getLogger("pybluetooth")
class PyUSBBluetoothUserSocketException(Exception):
pass
class PyUSBBluetoothL2CAPSocket(SuperSocket):
desc = "Read/write Bluetooth L2CAP with pyUSB"
def __init__(self, pyusb_dev):
raise Exception("NYI")
class PyUSBBluetoothHCISocket(SuperSocket):
desc = "Read/write Bluetooth HCI with pyUSB"
def __init__(self, pyusb_dev):
self.pyusb_dev = pyusb_dev
# Drain any data that was already pending:
while self.recv(timeout_secs=0.001):
pass
def __del__(self):
# Always try to do a HCI Reset to stop any on-going
# Bluetooth activity:
try:
self.hci_reset()
except Exception:
pass
# Release the device, so it can be claimed again immediately when
# this object gets freed.
try:
usb.util.dispose_resources(self.pyusb_dev)
except Exception:
LOG.warning("Couldn't dispose %s" % self.pyusb_dev)
def hci_reset(self):
self.send(HCI_Hdr() / HCI_Command_Hdr() / HCI_Cmd_Reset())
def recv(self, x=512, timeout_secs=10.0):
# FIXME: Don't know how many bytes to expect here,
# using 512 bytes -- will this fly if there's another event right
# after it? Or is each event guaranteed to be put in a USB packet of
# its own?
try:
data_array = self.pyusb_dev.read(
USB_ENDPOINT_HCI_EVT, x, int(timeout_secs * 1000.0))
except usb.core.USBError as e:
if e.errno == errno.ETIMEDOUT:
return None
else:
raise e
data = ''.join([chr(c) for c in data_array]) # Ugh.. array return val
data = "\4" + data # Prepend H4 'Event' packet indicator
scapy_packet = HCI_Hdr(data)
LOG.debug("recv %s" % scapy_packet.lastlayer().summary())
LOG.debug("recv bytes: " + binascii.hexlify(data))
return scapy_packet
def send(self, scapy_packet):
data = str(scapy_packet)
LOG.debug("send %s" % scapy_packet.lastlayer().summary())
LOG.debug("send bytes: " + binascii.hexlify(data))
data = data[1:]  # Cut off the H4 'Command' packet indicator (0x01)
sent_len = self.pyusb_dev.ctrl_transfer(
data_or_wLength=data, **USB_HCI_CMD_REQUEST_PARAMS)
expected_len = len(data)
if sent_len != expected_len:
raise PyUSBBluetoothUserSocketException(
"Send failure. Sent %u instead of %u bytes" % (sent_len, expected_len))
def find_all_bt_adapters():
def bt_adapter_matcher(d):
# Check if the device is a "Single Function Primary Controller":
if (d.bDeviceClass == USB_DEVICE_CLASS_WIRELESS_CONTROLLER and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_RF_CONTROLLER and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_BLUETOOTH):
return True
# Check if it's a composite device:
if not (d.bDeviceClass == USB_DEVICE_CLASS_MISCELLANEOUS and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_COMMON_CLASS and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_IAD):
return False
for cfg in d:
bt_intf_descr = {
"bInterfaceClass": USB_DEVICE_CLASS_WIRELESS_CONTROLLER,
"bInterfaceSubClass": USB_DEVICE_SUB_CLASS_RF_CONTROLLER,
"bInterfaceProtocol": USB_DEVICE_PROTOCOL_BLUETOOTH,
}
intf = usb.util.find_descriptor(cfg, **bt_intf_descr)
if intf is not None:
return True
return False
devs = set()
matchers = [CUSTOM_USB_DEVICE_MATCHER, bt_adapter_matcher]
for matcher in matchers:
if not matcher:
continue
devs |= set(usb.core.find(find_all=True, custom_match=matcher))
# Unfortunately, usb.core.Device doesn't implement __eq__(),
# see https://github.com/walac/pyusb/issues/147.
# So filter out dupes here:
devs_deduped = set(devs)
for d in devs:
for dd in devs:
if d == dd:
continue
if d not in devs_deduped:
continue
if d.bus == dd.bus and d.address == dd.address:
devs_deduped.remove(dd)
return devs_deduped
class PyUSBBluetoothNoAdapterFoundException(Exception):
pass
def find_first_bt_adapter_pyusb_device_or_raise():
pyusb_devs = find_all_bt_adapters()
if len(pyusb_devs) == 0:
raise PyUSBBluetoothNoAdapterFoundException(
"No Bluetooth adapters found!")
def _is_usable_device(pyusb_dev):
try:
pyusb_dev.set_configuration()
PyUSBBluetoothHCISocket(pyusb_dev).hci_reset()
return True
except:
return False
pyusb_devs = list(filter(_is_usable_device, pyusb_devs))
if len(pyusb_devs) == 0:
raise PyUSBBluetoothNoAdapterFoundException(
"No Bluetooth *usable* adapters found!")
if len(pyusb_devs) > 1:
LOG.warning("More than one Bluetooth adapter found, "
"using the first one...")
pyusb_dev = pyusb_devs[0]
return pyusb_dev
def find_first_bt_adapter_pyusb_device():
try:
return find_first_bt_adapter_pyusb_device_or_raise()
except PyUSBBluetoothNoAdapterFoundException:
return None
def has_bt_adapter():
pyusb_dev = find_first_bt_adapter_pyusb_device()
if pyusb_dev is None:
return False
return True
def pebble_usb_class_matcher(d):
""" USB device class matcher for Pebble's Test Automation dongles """
USB_DEVICE_CLASS_VENDOR_SPECIFIC = 0xFF
USB_DEVICE_SUB_CLASS_PEBBLE_BT = 0xBB
USB_DEVICE_PROTOCOL_PEBBLE_BT = 0xBB
return (d.bDeviceClass == USB_DEVICE_CLASS_VENDOR_SPECIFIC and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_PEBBLE_BT and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_PEBBLE_BT)
CUSTOM_USB_DEVICE_MATCHER = pebble_usb_class_matcher
def set_custom_matcher(matcher_func):
global CUSTOM_USB_DEVICE_MATCHER
CUSTOM_USB_DEVICE_MATCHER = matcher_func
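# A minimal usage sketch built only from this module's own API (assumes a
# supported adapter is attached and claimable):
#
#   dev = find_first_bt_adapter_pyusb_device_or_raise()
#   hci = PyUSBBluetoothHCISocket(dev)
#   hci.hci_reset()                      # HCI Reset over the control endpoint
#   evt = hci.recv(timeout_secs=1.0)     # e.g. the HCI Command Complete event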
| |
#!/usr/bin/python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Filter service runtime logging output and compute system call statistics.
To use this script, define the BENCHMARK symbol to be zero (default)
in nacl_syscall_hook.c. Next, run the service runtime with NACLLOG
set to an output file name. When the run is complete, run this script
with that file as input.
"""
import math
import re
import sys
class Stats:
"""
Compute basic statistics.
"""
def __init__(self):
self._sum_x = 0.0
self._sum_x_squared = 0.0
self._n = 0
# enddef
def Enter(self, val):
"""Enter a new value.
Args:
val: the new (floating point) value
"""
self._sum_x += val
self._sum_x_squared += val * val
self._n += 1
# enddef
def Mean(self):
"""Returns the mean of entered values.
"""
return self._sum_x / self._n
# enddef
def Variance(self):
"""Returns the variance of entered values.
"""
mean = self.Mean()
return self._sum_x_squared / self._n - mean * mean
# enddef
def Stddev(self):
"""Returns the standard deviation of entered values.
"""
return math.sqrt(self.Variance())
# enddef
def NumEntries(self):
"""Returns the number of data points entered.
"""
return self._n
# enddef
# endclass
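# Illustrative example (a sketch): entering 1.0, 2.0, 3.0 yields
# Mean() == 2.0, Variance() == 2/3 (population form, E[x^2] - E[x]^2),
# and Stddev() == sqrt(2/3) ~= 0.816.
#   s = Stats()
#   for v in (1.0, 2.0, 3.0):
#     s.Enter(v)
#   print s.Mean(), s.Variance(), s.Stddev()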
class PeakStats:
"""Compute min and max for a data set. While far less efficient
than using a reduce, this class makes streaming data handling
easier.
"""
def __init__(self):
self._min = 1L << 64
self._max = -1
# enddef
def Enter(self, val):
"""Enter a new datum.
Args:
val: the new datum to be entered.
"""
if val > self._max:
self._max = val
# endif
if val < self._min:
self._min = val
# endif
# enddef
def Max(self):
"""Returns the maximum value found so far.
"""
return self._max
# enddef
def Min(self):
"""Returns the minimum value found so far.
"""
return self._min
# enddef
# endclass
class WindowedRate:
"""Class for computing statistics on events based on counting the
number of occurrences in a time interval. Statistics on these
bucketed counts are then available.
"""
def __init__(self, duration):
self._t_start = -1
self._t_duration = duration
self._t_end = -1
self._event_count = 0
self._rate_stats = Stats()
self._peak_stats = PeakStats()
# enddef
def Enter(self, t):
"""Enter in a new event that occurred at time t.
Args:
t: the time at which an event occurred.
"""
if self._t_start == -1:
self._t_start = t
self._t_end = t + self._t_duration
self._event_count = 1  # count the event that opened this first bucket
return
# [ t_start, t_start + duration )
if t < self._t_end:
self._event_count += 1
return
# endif
self.Compute()
self._event_count = 1
next_end = self._t_end
while next_end < t:
next_end += self._t_duration
# endwhile
self._t_end = next_end
self._t_start = next_end - self._t_duration
# enddef
def Compute(self):
"""Finalize the last bucket.
"""
self._rate_stats.Enter(self._event_count)
self._peak_stats.Enter(self._event_count)
self._event_count = 0
# enddef
def RateStats(self):
"""Returns the event rate statistics object.
"""
return self._rate_stats
# enddef
def PeakStats(self):
"""Returns the peak event rate statistics object.
"""
return self._peak_stats
# enddef
# endclass
class TimestampParser:
"""
A class to parse timestamp strings. This is needed because there is
implicit state: the timestamp string is HH:MM:SS.fract and may cross
a 24 hour boundary -- we do not log the date since that would make
the log file much larger and generally it is not needed (implicit in
file modification time) -- so we convert to a numeric representation
that is relative to an arbitrary epoch start, and the state enables
us to correctly handle midnight.
This code assumes that the timestamps are monotonically
non-decreasing.
"""
def __init__(self):
self._min_time = -1
# enddef
def Convert(self, timestamp):
"""Converts a timestamp string into a numeric timestamp value.
Args:
timestamp: A timestamp string in HH:MM:SS.fraction format.
Returns:
a numeric timestamp (arbitrary epoch)
"""
(hh, mm, ss) = map(float, timestamp.split(':'))
t = ((hh * 60) + mm) * 60 + ss
if self._min_time == -1:
self._min_time = t
# endif
while t < self._min_time:
t += 24 * 60 * 60
# endwhile
self._min_time = t
return t
# enddef
# endclass
def ReadFileHandle(fh, duration):
"""Reads log data from the provided file handle, and compute and
print various statistics on the system call rate based on the log
data.
"""
# log format "[pid:timestamp] msg" where the timestamp is
log_re = re.compile(r'\[[0-9,]+:([:.0-9]+)\] system call [0-9]+')
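# e.g. it extracts "12:34:56.789" from an (illustrative) line such as:
#   [1234:12:34:56.789] system call 42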
parser = TimestampParser()
inter_stats = Stats()
rate_stats = Stats()
windowed = WindowedRate(duration)
prev_time = -1
start_time = 0
for line in fh: # generator
m = log_re.search(line)
if m is not None:
timestamp = m.group(1)
t = parser.Convert(timestamp)
windowed.Enter(t)
if prev_time != -1:
elapsed = t - prev_time
inter_stats.Enter(elapsed)
rate_stats.Enter(1.0/elapsed)
else:
start_time = t
# endif
prev_time = t
# endif
# endfor
print '\nInter-syscall time'
print 'Mean: %g' % inter_stats.Mean()
print 'Stddev: %g' % inter_stats.Stddev()
print '\nInstantaneous Syscall Rate (unweighted!)'
print 'Mean : %g' % rate_stats.Mean()
print 'Stddev: %g' % rate_stats.Stddev()
print '\nAvg Syscall Rate: %g' % (rate_stats.NumEntries()
/ (prev_time - start_time))
print '\nSyscalls in %f interval' % duration
print 'Mean: %g' % windowed.RateStats().Mean()
print 'Stddev: %g' % windowed.RateStats().Stddev()
print 'Min: %g' % windowed.PeakStats().Min()
print 'Max: %g' % windowed.PeakStats().Max()
# enddef
def main(argv):
if len(argv) > 1:
print >>sys.stderr, 'no arguments expected\n'
return 1
# endif
ReadFileHandle(sys.stdin, 0.010)
return 0
# enddef
if __name__ == '__main__':
sys.exit(main(sys.argv))
# endif
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for binary_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_binary_op_tests(options,
binary_operator,
allow_fully_quantize=False,
expected_tf_failures=0,
test_parameters=None):
"""Make a set of tests to do binary ops with and without broadcast."""
if test_parameters is None:
test_parameters = []
test_parameters = test_parameters + [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
"fully_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
"fully_quantize": [False],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
"fully_quantize": [False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
"fully_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
"fully_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
"fully_quantize": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False],
"fully_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [False],
"fully_quantize": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
"fully_quantize": [True],
},
]
# test_parameters include fully_quantize option only when
# allow_fully_quantize is True.
if not allow_fully_quantize:
test_parameters = [
test_parameter for test_parameter in test_parameters
if True not in test_parameter["fully_quantize"]
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
# TODO(karimnosseir): Update condition after moving to new converter.
if parameters["activation"] and (not options.use_experimental_converter or
(parameters["dtype"] != tf.int32 and
parameters["dtype"] != tf.int64)):
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
if allow_fully_quantize:
input1 = create_tensor_data(
parameters["dtype"],
parameters["input_shape_1"],
min_value=-1,
max_value=1)
input2 = create_tensor_data(
parameters["dtype"],
parameters["input_shape_2"],
min_value=-1,
max_value=1)
else:
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add, allow_fully_quantize=True)
@register_make_test_function()
def make_div_tests(options):
make_binary_op_tests(options, tf.compat.v1.div)
@register_make_test_function()
def make_sub_tests(options):
"""Make zip tests for sub op with additional cases."""
test_parameters = [
{
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 3, 3, 3]],
"input_shape_2": [[3]],
"activation": [False],
"fully_quantize": [False],
},
]
make_binary_op_tests(
options,
tf.subtract,
allow_fully_quantize=True,
test_parameters=test_parameters)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply, allow_fully_quantize=True)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.math.floordiv)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.math.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.math.squared_difference)
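# A sketch of how a further elementwise op could be wired up, following the
# pattern above (tf.minimum here is an assumed example; any broadcasting
# binary TF op fits):
#
# @register_make_test_function()
# def make_minimum_tests(options):
#   make_binary_op_tests(options, tf.minimum)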
| |
#!/usr/bin/env python3
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.regressor import AlphaSelection
from yellowbrick.regressor import ResidualsPlot
from yellowbrick.regressor import PredictionError
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.cluster import SilhouetteVisualizer
from yellowbrick.cluster import InterclusterDistance
from yellowbrick.classifier import PrecisionRecallCurve
from yellowbrick.classifier import ClassPredictionError
from yellowbrick.classifier import DiscriminationThreshold
from yellowbrick.datasets import load_spam, load_concrete, load_game
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, Lasso, LassoCV, Ridge
FIGSIZE = (20, 4)
IMAGES = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
YB_LOGO_PATH = os.path.join(IMAGES, "yb-fc.png")
def tts_plot(viz, X, y, test_size=0.20, random_state=42, score=True, finalize=True):
"""
Helper function to plot model visualizers with train_test_split
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
viz.fit(X_train, y_train)
if score:
viz.score(X_test, y_test)
if finalize:
viz.finalize()
return viz
def class_prediction_error(ax=None):
data = load_game(return_dataset=True)
X, y = data.to_numpy()
X = OneHotEncoder().fit_transform(X).toarray()
viz = ClassPredictionError(GaussianNB(), ax=ax)
return tts_plot(viz, X, y)
def precision_recall_curve(ax=None):
data = load_spam(return_dataset=True)
X, y = data.to_pandas()
viz = PrecisionRecallCurve(LogisticRegression(), ax=ax)
return tts_plot(viz, X, y)
def discrimination_threshold(ax=None):
data = load_spam(return_dataset=True)
X, y = data.to_pandas()
viz = DiscriminationThreshold(RandomForestClassifier(n_estimators=10), ax=ax)
return tts_plot(viz, X, y, score=False)
def classification_visualizers(saveto=None):
_, (axa, axb, axc) = plt.subplots(nrows=1, ncols=3, figsize=FIGSIZE)
class_prediction_error(axa)
precision_recall_curve(axb)
discrimination_threshold(axc)
plt.tight_layout(pad=1.5)
if saveto is not None:
plt.savefig(saveto)
else:
plt.show()
def residuals_plot(ax=None):
data = load_concrete(return_dataset=True)
X, y = data.to_pandas()
viz = ResidualsPlot(Ridge(), ax=ax)
return tts_plot(viz, X, y)
def prediction_error(ax=None):
data = load_concrete(return_dataset=True)
X, y = data.to_pandas()
viz = PredictionError(Lasso(), ax=ax)
return tts_plot(viz, X, y)
def alpha_selection(ax=None):
data = load_concrete(return_dataset=True)
X, y = data.to_pandas()
alphas = np.logspace(-10, 1, 400)
viz = AlphaSelection(LassoCV(alphas=alphas), ax=ax)
return tts_plot(viz, X, y)
def regression_visualizers(saveto=None):
_, (axa, axb, axc) = plt.subplots(nrows=1, ncols=3, figsize=FIGSIZE)
residuals_plot(axa)
prediction_error(axb)
alpha_selection(axc)
plt.tight_layout(pad=1.5)
if saveto is not None:
plt.savefig(saveto)
else:
plt.show()
def intercluster_distance(ax=None):
X, y = make_blobs(centers=12, n_samples=1000, n_features=16, shuffle=True)
viz = InterclusterDistance(KMeans(9), ax=ax)
viz.fit(X)
viz.finalize()
return viz
def k_elbow(ax=None):
X, y = make_blobs(centers=12, n_samples=1000, n_features=16, shuffle=True)
viz = KElbowVisualizer(KMeans(), k=(4, 12), ax=ax, locate_elbow=False)
viz.fit(X)
viz.finalize()
return viz
def silhouette(ax=None):
X, y = make_blobs(centers=12, n_samples=1000, n_features=16, shuffle=True)
viz = SilhouetteVisualizer(KMeans(9), ax=ax)
viz.fit(X)
viz.finalize()
return viz
def clustering_visualizers(saveto=None):
_, (axa, axb, axc) = plt.subplots(nrows=1, ncols=3, figsize=FIGSIZE)
intercluster_distance(axa)
k_elbow(axb)
silhouette(axc).ax.get_legend().remove()
plt.tight_layout(pad=1.5)
if saveto is not None:
plt.savefig(saveto)
else:
plt.show()
def yb_logo(path=YB_LOGO_PATH, ax=None):
"""
Reads the YB image logo from the specified path and writes it to the axes.
"""
# Load image
with open(path, "rb") as fobj:
img = plt.imread(fobj, format="png")
if ax is None:
_, ax = plt.subplots()
# Draw image
ax.imshow(img, interpolation="nearest")
# Remove spines, ticks, grid, and other marks
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
for pos in ["right", "top", "bottom", "left"]:
ax.spines[pos].set_visible(False)
return ax
def full_image(saveto=None, center_logo=False):
_, axes = plt.subplots(nrows=3, ncols=3, figsize=(21, 14))
# Top row: classifiers
class_prediction_error(axes[0][0])
precision_recall_curve(axes[0][1])
discrimination_threshold(axes[0][2])
# Middle row: regressors
residuals_plot(axes[1][0])
alpha_selection(axes[1][2])
if center_logo:
yb_logo(ax=axes[1][1])
else:
prediction_error(axes[1][1])
# Bottom row: clusterers
intercluster_distance(axes[2][0])
k_elbow(axes[2][1])
silhouette(axes[2][2]).ax.get_legend().remove()
plt.tight_layout(pad=1.5)
if saveto is not None:
plt.savefig(saveto)
else:
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="generates images for the README.md banner"
)
parser.add_argument(
"-c",
"--classifiers",
type=str,
metavar="PATH",
default="classifiers.png",
help="path to save the classifiers banner image",
)
parser.add_argument(
"-r",
"--regressors",
type=str,
metavar="PATH",
default="regressors.png",
help="path to save the regressors banner image",
)
parser.add_argument(
"-C",
"--clusterers",
type=str,
metavar="PATH",
default="clusterers.png",
help="path to save the clusterers banner image",
)
parser.add_argument(
"-b",
"--banner",
type=str,
metavar="PATH",
default="",
help="make full banner image and save to disk",
)
parser.add_argument(
"-y",
"--yb",
action="store_true",
help="replace middle image of banner with logo",
)
args = parser.parse_args()
if args.banner:
full_image(args.banner, args.yb)
sys.exit(0)
if args.classifiers:
classification_visualizers(args.classifiers)
if args.regressors:
regression_visualizers(args.regressors)
if args.clusterers:
clustering_visualizers(args.clusterers)
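# Example invocations (a sketch; the file name "images.py" is an assumption):
#   python images.py                   # classifiers.png, regressors.png, clusterers.png
#   python images.py -b banner.png -y  # single 3x3 banner with the YB logo centered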
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras generic Python utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.platform import test
class HasArgTest(test.TestCase):
def test_has_arg(self):
def f_x(x):
return x
def f_x_args(x, *args):
_ = args
return x
def f_x_kwargs(x, **kwargs):
_ = kwargs
return x
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_args, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x_args, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=True))
class TestCustomObjectScope(test.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass(object):
pass
with keras.utils.generic_utils.custom_object_scope(
{'CustomClass': CustomClass, 'custom_fn': custom_fn}):
act = keras.activations.get('custom_fn')
self.assertEqual(act, custom_fn)
cl = keras.regularizers.get('CustomClass')
self.assertEqual(cl.__class__, CustomClass)
class SerializeKerasObjectTest(test.TestCase):
def test_serialize_none(self):
serialized = keras.utils.generic_utils.serialize_keras_object(None)
self.assertEqual(serialized, None)
deserialized = keras.utils.generic_utils.deserialize_keras_object(
serialized)
self.assertEqual(deserialized, None)
def test_serialize_custom_class_with_default_name(self):
@keras.utils.generic_utils.register_keras_serializable()
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
serialized_name = 'Custom>TestClass'
inst = TestClass(value=10)
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
def test_serialize_custom_class_with_custom_name(self):
@keras.utils.generic_utils.register_keras_serializable(
'TestPackage', 'CustomName')
class OtherTestClass(object):
def __init__(self, val):
self._val = val
def get_config(self):
return {'val': self._val}
serialized_name = 'TestPackage>CustomName'
inst = OtherTestClass(val=5)
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@keras.utils.generic_utils.register_keras_serializable()
def my_fn():
return 42
serialized_name = 'Custom>my_fn'
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(my_fn)
self.assertEqual(class_name, config)
fn = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertEqual(42, fn())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError, 'Cannot register a class that does '
'not have a get_config.*'):
@keras.utils.generic_utils.register_keras_serializable( # pylint: disable=unused-variable
'TestPackage', 'TestClass')
class TestClass(object):
def __init__(self, value):
self._value = value
def test_serialize_custom_objects_with_overwrite_fails(self):
with self.assertRaisesRegex(ValueError, '.*has already been registered.*'):
@keras.utils.generic_utils.register_keras_serializable() # pylint: disable=unused-variable
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
def test_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
layer = keras.layers.Dense(
SerializableInt(3),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config, custom_objects={'SerializableInt': SerializableInt})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableInt)
self.assertEqual(new_layer.units, 3)
def test_nested_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
class SerializableNestedInt(int):
"""A serializable object containing another serializable object."""
def __new__(cls, value, int_obj):
obj = int.__new__(cls, value)
obj.int_obj = int_obj
return obj
def get_config(self):
return {'value': int(self), 'int_obj': self.int_obj}
@classmethod
def from_config(cls, config):
return cls(**config)
nested_int = SerializableInt(4)
layer = keras.layers.Dense(
SerializableNestedInt(3, nested_int),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config,
custom_objects={
'SerializableInt': SerializableInt,
'SerializableNestedInt': SerializableNestedInt
})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableNestedInt)
self.assertEqual(new_layer.units, 3)
self.assertEqual(new_layer.units.int_obj.__class__, SerializableInt)
self.assertEqual(new_layer.units.int_obj, 4)
class SliceArraysTest(test.TestCase):
def test_slice_arrays(self):
input_a = [1, 2, 3]
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, start=0),
[None, None, None])
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, stop=3),
[None, None, None])
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1),
[None, None, None])
if __name__ == '__main__':
test.main()
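# A minimal registration/round-trip sketch mirroring the tests above (the
# 'Demo' package name is an arbitrary assumption):
#
#   @keras.utils.generic_utils.register_keras_serializable('Demo')
#   def double(x):
#     return x * 2
#
#   config = keras.utils.generic_utils.serialize_keras_object(double)  # 'Demo>double'
#   fn = keras.utils.generic_utils.deserialize_keras_object(config)
#   assert fn(21) == 42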
| |
'''The systemBase module provides the base class for the standard
System Base implementations. This systemBase is not itself intended
to be instantiated as a regular Thespian System Base; instead it
provides the common functionality that the various System Base
implementations subclass.
'''
import logging
from thespian.actors import *
from thespian.system import *
from thespian.system.utilis import thesplog
from thespian.system.timing import toTimeDeltaOrNone, ExpirationTimer, unexpired
from thespian.system.messages.admin import *
from thespian.system.messages.status import *
from thespian.system.transport import *
import threading
from contextlib import closing
from datetime import timedelta
import os
MAX_SYSTEM_SHUTDOWN_DELAY = timedelta(seconds=10)
MAX_CHILD_ACTOR_CREATE_DELAY = timedelta(seconds=50)
MAX_CAPABILITY_UPDATE_DELAY = timedelta(seconds=5)
MAX_LOAD_SOURCE_DELAY = timedelta(seconds=61)
MAX_ADMIN_STATUS_REQ_DELAY = timedelta(seconds=2)
MAX_TELL_PERIOD = timedelta(seconds=60)
def ensure_TZ_set():
# Actor engines handle timeouts and tend to sample system time
# frequently. Under Linux, if TZ is not set to a value,
# /etc/localtime or similar is consulted on each call to obtain
# system time which can negatively affect performance. This
# function attempts to set TZ if possible/reasonable.
if 'TZ' in os.environ:
return
for fname in ('/etc/localtime',
'/usr/local/etc/localtime'):
if os.path.exists(fname):
os.environ['TZ'] = ':' + fname
return
# OK if it's not set, just may be slower
class TransmitTrack(object):
def __init__(self, transport, adminAddr):
self._newActorAddress = None
self._pcrFAILED = None
self._transport = transport
self._adminAddr = adminAddr
@property
def failed(self):
return self._pcrFAILED is not None
@property
def failure(self):
return self._pcrFAILED
@property
def failure_message(self):
return getattr(self, '_pcrMessage', None)
def transmit_failed(self, result, intent):
if result == SendStatus.DeadTarget and \
intent.targetAddr != self._adminAddr:
# Forward message to the dead letter handler; if the
# forwarding fails, just discard the message.
self._transport.scheduleTransmit(
None,
TransmitIntent(self._adminAddr,
DeadEnvelope(intent.targetAddr, intent.message)))
self._pcrFAILED = result
self._transport.abort_run()
class NewActorResponse(TransmitTrack):
def __init__(self, transport, adminAddr, *args, **kw):
super(NewActorResponse, self).__init__(transport, adminAddr, *args, **kw)
self._newActorAddress = None
@property
def pending(self):
return self._newActorAddress is None and not self.failed
@property
def actor_address(self):
return self._newActorAddress
def __call__(self, envelope):
if isinstance(envelope.message, PendingActorResponse):
self._newActorAddress = False if envelope.message.errorCode else \
envelope.message.actualAddress
self._pcrFAILED = envelope.message.errorCode
self._pcrMessage = getattr(envelope.message, 'errorStr', None)
# Stop running transport; got new actor address (or failure)
return False
# Discard everything else. Previous requests and operations
# may have caused there to be messages sent back to this
# endpoint that are queued ahead of the PendingActorResponse.
return True # Keep waiting for the PendingActorResponse
class ExternalOpsToActors(object):
def __init__(self, adminAddr, transport=None):
self._numPrimaries = 0
self._cv = threading.Condition()
self._transport_runner = False
# Expects self.transport has already been set by subclass __init__
self.adminAddr = adminAddr
if transport:
self.transport = transport
def _run_transport(self, maximumDuration=None, txonly=False,
incomingHandler=None):
# This is where multiple external threads are synchronized for
# receives. Transmits will flow down into the transmit layer
# where they are queued with thread safety, but threads
# blocking on a receive will all be lined up through this point.
max_runtime = ExpirationTimer(maximumDuration)
with self._cv:
while self._transport_runner:
self._cv.wait(max_runtime.view().remainingSeconds())
if max_runtime.view().expired():
return None
self._transport_runner = True
try:
r = Thespian__UpdateWork()
while isinstance(r, Thespian__UpdateWork):
r = self.transport.run(TransmitOnly if txonly else incomingHandler,
max_runtime.view().remaining())
return r
# incomingHandler callback could deadlock on this same thread; is it ever not None?
finally:
with self._cv:
self._transport_runner = False
self._cv.notify()
def _tx_to_actor(self, actorAddress, message):
# Send a message from this external process to an actor.
# Returns a TransmitTrack object that can be used to check for
# transmit errors.
txwatch = TransmitTrack(self.transport, self.adminAddr)
self.transport.scheduleTransmit(
None,
TransmitIntent(actorAddress, message,
onError=txwatch.transmit_failed))
return txwatch
def _tx_to_admin(self, message):
return self._tx_to_actor(self.adminAddr, message)
def newPrimaryActor(self, actorClass, targetActorRequirements, globalName,
sourceHash=None):
self._numPrimaries = self._numPrimaries + 1
actorClassName = '%s.%s'%(actorClass.__module__, actorClass.__name__) \
if hasattr(actorClass, '__name__') else actorClass
with closing(self.transport.external_transport_clone()) as tx_external:
response = NewActorResponse(tx_external, self.adminAddr)
tx_external.scheduleTransmit(
None,
TransmitIntent(self.adminAddr,
PendingActor(actorClassName,
None, self._numPrimaries,
targetActorRequirements,
globalName=globalName,
sourceHash=sourceHash),
onError=response.transmit_failed))
endwait = ExpirationTimer(MAX_CHILD_ACTOR_CREATE_DELAY)
# Do not use _run_transport: the tx_external transport
# context acquired above is unique to this thread and
# should not be synchronized/restricted by other threads.
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
# Other items might abort the transport run... like transmit
# failures on a previous ask() that itself already timed out.
while response.pending and not endwait.view().expired():
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
if response.failed:
if response.failure == PendingActorResponse.ERROR_Invalid_SourceHash:
raise InvalidActorSourceHash(sourceHash)
if response.failure == PendingActorResponse.ERROR_Invalid_ActorClass:
raise InvalidActorSpecification(actorClass,
response.failure_message)
if response.failure == PendingActorResponse.ERROR_Import:
info = response.failure_message
if info:
thesplog('Actor Create Failure, Import Error: %s', info)
raise ImportError(str(actorClass) + ': ' + info)
thesplog('Actor Create Failure, Import Error')
raise ImportError(actorClass)
if response.failure == PendingActorResponse.ERROR_No_Compatible_ActorSystem:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemFailure("Could not request new Actor from Admin (%s)"
% (response.failure))
if response.actor_address:
return response.actor_address
if response.actor_address is False:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemRequestTimeout(
'No response received to PendingActor request to Admin'
' at %s from %s'%(str(self.adminAddr),
str(self.transport.myAddress)))
def tell(self, anActor, msg):
attemptLimit = ExpirationTimer(MAX_TELL_PERIOD)
# transport may not use sockets, but this helps error handling
# in case it does.
import socket
for attempt in range(5000):
try:
txwatch = self._tx_to_actor(anActor, msg)
for attemptTime in unexpired(attemptLimit):
if not self._run_transport(attemptTime.remaining(),
txonly=True):
# all transmits completed
return
if txwatch.failed:
raise ActorSystemFailure(
'Error sending to %s: %s' % (str(anActor),
str(txwatch.failure)))
raise ActorSystemRequestTimeout(
'Unable to send to %s within %s' %
(str(anActor), str(MAX_TELL_PERIOD)))
except socket.error as ex:
import errno
if errno.EMFILE == ex.errno:
import time
time.sleep(0.1)
else:
raise
def listen(self, timeout):
while True:
response = self._run_transport(toTimeDeltaOrNone(timeout))
if not isinstance(response, ReceiveEnvelope):
break
# Do not send miscellaneous ActorSystemMessages to the caller
# that it might not recognize.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
def ask(self, anActor, msg, timeout):
txwatch = self._tx_to_actor(anActor, msg) # KWQ: pass timeout on tx??
askLimit = ExpirationTimer(toTimeDeltaOrNone(timeout))
for remTime in unexpired(askLimit):
response = self._run_transport(remTime.remaining())
if txwatch.failed:
if txwatch.failure in [SendStatus.DeadTarget,
SendStatus.Failed,
SendStatus.NotSent]:
# Silent failure; not all transports can indicate
# this, so for conformity the Dead Letter handler is
# the intended method of handling this issue.
return None
raise ActorSystemFailure('Transmit of ask message to %s failed (%s)'%(
str(anActor),
str(txwatch.failure)))
if not isinstance(response, ReceiveEnvelope):
# Timed out or other failure, give up.
break
# Do not send miscellaneous ActorSystemMessages to the
# caller that it might not recognize. If one of those was
# received, loop to get another response.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
class systemBase(ExternalOpsToActors):
"""This is the systemBase base class that various Thespian System Base
implementations should subclass. The System Base is
instantiated by each process that wishes to utilize an Actor
System and runs in the context of that process (as opposed to
the System Admin that may run in its own process).
This base is not present in the Actors themselves, only in the
external application that wish to talk to Actors.
Depending on the System Base implementation chosen by that
process, the instantiation may be private to that process or
shared by other processes; in the former case, there will be an
instance of this class in each process accessing the shared
Actor System, representing the Portal between the "external"
environment of that process and the shared Actor System
Implementation.
All ActorAddresses generated via newActor and newPrimaryActor
are local to this ActorSystemBase instance. Any and *all*
messages sent to other Actors must be able to be appropriately
serialized; this allows the pickling/unpickling process to
translate an ActorAddress from a local representation to a
global or remote representation.
"""
def __init__(self, system, logDefs = None):
ensure_TZ_set()
# Expects self.transport has already been set by subclass __init__
super(systemBase, self).__init__(
self.transport.getAdminAddr(system.capabilities))
tryingTime = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY + timedelta(seconds=1))
while not tryingTime.view().expired():
if not self.transport.probeAdmin(self.adminAddr):
self._startAdmin(self.adminAddr,
self.transport.myAddress,
system.capabilities,
logDefs)
if self._verifyAdminRunning(): return
import time
time.sleep(0.5) # Previous version may have been exiting
if not self._verifyAdminRunning():
raise InvalidActorAddress(self.adminAddr,
'not a valid or useable ActorSystem Admin')
# KWQ: more details? couldn't start @ addr? response was ? instead of expected Thespian_SystemStatus?
def _verifyAdminRunning(self):
"""Returns boolean verification that the Admin is running and
available. Will query the admin for a positive response,
blocking until one is received.
"""
txwatch = self._tx_to_admin(QueryExists())
response = self._run_transport(MAX_ADMIN_STATUS_REQ_DELAY)
return not txwatch.failed and \
isinstance(response, ReceiveEnvelope) and \
isinstance(response.message, QueryAck) \
and not response.message.inShutdown
def __getstate__(self):
raise CannotPickle('ActorSystem cannot be Pickled.')
def shutdown(self):
thesplog('ActorSystem shutdown requested.', level=logging.INFO)
time_to_quit = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY)
txwatch = self._tx_to_admin(SystemShutdown())
for remaining_time in unexpired(time_to_quit):
response = self._run_transport(remaining_time.remaining())
if txwatch.failed:
thesplog('Could not send shutdown request to Admin'
'; aborting but not necessarily stopped',
level=logging.WARNING)
return
if isinstance(response, ReceiveEnvelope):
if isinstance(response.message, SystemShutdownCompleted):
break
else:
thesplog('Expected shutdown completed message, got: %s', response.message,
level=logging.WARNING)
elif isinstance(response, (Thespian__Run_Expired,
Thespian__Run_Terminated)):
break
else:
thesplog('No response to Admin shutdown request; Actor system not completely shutdown',
level=logging.ERROR)
self.transport.close()
thesplog('ActorSystem shutdown complete.')
def updateCapability(self, capabilityName, capabilityValue=None):
attemptLimit = ExpirationTimer(MAX_CAPABILITY_UPDATE_DELAY)
txwatch = self._tx_to_admin(CapabilityUpdate(capabilityName,
capabilityValue))
for remaining_time in unexpired(attemptLimit):
if not self._run_transport(remaining_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending capability updates to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout(
'Unable to confirm capability update in %s' %
str(MAX_CAPABILITY_UPDATE_DELAY))
def loadActorSource(self, fname):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
f = fname if hasattr(fname, 'read') else open(fname, 'rb')
try:
d = f.read()
import hashlib
hval = hashlib.md5(d).hexdigest()
txwatch = self._tx_to_admin(
ValidateSource(hval, d, getattr(f, 'name',
str(fname)
if hasattr(fname, 'read')
else fname)))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
# All transmits completed
return hval
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source load to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Load source timeout: ' +
str(loadLimit))
finally:
f.close()
def unloadActorSource(self, sourceHash):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
txwatch = self._tx_to_admin(ValidateSource(sourceHash, None))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source unload to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Unload source timeout: ' +
str(loadLimit))
def external_clone(self):
"""Get a separate local endpoint that does not commingle traffic with
the main ActorSystem or other contexts. Makes internal
blocking calls, so primarily appropriate for a
multi-threaded client environment.
"""
return BaseContext(self.adminAddr, self.transport)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Actors that involve themselves in topology
def preRegisterRemoteSystem(self, remoteAddress, remoteCapabilities):
self.send(self.adminAddr,
ConventionRegister(
self.transport.getAddressFromString(remoteAddress),
remoteCapabilities,
preRegister=True))
def deRegisterRemoteSystem(self, remoteAddress):
self.send(
self.adminAddr,
ConventionDeRegister(
remoteAddress
if isinstance(remoteAddress, ActorAddress) else
self.transport.getAddressFromString(remoteAddress)))
class BaseContext(ExternalOpsToActors):
def __init__(self, adminAddr, transport):
super(BaseContext, self).__init__(adminAddr,
transport.external_transport_clone())
def exit_context(self):
self.transport.close()
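# A minimal end-user sketch using the public thespian.actors API, which is
# layered on the systemBase above ('simpleSystemBase' is one stock base):
#
#   from thespian.actors import Actor, ActorSystem
#
#   class Echo(Actor):
#       def receiveMessage(self, message, sender):
#           self.send(sender, message)
#
#   asys = ActorSystem('simpleSystemBase')
#   echo = asys.createActor(Echo)
#   print(asys.ask(echo, 'hello', 1))    # -> 'hello'
#   asys.shutdown()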
| |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 Credentials.
This module provides credentials based on OAuth 2.0 access and refresh tokens.
These credentials usually access resources on behalf of a user (resource
owner).
Specifically, this is intended to use access tokens acquired using the
`Authorization Code grant`_ and can refresh those tokens using an
optional `refresh token`_.
Obtaining the initial access and refresh token is outside of the scope of this
module. Consult `rfc6749 section 4.1`_ for complete details on the
Authorization Code grant flow.
.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
"""
from datetime import datetime
import io
import json
import six
from google.auth import _cloud_sdk
from google.auth import _helpers
from google.auth import credentials
from google.auth import exceptions
from google.oauth2 import _client
# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject):
"""Credentials using OAuth 2.0 access and refresh tokens.
The credentials are considered immutable. If you want to modify the
quota project, use :meth:`with_quota_project` or ::
credentials = credentials.with_quota_project('myproject-123')
"""
def __init__(
self,
token,
refresh_token=None,
id_token=None,
token_uri=None,
client_id=None,
client_secret=None,
scopes=None,
quota_project_id=None,
expiry=None,
):
"""
Args:
token (Optional(str)): The OAuth 2.0 access token. Can be None
if refresh information is provided.
refresh_token (str): The OAuth 2.0 refresh token. If specified,
credentials can be refreshed.
id_token (str): The Open ID Connect ID Token.
token_uri (str): The OAuth 2.0 authorization server's token
endpoint URI. Must be specified for refresh, can be left as
None if the token can not be refreshed.
client_id (str): The OAuth 2.0 client ID. Must be specified for
refresh, can be left as None if the token can not be refreshed.
client_secret(str): The OAuth 2.0 client secret. Must be specified
for refresh, can be left as None if the token can not be
refreshed.
scopes (Sequence[str]): The scopes used to obtain authorization.
This parameter is used by :meth:`has_scopes`. OAuth 2.0
credentials can not request additional scopes after
authorization. The scopes must be derivable from the refresh
token if refresh information is provided (e.g. The refresh
token scopes are a superset of this or contain a wild card
scope like 'https://www.googleapis.com/auth/any-api').
quota_project_id (Optional[str]): The project ID used for quota and billing.
This project may be different from the project used to
create the credentials.
"""
super(Credentials, self).__init__()
self.token = token
self.expiry = expiry
self._refresh_token = refresh_token
self._id_token = id_token
self._scopes = scopes
self._token_uri = token_uri
self._client_id = client_id
self._client_secret = client_secret
self._quota_project_id = quota_project_id
def __getstate__(self):
"""A __getstate__ method must exist for the __setstate__ to be called
This is identical to the default implementation.
See https://docs.python.org/3.7/library/pickle.html#object.__setstate__
"""
return self.__dict__
def __setstate__(self, d):
"""Credentials pickled with older versions of the class do not have
all the attributes."""
self.token = d.get("token")
self.expiry = d.get("expiry")
self._refresh_token = d.get("_refresh_token")
self._id_token = d.get("_id_token")
self._scopes = d.get("_scopes")
self._token_uri = d.get("_token_uri")
self._client_id = d.get("_client_id")
self._client_secret = d.get("_client_secret")
self._quota_project_id = d.get("_quota_project_id")
@property
def refresh_token(self):
"""Optional[str]: The OAuth 2.0 refresh token."""
return self._refresh_token
@property
def scopes(self):
"""Optional[str]: The OAuth 2.0 permission scopes."""
return self._scopes
@property
def token_uri(self):
"""Optional[str]: The OAuth 2.0 authorization server's token endpoint
URI."""
return self._token_uri
@property
def id_token(self):
"""Optional[str]: The Open ID Connect ID Token.
Depending on the authorization server and the scopes requested, this
may be populated when credentials are obtained and updated when
:meth:`refresh` is called. This token is a JWT. It can be verified
and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.
"""
return self._id_token
@property
def client_id(self):
"""Optional[str]: The OAuth 2.0 client ID."""
return self._client_id
@property
def client_secret(self):
"""Optional[str]: The OAuth 2.0 client secret."""
return self._client_secret
@property
def requires_scopes(self):
"""False: OAuth 2.0 credentials have their scopes set when
the initial token is requested and can not be changed."""
return False
@_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
def with_quota_project(self, quota_project_id):
return self.__class__(
self.token,
refresh_token=self.refresh_token,
id_token=self.id_token,
token_uri=self.token_uri,
client_id=self.client_id,
client_secret=self.client_secret,
scopes=self.scopes,
quota_project_id=quota_project_id,
)
@_helpers.copy_docstring(credentials.Credentials)
def refresh(self, request):
if (
self._refresh_token is None
or self._token_uri is None
or self._client_id is None
or self._client_secret is None
):
raise exceptions.RefreshError(
"The credentials do not contain the necessary fields need to "
"refresh the access token. You must specify refresh_token, "
"token_uri, client_id, and client_secret."
)
access_token, refresh_token, expiry, grant_response = _client.refresh_grant(
request,
self._token_uri,
self._refresh_token,
self._client_id,
self._client_secret,
self._scopes,
)
self.token = access_token
self.expiry = expiry
self._refresh_token = refresh_token
self._id_token = grant_response.get("id_token")
if self._scopes and "scopes" in grant_response:
requested_scopes = frozenset(self._scopes)
granted_scopes = frozenset(grant_response["scopes"].split())
scopes_requested_but_not_granted = requested_scopes - granted_scopes
if scopes_requested_but_not_granted:
raise exceptions.RefreshError(
"Not all requested scopes were granted by the "
"authorization server, missing scopes {}.".format(
", ".join(scopes_requested_but_not_granted)
)
)
@classmethod
def from_authorized_user_info(cls, info, scopes=None):
"""Creates a Credentials instance from parsed authorized user info.
Args:
info (Mapping[str, str]): The authorized user info in Google
format.
scopes (Sequence[str]): Optional list of scopes to include in the
credentials.
Returns:
google.oauth2.credentials.Credentials: The constructed
credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
keys_needed = set(("refresh_token", "client_id", "client_secret"))
missing = keys_needed.difference(six.iterkeys(info))
if missing:
raise ValueError(
"Authorized user info was not in the expected format, missing "
"fields {}.".format(", ".join(missing))
)
# access token expiry (datetime obj); auto-expire if not saved
expiry = info.get("expiry")
if expiry:
expiry = datetime.strptime(
expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S"
)
else:
expiry = _helpers.utcnow() - _helpers.CLOCK_SKEW
# process scopes, which needs to be a seq
if scopes is None and "scopes" in info:
scopes = info.get("scopes")
if isinstance(scopes, str):
scopes = scopes.split(" ")
return cls(
token=info.get("token"),
refresh_token=info.get("refresh_token"),
token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, # always overrides
scopes=scopes,
client_id=info.get("client_id"),
client_secret=info.get("client_secret"),
quota_project_id=info.get("quota_project_id"), # may not exist
expiry=expiry,
)
@classmethod
def from_authorized_user_file(cls, filename, scopes=None):
"""Creates a Credentials instance from an authorized user json file.
Args:
filename (str): The path to the authorized user json file.
scopes (Sequence[str]): Optional list of scopes to include in the
credentials.
Returns:
google.oauth2.credentials.Credentials: The constructed
credentials.
Raises:
ValueError: If the file is not in the expected format.
"""
with io.open(filename, "r", encoding="utf-8") as json_file:
data = json.load(json_file)
return cls.from_authorized_user_info(data, scopes)
def to_json(self, strip=None):
"""Utility function that creates a JSON representation of a Credentials
object.
Args:
strip (Sequence[str]): Optional list of members to exclude from the
generated JSON.
Returns:
str: A JSON representation of this instance. When converted into
a dictionary, it can be passed to from_authorized_user_info()
to create a new credential instance.
"""
prep = {
"token": self.token,
"refresh_token": self.refresh_token,
"token_uri": self.token_uri,
"client_id": self.client_id,
"client_secret": self.client_secret,
"scopes": self.scopes,
}
if self.expiry: # flatten expiry timestamp
prep["expiry"] = self.expiry.isoformat() + "Z"
# Remove empty entries (those which are None)
prep = {k: v for k, v in prep.items() if v is not None}
        # Remove entries that explicitly need to be removed
if strip is not None:
prep = {k: v for k, v in prep.items() if k not in strip}
return json.dumps(prep)
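
# Illustrative sketch (not part of the original module): round-tripping a
# Credentials instance through to_json() and from_authorized_user_info().
# The client id, client secret, and refresh token below are placeholder
# values, not real credentials.
def _example_credentials_roundtrip():
    import json as _json

    creds = Credentials(
        token=None,
        refresh_token="1//placeholder-refresh-token",
        client_id="placeholder.apps.googleusercontent.com",
        client_secret="placeholder-secret",
    )
    payload = _json.loads(creds.to_json(strip=["token"]))
    # from_authorized_user_info() rebuilds the credentials; note that the
    # token_uri is always reset to the Google OAuth2 token endpoint.
    restored = Credentials.from_authorized_user_info(payload)
    assert restored.refresh_token == creds.refresh_token
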
class UserAccessTokenCredentials(credentials.CredentialsWithQuotaProject):
"""Access token credentials for user account.
Obtain the access token for a given user account or the current active
user account with the ``gcloud auth print-access-token`` command.
Args:
account (Optional[str]): Account to get the access token for. If not
specified, the current active account will be used.
quota_project_id (Optional[str]): The project ID used for quota
and billing.
"""
def __init__(self, account=None, quota_project_id=None):
super(UserAccessTokenCredentials, self).__init__()
self._account = account
self._quota_project_id = quota_project_id
def with_account(self, account):
"""Create a new instance with the given account.
Args:
account (str): Account to get the access token for.
Returns:
google.oauth2.credentials.UserAccessTokenCredentials: The created
credentials with the given account.
"""
return self.__class__(account=account, quota_project_id=self._quota_project_id)
@_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
def with_quota_project(self, quota_project_id):
return self.__class__(account=self._account, quota_project_id=quota_project_id)
def refresh(self, request):
"""Refreshes the access token.
Args:
            request (google.auth.transport.Request): This argument is required
                by the base class interface but is not used in this
                implementation, so callers may simply pass `None`.
Raises:
google.auth.exceptions.UserAccessTokenError: If the access token
refresh failed.
"""
self.token = _cloud_sdk.get_auth_access_token(self._account)
@_helpers.copy_docstring(credentials.Credentials)
def before_request(self, request, method, url, headers):
self.refresh(request)
self.apply(headers)
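
# Illustrative sketch (assumes the Google Cloud SDK is installed and a user
# has logged in via ``gcloud auth login``): obtaining an access token for a
# specific account. The account name below is a placeholder.
def _example_user_access_token():
    creds = UserAccessTokenCredentials(account="user@example.com")
    # The request argument is unused by this implementation, so None is fine.
    creds.refresh(None)
    headers = {}
    creds.apply(headers)  # adds the Authorization header to the dict
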
import abc
from collections import namedtuple
from ctypes import POINTER, Structure, byref
from functools import reduce
from operator import mul
import numpy as np
import sympy
from sympy.core.assumptions import _assume_rules
from cached_property import cached_property
from cgen import Struct, Value
from devito.data import default_allocator
from devito.symbolics import aligned_indices
from devito.tools import (Pickable, ctypes_to_cstr, dtype_to_cstr, dtype_to_ctype,
frozendict, memoized_meth)
from devito.types.args import ArgProvider
from devito.types.caching import Cached
from devito.types.lazy import Evaluable
from devito.types.utils import DimensionTuple
__all__ = ['Symbol', 'Scalar', 'Indexed', 'Object', 'LocalObject', 'CompositeObject']
Size = namedtuple('Size', 'left right')
Offset = namedtuple('Offset', 'left right')
class Basic(object):
"""
Four relevant types inherit from this class:
* AbstractSymbol: represents a scalar; may carry data; may be used
to build equations.
* AbstractFunction: represents a discrete R^n -> R function; may
carry data; may be used to build equations.
* AbstractTensor: represents a discrete 2nd order tensor or vector:
R^n -> R^(nd x nd) tensor (nd dimensions),
R^n -> R^nd vector (nd dimensions),
may carry data; may be used to build equations.
* AbstractObject: represents a generic object, for example a (pointer
to) data structure.
Basic
|
--------------------------------------------------------------
| | | |
AbstractSymbol AbstractFunction AbstractTensor AbstractObject
All these subtypes must implement a number of methods/properties to enable
code generation via the Devito compiler. These methods/properties are
easily recognizable as their name starts with _C_.
Notes
-----
The AbstractFunction sub-hierarchy is implemented in :mod:`dense.py`.
The AbstractTensor sub-hierarchy is implemented in :mod:`tensor.py`.
"""
# Top hierarchy
is_AbstractFunction = False
is_AbstractSymbol = False
is_AbstractObject = False
# Symbolic objects created internally by Devito
is_Symbol = False
is_ArrayBasic = False
is_Array = False
is_PointerArray = False
is_ObjectArray = False
is_Object = False
is_LocalObject = False
# Created by the user
is_Input = False
# Scalar symbolic objects created by the user
is_Dimension = False
is_Constant = False
# Tensor symbolic objects created by the user
is_DiscreteFunction = False
is_Function = False
is_TimeFunction = False
is_TempFunction = False
is_SparseTimeFunction = False
is_SparseFunction = False
is_PrecomputedSparseFunction = False
is_PrecomputedSparseTimeFunction = False
# Time dependence
is_TimeDependent = False
# Tensor and Vector valued objects
is_VectorValued = False
is_TensorValued = False
# Basic symbolic object properties
is_Scalar = False
is_Tensor = False
# Some other properties
is_PerfKnob = False # Does it impact the Operator performance?
@abc.abstractmethod
def __init__(self, *args, **kwargs):
return
@abc.abstractproperty
def _C_name(self):
"""
The C-level name of the object.
Returns
-------
str
"""
return
@abc.abstractproperty
def _C_typename(self):
"""
The C-level type of the object.
Returns
-------
str
"""
return
@abc.abstractproperty
def _C_typedata(self):
"""
The C-level type of the data values.
Returns
-------
str
"""
return
@abc.abstractproperty
def _C_ctype(self):
"""
The C-level type of the object, as a ctypes object, suitable for type
checking when calling functions via ctypes.
Returns
-------
ctypes type
"""
return
@property
def _C_typedecl(self):
"""
The C-level struct declaration representing the object.
Returns
-------
cgen.Struct or None
None if the object C type can be expressed with a basic C type,
such as float or int.
"""
return
@property
def _C_symbol(self):
"""
The C-level symbol. This may or may not coincide with the symbol used
to make up an `Eq`. For example, if `self` provides the C code with
a struct, then the _C_symbol will be the symbol representing such struct.
Returns
-------
Basic
"""
return self
class AbstractSymbol(sympy.Symbol, Basic, Pickable, Evaluable):
"""
Base class for scalar symbols.
The hierarchy is structured as follows
AbstractSymbol
|
-------------------------------------
| |
DataSymbol Symbol
| |
---------------- -------------------
| | | |
Constant DefaultDimension Scalar Dimension
<:mod:`dimension.py`>
All symbols can be used to build equations. However, while DataSymbol
carries data, Symbol is a pure symbolic object.
Constant, DefaultDimension, and Dimension (and most of its subclasses) are
part of the user API; Scalar, instead, is only used internally by Devito.
DefaultDimension and Dimension define a problem dimension (in other words,
an "iteration space"). They can be used to index into Functions. For more
information, refer to :mod:`dimension.py`.
"""
is_AbstractSymbol = True
is_Symbol = True
# SymPy default assumptions
is_real = True
is_imaginary = False
is_commutative = True
@classmethod
def _filter_assumptions(cls, **kwargs):
"""Extract sympy.Symbol-specific kwargs."""
assumptions = {}
for i in list(kwargs):
if i in _assume_rules.defined_facts:
assumptions[i] = kwargs.pop(i)
return assumptions, kwargs
def __new__(cls, *args, **kwargs):
name = kwargs.get('name') or args[0]
assumptions, kwargs = cls._filter_assumptions(**kwargs)
# Create the new Symbol
# Note: use __xnew__ to bypass sympy caching
newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)
# Initialization
newobj._dtype = cls.__dtype_setup__(**kwargs)
newobj.__init_finalize__(*args, **kwargs)
return newobj
@classmethod
def __dtype_setup__(cls, **kwargs):
"""Extract the object data type from ``kwargs``."""
return kwargs.get('dtype', np.int32)
def __init__(self, *args, **kwargs):
# no-op, the true init is performed by __init_finalize__
pass
def __init_finalize__(self, *args, **kwargs):
self._is_const = kwargs.get('is_const', False)
@property
def dtype(self):
"""The data type of the object."""
return self._dtype
@property
def indices(self):
return ()
@property
def dimensions(self):
return self.indices
@property
def shape(self):
return ()
@property
def ndim(self):
return 0
@property
def symbolic_shape(self):
return ()
@property
def base(self):
return self
@property
def function(self):
return self
@property
def evaluate(self):
return self
def indexify(self):
return self
@property
def is_const(self):
"""
True if the symbol value cannot be modified within an Operator (and thus
its value is provided by the user directly from Python-land), False otherwise.
"""
return self._is_const
@property
def _C_name(self):
return self.name
@property
def _C_typename(self):
return '%s%s' % ('const ' if self.is_const else '',
dtype_to_cstr(self.dtype))
@property
def _C_typedata(self):
return dtype_to_cstr(self.dtype)
@property
def _C_ctype(self):
return dtype_to_ctype(self.dtype)
def _subs(self, old, new, **hints):
"""
This stub allows sympy.Basic.subs to operate on an expression
involving devito Scalars. Ordinarily the comparisons between
devito subclasses of sympy types are quite strict.
"""
try:
if old.name == self.name:
return new
except AttributeError:
pass
return self
# Pickling support
_pickle_args = []
_pickle_kwargs = ['name', 'dtype', 'is_const']
__reduce_ex__ = Pickable.__reduce_ex__
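
# Illustrative sketch: _filter_assumptions() splits SymPy assumptions from
# Devito-specific kwargs. Here ``positive`` is a SymPy defined fact, while
# ``dtype`` is Devito-specific and stays in the remaining kwargs.
def _example_filter_assumptions():
    assumptions, kwargs = AbstractSymbol._filter_assumptions(
        positive=True, dtype=np.float64
    )
    assert assumptions == {'positive': True}
    assert kwargs == {'dtype': np.float64}
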
class Symbol(AbstractSymbol, Cached):
"""
A scalar symbol, cached by both Devito and SymPy, which does not carry
any data.
Notes
-----
A Symbol may not be in the SymPy cache, but still be present in the
Devito cache. This is because SymPy caches operations, rather than
actual objects.
"""
@classmethod
def _cache_key(cls, *args, **kwargs):
args = list(args)
key = {}
# The base type is necessary, otherwise two objects such as
# `Scalar(name='s')` and `Dimension(name='s')` would have the same key
key['cls'] = cls
# The name is always present, and added as if it were an arg
key['name'] = kwargs.pop('name', None) or args.pop(0)
# From the args
key['args'] = tuple(args)
# From the kwargs
key.update(kwargs)
return frozendict(key)
def __new__(cls, *args, **kwargs):
key = cls._cache_key(*args, **kwargs)
obj = cls._cache_get(key)
if obj is not None:
return obj
# Not in cache. Create a new Symbol via sympy.Symbol
name = kwargs.get('name') or args[0]
assumptions, kwargs = cls._filter_assumptions(**kwargs)
# Note: use __xnew__ to bypass sympy caching
newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)
# Initialization
newobj._dtype = cls.__dtype_setup__(**kwargs)
newobj.__init_finalize__(*args, **kwargs)
# Store new instance in symbol cache
Cached.__init__(newobj, key)
return newobj
__hash__ = Cached.__hash__
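
# Illustrative sketch: Symbols are cached by Devito, so constructing the
# same Symbol twice is expected to return the very same object, while a
# different dtype changes the cache key and hence produces a new object.
def _example_symbol_caching():
    s0 = Symbol(name='s')
    s1 = Symbol(name='s')
    assert s0 is s1
    s2 = Symbol(name='s', dtype=np.float32)
    assert s2 is not s0
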
class DataSymbol(AbstractSymbol, Cached):
"""
A scalar symbol, cached by both Devito and SymPy, which carries data.
"""
@classmethod
def _cache_key(cls, *args, **kwargs):
return cls
def __new__(cls, *args, **kwargs):
key = cls._cache_key(*args, **kwargs)
obj = cls._cache_get(key)
if obj is not None:
return obj
# Not in cache. Create a new Symbol via sympy.Symbol
name = kwargs.get('name') or args[0]
assumptions, kwargs = cls._filter_assumptions(**kwargs)
# Create new, unique type instance from cls and the symbol name
newcls = type(name, (cls,), dict(cls.__dict__))
# Create the new Symbol and invoke __init__
newobj = sympy.Symbol.__new__(newcls, name, **assumptions)
# Initialization
newobj._dtype = cls.__dtype_setup__(**kwargs)
newobj.__init_finalize__(*args, **kwargs)
# Store new instance in symbol cache
Cached.__init__(newobj, newcls)
return newobj
__hash__ = Cached.__hash__
# Pickling support
@property
def _pickle_reconstruct(self):
return self.__class__.__base__
class Scalar(Symbol, ArgProvider):
"""
Like a Symbol, but in addition it can pass runtime values to an Operator.
Parameters
----------
name : str
Name of the symbol.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type. Defaults
to ``np.float32``.
is_const : bool, optional
True if the symbol value cannot be modified within an Operator,
False otherwise. Defaults to False.
**assumptions
Any SymPy assumptions, such as ``nonnegative=True``. Refer to the
SymPy documentation for more information.
"""
is_Scalar = True
@classmethod
def __dtype_setup__(cls, **kwargs):
return kwargs.get('dtype', np.float32)
@property
def default_value(self):
return None
@property
def _arg_names(self):
return (self.name,)
def _arg_defaults(self, **kwargs):
if self.default_value is None:
# It is possible that the Scalar value is provided indirectly
# through a wrapper object (e.g., a Dimension spacing `h_x` gets its
# value via a Grid object)
return {}
else:
return {self.name: self.default_value}
def _arg_values(self, **kwargs):
if self.name in kwargs:
return {self.name: kwargs.pop(self.name)}
else:
return self._arg_defaults()
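
# Illustrative sketch: a Scalar has no default value, so its runtime value
# must be supplied by the user (or indirectly by a wrapper object) when an
# Operator is invoked.
def _example_scalar_args():
    alpha = Scalar(name='alpha')
    assert alpha._arg_defaults() == {}  # no default value available
    assert alpha._arg_values(alpha=2.0) == {'alpha': 2.0}
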
class AbstractTensor(sympy.ImmutableDenseMatrix, Basic, Pickable, Evaluable):
"""
    Base class for vector and tensor valued functions. It inherits from and
    mimics the behavior of a sympy.ImmutableDenseMatrix.
    The sub-hierarchy is as follows
AbstractTensor
|
TensorFunction
|
---------------------------------
| |
VectorFunction TensorTimeFunction
\-------\ |
\------- VectorTimeFunction
There are four relevant AbstractTensor sub-types: ::
* TensorFunction: A space-varying tensor valued function.
* VectorFunction: A space-varying vector valued function.
* TensorTimeFunction: A time-space-varying tensor valued function.
* VectorTimeFunction: A time-space-varying vector valued function.
"""
# Sympy attributes
is_MatrixLike = True
is_Matrix = True
# Devito attributes
is_AbstractTensor = True
is_TensorValued = True
is_VectorValued = False
@classmethod
def _new(cls, *args, **kwargs):
if args:
try:
# Constructor if input is (rows, cols, lambda)
newobj = super(AbstractTensor, cls)._new(*args)
except ValueError:
                # Constructor if input is (rows, cols, list_of_lists); the call
                # above fails because sympy expects a flattened list of entries.
newobj = super(AbstractTensor, cls)._new(args[2])
# Filter grid and dimensions
grids = {getattr(c, 'grid', None) for c in newobj._mat} - {None}
dimensions = {d for c in newobj._mat
for d in getattr(c, 'dimensions', ())} - {None}
# If none of the components are devito objects, returns a sympy Matrix
if len(grids) == 0 and len(dimensions) == 0:
return sympy.ImmutableDenseMatrix(*args)
elif len(grids) > 0:
dimensions = None
assert len(grids) == 1
grid = grids.pop()
else:
grid = None
dimensions = tuple(dimensions)
            # Initialize the constructed object
newobj.__init_finalize__(newobj.rows, newobj.cols, newobj._mat,
grid=grid, dimensions=dimensions)
else:
# Initialize components and create new Matrix from standard
# Devito inputs
comps = cls.__subfunc_setup__(*args, **kwargs)
newobj = super(AbstractTensor, cls)._new(comps)
newobj.__init_finalize__(*args, **kwargs)
return newobj
def __init_finalize__(self, *args, **kwargs):
pass
__hash__ = sympy.ImmutableDenseMatrix.__hash__
def doit(self, **hint):
return self
def _eval_matrix_mul(self, other):
"""
Copy paste from sympy to avoid explicit call to sympy.Add
TODO: fix inside sympy
"""
other_len = other.rows*other.cols
new_len = self.rows*other.cols
new_mat = [self.zero]*new_len
# If we multiply an n x 0 with a 0 x m, the
# expected behavior is to produce an n x m matrix of zeros
if self.cols != 0 and other.rows != 0:
self_cols = self.cols
mat = self._mat
other_mat = other._mat
for i in range(new_len):
row, col = i // other.cols, i % other.cols
row_indices = range(self_cols*row, self_cols*(row+1))
col_indices = range(col, other_len, other.cols)
vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)]
new_mat[i] = sum(vec)
# Get new class and return product
newcls = self.classof_prod(other, new_mat)
return newcls._new(self.rows, other.cols, new_mat, copy=False)
@classmethod
def __subfunc_setup__(cls, *args, **kwargs):
"""Setup each component of the tensor as a Devito type."""
return []
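
# Illustrative sketch: when none of the matrix components carry a Devito
# grid or dimensions, AbstractTensor._new is expected to fall back to a
# plain sympy.ImmutableDenseMatrix rather than a Devito tensor type.
def _example_tensor_fallback():
    m = AbstractTensor._new(2, 2, lambda i, j: sympy.Integer(i + j))
    assert isinstance(m, sympy.ImmutableDenseMatrix)
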
class AbstractFunction(sympy.Function, Basic, Cached, Pickable, Evaluable):
"""
    Base class for tensor symbols, cached by both SymPy and Devito. It inherits
    from and mimics the behaviour of a sympy.Function.
The hierarchy is structured as follows
AbstractFunction
|
---------------------------------
| |
DiscreteFunction Array
|
----------------------------------------
| |
| AbstractSparseFunction
| |
| -----------------------------------------------------
| | | |
Function SparseFunction AbstractSparseTimeFunction PrecomputedSparseFunction
| | | |
| | ------------------------------------ --------
| | | | |
TimeFunction SparseTimeFunction PrecomputedSparseTimeFunction
    There are seven relevant AbstractFunction sub-types: ::
* Array: A function that does not carry data.
* Function: A space-varying discrete function, which carries user data.
* TimeFunction: A time- and space-varying discrete function, which carries
user data.
* SparseFunction: A space-varying discrete function representing "sparse"
points, i.e. points that are not aligned with the
computational grid.
* SparseTimeFunction: A time- and space-varying function representing "sparse"
points, i.e. points that are not aligned with the
computational grid.
* PrecomputedSparseFunction: A SparseFunction that uses a custom interpolation
scheme, instead of linear interpolators.
* PrecomputedSparseTimeFunction: A SparseTimeFunction that uses a custom
interpolation scheme, instead of linear
interpolators.
"""
# Sympy attributes, explicitly say these are not Matrices
is_MatrixLike = False
is_Matrix = False
is_AbstractFunction = True
is_Tensor = True
# SymPy default assumptions
is_real = True
is_imaginary = False
is_commutative = True
@classmethod
def _cache_key(cls, *args, **kwargs):
return cls, args
def __new__(cls, *args, **kwargs):
options = kwargs.get('options', {'evaluate': False})
# Is the object already in cache (e.g., f(x), f(x+1)) ?
key = cls._cache_key(*args, **kwargs)
obj = cls._cache_get(key)
if obj is not None:
return obj
# Does the base object exist at least (e.g. f(x))?
obj = cls._cache_get(cls)
if obj is not None:
newobj = sympy.Function.__new__(cls, *args, **options)
newobj.__init_cached__(cls)
Cached.__init__(newobj, key)
return newobj
# Preprocess arguments
args, kwargs = cls.__args_setup__(*args, **kwargs)
# Not in cache. Create a new Function via sympy.Function
name = kwargs.get('name')
dimensions, indices = cls.__indices_setup__(**kwargs)
# Create new, unique type instance from cls and the symbol name
newcls = type(name, (cls,), dict(cls.__dict__))
# Create the new Function object and invoke __init__
newobj = sympy.Function.__new__(newcls, *indices, **options)
# Initialization. The following attributes must be available
# when executing __init_finalize__
newobj._name = name
newobj._dimensions = dimensions
newobj._shape = cls.__shape_setup__(**kwargs)
newobj._dtype = cls.__dtype_setup__(**kwargs)
newobj.__init_finalize__(*args, **kwargs)
        # All objects cached on the AbstractFunction `newobj` keep a reference
        # to `newobj` through the `function` field. Thus, all indexified
        # objects will point to `newobj`, the "actual Function".
newobj.function = newobj
# Store new instance in symbol cache
key = (newcls, indices)
Cached.__init__(newobj, key, newcls)
return newobj
def __init__(self, *args, **kwargs):
# no-op, the true init is performed by __init_finalize__
pass
def __init_finalize__(self, *args, **kwargs):
# Setup halo and padding regions
self._is_halo_dirty = False
self._halo = self.__halo_setup__(**kwargs)
self._padding = self.__padding_setup__(**kwargs)
__hash__ = Cached.__hash__
@classmethod
def __args_setup__(cls, *args, **kwargs):
"""
Preprocess *args and **kwargs before object initialization.
Notes
-----
This stub is invoked only if a look up in the cache fails.
"""
return args, kwargs
@classmethod
def __indices_setup__(cls, **kwargs):
"""Extract the object indices from ``kwargs``."""
return (), ()
@classmethod
def __shape_setup__(cls, **kwargs):
"""Extract the object shape from ``kwargs``."""
return ()
@classmethod
def __dtype_setup__(cls, **kwargs):
"""Extract the object data type from ``kwargs``."""
return None
def __halo_setup__(self, **kwargs):
return tuple(kwargs.get('halo', [(0, 0) for i in range(self.ndim)]))
def __padding_setup__(self, **kwargs):
return tuple(kwargs.get('padding', [(0, 0) for i in range(self.ndim)]))
@cached_property
def _honors_autopadding(self):
"""
        True if the actual padding is greater than or equal to whatever
        autopadding would produce, False otherwise.
"""
autopadding = self.__padding_setup__(autopadding=True)
return all(l0 >= l1 and r0 >= r1
for (l0, r0), (l1, r1) in zip(self.padding, autopadding))
@property
def name(self):
"""The name of the object."""
return self._name
@property
def indices(self):
"""The indices (aka dimensions) of the object."""
return DimensionTuple(*self.args, getters=self.dimensions)
@property
def indices_ref(self):
"""The reference indices of the object (indices at first creation)."""
return DimensionTuple(*self.function.indices, getters=self.dimensions)
@property
def origin(self):
"""
        Origin of the AbstractFunction in terms of its Dimensions:
f(x) : origin = 0
f(x + hx/2) : origin = hx/2
"""
return tuple(r - d for d, r in zip(self.dimensions, self.indices_ref))
@property
def dimensions(self):
"""Tuple of Dimensions representing the object indices."""
return self._dimensions
@property
def _eval_deriv(self):
return self
@property
def _is_on_grid(self):
"""
Check whether the object is on the grid and requires averaging.
For example, if the original non-staggered function is f(x)
then f(x) is on the grid and f(x + h_x/2) is off the grid.
"""
return self._check_indices(inds=self.indices)
@memoized_meth
def _check_indices(self, inds=None):
"""
Check if the function indices are aligned with the dimensions.
"""
inds = inds or self.indices
return all([aligned_indices(i, j, d.spacing) for i, j, d in
zip(inds, self.indices_ref, self.dimensions)])
@property
def evaluate(self):
# Average values if at a location not on the Function's grid
if self._is_on_grid:
return self
weight = 1.0
avg_list = [self]
is_averaged = False
for i, ir, d in zip(self.indices, self.indices_ref, self.dimensions):
off = (i - ir)/d.spacing
if not isinstance(off, sympy.Number) or int(off) == off:
pass
else:
weight *= 1/2
is_averaged = True
avg_list = [(a.xreplace({i: i - d.spacing/2}) +
a.xreplace({i: i + d.spacing/2})) for a in avg_list]
if not is_averaged:
return self
return weight * sum(avg_list)
@property
def shape(self):
"""The shape of the object."""
return self._shape
@property
def dtype(self):
"""The data type of the object."""
return self._dtype
@property
def ndim(self):
"""The rank of the object."""
return len(self.indices)
@property
def symbolic_shape(self):
"""
The symbolic shape of the object. This includes the domain, halo, and
padding regions. While halo and padding are known quantities (integers),
the domain size is given as a symbol.
"""
halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo]
padding = [sympy.Add(*i, evaluate=False) for i in self._size_padding]
domain = [i.symbolic_size for i in self.dimensions]
ret = tuple(sympy.Add(i, j, k)
for i, j, k in zip(domain, halo, padding))
return DimensionTuple(*ret, getters=self.dimensions)
@cached_property
def indexed(self):
"""The wrapped IndexedData object."""
return IndexedData(self.name, shape=self.shape, function=self.function)
@property
def _mem_external(self):
"""
True if the associated data was/is/will be allocated directly
from Python (e.g., via NumPy arrays), False otherwise.
"""
return False
@property
def _mem_stack(self):
"""
True if the associated data should be allocated on the stack, False otherwise.
"""
return False
@property
def _mem_heap(self):
"""
True if the associated data was/is/will be allocated on the heap,
False otherwise.
"""
return False
@property
def size(self):
"""
The number of elements this object is expected to store in memory.
Note that this would need to be combined with self.dtype to give the actual
size in bytes.
"""
return reduce(mul, self.shape)
@property
def halo(self):
return self._halo
@property
def padding(self):
return self._padding
@property
def is_const(self):
return False
@property
def _C_name(self):
return "%s_vec" % self.name
@property
def _C_typedata(self):
return dtype_to_cstr(self.dtype)
@cached_property
def _C_symbol(self):
return BoundSymbol(name=self._C_name, dtype=self.dtype, function=self.function)
def _make_pointer(self):
"""Generate a symbolic pointer to self."""
raise NotImplementedError
@cached_property
def _size_domain(self):
"""Number of points in the domain region."""
return DimensionTuple(*self.shape, getters=self.dimensions)
@cached_property
def _size_halo(self):
"""Number of points in the halo region."""
left = tuple(zip(*self._halo))[0]
right = tuple(zip(*self._halo))[1]
sizes = tuple(Size(i, j) for i, j in self._halo)
return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_owned(self):
"""Number of points in the owned region."""
left = tuple(self._size_halo.right)
right = tuple(self._size_halo.left)
sizes = tuple(Size(i.right, i.left) for i in self._size_halo)
return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_padding(self):
"""Number of points in the padding region."""
left = tuple(zip(*self._padding))[0]
right = tuple(zip(*self._padding))[1]
sizes = tuple(Size(i, j) for i, j in self._padding)
return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_nopad(self):
"""Number of points in the domain+halo region."""
sizes = tuple(i+sum(j) for i, j in zip(self._size_domain, self._size_halo))
return DimensionTuple(*sizes, getters=self.dimensions)
@cached_property
def _size_nodomain(self):
"""Number of points in the padding+halo region."""
left = tuple(i for i, _ in np.add(self._halo, self._padding))
right = tuple(i for _, i in np.add(self._halo, self._padding))
sizes = tuple(Size(i, j) for i, j in np.add(self._halo, self._padding))
return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _offset_domain(self):
"""Number of points before the first domain element."""
offsets = tuple(np.add(self._size_padding.left, self._size_halo.left))
return DimensionTuple(*offsets, getters=self.dimensions)
@cached_property
def _offset_halo(self):
"""Number of points before the first and last halo elements."""
left = tuple(self._size_padding.left)
right = tuple(np.add(np.add(left, self._size_halo.left), self._size_domain))
offsets = tuple(Offset(i, j) for i, j in zip(left, right))
return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)
@cached_property
def _offset_owned(self):
"""Number of points before the first and last owned elements."""
left = tuple(self._offset_domain)
right = tuple(np.add(self._offset_halo.left, self._size_domain))
offsets = tuple(Offset(i, j) for i, j in zip(left, right))
return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)
@property
def _data_alignment(self):
"""
The base virtual address of the data carried by the object is a multiple
of the alignment.
"""
return default_allocator().guaranteed_alignment
def indexify(self, indices=None, lshift=False, subs=None):
"""Create a types.Indexed from the current object."""
if indices is not None:
return Indexed(self.indexed, *indices)
# Substitution for each index (spacing only used in own dimension)
subs = subs or {}
subs = [{**{d.spacing: 1, -d.spacing: -1}, **subs} for d in self.dimensions]
# Add halo shift
shift = self._size_nodomain.left if lshift else tuple([0]*len(self.dimensions))
# Indices after substitutions
indices = [sympy.sympify((a - o + f).xreplace(s)) for a, o, f, s in
zip(self.args, self.origin, shift, subs)]
indices = [i.xreplace({k: sympy.Integer(k) for k in i.atoms(sympy.Float)})
for i in indices]
return self.indexed[indices]
def __getitem__(self, index):
"""Shortcut for ``self.indexed[index]``."""
return self.indexed[index]
# Pickling support
_pickle_kwargs = ['name', 'dtype', 'halo', 'padding']
__reduce_ex__ = Pickable.__reduce_ex__
@property
def _pickle_reconstruct(self):
return self.__class__.__base__
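
# Illustrative sketch of the size bookkeeping above, using plain numbers:
# for a 1D function with 10 domain points, a halo of (2, 2) and padding of
# (0, 1), the nopad size is domain + halo, and the nodomain size per side
# is halo + padding (cf. _size_nopad and _size_nodomain).
def _example_size_regions():
    domain = (10,)
    halo = [(2, 2)]
    padding = [(0, 1)]
    nopad = tuple(d + sum(h) for d, h in zip(domain, halo))
    assert nopad == (14,)
    nodomain = [Size(h[0] + p[0], h[1] + p[1]) for h, p in zip(halo, padding)]
    assert nodomain == [Size(left=2, right=3)]
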
class AbstractObject(Basic, sympy.Basic, Pickable):
"""
Base class for pointers to objects with derived type.
The hierarchy is structured as follows
AbstractObject
|
---------------------------------
| |
Object LocalObject
|
CompositeObject
Warnings
--------
AbstractObjects are created and managed directly by Devito.
"""
is_AbstractObject = True
def __new__(cls, *args, **kwargs):
obj = sympy.Basic.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
def __init__(self, name, dtype):
self.name = name
self.dtype = dtype
def __repr__(self):
return self.name
__str__ = __repr__
def _hashable_content(self):
return (self.name, self.dtype)
@property
def free_symbols(self):
return {self}
@property
def _C_name(self):
return self.name
@property
def _C_typename(self):
return ctypes_to_cstr(self.dtype)
@property
def _C_ctype(self):
return self.dtype
@property
def function(self):
return self
# Pickling support
_pickle_args = ['name', 'dtype']
__reduce_ex__ = Pickable.__reduce_ex__
class Object(AbstractObject, ArgProvider):
"""
Pointer to object with derived type, provided by an outer scope.
"""
is_Object = True
def __init__(self, name, dtype, value=None):
super(Object, self).__init__(name, dtype)
self.value = value
@property
def _arg_names(self):
return (self.name,)
def _arg_defaults(self):
if callable(self.value):
return {self.name: self.value()}
else:
return {self.name: self.value}
def _arg_values(self, args=None, **kwargs):
"""
Produce runtime values for this Object after evaluating user input.
Parameters
----------
args : dict, optional
Known argument values.
**kwargs
Dictionary of user-provided argument overrides.
"""
if self.name in kwargs:
return {self.name: kwargs.pop(self.name)}
else:
return self._arg_defaults()
class CompositeObject(Object):
"""
Pointer to object with composite type (e.g., a C struct), provided
by an outer scope.
"""
_dtype_cache = {}
@classmethod
def _generate_unique_dtype(cls, pname, pfields):
dtype = POINTER(type(pname, (Structure,), {'_fields_': pfields}))
key = (pname, tuple(pfields))
return cls._dtype_cache.setdefault(key, dtype)
def __init__(self, name, pname, pfields, value=None):
dtype = CompositeObject._generate_unique_dtype(pname, pfields)
value = self.__value_setup__(dtype, value)
super(CompositeObject, self).__init__(name, dtype, value)
def __value_setup__(self, dtype, value):
return value or byref(dtype._type_())
@property
def pfields(self):
return tuple(self.dtype._type_._fields_)
@property
def pname(self):
return self.dtype._type_.__name__
@property
def fields(self):
return [i for i, _ in self.pfields]
def _hashable_content(self):
return (self.name, self.pfields)
@cached_property
def _C_typedecl(self):
return Struct(self.pname, [Value(ctypes_to_cstr(j), i) for i, j in self.pfields])
# Pickling support
_pickle_args = ['name', 'pname', 'pfields']
_pickle_kwargs = []
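
# Illustrative sketch: building a CompositeObject backed by a C struct with
# two fields. The struct type is generated (and cached) from the given
# pfields; ``fields`` returns just the field names.
def _example_composite_object():
    from ctypes import c_float, c_int

    obj = CompositeObject('state', 'State', [('a', c_int), ('b', c_float)])
    assert obj.pname == 'State'
    assert obj.fields == ['a', 'b']
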
class LocalObject(AbstractObject):
"""
Pointer to object with derived type, defined in the local scope.
"""
is_LocalObject = True
# Extended SymPy hierarchy follows, for essentially two reasons:
# - To keep track of `function`
# - To override SymPy caching behaviour
class IndexedData(sympy.IndexedBase, Pickable):
"""
Wrapper class that inserts a pointer to the symbolic data object.
"""
def __new__(cls, label, shape=None, function=None):
# Make sure `label` is a devito.Symbol, not a sympy.Symbol
if isinstance(label, str):
label = Symbol(name=label, dtype=None)
obj = sympy.IndexedBase.__new__(cls, label, shape)
obj.function = function
return obj
def func(self, *args):
obj = super(IndexedData, self).func(*args)
obj.function = self.function
return obj
def __getitem__(self, indices, **kwargs):
"""Produce a types.Indexed, rather than a sympy.Indexed."""
indexed = super(IndexedData, self).__getitem__(indices, **kwargs)
return Indexed(*indexed.args)
# Pickling support
_pickle_kwargs = ['label', 'shape', 'function']
__reduce_ex__ = Pickable.__reduce_ex__
class BoundSymbol(AbstractSymbol):
"""
Wrapper class for Symbols that are bound to a symbolic data object.
Notes
-----
    By deliberately inheriting from AbstractSymbol, a BoundSymbol won't be
    in the devito cache. This avoids cyclic references in the cache
    (e.g., an entry for a Function `u(x)` and an entry for `u._C_symbol` with
    the latter's key including `u(x)`). This is totally fine. The BoundSymbol
    is tied to a specific Function; once the Function goes out of scope, the
    BoundSymbol also becomes a candidate for garbage collection.
"""
def __new__(cls, *args, function=None, **kwargs):
obj = AbstractSymbol.__new__(cls, *args, **kwargs)
obj._function = function
return obj
@property
def function(self):
return self._function
class Indexed(sympy.Indexed):
# The two type flags have changed in upstream sympy as of version 1.1,
# but the below interpretation is used throughout the compiler to
# identify Indexed objects. With the sympy-1.1 changes a new flag
# obj.is_Indexed was introduced which should be preferred, but the
# required changes are cumbersome and many...
is_Symbol = False
is_Atom = False
is_Dimension = False
@memoized_meth
def __str__(self):
return super().__str__()
def _hashable_content(self):
return super(Indexed, self)._hashable_content() + (self.base.function,)
@cached_property
def indices(self):
return DimensionTuple(*super().indices, getters=self.function.dimensions)
@property
def function(self):
return self.base.function
@property
def dtype(self):
return self.function.dtype
@property
def name(self):
return self.function.name
@property
def origin(self):
return self.function.origin
@cached_property
def free_symbols(self):
# Make it cached, since it's relatively expensive and called often
ret = super(Indexed, self).free_symbols
# Get rid of the IndexedBase label this Indexed stems from
# as in Devito we can't have it floating around in Eq's
ret.discard(self.base.label)
return ret
def compare(self, other):
"""
Override `sympy.Basic.compare` to honor Devito's canonical ordering
of arguments.
In SymPy:
f[x+1] < f[x+2] < ... < f[x+9] < f[x]
While in Devito we pretend
f[x] < f[x+1] < f[x+2] < ... < f[x+9]
        That is, the arguments need to be ordered monotonically based on the indices
so that the symbolic trees of two derivative expressions can be compared
argument-wise.
"""
if (self.__class__ != other.__class__) or (self.function is not other.function):
return super().compare(other)
for l, r in zip(self.indices, other.indices):
try:
c = int(sympy.sign(l - r))
except TypeError:
# E.g., `l=x+1` and `r=y` or `r=sqrt(x)`
c = l.compare(r)
if c:
return c
return 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webkit_scrape.py
#
# Copyright 2015 Spencer McIntyre <zeroSteiner@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import logging
import os
import re
import string
import sys
import urllib
from king_phisher.client import gui_utilities
import requests
if sys.version_info[0] < 3:
import urlparse
urllib.parse = urlparse
else:
import urllib.parse
try:
from gi.repository import WebKit2
has_webkit2 = True
except ImportError:
has_webkit2 = False
class ClonedResourceDetails(collections.namedtuple('ClonedResourceDetails', ['resource', 'mime_type', 'size', 'file_name'])):
"""
    A named tuple which contains details regarding a resource that has been cloned.
.. py:attribute:: resource
The web resource that has been cloned.
.. py:attribute:: mime_type
The MIME type that was provided by the server for the cloned resource.
.. py:attribute:: size
The size of the original resource that was provided by the server.
.. py:attribute:: file_name
The path to the file which the resource was written to.
"""
pass
class WebPageCloner(object):
"""
    This object is used to clone web pages. It uses the WebKit2GTK+ engine
    and hooks signals to detect which remote resources are loaded from the
    target URL. These resources are then written to disk. Resources that have
    a MIME type of text/html have the King Phisher server JavaScript file
    patched in.
"""
def __init__(self, target_url, dest_dir):
"""
:param str target_url: The URL of the target web page to clone.
:param str dest_dir: The path of a directory to write the resources to.
"""
if not has_webkit2:
raise RuntimeError('cloning requires WebKit2GTK+')
self.target_url = urllib.parse.urlparse(target_url)
dest_dir = os.path.abspath(dest_dir)
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
self.dest_dir = os.path.abspath(os.path.normpath(dest_dir))
self.logger = logging.getLogger('KingPhisher.Client.WebPageScraper')
self.cloned_resources = collections.OrderedDict()
"""A :py:class:`collections.OrderedDict` instance of :py:class:`.ClonedResourceDetails` keyed by the web resource they describe."""
self.load_started = False
self.load_failed_event = None
self.__web_resources = []
self.webview = WebKit2.WebView()
web_context = self.webview.get_context()
web_context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER)
web_context.set_tls_errors_policy(WebKit2.TLSErrorsPolicy.IGNORE)
self.webview.connect('decide-policy', self.signal_decide_policy)
self.webview.connect('load-changed', self.signal_load_changed)
self.webview.connect('load-failed', self.signal_load_failed)
self.webview.connect('resource-load-started', self.signal_resource_load_started)
self.webview.load_uri(self.target_url_str)
def _webkit_empty_resource_bug_workaround(self, url, expected_len):
"""
This works around an issue in WebKit2GTK+ that will hopefully be
resolved eventually. Sometimes the resource data that is returned is
        an empty string, so attempt to re-request it with Python.
"""
try:
response = requests.get(url, timeout=10)
except requests.exceptions.RequestException:
self.logger.warning('failed to request the empty resource with python')
return ''
if response.status_code < 200 or response.status_code > 299:
self.logger.warning("requested the empty resource with python, but received status: {0} ({1})".format(response.status_code, response.reason))
return ''
data = response.content
if len(data) != expected_len:
self.logger.warning('requested the empty resource with python, but the length appears invalid')
return data
@property
def load_failed(self):
        return self.load_failed_event is not None
@property
def target_url_str(self):
return urllib.parse.urlunparse(self.target_url)
def copy_resource_data(self, resource, data):
"""
Copy the data from a loaded resource to a local file.
:param resource: The resource whose data is being copied.
:type resource: :py:class:`WebKit2.WebResource`
:param data: The raw data of the represented resource.
:type data: bytes, str
"""
resource_url_str = resource.get_property('uri')
resource_url = urllib.parse.urlparse(resource_url_str)
resource_path = os.path.split(resource_url.path)[0].lstrip('/')
resource_path = urllib.parse.unquote(resource_path)
directory = self.dest_dir
for part in resource_path.split('/'):
directory = os.path.join(directory, part)
if not os.path.exists(directory):
os.mkdir(directory)
mime_type = None
response = resource.get_response()
if response:
mime_type = response.get_mime_type()
resource_path = urllib.parse.unquote(resource_url.path)
if resource_path.endswith('/'):
resource_path += 'index.html'
resource_path = resource_path.lstrip('/')
resource_path = os.path.join(self.dest_dir, resource_path)
if mime_type == 'text/html':
data = self.patch_html(data)
with open(resource_path, 'wb') as file_h:
file_h.write(data)
crd = ClonedResourceDetails(urllib.parse.unquote(resource_url.path), mime_type, len(data), resource_path)
self.cloned_resources[resource_url.path] = crd
self.logger.debug("wrote {0:,} bytes to {1}".format(crd.size, resource_path))
def patch_html(self, data):
"""
Patch the HTML data to include the King Phisher javascript resource.
The script tag is inserted just before the closing head tag. If no head
tag is present, the data is left unmodified.
:param str data: The HTML data to patch.
:return: The patched HTML data.
:rtype: str
"""
match = re.search(r'</head>', data, flags=re.IGNORECASE)
if not match:
return data
end_head = match.start(0)
patched = ''
patched += data[:end_head]
patched += '<script src="/kp.js" type="text/javascript"></script>'
ws_cursor = end_head - 1
while ws_cursor > 0 and data[ws_cursor] in string.whitespace:
ws_cursor -= 1
patched += data[ws_cursor + 1:end_head]
patched += data[end_head:]
return patched
def resource_is_on_target(self, resource):
"""
Test whether the resource is on the target system. This tries to match
the hostname, scheme and port number of the resource's URI against the
target URI.
:return: Whether the resource is on the target or not.
:rtype: bool
"""
resource_url = urllib.parse.urlparse(resource.get_property('uri'))
if resource_url.netloc.lower() != self.target_url.netloc.lower():
return False
if resource_url.scheme.lower() != self.target_url.scheme.lower():
return False
rport = resource_url.port or (443 if resource_url.scheme == 'https' else 80)
tport = self.target_url.port or (443 if self.target_url.scheme == 'https' else 80)
if rport != tport:
return False
return True
def stop_cloning(self):
"""Stop the current cloning operation if it is running."""
if self.webview.get_property('is-loading'):
self.webview.stop_loading()
def wait(self):
"""
Wait for the cloning operation to complete and return whether the
operation was successful or not.
:return: True if the operation was successful.
:rtype: bool
"""
while not self.load_started:
gui_utilities.gtk_sync()
while self.webview.get_property('is-loading') or len(self.__web_resources):
gui_utilities.gtk_sync()
self.webview.destroy()
return not self.load_failed
def cb_get_data_finish(self, resource, task):
data = resource.get_data_finish(task)
        for _ in range(1):  # single-iteration loop so break can short-circuit the checks below
response = resource.get_response()
if not response:
break
resource_url_str = resource.get_property('uri')
if not self.resource_is_on_target(resource):
self.logger.debug('loaded external resource: ' + resource_url_str)
break
if len(data) == 0:
self.logger.warning('loaded empty on target resource: ' + resource_url_str)
data = self._webkit_empty_resource_bug_workaround(resource_url_str, response.get_content_length())
else:
self.logger.info('loaded on target resource: ' + resource_url_str)
if len(data):
self.copy_resource_data(resource, data)
self.__web_resources.remove(resource)
def signal_decide_policy(self, webview, decision, decision_type):
self.logger.debug("received policy decision request of type: {0}".format(decision_type.value_name))
if decision_type != WebKit2.PolicyDecisionType.NAVIGATION_ACTION:
return
new_target_url_str = decision.get_request().get_uri()
new_target_url = urllib.parse.urlparse(new_target_url_str)
if new_target_url_str == self.target_url_str:
return
# don't allow offsite redirects
if new_target_url.netloc.lower() != self.target_url.netloc.lower():
return
self.target_url = new_target_url
self.logger.info("updated the target url to: {0}".format(new_target_url_str))
def signal_load_changed(self, webview, load_event):
self.logger.debug("load status changed to: {0}".format(load_event.value_name))
if load_event == WebKit2.LoadEvent.STARTED:
self.load_started = True
def signal_load_failed(self, webview, event, uri, error):
self.logger.warning("load failed on event: {0} for uri: {1}".format(event.value_name, uri))
self.load_failed_event = event
    def signal_resource_load_started(self, webview, resource, request):
self.__web_resources.append(resource)
resource.connect('failed', self.signal_resource_load_failed)
resource.connect('finished', self.signal_resource_load_finished)
def signal_resource_load_finished(self, resource):
resource.get_data(callback=self.cb_get_data_finish)
def signal_resource_load_failed(self, resource, error):
self.logger.warning('failed to load resource: ' + resource.get_uri())
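
# Illustrative sketch of the core insertion step in patch_html() above, run
# on a small standalone HTML snippet (no WebKit required). This simplified
# version omits the whitespace re-duplication that the real method performs
# to preserve the closing tag's indentation.
def _example_patch_html_logic():
    html = '<html><head>\n    <title>t</title>\n</head><body></body></html>'
    match = re.search(r'</head>', html, flags=re.IGNORECASE)
    assert match is not None
    end_head = match.start(0)
    patched = html[:end_head]
    patched += '<script src="/kp.js" type="text/javascript"></script>'
    patched += html[end_head:]
    assert '<script src="/kp.js"' in patched
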
import os
import unittest
from cloudify_rest_client.deployments import Deployment
from cloudify_system_workflows.deployment_update.step_extractor import (
extract_steps,
DeploymentUpdateStep,
PROPERTY, PROPERTIES, OUTPUT, OUTPUTS, WORKFLOW, WORKFLOWS, NODE,
NODES, OPERATION, OPERATIONS, RELATIONSHIP, RELATIONSHIPS,
SOURCE_OPERATIONS, TARGET_OPERATIONS, TYPE, GROUP, GROUPS, POLICY_TYPE,
POLICY_TYPES, POLICY_TRIGGER, POLICY_TRIGGERS, HOST_ID, PLUGIN,
DEPLOYMENT_PLUGINS_TO_INSTALL, PLUGINS_TO_INSTALL, DESCRIPTION,
_update_topology_order_of_add_node_steps,
_find_relationship,
)
from dsl_parser import tasks
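
# Illustrative sketch: DeploymentUpdateStep instances compare by value,
# which is what the assertions in the tests below rely on when comparing
# extracted steps against expected literals.
def _example_step_equality():
    a = DeploymentUpdateStep(action='add', entity_type=NODE,
                             entity_id='nodes:node1')
    b = DeploymentUpdateStep(action='add', entity_type=NODE,
                             entity_id='nodes:node1')
    assert a == b and a is not b
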
class StepExtractorTestCase(unittest.TestCase):
@staticmethod
def _get_node_scheme(node_id='node1', **params):
node = {
'id': node_id,
OPERATIONS: {},
PROPERTIES: {},
RELATIONSHIPS: [],
TYPE: '',
HOST_ID: '',
PLUGINS_TO_INSTALL: []
}
node.update(params)
return node
@staticmethod
def _get_relationship_scheme():
return {
SOURCE_OPERATIONS: {},
"target_id": "",
TARGET_OPERATIONS: {},
TYPE: "",
PROPERTIES: {}
}
def setUp(self):
super(StepExtractorTestCase, self).setUp()
self.deployment = Deployment({
'id': 'deployment_id',
'groups': {}
})
self.deployment_plan = {
DESCRIPTION: None,
NODES: {},
OPERATIONS: {},
PROPERTIES: {},
RELATIONSHIPS: [],
TYPE: '',
GROUPS: {},
POLICY_TYPES: {},
POLICY_TRIGGERS: {},
DEPLOYMENT_PLUGINS_TO_INSTALL: {},
OUTPUTS: {},
WORKFLOWS: {}
}
def test_entity_name(self):
step = DeploymentUpdateStep(action='add',
entity_type=NODE,
entity_id='nodes:node1')
self.assertEqual('node1', step.entity_name)
def test_update_topology_order_of_add_node_steps(self):
add_node_a_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_a')
add_node_b_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_b')
add_node_c_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_c')
add_node_d_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_d')
add_node_e_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_e')
add_node_f_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_f')
steps = [add_node_a_step, add_node_b_step, add_node_c_step,
add_node_d_step, add_node_e_step, add_node_f_step]
        # Imagine the following relationships between the added nodes
        # (arrows point at the relationship target):
        #
        #          e
        #         ^ ^
        #        /   \
        #       c     d
        #      ^ ^
        #     /   \
        #    a     b      f
topologically_sorted_added_nodes = ['node_f', 'node_a', 'node_b',
'node_c', 'node_d', 'node_e']
_update_topology_order_of_add_node_steps(
steps, topologically_sorted_added_nodes)
self.assertEqual(5, add_node_e_step.topology_order)
self.assertEqual(4, add_node_d_step.topology_order)
self.assertEqual(3, add_node_c_step.topology_order)
self.assertEqual(2, add_node_b_step.topology_order)
self.assertEqual(1, add_node_a_step.topology_order)
self.assertEqual(0, add_node_f_step.topology_order)
def test_create_added_nodes_graph(self):
self.deployment_plan[NODES] = [
self._get_node_scheme('node_a', relationships=[
{"target_id": 'node_c'}
]),
self._get_node_scheme('node_b', relationships=[
{"target_id": 'node_c'}
]),
self._get_node_scheme('node_c', relationships=[
{"target_id": 'node_e'}
]),
self._get_node_scheme('node_d', relationships=[
{"target_id": 'node_e'}
]),
self._get_node_scheme('node_e'),
self._get_node_scheme('node_f'),
]
steps, _ = extract_steps([], self.deployment, self.deployment_plan)
order_by_id = {s.entity_id: s.topology_order for s in steps}
assert order_by_id['nodes:node_c'] > order_by_id['nodes:node_a']
assert order_by_id['nodes:node_c'] > order_by_id['nodes:node_b']
assert order_by_id['nodes:node_e'] > order_by_id['nodes:node_c']
assert order_by_id['nodes:node_e'] > order_by_id['nodes:node_d']
def test_description_no_change(self):
self.deployment[DESCRIPTION] = 'description'
self.deployment_plan[DESCRIPTION] = 'description'
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_description_modify_description(self):
self.deployment[DESCRIPTION] = 'description_old'
self.deployment_plan[DESCRIPTION] = 'description_new'
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=DESCRIPTION,
entity_id='description')
]
def test_outputs_no_change(self):
self.deployment[OUTPUTS] = {'output1': 'output1_value'}
self.deployment_plan[OUTPUTS] = self.deployment.outputs
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_outputs_add_output(self):
self.deployment_plan[OUTPUTS] = {'output1': 'output1_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_outputs_remove_output(self):
self.deployment[OUTPUTS] = {'output1': 'output1_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_outputs_modify_output(self):
self.deployment[OUTPUTS] = {'output1': 'output1_value'}
self.deployment_plan[OUTPUTS] = {'output1': 'output1_modified_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_workflows_no_change(self):
self.deployment[WORKFLOWS] = {
'intact_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
self.deployment_plan[WORKFLOWS] = self.deployment.workflows
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_workflows_add_workflow_of_existing_plugin(self):
self.deployment_plan[WORKFLOWS] = {
'added_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=WORKFLOW,
entity_id='workflows:added_workflow')
]
def test_workflows_add_workflow_script(self):
self.deployment_plan[WORKFLOWS] = {
'new_workflow': {
'plugin': 'script',
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=WORKFLOW,
entity_id='workflows:new_workflow')
]
def test_workflows_remove_workflow(self):
self.deployment[WORKFLOWS] = {
'removed_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=WORKFLOW,
entity_id='workflows:removed_workflow')
]
def test_workflows_modify_workflow_of_existing_plugin(self):
self.deployment[WORKFLOWS] = {
'added_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
self.deployment_plan[WORKFLOWS] = {
'added_workflow': {
'operation': 'module_name.bar',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=WORKFLOW,
entity_id='workflows:added_workflow')
]
def test_nodes_no_change(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_nodes_add_node(self):
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node1')
]
def test_nodes_remove_node(self):
nodes = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='nodes:node1')
]
def test_nodes_add_and_remove_node_changed_type(self):
nodes = [self._get_node_scheme(type='old_type')]
self.deployment_plan[NODES] = [self._get_node_scheme(type='new_type')]
supported_steps, unsupported_steps = \
extract_steps(nodes, self.deployment, self.deployment_plan)
assert len(supported_steps) == 0
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_nodes_add_and_remove_node_changed_type_and_host_id(self):
nodes = [self._get_node_scheme(host_id='old_host_id')]
self.deployment_plan[NODES] = [
            self._get_node_scheme(type='new_type', host_id='new_host_id')]
supported_steps, unsupported_steps = \
extract_steps(nodes, self.deployment, self.deployment_plan)
assert len(supported_steps) == 0
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_node_properties_no_change(self):
nodes = [self._get_node_scheme(
properties={'property1': 'property1_value'}
)]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_node_properties_add_property(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [
self._get_node_scheme(properties={'property1': 'property1_value'})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_properties_remove_property(self):
nodes = [self._get_node_scheme(properties={
'property1': 'property1_value'})]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_properties_modify_property(self):
nodes = [self._get_node_scheme(properties={
'property1': 'property1_value'})]
self.deployment_plan[NODES] = [self._get_node_scheme(properties={
'property1': 'property1_modified_value'})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_operations_no_change(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_node_operations_add_operation(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_node_operations_remove_operation(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_node_operations_modify_operation(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_modified_field_value'
}
})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_relationships_no_change(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_add_relationship(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_remove_relationship(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_type(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'different_relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_target_non_contained_in(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'relationship_target',
'type_hierarchy': ['rel_hierarchy']
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'different_relationship_target',
'type_hierarchy': ['rel_hierarchy']
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_target_contained_in(self):
nodes = [self._get_node_scheme(relationships=[
{
'target_id': 'relationship_target',
'type_hierarchy': ['rel_hierarchy',
'cloudify.relationships.contained_in']
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'different_relationship_target',
'type_hierarchy': ['rel_hierarchy',
'cloudify.relationships.contained_in']}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_relationships_change_type_and_target(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'different_relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'different_relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
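    # Reading aid (inferred from the expected steps below, not from the
    # implementation): when a relationship is reordered, the entity_id encodes
    # both positions as 'relationships:[old_index]:[new_index]'; plain add and
    # remove steps carry a single '[index]'.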
def test_relationships_modify_order(self):
nodes = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
        # we don't care about the order in which the steps were created
assert set(steps) == {
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]:[3]'),
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[1]:[0]'),
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[3]:[1]')
}
def test_relationships_modify_order_with_add_and_remove(self):
nodes = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_5'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
        # we don't care about the order in which the steps were created
assert set(steps) == {
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]:[3]'),
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[2]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[2]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
}
def test_relationships_add_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {'full.operation1': {}}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_relationships_remove_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {'full.operation1': {}}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_duplicate_relationship(self):
rel = {
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
}
nodes = [self._get_node_scheme(relationships=[rel, rel])]
self.deployment_plan[NODES] = [
self._get_node_scheme(relationships=[rel, rel])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_modify_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {
'full.operation1': {
'op1_old_field': 'op1_field_value'
}
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {
'full.operation1': {
'op1_new_field': 'op1_field_value'
}
}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_relationships_add_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {'full.operation1': {}}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_relationships_remove_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {'full.operation1': {}}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_relationships_modify_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {
'full.operation1': {
'op1_old_field': 'op1_field_value'
}
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {
'full.operation1': {
'op1_new_field': 'op1_field_value'
}
}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_get_matching_relationship(self):
relationships_with_match = [
{'type': 'typeA', 'target_id': 'id_1', 'field2': 'value2'},
{'type': 'typeB', 'target_id': 'id_1'},
{'type': 'typeB', 'target_id': 'id_2'},
{'type': 'typeA', 'target_id': 'id_2'}
]
relationships_with_no_match = [
{'type': 'typeB', 'target_id': 'id_1'},
{'type': 'typeB', 'target_id': 'id_2'},
{'type': 'typeA', 'target_id': 'id_2'}
]
assert _find_relationship(
relationships_with_match, 'typeA', 'id_1'
) == ({'type': 'typeA', 'target_id': 'id_1', 'field2': 'value2'}, 0)
assert _find_relationship(
relationships_with_no_match, 'typeA', 'id_1'
) == (None, None)
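    # A minimal reading of _find_relationship, as pinned down by the assertions
    # above: return the first (relationship, index) pair whose 'type' and
    # 'target_id' both match, or (None, None) when nothing matches.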
def test_sort_steps_compare_action(self):
add_step = DeploymentUpdateStep(
action='add',
entity_type='',
entity_id='')
remove_step = DeploymentUpdateStep(
action='remove',
entity_type='',
entity_id='')
modify_step = DeploymentUpdateStep(
action='modify',
entity_type='',
entity_id='')
steps = [add_step, remove_step, modify_step]
expected_step_order = [remove_step, add_step, modify_step]
steps.sort()
assert steps == expected_step_order
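    # The remaining sort tests pin down the full step ordering used when an
    # update is applied: removals before additions before modifications;
    # relationship removals before node removals; node additions before
    # relationship additions; and, among node additions, higher topology_order
    # first.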
def test_sort_steps_add_node_before_add_relationship(self):
add_node_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
add_relationship_step = DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='')
steps = [add_relationship_step, add_node_step]
expected_step_order = [add_node_step, add_relationship_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_remove_relationship_before_remove_node(self):
remove_relationship_step = DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='')
remove_node_step = DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='')
steps = [remove_node_step, remove_relationship_step]
expected_step_order = [remove_relationship_step, remove_node_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_higher_topology_before_lower_topology(self):
default_topology_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
topology_order_1_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=1)
topology_order_2_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=2)
steps = [topology_order_1_step,
default_topology_step,
topology_order_2_step]
expected_step_order = [
topology_order_2_step,
topology_order_1_step,
default_topology_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_all_comparison_considerations(self):
add_node_step_default_topology = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
add_node_step_topology_order_1 = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=1)
add_node_step_topology_order_2 = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=2)
remove_relationship_step = DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='')
remove_node_step = DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='')
add_relationship_step = DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='')
modify_property_step = DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='')
steps = [add_node_step_topology_order_1, remove_node_step,
modify_property_step, add_relationship_step,
add_node_step_default_topology, remove_relationship_step,
add_node_step_topology_order_2]
expected_step_order = [
remove_relationship_step,
remove_node_step,
add_node_step_topology_order_2,
add_node_step_topology_order_1,
add_node_step_default_topology,
add_relationship_step,
modify_property_step]
steps.sort()
assert steps == expected_step_order
def test_relationships_intact_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_value'
}
}
])]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_add_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
'properties': {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_relationships_remove_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
'properties': {}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_relationships_modify_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_value'
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_extract_steps_policy_types_no_change(self):
policy_types = {'policy_type1': 'policy_type1_value'}
self.deployment[POLICY_TYPES] = policy_types
self.deployment_plan[POLICY_TYPES] = policy_types
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_policy_types_add_policy_type(self):
self.deployment_plan[POLICY_TYPES] = {
'policy_type1': 'policy_type1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_policy_types_remove_policy_type(self):
self.deployment[POLICY_TYPES] = {'policy_type1': 'policy_type1_value'}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_policy_types_modify_policy_type(self):
self.deployment[POLICY_TYPES] = {'policy_type1': 'policy_type1_value'}
self.deployment_plan[POLICY_TYPES] = {
'policy_type1': 'policy_type1_modified_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_extract_steps_policy_triggers_no_change(self):
policy_triggers = {'policy_trigger1': 'policy_trigger1_value'}
self.deployment[POLICY_TRIGGERS] = policy_triggers
self.deployment_plan[POLICY_TRIGGERS] = policy_triggers
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_policy_triggers_add_policy_trigger(self):
self.deployment_plan[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_policy_triggers_remove_policy_trigger(self):
self.deployment[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_policy_triggers_modify_policy_trigger(self):
self.deployment[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_value'
}
self.deployment_plan[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_modified_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_groups_no_change(self):
groups = {'group1': {}}
self.deployment[GROUPS] = groups
self.deployment_plan[GROUPS] = groups
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_groups_add_group(self):
self.deployment_plan[GROUPS] = {'group1': {}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_remove_group(self):
self.deployment[GROUPS] = {'group1': {}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_modify_group(self):
self.deployment[GROUPS] = {'group1': {'members': []}}
self.deployment_plan[GROUPS] = {'group1': {'members': ['a']}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_member_order(self):
self.deployment[GROUPS] = {'group1': {'members': ['a', 'b']}}
self.deployment_plan[GROUPS] = {'group1': {'members': ['b', 'a']}}
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_ha_plugins_no_install(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{'name': 'old', 'install': True}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[{'name': 'new', 'install': False}]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
# Although install is set to False on the new plugin, we are still
# creating the step. We won't need to install the plugin (the
# PluginHandler takes care of that), but the value still needs to be
# updated in the node in the DB
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:new'
)
]
def test_ha_plugins_add_ha_plugin(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{'name': 'old', 'install': True}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[{'name': 'new', 'install': True}]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:new',
supported=True)
]
def test_ha_plugins_modify_ha_plugin(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{
'name': 'name',
'executor': 'host_agent',
'install': True,
'source': 'old'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[
{
'name': 'name',
'executor': 'host_agent',
'install': True,
'source': 'new'
}
]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:name',
supported=True)
]
def test_all_changes_combined(self):
bp_before = os.path.join(
os.path.dirname(__file__), 'combined_changes_before.yaml')
bp_after = os.path.join(
os.path.dirname(__file__), 'combined_changes_after.yaml')
plan_before = tasks.prepare_deployment_plan(
tasks.parse_dsl(bp_before, os.path.dirname(bp_before)),
None,
{},
)
plan_after = tasks.prepare_deployment_plan(
tasks.parse_dsl(bp_after, os.path.dirname(bp_after)),
None,
{},
)
nodes = plan_before['nodes']
self.deployment[GROUPS] = plan_before['groups']
self.deployment[WORKFLOWS] = plan_before['workflows']
self.deployment[POLICY_TYPES] = plan_before['policy_types']
self.deployment[POLICY_TRIGGERS] = plan_before['policy_triggers']
self.deployment[OUTPUTS] = plan_before['outputs']
expected_steps = {
'modify_description': DeploymentUpdateStep(
'modify',
DESCRIPTION,
'description'),
'remove_node': DeploymentUpdateStep(
'remove',
NODE,
'nodes:node1'),
'add_node': DeploymentUpdateStep(
'add',
NODE,
'nodes:node2',
topology_order=0),
'modify_node_changed_type': DeploymentUpdateStep(
'modify',
NODE,
'nodes:node3',
supported=False),
'add_property': DeploymentUpdateStep(
'add',
PROPERTY,
'nodes:node4:properties:added_prop'),
'remove_property': DeploymentUpdateStep(
'remove',
PROPERTY,
'nodes:node4:properties:removed_prop'),
'modify_property': DeploymentUpdateStep(
'modify',
PROPERTY,
'nodes:node4:properties:modified_prop'),
'remove_relationship': DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node6:relationships:[0]'),
'add_relationship': DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node7:relationships:[0]'),
'remove_relationship_changed_target': DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node9:relationships:[0]'),
'add_relationship_changed_target': DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node9:relationships:[0]'),
'remove_relationship_changed_type_and_target':
DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node10:relationships:[0]'),
'add_relationship_changed_type_and_target':
DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node10:relationships:[0]'),
'add_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node11:operations:interface1.added_operation'),
'add_operation_shortened': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node11:operations:added_operation'),
'remove_operation': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node11:operations:interface1.removed_operation'),
'remove_operation_shortened': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node11:operations:removed_operation'),
'modify_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node11:operations:interface1.modified_operation'),
'modify_operation_shortened': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node11:operations:modified_operation'),
'add_relationship_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'interface_for_modified_and_added.added_operation'),
'add_relationship_operation_shortened':
DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'added_operation'),
'remove_relationship_operation': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node12:relationships:[0]:source_operations:'
'interface_for_intact_and_removed.removed_operation'),
'remove_relationship_operation_shortened':
DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node12:relationships:[0]:source_operations:'
'removed_operation'),
'modify_relationship_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'interface_for_modified_and_added.modified_operation'),
'modify_relationship_operation_shortened':
DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'modified_operation'),
'add_output': DeploymentUpdateStep(
'add',
OUTPUT,
'outputs:added_output'),
'remove_output': DeploymentUpdateStep(
'remove',
OUTPUT,
'outputs:removed_output'),
'modify_output': DeploymentUpdateStep(
'modify',
OUTPUT,
'outputs:modified_output'),
'add_workflow_same_plugin': DeploymentUpdateStep(
'add',
WORKFLOW,
'workflows:added_workflow_same_plugin'),
'add_workflow_new_plugin': DeploymentUpdateStep(
'add',
WORKFLOW,
'workflows:added_workflow_new_plugin'),
'remove_workflow': DeploymentUpdateStep(
'remove',
WORKFLOW,
'workflows:removed_workflow'),
'modify_workflow_same_plugin': DeploymentUpdateStep(
'modify',
WORKFLOW,
'workflows:modified_workflow_same_plugin'),
'modify_workflow_new_plugin': DeploymentUpdateStep(
'modify',
WORKFLOW,
'workflows:modified_workflow_new_plugin'),
'add_policy_type': DeploymentUpdateStep(
'add',
POLICY_TYPE,
'policy_types:added_policy_type',
supported=False),
'remove_policy_type': DeploymentUpdateStep(
'remove',
POLICY_TYPE,
'policy_types:removed_policy_type',
supported=False),
'modify_policy_type': DeploymentUpdateStep(
'modify',
POLICY_TYPE,
'policy_types:modified_policy_type',
supported=False),
'add_policy_trigger': DeploymentUpdateStep(
'add',
POLICY_TRIGGER,
'policy_triggers:added_policy_trigger',
supported=False),
'remove_policy_trigger': DeploymentUpdateStep(
'remove',
POLICY_TRIGGER,
'policy_triggers:removed_policy_trigger',
supported=False),
'modify_policy_trigger': DeploymentUpdateStep(
'modify',
POLICY_TRIGGER,
'policy_triggers:modified_policy_trigger',
supported=False),
'add_group': DeploymentUpdateStep(
'add',
GROUP,
'groups:added_group',
supported=False),
'remove_group': DeploymentUpdateStep(
'remove',
GROUP,
'groups:removed_group',
supported=False),
'modify_group': DeploymentUpdateStep(
'modify',
GROUP,
'groups:modified_group',
supported=False),
'add_relationship_property': DeploymentUpdateStep(
'add',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:added_relationship_prop',
supported=False),
'remove_relationship_property': DeploymentUpdateStep(
'remove',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:removed_relationship_prop',
supported=False),
'modify_relationship_property': DeploymentUpdateStep(
'modify',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:modified_relationship_prop',
supported=False),
'add_ha_plugin_plugins_to_install': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins_to_install:node18:plugin3_name'),
'add_ha_plugin_plugin3_name': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins:node18:plugin3_name'),
'add_cda_plugin_used_by_host': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins:node16:cda_plugin_for_operations2'),
'add_cda_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node16:operations:'
'interface_for_plugin_based_operations.'
'added_operation_new_cda_plugin',
supported=True),
'add_cda_operation_shortened': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node16:operations:added_operation_new_cda_plugin',
supported=True),
'modify_ha_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node18:operations:'
'interface_for_plugin_based_operations.'
'ha_operation_before',
supported=True),
'modify_ha_operation_shortened': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node18:operations:ha_operation_before',
supported=True)
}
steps, unsupported_steps = extract_steps(
nodes, self.deployment, plan_after)
steps.extend(unsupported_steps)
        assert set(expected_steps.values()) == set(steps)
###############################################################################
## fs.py
## 9te [angband.ornl.gov]
## Wed Jan 12 10:37:50 2011
###############################################################################
## Copyright (C) 2008 Oak Ridge National Laboratory, UT-Battelle, LLC.
##---------------------------------------------------------------------------##
## generated by /data/denovo/production/head/setup/bin/pygen built on 20110112
###############################################################################
import os, sys, math, string
# pyspn equation type
from spn_fv import *
print_it = False
##---------------------------------------------------------------------------##
## MAIN
##---------------------------------------------------------------------------##
initialize(sys.argv)
if node() == 0:
print "Denovo - pyspn Python Front-End"
print "-------------------------------"
print "Release : %16s" % (release())
print "Release Date : %16s" % (release_date())
print "Build Date : %16s" % (build_date())
print
timer = Timer()
timer.start()
##---------------------------------------------------------------------------##
## XS DATA
####### UO2 Fuel-Clad Macroscopic Cross Sections ##########
## Transport-corrected Total Cross Sections
T_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_UO2[0] = 1.77949e-1
T_UO2[1] = 3.29805e-1
T_UO2[2] = 4.80388e-1
T_UO2[3] = 5.54367e-1
T_UO2[4] = 3.11801e-1
T_UO2[5] = 3.95168e-1
T_UO2[6] = 5.64406e-1
## Fission Cross Section
F_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_UO2[0] = 7.21206e-3
F_UO2[1] = 8.19301e-4
F_UO2[2] = 6.45320e-3
F_UO2[3] = 1.85648e-2
F_UO2[4] = 1.78084e-2
F_UO2[5] = 8.30348e-2
F_UO2[6] = 2.16004e-1
## Nu
N_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_UO2[0] = 2.78145
N_UO2[1] = 2.47443
N_UO2[2] = 2.43383
N_UO2[3] = 2.43380
N_UO2[4] = 2.43380
N_UO2[5] = 2.43380
N_UO2[6] = 2.43380
## Chi
C_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_UO2[0] = 5.87910e-1
C_UO2[1] = 4.11760e-1
C_UO2[2] = 3.39060e-4
C_UO2[3] = 1.17610e-7
C_UO2[4] = 0.00000000
C_UO2[5] = 0.00000000
C_UO2[6] = 0.00000000
## Scattering Matrix for UO2 Fuel-Clad (Macroscopic)
S_UO2 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]]]
S_UO2[0] = [[1.27537e-1]]
S_UO2[1] = [[4.23780e-2], [3.24456e-1]]
S_UO2[2] = [[9.43740e-6], [1.63140e-3], [4.50940e-1]]
S_UO2[3] = [[5.51630e-9], [3.14270e-9], [2.67920e-3], [4.52565e-1], [1.25250e-4]]
S_UO2[4] = [[0.00000000], [0.00000000], [0.00000000], [5.56640e-3], [2.71401e-1], [1.29680e-3]]
S_UO2[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.02550e-2], [2.65802e-1], [8.54580e-3]]
S_UO2[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.00210e-8], [1.68090e-2], [2.73080e-1]]
## Upscattering Matrix
U_UO2 = [ [], [], [], [], [], [], [] ]
U_UO2[0] = []
U_UO2[1] = []
U_UO2[2] = []
U_UO2[3] = [4]
U_UO2[4] = [5]
U_UO2[5] = [6]
U_UO2[6] = []
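## NOTE (data layout, inferred from the assign_upscatter calls further down):
## S_*[g] holds the scattering cross sections into group g from each
## contributing source group, and U_*[g] lists the source groups that
## upscatter into g (empty where there is no upscatter).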
######## 4.3% MOX Fuel-Clad Macroscopic Cross-Sections ############
## Transport-corrected Total Cross Sections
T_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOX43[0] = 1.78731e-1
T_MOX43[1] = 3.30849e-1
T_MOX43[2] = 4.83772e-1
T_MOX43[3] = 5.66922e-1
T_MOX43[4] = 4.26227e-1
T_MOX43[5] = 6.78997e-1
T_MOX43[6] = 6.82852e-1
## Fission Cross-Sections
F_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_MOX43[0] = 7.62704e-3
F_MOX43[1] = 8.76898e-4
F_MOX43[2] = 5.69835e-3
F_MOX43[3] = 2.28872e-2
F_MOX43[4] = 1.07635e-2
F_MOX43[5] = 2.32757e-1
F_MOX43[6] = 2.48968e-1
## Nu Cross-Sections
N_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_MOX43[0] = 2.85209
N_MOX43[1] = 2.89099
N_MOX43[2] = 2.85486
N_MOX43[3] = 2.86073
N_MOX43[4] = 2.85447
N_MOX43[5] = 2.86415
N_MOX43[6] = 2.86780
## Chi Cross-Sections
C_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_MOX43[0] = 5.87910e-1
C_MOX43[1] = 4.11760e-1
C_MOX43[2] = 3.39060e-4
C_MOX43[3] = 1.17610e-7
C_MOX43[4] = 0.00000000
C_MOX43[5] = 0.00000000
C_MOX43[6] = 0.00000000
## Scattering Matrix for 4.3% MOX Fuel-Clad (Macroscopic)
S_MOX43 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOX43[0] = [[1.28876e-1]]
S_MOX43[1] = [[4.14130e-2], [3.25452e-1]]
S_MOX43[2] = [[8.22900e-6], [1.63950e-3], [4.53188e-1]]
S_MOX43[3] = [[5.04050e-9], [1.59820e-9], [2.61420e-3], [4.57173e-1], [1.60460e-4]]
S_MOX43[4] = [[0.00000000], [0.00000000], [0.00000000], [5.53940e-3], [2.76814e-1], [2.00510e-3]]
S_MOX43[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.31270e-3], [2.52962e-1], [8.49480e-3]]
S_MOX43[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.16560e-9], [1.48500e-2], [2.65007e-1]]
## Upscattering Matrix
U_MOX43 = [ [], [], [], [], [], [], [] ]
U_MOX43[0] = []
U_MOX43[1] = []
U_MOX43[2] = []
U_MOX43[3] = [4]
U_MOX43[4] = [5]
U_MOX43[5] = [6]
U_MOX43[6] = []
############### Moderator 1 Macroscopic Cross-Sections ################
## Transport-corrected Total Cross Section
T_MOD1 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOD1[0] = 1.59206e-1
T_MOD1[1] = 4.12970e-1
T_MOD1[2] = 5.90310e-1
T_MOD1[3] = 5.84350e-1
T_MOD1[4] = 7.18000e-1
T_MOD1[5] = 1.25445
T_MOD1[6] = 2.65038
## Scattering Matrix for Moderator (Macroscopic)
S_MOD1 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOD1[0] = [[4.44777e-2]]
S_MOD1[1] = [[1.13400e-1], [2.82334e-1]]
S_MOD1[2] = [[7.23470e-4], [1.29940e-1], [3.45256e-1]]
S_MOD1[3] = [[3.74990e-6], [6.23400e-4], [2.24570e-1], [9.10284e-2], [7.14370e-5]]
S_MOD1[4] = [[5.31840e-8], [4.80020e-5], [1.69990e-2], [4.15510e-1], [1.39138e-1], [2.21570e-3]]
S_MOD1[5] = [[0.00000000], [7.44860e-6], [2.64430e-3], [6.37320e-2], [5.11820e-1], [6.99913e-1], [1.32440e-1]]
S_MOD1[6] = [[0.00000000], [1.04550e-6], [5.03440e-4], [1.21390e-2], [6.12290e-2], [5.37320e-1], [2.48070 ]]
## Upscattering Matrix
U_MOD1 = [ [], [], [], [], [], [], [] ]
U_MOD1[0] = []
U_MOD1[1] = []
U_MOD1[2] = []
U_MOD1[3] = [4]
U_MOD1[4] = [5]
U_MOD1[5] = [6]
U_MOD1[6] = []
################### Create nuf vectors
NUF_UO2 = []
NUF_MOX43 = []
for i in range(0, 7):
NUF_UO2.append( N_UO2[i] * F_UO2[i] )
NUF_MOX43.append( N_MOX43[i] * F_MOX43[i] )
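## NUF_* now hold the per-group production cross sections nu*sigma_f, used
## below when assigning fission data to the XS database.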
##---------------------------------------------------------------------------##
## BUILD MESH
def build_mesh(N):
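    """Build the 2x2 UO2/MOX mini-core mesh with a moderator border.

    N is the pin-cell refinement passed to Lattice.build_lattice. Returns
    (xy_planes, z_planes, mixids, cleanids, mixing_table), matching the
    unpacking at the call site below. (Docstring added for readability; the
    contract is read off the return statement and the call site.)
    """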
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# UO2 pins
uo2_pin = Pincell()
uo2_ids = [1]
uo2_r = [0.4759]
uo2_pin.set_shells(uo2_ids, uo2_r, 3)
# MOX pins
mox_pin = Pincell()
mox_ids = [2]
mox_r = [0.4759]
mox_pin.set_shells(mox_ids, mox_r, 3)
# Make a 2x2 uo2 lattice and a 2x2 mox lattice
uo2_lat = Lattice(2)
mox_lat = Lattice(2)
# lattices are uniform
layout = [0, 0, 0, 0]
uo2_lat.set_pins(layout)
mox_lat.set_pins(layout)
# assign the pins in the lattices
uo2_lat.assign_pin(uo2_pin, 0)
mox_lat.assign_pin(mox_pin, 0)
# build the lattice
uo2_lat.build_lattice(N)
mox_lat.build_lattice(N)
# print out mixing tables
if print_it:
print "UO2 Lattice"
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
print "MOX Lattice"
for m in xrange(mox_lat.num_mixtures()):
vf = mox_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
# make the mixtable for the combined lattices by appending the mox table
# to the UO2 table (don't include the clean mixtures at the front of the
# table)
num_mixtures = uo2_lat.num_mixtures() + mox_lat.num_mixtures() - 4
table = Vec_Dbl(num_mixtures * 4)
ctr = 0
mox_offset = uo2_lat.num_mixtures()
# add UO2 mixtures
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# add MOX mixtures, skipping the clean mixes
for m in xrange(4, mox_lat.num_mixtures()):
vf = mox_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# make the cleanids
cleanids = [0, 1, 2, 3]
# the total core is 3x3 assemblies (2x2 fuel surrounded by water)
xylat = uo2_lat.xy_planes()
Nr = len(xylat) - 1
delta = Vec_Dbl(Nr, 0.0)
for i in xrange(Nr):
delta[i] = xylat[i+1] - xylat[i]
if Nr % 2 != 0:
print "Non-even lattices cells."
sys.exit(1)
# build the core planes
xycore = Vec_Dbl(int(2.5*Nr) + 1, 0.0)
for n in xrange(2):
for i in xrange(Nr):
index = i + n * Nr
xycore[index + 1] = xycore[index] + delta[i]
for i in xrange(Nr/2):
index = i + 2 * Nr
xycore[index + 1] = xycore[index] + delta[i]
# z-planes (14 in each assembly)
height = 14.28 * 1.5
Nz = 21
z = [0.0] * (Nz + 1)
dz = height / float(Nz)
for k in xrange(Nz):
z[k+1] = z[k] + dz
# get matids for each lattice
uo2ids = Vec_Int(uo2_lat.mixids())
moxids = Vec_Int(mox_lat.mixids())
# update the mox mixtures (leave clean zones alone)
for m in xrange(len(moxids)):
if moxids[m] > 3:
moxids[m] = moxids[m] + mox_offset - 4
# assign the matids
Nx = len(xycore) - 1
Ny = len(xycore) - 1
# arrangement
# |-----|-----|-----|
# | | | |
# | mod | mod | mod |
# | | | |
# |-----|-----|-----|
# | | | |
# | mox | uo2 | mod | y
# | | | |
# |-----|-----|-----|
# | | | |
# | uo2 | mox | mod |
# | | | |
# |-----|-----|-----|
# x
mixids = Vec_Int(Nx * Ny * Nz, 3)
kend = Nz / 2
# (0, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + j * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
# (1, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + j * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (0, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (1, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
return (xycore, z, mixids, cleanids, table)
##---------------------------------------------------------------------------##
## DB
##---------------------------------------------------------------------------##
entries = {
"problem_type" : "FIXED_SOURCE",
"num_groups" : 7,
"downscatter" : False,
"Pn_order" : 0,
"tolerance" : 1.0e-3,
"max_itr" : 400,
"linear_solver_xml_file" : "azilut01.xml",
"boundary" : "reflect",
"boundary_db" : {"reflect" : [1, 0, 1, 0, 1, 0]},
"SPN_order" : 5
}
db = DB.from_dict(entries)
# decomposition
if nodes() == 1:
db.insert("num_blocks_i", 1)
db.insert("num_blocks_j", 1)
elif nodes() == 2:
db.insert("num_blocks_i", 2)
db.insert("num_blocks_j", 1)
elif nodes() == 16:
db.insert("num_blocks_i", 4)
db.insert("num_blocks_j", 4)
# Mesh
(r, z, mixids, cleanids, table) = build_mesh(10)
db.insert("x_edges", r)
db.insert("y_edges", r)
db.insert("z_edges", z)
##---------------------------------------------------------------------------##
## MANAGER
##---------------------------------------------------------------------------##
# make manager, material, and angles
manager = Manager()
mat = Mat()
# partition the problem
manager.partition(db, mat)
# get mapping and mesh objects
mapp = manager.get_map()
indexer = manager.get_indexer()
mesh = manager.get_mesh()
# global and local cell numbers
Gx = indexer.num_global(X)
Gy = indexer.num_global(Y)
Gz = mesh.num_cells_dim(Z)
Nx = mesh.num_cells_dim(X)
Ny = mesh.num_cells_dim(Y)
Nz = mesh.num_cells_dim(Z)
if node() == 0:
print ">>> Partitioned global mesh with %i x %i x %i cells" \
% (Gx, Gy, Gz)
##---------------------------------------------------------------------------##
## MATERIAL SETUP
##---------------------------------------------------------------------------##
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# set database
xsdb = XS_DB(db)
xsdb.set_num(4)
xsdb.assign_zero(0)
for g in xrange(0, xsdb.num_groups()):
xsdb.assign_upscatter(1, g, T_UO2[g], U_UO2[g], S_UO2[g])
xsdb.assign_upscatter(2, g, T_MOX43[g], U_MOX43[g], S_MOX43[g])
xsdb.assign_upscatter(3, g, T_MOD1[g], U_MOD1[g], S_MOD1[g])
## Assign fission data
xsdb.assign_fission(1, NUF_UO2, C_UO2)
xsdb.assign_fission(2, NUF_MOX43, C_MOX43)
# make macro mixer
mixer = Macro_Mixer(xsdb)
mixer.set(cleanids, table)
# make the material database
mixer.mix_with_global_ids(mixids, mat)
##---------------------------------------------------------------------------##
## ENERGY PARTITIONING
##---------------------------------------------------------------------------##
manager.partition_energy(mat)
##---------------------------------------------------------------------------##
## SOURCE SETUP
##---------------------------------------------------------------------------##
# allocate source and problem state
source = Isotropic_Source()
manager.setup(source)
total = Gx * Gy * Gz
Ng = mat.num_groups()
srcids = Vec_Int(total, 0)
srcstr = Vec_Dbl(total, 0.0)
num_shapes = 2
shapes = Vec_Dbl(2 * mat.num_groups(), 0.0)
chi0 = xsdb.fission_data(1, 0, CHI)
chi1 = xsdb.fission_data(2, 0, CHI)
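# NB: the UO2 and MOX chi vectors defined above are numerically identical, so
# the group-0 comparison against chi1 below tags every fissionable cell with
# source id 1; chi0 is kept only for symmetry. (Observation on the data, not
# a behavior change.)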
# source 0 spectrum -> UO2 Chi
# source 1 spectrum -> MOX Chi
# make shapes
ctr = 0
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(1, g, CHI)
ctr += 1
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(2, g, CHI)
ctr += 1
# assign ids and strengths
for cell in xrange(total):
matid = mixids[cell]
if mat.assigned_fission(matid):
for g in xrange(Ng):
srcstr[cell] += mat.fission_data(matid, g, NU_SIGMA_F)
if mat.fission_data(matid, 0, CHI) == chi1:
srcids[cell] = 1
# set the source
source.set(num_shapes, shapes, srcids, srcstr)
##---------------------------------------------------------------------------##
## SOLVE
##---------------------------------------------------------------------------##
if node() == 0:
print ">>> Setup complete"
print ">>> Solving with %s differencing" % (manager.spatial_descriptor())
# solve the problem
manager.solve(source)
##---------------------------------------------------------------------------##
## OUTPUT
##---------------------------------------------------------------------------##
# make SILO output
silo = SILO()
silo.add_mixer(mixer)
silo.open("fs")
phi = Vec_Dbl(mesh.num_cells(), 0.0)
for g in xrange(Ng):
flux = manager.moments(g)
for cell in xrange(mesh.num_cells()):
phi[cell] = phi[cell] + flux.scalar_flux(cell)
silo.add("phi", phi)
silo.close()
##---------------------------------------------------------------------------##
## TIMING
##---------------------------------------------------------------------------##
# output final database (has class-dependent defaults)
db.output()
timer.stop()
time = timer.wall_clock()
keys = timer_keys()
if len(keys) > 0 and node() == 0:
print "\n"
print "TIMING : Problem ran in %16.6e seconds." % (time)
print "------------------------------------------------------------------"
for key in keys:
print "%30s : %16.6e %16.6e" % (key, timer_value(key) / time, timer_value(key))
print "------------------------------------------------------------------"
##---------------------------------------------------------------------------##
manager.close()
finalize()
###############################################################################
## end of fs.py
###############################################################################
""" Module to vette results against Human catalogs
SDSS-DR5 (JXP) and BOSS (Notredaeme)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.table import Table
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from linetools import utils as ltu
from pyigm.surveys.llssurvey import LLSSurvey
from pyigm.surveys.dlasurvey import DLASurvey
def json_to_sdss_dlasurvey(json_file, sdss_survey, add_pf=True, debug=False):
""" Convert JSON output file to a DLASurvey object
Assumes SDSS bookkeeping for sightlines (i.e. PLATE, FIBER)
Parameters
----------
json_file : str
Full path to the JSON results file
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
add_pf : bool, optional
Add plate/fiber to DLAs in sdss_survey
Returns
-------
ml_survey : LLSSurvey
Survey object for the LLS
"""
print("Loading SDSS Survey from JSON file {:s}".format(json_file))
# imports
from pyigm.abssys.dla import DLASystem
from pyigm.abssys.lls import LLSSystem
# Fiber key
for fkey in ['FIBER', 'FIBER_ID', 'FIB']:
if fkey in sdss_survey.sightlines.keys():
break
# Read
ml_results = ltu.loadjson(json_file)
    use_platef = False
    use_id = False
    if 'plate' in ml_results[0].keys():
        use_platef = True
    elif 'id' in ml_results[0].keys():
        use_id = True
# Init
#idict = dict(plate=[], fiber=[], classification_confidence=[], # FOR v2
# classification=[], ra=[], dec=[])
idict = dict(ra=[], dec=[])
if use_platef:
for key in ['plate', 'fiber', 'mjd']:
idict[key] = []
ml_tbl = Table()
ml_survey = LLSSurvey()
systems = []
in_ml = np.array([False]*len(sdss_survey.sightlines))
# Loop
for obj in ml_results:
# Sightline
for key in idict.keys():
idict[key].append(obj[key])
# DLAs
#if debug:
# if (obj['plate'] == 1366) & (obj['fiber'] == 614):
# sv_coord = SkyCoord(ra=obj['ra'], dec=obj['dec'], unit='deg')
# print("GOT A MATCH IN RESULTS FILE")
for idla in obj['dlas']:
"""
dla = DLASystem((sdss_survey.sightlines['RA'][mt[0]],
sdss_survey.sightlines['DEC'][mt[0]]),
idla['spectrum']/(1215.6701)-1., None,
idla['column_density'])
"""
if idla['z_dla'] < 1.8:
continue
isys = LLSSystem((obj['ra'],obj['dec']),
idla['z_dla'], None, NHI=idla['column_density'], zem=obj['z_qso'])
isys.confidence = idla['dla_confidence']
if use_platef:
isys.plate = obj['plate']
isys.fiber = obj['fiber']
elif use_id:
plate, fiber = [int(spl) for spl in obj['id'].split('-')]
isys.plate = plate
isys.fiber = fiber
# Save
systems.append(isys)
# Connect to sightlines
ml_coord = SkyCoord(ra=idict['ra'], dec=idict['dec'], unit='deg')
s_coord = SkyCoord(ra=sdss_survey.sightlines['RA'], dec=sdss_survey.sightlines['DEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(s_coord, ml_coord, nthneighbor=1)
used = d2d < 1.*u.arcsec
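    # match_coordinates_sky returned, for each sightline, its nearest ML
    # coordinate; 'used' now flags sightlines whose nearest match lies within
    # 1 arcsec, and the loop below reports the rest.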
for iidx in np.where(~used)[0]:
print("Sightline RA={:g}, DEC={:g} was not used".format(sdss_survey.sightlines['RA'][iidx],
sdss_survey.sightlines['DEC'][iidx]))
# Add plate/fiber to statistical DLAs
if add_pf:
dla_coord = sdss_survey.coord
idx2, d2d, d3d = match_coordinates_sky(dla_coord, s_coord, nthneighbor=1)
if np.min(d2d.to('arcsec').value) > 1.:
raise ValueError("Bad match to sightlines")
for jj,igd in enumerate(np.where(sdss_survey.mask)[0]):
dla = sdss_survey._abs_sys[igd]
try:
dla.plate = sdss_survey.sightlines['PLATE'][idx2[jj]]
except IndexError:
pdb.set_trace()
dla.fiber = sdss_survey.sightlines[fkey][idx2[jj]]
# Finish
ml_survey._abs_sys = systems
if debug:
ml2_coord = ml_survey.coord
minsep = np.min(sv_coord.separation(ml2_coord))
minsep2 = np.min(sv_coord.separation(s_coord))
tmp = sdss_survey.sightlines[used]
t_coord = SkyCoord(ra=tmp['RA'], dec=tmp['DEC'], unit='deg')
minsep3 = np.min(sv_coord.separation(t_coord))
pdb.set_trace()
ml_survey.sightlines = sdss_survey.sightlines[used]
for key in idict.keys():
ml_tbl[key] = idict[key]
ml_survey.ml_tbl = ml_tbl
# Return
return ml_survey
def vette_dlasurvey(ml_survey, sdss_survey, fig_root='tmp', lyb_cut=True,
dz_toler=0.03, debug=False):
"""
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
fig_root : str, optional
Root string for figures generated
lyb_cut : bool, optional
Cut surveys at Lyb in QSO rest-frame.
Recommended until LLS, Lyb and OVI is dealt with
dz_toler : float, optional
Tolerance for matching in redshift
    Returns
    -------
    false_neg : list
        List of systems that are false negatives from SDSS -> ML
    midx : ndarray
        Array of indices matching SDSS -> ML (-1 where unmatched)
    false_pos : list
        List of systems that are false positives from ML -> SDSS
    """
    from pyigm.surveys import dlasurvey as pyis_ds
    try:
        from importlib import reload  # Python 3
    except ImportError:
        pass  # reload is a builtin on Python 2
    reload(pyis_ds)
# Cut at Lyb
if lyb_cut:
for survey in [ml_survey, sdss_survey]:
# Alter Z_START
zlyb = (1+survey.sightlines['ZEM']).data*1026./1215.6701 - 1.
survey.sightlines['Z_START'] = np.maximum(survey.sightlines['Z_START'], zlyb)
# Mask
mask = pyis_ds.dla_stat(survey, survey.sightlines, zem_tol=0.2) # Errors in zem!
survey.mask = mask
print("Done cutting on Lyb")
# Setup coords
ml_coords = ml_survey.coord
ml_z = ml_survey.zabs
s_coords = sdss_survey.coord
s_z = sdss_survey.zabs
# if debug:
# miss_coord = SkyCoord(ra=174.35545833333333,dec=44.585,unit='deg')
# minsep = np.min(miss_coord.separation(ml_coords))
# s_coord = SkyCoord(ra=ml_survey.sightlines['RA'], dec=ml_survey.sightlines['DEC'], unit='deg')
# isl = np.argmin(miss_coord.separation(s_coord))
# Match from SDSS and record false negatives
false_neg = []
midx = []
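    # A human DLA counts as recovered when some ML system lies within 1 arcsec
    # on the sky AND within dz_toler in redshift; anything else is recorded as
    # a false negative with midx of -1.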
for igd in np.where(sdss_survey.mask)[0]:
isys = sdss_survey._abs_sys[igd]
# Match?
gd_radec = np.where(isys.coord.separation(ml_coords) < 1*u.arcsec)[0]
sep = isys.coord.separation(ml_coords)
if len(gd_radec) == 0:
false_neg.append(isys)
midx.append(-1)
else:
gdz = np.abs(ml_z[gd_radec] - isys.zabs) < dz_toler
# Only require one match
if np.sum(gdz) > 0:
iz = np.argmin(np.abs(ml_z[gd_radec] - isys.zabs))
midx.append(gd_radec[iz])
else:
false_neg.append(isys)
midx.append(-1)
if debug:
if (isys.plate == 1366) & (isys.fiber == 614):
pdb.set_trace()
# Match from ML and record false positives
false_pos = []
pidx = []
for igd in np.where(ml_survey.mask)[0]:
isys = ml_survey._abs_sys[igd]
# Match?
gd_radec = np.where(isys.coord.separation(s_coords) < 1*u.arcsec)[0]
sep = isys.coord.separation(s_coords)
if len(gd_radec) == 0:
false_pos.append(isys)
pidx.append(-1)
else:
gdz = np.abs(s_z[gd_radec] - isys.zabs) < dz_toler
# Only require one match
if np.sum(gdz) > 0:
iz = np.argmin(np.abs(s_z[gd_radec] - isys.zabs))
pidx.append(gd_radec[iz])
else:
false_pos.append(isys)
pidx.append(-1)
# Return
return false_neg, np.array(midx), false_pos
def mk_false_neg_table(false_neg, outfil):
""" Generate a simple CSV file of false negatives
Parameters
----------
false_neg : list
List of false negative systems
outfil : str
Returns
-------
"""
# Parse
ra, dec = [], []
zabs, zem = [], []
NHI = []
plate, fiber = [], []
for ifneg in false_neg:
ra.append(ifneg.coord.ra.value)
dec.append(ifneg.coord.dec.value)
zabs.append(ifneg.zabs)
zem.append(ifneg.zem)
NHI.append(ifneg.NHI)
plate.append(ifneg.plate)
fiber.append(ifneg.fiber)
# Generate a Table
fneg_tbl = Table()
fneg_tbl['RA'] = ra
fneg_tbl['DEC'] = dec
fneg_tbl['zabs'] = zabs
fneg_tbl['zem'] = zem
fneg_tbl['NHI'] = NHI
fneg_tbl['plate'] = plate
fneg_tbl['fiber'] = fiber
# Write
print("Writing false negative file: {:s}".format(outfil))
fneg_tbl.write(outfil, format='ascii.csv')#, overwrite=True)
def fig_dzdnhi(ml_survey, sdss_survey, midx, outfil='fig_dzdnhi.pdf'):
""" Compare zabs and NHI between SDSS and ML
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
This should be masked according to the vetting
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
This should be masked according to the vetting
midx : list
List of indices matching SDSS -> ML
outfil : str, optional
Input None to plot to screen
Returns
-------
"""
# z, NHI
z_sdss = sdss_survey.zabs
z_ml = ml_survey.zabs
NHI_sdss = sdss_survey.NHI
NHI_ml = ml_survey.NHI
# deltas
dz = []
dNHI = []
for qq,idx in enumerate(midx):
if idx < 0:
continue
# Match
dz.append(z_sdss[qq]-z_ml[idx])
dNHI.append(NHI_sdss[qq]-NHI_ml[idx])
# Figure
if outfil is not None:
pp = PdfPages(outfil)
fig = plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(1, 2)
# dz
ax = plt.subplot(gs[0])
ax.hist(dz, color='green', bins=20)#, normed=True)#, bins=20 , zorder=1)
#ax.text(0.05, 0.74, lbl3, transform=ax.transAxes, color=wcolor, size=csz, ha='left')
ax.set_xlim(-0.03, 0.03)
ax.set_xlabel(r'$\delta z$ [SDSS-ML]')
# NHI
ax = plt.subplot(gs[1])
ax.hist(dNHI, color='blue', bins=20)#, normed=True)#, bins=20 , zorder=1)
#ax.text(0.05, 0.74, lbl3, transform=ax.transAxes, color=wcolor, size=csz, ha='left')
#ax.set_xlim(-0.03, 0.03)
ax.set_xlabel(r'$\Delta \log N_{\rm HI}$ [SDSS-ML]')
#
# End
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)
if outfil is not None:
print('Writing {:s}'.format(outfil))
pp.savefig()
pp.close()
plt.close()
else:
plt.show()
def fig_falseneg(ml_survey, sdss_survey, false_neg, outfil='fig_falseneg.pdf'):
""" Figure on false negatives
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
This should be masked according to the vetting
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
This should be masked according to the vetting
    false_neg : list
        List of false negatives
outfil : str, optional
Input None to plot to screen
Returns
-------
"""
# Generate some lists
zabs_false = [isys.zabs for isys in false_neg]
zem_false = [isys.zem for isys in false_neg]
NHI_false = [isys.NHI for isys in false_neg]
# Figure
if outfil is not None:
pp = PdfPages(outfil)
fig = plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(2, 2)
# zabs
ax = plt.subplot(gs[0])
ax.hist(zabs_false, color='green', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$z_{\rm abs}$')
# zem
ax = plt.subplot(gs[1])
ax.hist(zem_false, color='red', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$z_{\rm qso}$')
# NHI
ax = plt.subplot(gs[2])
ax.hist(NHI_false, color='blue', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$\log \, N_{\rm HI}$')
# End
    plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.1)
if outfil is not None:
print('Writing {:s}'.format(outfil))
pp.savefig()
pp.close()
plt.close()
else:
plt.show()
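# Sketch (same vetting products as above):
#   fig_falseneg(ml_survey, sdss_survey, false_neg, outfil='fig_falseneg.pdf')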
def dr5_for_david():
""" Generate a Table for David
"""
# imports
from pyigm.abssys.dla import DLASystem
from pyigm.abssys.lls import LLSSystem
sdss_survey = DLASurvey.load_SDSS_DR5()
# Fiber key
    for fkey in ['FIBER', 'FIBER_ID', 'FIB']:
        if fkey in sdss_survey.sightlines.keys():
            break
    else:
        raise KeyError("No fiber column found in the sightline table")
# Connect to sightlines
s_coord = SkyCoord(ra=sdss_survey.sightlines['RA'], dec=sdss_survey.sightlines['DEC'], unit='deg')
# Add plate/fiber to statistical DLAs
dla_coord = sdss_survey.coord
idx2, d2d, d3d = match_coordinates_sky(dla_coord, s_coord, nthneighbor=1)
    # Every DLA should sit on a sightline; check the worst match
    if np.max(d2d.to('arcsec').value) > 1.:
        raise ValueError("Bad match to sightlines")
plates, fibers = [], []
    for jj, igd in enumerate(np.where(sdss_survey.mask)[0]):
        dla = sdss_survey._abs_sys[igd]
        dla.plate = sdss_survey.sightlines['PLATE'][idx2[jj]]
        dla.fiber = sdss_survey.sightlines[fkey][idx2[jj]]
plates.append(sdss_survey.sightlines['PLATE'][idx2[jj]])
fibers.append(sdss_survey.sightlines[fkey][idx2[jj]])
# Write
dtbl = Table()
dtbl['plate'] = plates
dtbl['fiber'] = fibers
dtbl['zabs'] = sdss_survey.zabs
dtbl['NHI'] = sdss_survey.NHI
dtbl.write('results/dr5_for_david.ascii', format='ascii')
# Write sightline info
stbl = sdss_survey.sightlines[['PLATE', 'FIB', 'Z_START', 'Z_END', 'RA', 'DEC']]
gdsl = stbl['Z_END'] > stbl['Z_START']
stbl[gdsl].write('results/dr5_sightlines_for_david.ascii', format='ascii')
def main(flg_tst, sdss=None, ml_survey=None):
# Load JSON for DR5
    if flg_tst & 2**0:
if sdss is None:
sdss = DLASurvey.load_SDSS_DR5()
#ml_survey = json_to_sdss_dlasurvey('../results/dr5_v1_predictions.json', sdss)
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v2_results.json', sdss)
# Vette
    if flg_tst & 2**1:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v2_results.json', sdss)
vette_dlasurvey(ml_survey, sdss)
# Vette v5 and generate CSV
    if flg_tst & 2**2:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v5_predictions.json', sdss)
false_neg, midx, _ = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v5.csv')
# Vette v6 and generate CSV
    if flg_tst & 2**3:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v6.1_results.json', sdss)
false_neg, midx, _ = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v6.1.csv')
# Vette gensample v2
    if flg_tst & 2**4:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
        ml_survey = json_to_sdss_dlasurvey('../results/results_catalog_dr7_model_gensample_v2.json', sdss)
false_neg, midx, false_pos = vette_dlasurvey(ml_survey, sdss)
        # CSVs of false negatives and false positives
        mk_false_neg_table(false_neg, '../results/false_negative_DR5_v2_gen.csv')
        mk_false_neg_table(false_pos, '../results/false_positives_DR5_v2_gen.csv')
# Vette gensample v4.3.1
if flg_tst & (2**5):
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
        ml_survey = json_to_sdss_dlasurvey('../results/results_model_4.3.1_data_dr5.json', sdss)
false_neg, midx, false_pos = vette_dlasurvey(ml_survey, sdss)
        # CSVs of false negatives and false positives
        mk_false_neg_table(false_neg, '../results/false_negative_DR5_v4.3.1_gen.csv')
        mk_false_neg_table(false_pos, '../results/false_positives_DR5_v4.3.1_gen.csv')
if flg_tst & (2**6):
dr5_for_david()
# Test
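# `flg_tst` is a bitmask: add 2**n to enable the matching step in main(),
# e.g. flg_tst = 2**2 + 2**3 vettes both the v5 and v6.1 predictions.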
if __name__ == '__main__':
flg_tst = 0
#flg_tst += 2**0 # Load JSON for DR5
#flg_tst += 2**1 # Vette
#flg_tst += 2**2 # v5
#flg_tst += 2**3 # v6.1
#flg_tst += 2**4 # v2 of gensample
#flg_tst += 2**5 # v4.3.1 of gensample
flg_tst += 2**6 # Generate DR5 table for David
main(flg_tst)
from copy import deepcopy
from cms.extensions.toolbar import ExtensionToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from cms.api import create_page, create_title
from cms.constants import PUBLISHER_STATE_DIRTY
from cms.extensions import extension_pool
from cms.extensions import TitleExtension
from cms.extensions import PageExtension
from cms.models import Page, PageType
from cms.test_utils.project.extensionapp.models import MyPageExtension, MyTitleExtension
from cms.test_utils.project.extensionapp.models import MultiTablePageExtension, MultiTableTitleExtension
from cms.test_utils.testcases import CMSTestCase as TestCase
from cms.tests.test_admin import AdminTestsBase
class ExtensionsTestCase(TestCase):
def test_register_extension(self):
initial_extension_count = len(extension_pool.page_extensions)
# --- None extension registering -----------------------------
from cms.exceptions import SubClassNeededError
none_extension = self.get_none_extension_class()
self.assertRaises(SubClassNeededError, extension_pool.register, none_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count)
# --- Page registering ---------------------------------------
page_extension = self.get_page_extension_class()
# register first time
extension_pool.register(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count+1)
# register second time
extension_pool.register(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count+1)
self.assertIs(extension_pool.signaling_activated, True)
# --- Title registering --------------------------------------
title_extension = self.get_title_extension_class()
# register first time
extension_pool.register(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count+1)
# register second time
extension_pool.register(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count+1)
self.assertIs(extension_pool.signaling_activated, True)
# --- Unregister ---------------------------------------------
extension_pool.unregister(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count)
extension_pool.unregister(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count)
# Unregister an object that is not registered yet
extension_pool.unregister(page_extension)
extension_pool.unregister(title_extension)
def get_page_extension_class(self):
from django.db import models
class TestPageExtension(PageExtension):
content = models.CharField('Content', max_length=50)
class Meta:
abstract = True
return TestPageExtension
def get_title_extension_class(self):
from django.db import models
class TestTitleExtension(TitleExtension):
content = models.CharField('Content', max_length=50)
class Meta:
abstract = True
return TestTitleExtension
def get_none_extension_class(self):
class TestNoneExtension(object):
pass
return TestNoneExtension
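    # For reference, a real project registers its extension once at import
    # time; a minimal sketch (the model and field below are illustrative):
    #   from cms.extensions import PageExtension, extension_pool
    #   class IconExtension(PageExtension):
    #       image = models.ImageField(upload_to='icons')
    #   extension_pool.register(IconExtension)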
def test_copy_extensions(self):
root = create_page('Root', "nav_playground.html", "en", published=True)
page = create_page('Test Page Extension', "nav_playground.html", "en",
parent=root.get_draft_object())
subpage = create_page('Test subpage Extension', "nav_playground.html", "en",
parent=page)
page = Page.objects.get(pk=page.pk)
page_extension = MyPageExtension(extended_object=page, extra='page extension 1')
page_extension.save()
page.mypageextension = page_extension
title = page.get_title_obj()
title_extension = MyTitleExtension(extended_object=title, extra_title='title extension 1')
title_extension.save()
page.mytitleextension = title_extension
subpage_extension = MyPageExtension(extended_object=subpage, extra='page extension 2')
subpage_extension.save()
subpage.mypageextension = subpage_extension
subtitle = subpage.get_title_obj()
subtitle_extension = MyTitleExtension(extended_object=subtitle, extra_title='title extension 2')
subtitle_extension.save()
subpage.mytitleextension = subtitle_extension
# asserting original extensions
self.assertEqual(len(extension_pool.get_page_extensions()), 2)
self.assertEqual(len(extension_pool.get_title_extensions()), 2)
copied_page = page.copy_with_descendants(page.site, target_node=None, position='last-child')
# asserting original + copied extensions
self.assertEqual(len(extension_pool.get_page_extensions()), 4)
self.assertEqual(len(extension_pool.get_title_extensions()), 4)
# testing extension content
old_page_extensions = [page_extension, subpage_extension]
old_title_extension = [title_extension, subtitle_extension]
for index, new_node in enumerate([copied_page.node] + list(copied_page.node.get_descendants())):
new_page = new_node.page
self.assertEqual(extension_pool.get_page_extensions(new_page)[0].extra,
old_page_extensions[index].extra)
self.assertEqual(extension_pool.get_title_extensions(new_page.title_set.get(language='en'))[0].extra_title,
old_title_extension[index].extra_title)
# check that objects are actually different
self.assertNotEqual(extension_pool.get_page_extensions(new_page)[0].pk,
old_page_extensions[index].pk)
self.assertNotEqual(extension_pool.get_title_extensions(new_page.title_set.get(language='en'))[0].pk,
old_title_extension[index].pk)
# Test deleting original page for #3987
page.delete()
# asserting original extensions are gone, but copied ones should still exist
self.assertEqual(len(extension_pool.get_page_extensions()), 2)
self.assertEqual(len(extension_pool.get_title_extensions()), 2)
def test_copy_multitable_extensions(self):
root = create_page('Root', "nav_playground.html", "en", published=True)
page = create_page('Test Multi-Table Page Extension', "nav_playground.html", "en",
parent=root.get_draft_object())
subpage = create_page('Test Multi-Table subpage Extension', "nav_playground.html", "en",
parent=page)
page = Page.objects.get(pk=page.pk)
page_extension = MultiTablePageExtension(extended_object=page,
extension_parent_field='page extension 1',
multitable_extra='multi-table page extension 1')
page_extension.save()
page.multitablepageextension = page_extension
title = page.get_title_obj()
title_extension = MultiTableTitleExtension(extended_object=title,
extension_title_parent_field='title extension 1',
multitable_extra_title='multi-table title extension 1')
title_extension.save()
page.multitabletitleextension = title_extension
subpage_extension = MultiTablePageExtension(extended_object=subpage,
extension_parent_field='page extension 2',
multitable_extra='multi-table page extension 2')
subpage_extension.save()
subpage.multitablepageextension = subpage_extension
subtitle = subpage.get_title_obj()
subtitle_extension = MultiTableTitleExtension(extended_object=subtitle,
extension_title_parent_field='title extension 2',
multitable_extra_title='multi-table title extension 2')
subtitle_extension.save()
subpage.multitabletitleextension = subtitle_extension
# asserting original extensions
self.assertEqual(len(extension_pool.get_page_extensions()), 2)
self.assertEqual(len(extension_pool.get_title_extensions()), 2)
copied_page = page.copy_with_descendants(page.site, target_node=None, position='last-child')
# asserting original + copied extensions
self.assertEqual(len(extension_pool.get_page_extensions()), 4)
self.assertEqual(len(extension_pool.get_title_extensions()), 4)
# testing extension content
old_page_extensions = [page_extension, subpage_extension]
old_title_extension = [title_extension, subtitle_extension]
for index, node in enumerate([copied_page.node] + list(copied_page.node.get_descendants())):
new_page = node.page
copied_page_extension = extension_pool.get_page_extensions(new_page)[0]
copied_title_extension = extension_pool.get_title_extensions(new_page.title_set.get(language='en'))[0]
self.assertEqual(copied_page_extension.extension_parent_field,
old_page_extensions[index].extension_parent_field)
self.assertEqual(copied_page_extension.multitable_extra,
old_page_extensions[index].multitable_extra)
self.assertEqual(copied_title_extension.extension_title_parent_field,
old_title_extension[index].extension_title_parent_field)
self.assertEqual(copied_title_extension.multitable_extra_title,
old_title_extension[index].multitable_extra_title)
# check that objects are actually different
self.assertNotEqual(extension_pool.get_page_extensions(new_page)[0].pk,
old_page_extensions[index].pk)
self.assertNotEqual(extension_pool.get_title_extensions(new_page.title_set.get(language='en'))[0].pk,
old_title_extension[index].pk)
# Test deleting original page for #3987
page.delete()
# asserting original extensions are gone, but copied ones should still exist
self.assertEqual(len(extension_pool.get_page_extensions()), 2)
self.assertEqual(len(extension_pool.get_title_extensions()), 2)
def test_publish_page_extension(self):
page = create_page('Test Page Extension', "nav_playground.html", "en")
page_extension = MyPageExtension(extended_object=page, extra='page extension 1')
page_extension.save()
page.mypageextension = page_extension
# publish first time
page.publish('en')
self.assertEqual(page_extension.extra, page.publisher_public.mypageextension.extra)
self.assertEqual(page.get_publisher_state('en'), 0)
# change and publish again
page = Page.objects.get(pk=page.pk)
page_extension = page.mypageextension
page_extension.extra = 'page extension 1 - changed'
page_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
page_extension.delete()
self.assertFalse(MyPageExtension.objects.filter(pk=page_extension.pk).exists())
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
def test_publish_multitable_page_extension(self):
page = create_page('Test Multi-Table Page Extension', "nav_playground.html", "en")
page_extension = MultiTablePageExtension(extended_object=page,
extension_parent_field='page extension 1',
multitable_extra='multi-table page extension 1')
page_extension.save()
page.multitablepageextension = page_extension
# publish first time
page.publish('en')
self.assertEqual(page_extension.extension_parent_field, page.publisher_public.multitablepageextension.extension_parent_field)
self.assertEqual(page_extension.multitable_extra, page.publisher_public.multitablepageextension.multitable_extra)
self.assertEqual(page.get_publisher_state('en'), 0)
# change and publish again
page = Page.objects.get(pk=page.pk)
page_extension = page.multitablepageextension
page_extension.extension_parent_field = 'page extension 1 - changed'
page_extension.multitable_extra = 'multi-table page extension 1 - changed'
page_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
page_extension.delete()
self.assertFalse(MultiTablePageExtension.objects.filter(pk=page_extension.pk).exists())
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
def test_publish_title_extension(self):
page = create_page('Test Title Extension', "nav_playground.html", "en")
title = page.get_title_obj()
title_extension = MyTitleExtension(extended_object=title, extra_title='title extension 1')
title_extension.save()
page.mytitleextension = title_extension
# publish first time
page.publish('en')
self.assertEqual(page.get_publisher_state('en'), 0)
self.assertEqual(title_extension.extra_title, page.publisher_public.get_title_obj().mytitleextension.extra_title)
# change and publish again
page = Page.objects.get(pk=page.pk)
title = page.get_title_obj()
title_extension = title.mytitleextension
title_extension.extra_title = 'title extension 1 - changed'
title_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
title_extension.delete()
self.assertFalse(MyTitleExtension.objects.filter(pk=title_extension.pk).exists())
    def test_publish_multitable_title_extension(self):
page = create_page('Test Title Extension', "nav_playground.html", "en")
title = page.get_title_obj()
title_extension = MultiTableTitleExtension(extended_object=title,
extension_title_parent_field='title extension 1',
multitable_extra_title='multi table title extension 1')
title_extension.save()
page.multitabletitleextension = title_extension
# publish first time
page.publish('en')
self.assertEqual(page.get_publisher_state('en'), 0)
self.assertEqual(title_extension.extension_title_parent_field, page.publisher_public.get_title_obj().multitabletitleextension.extension_title_parent_field)
self.assertEqual(title_extension.multitable_extra_title, page.publisher_public.get_title_obj().multitabletitleextension.multitable_extra_title)
# change and publish again
page = Page.objects.get(pk=page.pk)
title = page.get_title_obj()
title_extension = title.multitabletitleextension
title_extension.extension_title_parent_field = 'title extension 1 - changed'
title_extension.multitable_extra_title = 'multitable title extension 1 - changed'
title_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
title_extension.delete()
self.assertFalse(MultiTableTitleExtension.objects.filter(pk=title_extension.pk).exists())
class ExtensionAdminTestCase(AdminTestsBase):
def setUp(self):
User = get_user_model()
self.admin, self.normal_guy = self._get_guys()
        if User.USERNAME_FIELD == 'email':
self.no_page_permission_user = User.objects.create_user('no_page_permission', 'test2@test.com', 'test2@test.com')
else:
self.no_page_permission_user = User.objects.create_user('no_page_permission', 'test2@test.com', 'no_page_permission')
self.no_page_permission_user.is_staff = True
self.no_page_permission_user.is_active = True
self.no_page_permission_user.save()
        for p in Permission.objects.filter(
            codename__in=[
                'change_mypageextension', 'change_mytitleextension',
                'add_mypageextension', 'add_mytitleextension',
                'delete_mypageextension', 'delete_mytitleextension',
            ]
        ):
            self.no_page_permission_user.user_permissions.add(p)
self.site = Site.objects.get(pk=1)
self.page = create_page(
'My Extension Page', 'nav_playground.html', 'en',
site=self.site, created_by=self.admin)
self.page_title = self.page.get_title_obj('en')
create_title('de', 'de title', self.page)
self.page_extension = MyPageExtension.objects.create(
extended_object=self.page,
extra="page extension text")
self.title_extension = MyTitleExtension.objects.create(
extended_object=self.page.get_title_obj(),
extra_title="title extension text")
self.page_without_extension = create_page(
'A Page', 'nav_playground.html', 'en',
site=self.site, created_by=self.admin)
self.page_title_without_extension = self.page_without_extension.get_title_obj()
def test_duplicate_extensions(self):
with self.login_user_context(self.admin):
# create page copy
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'nav_playground.html',
'site': 1, 'language': 'en', 'source': self.page.pk,
}
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(MyPageExtension.objects.all().count(), 1)
self.assertEqual(MyTitleExtension.objects.all().count(), 1)
response = self.client.post(
self.get_admin_url(Page, 'duplicate', self.page.pk),
data=page_data,
)
# Check that page and its extensions have been copied
self.assertRedirects(response, self.get_admin_url(Page, 'changelist'))
self.assertEqual(Page.objects.all().count(), 3)
self.assertEqual(MyPageExtension.objects.all().count(), 2)
self.assertEqual(MyTitleExtension.objects.all().count(), 2)
def test_page_type_extensions(self):
with self.login_user_context(self.admin):
# create page copy
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'nav_playground.html',
'site': 1, 'language': 'en', 'source': self.page.pk,
}
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(MyPageExtension.objects.all().count(), 1)
self.assertEqual(MyTitleExtension.objects.all().count(), 1)
response = self.client.post(
self.get_admin_url(PageType, 'add'),
data=page_data,
)
self.assertRedirects(response, self.get_admin_url(PageType, 'changelist'))
# Check that new page type has extensions from source page
self.assertEqual(Page.objects.all().count(), 4)
self.assertEqual(Page.objects.filter(is_page_type=True).count(), 2)
self.assertEqual(MyPageExtension.objects.all().count(), 2)
self.assertEqual(MyTitleExtension.objects.all().count(), 2)
def test_admin_page_extension(self):
with self.login_user_context(self.admin):
# add a new extension
response = self.client.get(
admin_reverse('extensionapp_mypageextension_add') + '?extended_object=%s' % self.page_without_extension.pk
)
self.assertEqual(response.status_code, 200)
# make sure there is no extension yet
self.assertFalse(MyPageExtension.objects.filter(extended_object=self.page_without_extension).exists())
post_data = {
'extra': 'my extra'
}
response = self.client.post(
admin_reverse('extensionapp_mypageextension_add') + '?extended_object=%s' % self.page_without_extension.pk,
post_data, follow=True
)
created_page_extension = MyPageExtension.objects.get(extended_object=self.page_without_extension)
# can delete extension
response = self.client.post(
admin_reverse('extensionapp_mypageextension_delete', args=(created_page_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertFalse(MyPageExtension.objects.filter(extended_object=self.page_without_extension).exists())
# accessing the add view on a page that already has an extension should redirect
response = self.client.get(
admin_reverse('extensionapp_mypageextension_add') + '?extended_object=%s' % self.page.pk
)
self.assertRedirects(response, admin_reverse('extensionapp_mypageextension_change', args=(self.page_extension.pk,)))
# saving an extension should work without the GET parameter
post_data = {
'extra': 'my extra text'
}
self.client.post(
admin_reverse('extensionapp_mypageextension_change', args=(self.page_extension.pk,)),
post_data, follow=True
)
self.assertTrue(MyPageExtension.objects.filter(extra='my extra text', pk=self.page_extension.pk).exists())
with self.login_user_context(self.no_page_permission_user):
# can't save if user does not have permissions to change the page
post_data = {
'extra': 'try to change extra text'
}
response = self.client.post(
admin_reverse('extensionapp_mypageextension_change', args=(self.page_extension.pk,)),
post_data, follow=True
)
self.assertEqual(response.status_code, 403)
# can't delete without page permission
response = self.client.post(
admin_reverse('extensionapp_mypageextension_delete', args=(self.page_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertEqual(response.status_code, 403)
self.assertTrue(MyPageExtension.objects.filter(extended_object=self.page).exists())
def test_toolbar_page_extension(self):
old_toolbars = deepcopy(toolbar_pool.toolbars)
class SampleExtension(ExtensionToolbar):
model = MyPageExtension # The PageExtension / TitleExtension you are working with
def populate(self):
current_page_menu = self._setup_extension_toolbar()
if current_page_menu:
position = 0
page_extension, url = self.get_page_extension_admin()
if url:
current_page_menu.add_modal_item('TestItem', url=url,
disabled=not self.toolbar.edit_mode_active,
position=position)
toolbar_pool.register(SampleExtension)
with self.login_user_context(self.admin):
response = self.client.get('{}?edit'.format(self.page.get_absolute_url()))
self.assertIn("TestItem", response.rendered_content)
toolbar_pool.toolbars = old_toolbars
def test_toolbar_title_extension(self):
old_toolbars = deepcopy(toolbar_pool.toolbars)
class SampleExtension(ExtensionToolbar):
model = MyTitleExtension
def populate(self):
current_page_menu = self._setup_extension_toolbar()
if current_page_menu:
position = 0
urls = self.get_title_extension_admin()
for title_extension, url in urls:
current_page_menu.add_modal_item('TestItem', url=url,
disabled=not self.toolbar.edit_mode_active,
position=position)
toolbar_pool.register(SampleExtension)
with self.login_user_context(self.admin):
response = self.client.get('{}?edit'.format(self.page.get_absolute_url()))
self.assertIn("TestItem", response.rendered_content)
toolbar_pool.toolbars = old_toolbars
def test_admin_title_extension(self):
with self.login_user_context(self.admin):
# add a new extension
response = self.client.get(
admin_reverse('extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title_without_extension.pk
)
self.assertEqual(response.status_code, 200)
# make sure there is no extension yet
self.assertFalse(MyTitleExtension.objects.filter(extended_object=self.page_title_without_extension).exists())
post_data = {
'extra_title': 'my extra title'
}
self.client.post(
admin_reverse('extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title_without_extension.pk,
post_data, follow=True
)
created_title_extension = MyTitleExtension.objects.get(extended_object=self.page_title_without_extension)
# can delete extension
self.client.post(
admin_reverse('extensionapp_mytitleextension_delete', args=(created_title_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertFalse(MyTitleExtension.objects.filter(extended_object=self.page_title_without_extension).exists())
# accessing the add view on a page that already has an extension should redirect
response = self.client.get(
admin_reverse('extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title.pk
)
self.assertRedirects(response, admin_reverse('extensionapp_mytitleextension_change', args=(self.title_extension.pk,)))
# saving an extension should work without the GET parameter
post_data = {
'extra_title': 'my extra text'
}
self.client.post(
admin_reverse('extensionapp_mytitleextension_change', args=(self.title_extension.pk,)),
post_data, follow=True
)
self.assertTrue(MyTitleExtension.objects.filter(extra_title='my extra text', pk=self.title_extension.pk).exists())
with self.login_user_context(self.no_page_permission_user):
# can't save if user does not have permissions to change the page
post_data = {
'extra_title': 'try to change extra text'
}
response = self.client.post(
admin_reverse('extensionapp_mytitleextension_change', args=(self.title_extension.pk,)),
post_data, follow=True
)
self.assertEqual(response.status_code, 403)
# can't delete without page permission
response = self.client.post(
admin_reverse('extensionapp_mytitleextension_delete', args=(self.title_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertEqual(response.status_code, 403)
self.assertTrue(MyTitleExtension.objects.filter(extended_object=self.page_title).exists())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import json
import six
from .. import base
from girder import events
from girder.constants import AccessType, SortDir
from girder.models.notification import ProgressState
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class FolderTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
users = ({
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
}, {
'email': 'regularuser@email.com',
'login': 'regularuser',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
})
self.admin, self.user = [self.model('user').createUser(**user) for user in users]
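    # The `self.request` helper from girder's test base class issues a mock
    # REST call; `resp.json` holds the decoded body. A minimal sketch:
    #   resp = self.request(path='/folder', method='GET',
    #                       params={'parentType': 'user',
    #                               'parentId': self.admin['_id']})
    #   self.assertStatusOk(resp)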
def testChildFolders(self):
# Test with some bad parameters
resp = self.request(path='/folder', method='GET', params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
resp = self.request(path='/folder', method='GET', params={
'parentType': 'invalid',
'parentId': self.admin['_id']
})
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid value for parentType: "invalid". Allowed values: folder, user, collection.')
# We should only be able to see the public folder if we are anonymous
resp = self.request(path='/folder', method='GET', params={
'parentType': 'user',
'parentId': self.admin['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
# Test GET on the result folder
resp = self.request(
path='/folder/%s' % str(resp.json[0]['_id']))
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, dict)
self.assertFalse('access' in resp.json)
# If we log in as the user, we should also be able to see the
# private folder. Also test that our sortdir param works.
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': SortDir.DESCENDING
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['name'], 'Public')
self.assertEqual(resp.json[1]['name'], 'Private')
publicFolder = resp.json[0]
privateFolder = resp.json[1]
# Change properties of a folder
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'name': 'New name ',
'description': ' A description '
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'New name')
self.assertEqual(resp.json['description'], 'A description')
# Move should fail with a bogus parent
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'parentType': 'badParent',
'parentId': privateFolder['_id']
})
self.assertStatus(resp, 400)
# Move the public folder underneath the private folder
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'parentType': 'folder',
'parentId': privateFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['parentCollection'], 'folder')
self.assertEqual(resp.json['parentId'], privateFolder['_id'])
self.assertEqual(resp.json['name'], 'New name')
# Move should fail if we don't have write permission on the
# destination parent
publicFolder = self.model('folder').load(
publicFolder['_id'], force=True)
publicFolder = self.model('folder').setUserAccess(
publicFolder, self.user, AccessType.WRITE, save=True)
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.user, params={
'parentId': self.admin['_id'],
'parentType': 'user'
})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith(
'Write access denied for user'))
def testCreateFolder(self):
self.ensureRequiredParams(
path='/folder', method='POST', required=['name', 'parentId'],
user=self.admin)
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
privateFolder = resp.json[0]
publicFolder = resp.json[1]
self.assertEqual(privateFolder['name'], 'Private')
self.assertEqual(publicFolder['name'], 'Public')
# Try to create a folder as anonymous; should fail
resp = self.request(path='/folder', method='POST', params={
'name': 'a folder',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 401)
# Try to create a folder with a bogus parent; should fail
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentType': 'badParent',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 400)
# Try to create a folder with a blank name; should fail
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' ',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 400)
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['parentId'], publicFolder['_id'])
self.assertEqual(resp.json['parentCollection'], 'folder')
self.assertTrue(resp.json['public'])
folder = self.model('folder').load(resp.json['_id'], force=True)
self.assertTrue(self.model('folder').hasAccess(
folder, self.admin, AccessType.ADMIN))
# Now fetch the children of Public, we should see it
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'folder',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['name'], 'My public subfolder')
# Try to create a folder with same name
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertValidationError(resp, 'name')
# Create a folder in the user
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': 'New User Folder',
'parentType': 'user',
'parentId': str(self.admin['_id'])
})
self.assertStatus(resp, 200)
def testReuseExisting(self):
self.ensureRequiredParams(
path='/folder', method='POST', required=['name', 'parentId'],
user=self.admin)
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
publicFolder = resp.json[1]
# Actually create subfolder under Public
newFolder = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(newFolder)
# Try to create a folder with same name, reuseExisting flag not set
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertValidationError(resp, 'name')
# Create folder with same name, reuseExisting flag set
reuseFolder = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id'],
'reuseExisting': True
})
self.assertStatusOk(reuseFolder)
self.assertEqual(newFolder.json['_id'], reuseFolder.json['_id'])
def testFolderMetadataDirect(self):
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
self.assertStatusOk(resp)
publicFolder = resp.json[1]
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id'],
'metadata': 'invalid json'
})
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Parameter metadata must be valid JSON.')
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder with meta',
'parentId': publicFolder['_id'],
'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
metadata = {
'foo': None,
'test': 3,
'bar': 'baz'
}
resp = self.request(
path='/folder/{_id}'.format(**folder), method='PUT',
user=self.admin, params={'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
folder = resp.json
self.assertNotHasKeys(folder['meta'], ['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
self.assertEqual(folder['meta']['bar'], metadata['bar'])
def testFolderMetadataCrud(self):
"""
Test CRUD of metadata on folders
"""
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
self.assertStatusOk(resp)
publicFolder = resp.json[1]
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
folder = resp.json
# Test that bad json fails
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body='badJSON', type='application/json')
self.assertStatus(resp, 400)
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
# Edit and remove metadata
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(folder['meta'], ['test'])
# Make sure metadata cannot be added if there is a period in the key
# name
metadata = {
'foo.bar': 'notallowed'
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Make sure metadata cannot be added if the key begins with a $
metadata = {
'$foobar': 'alsonotallowed'
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid key $foobar: keys must not start with the "$" character.')
# Test allowNull
metadata = {
'foo': None
}
resp = self.request(
path='/folder/%s/metadata' % folder['_id'], params={'allowNull': True},
user=self.admin, method='PUT', body=json.dumps(metadata), type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], metadata)
# Test delete metadata endpoint
resp = self.request(
path='/folder/%s/metadata' % folder['_id'], user=self.admin, method='DELETE',
body=json.dumps(['foo']), type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], {})
def testDeleteFolder(self):
cbInfo = {}
# Hook into model deletion with kwargs event to test it
def cb(event):
cbInfo['kwargs'] = event.info['kwargs']
cbInfo['doc'] = event.info['document']
with events.bound('model.folder.remove_with_kwargs', 'test', cb):
# Requesting with no path should fail
resp = self.request(path='/folder', method='DELETE',
user=self.admin)
self.assertStatus(resp, 400)
# Grab one of the user's top level folders
folders = self.model('folder').childFolders(
parent=self.admin, parentType='user', user=self.admin, limit=1,
sort=[('name', SortDir.DESCENDING)])
folderResp = six.next(folders)
# Add a subfolder and an item to that folder
subfolder = self.model('folder').createFolder(
folderResp, 'sub', parentType='folder', creator=self.admin)
item = self.model('item').createItem(
'item', creator=self.admin, folder=subfolder)
self.assertTrue('_id' in subfolder)
self.assertTrue('_id' in item)
# Delete the folder
resp = self.request(path='/folder/%s' % folderResp['_id'],
method='DELETE', user=self.admin, params={
'progress': 'true'
})
self.assertStatusOk(resp)
# Make sure the folder, its subfolder, and its item were all deleted
folder = self.model('folder').load(folderResp['_id'], force=True)
subfolder = self.model('folder').load(subfolder['_id'], force=True)
item = self.model('item').load(item['_id'])
self.assertEqual(folder, None)
self.assertEqual(subfolder, None)
self.assertEqual(item, None)
# Make sure progress record exists and that it is set to expire soon
notifs = list(self.model('notification').get(self.admin))
self.assertEqual(len(notifs), 1)
self.assertEqual(notifs[0]['type'], 'progress')
self.assertEqual(notifs[0]['data']['state'], ProgressState.SUCCESS)
self.assertEqual(notifs[0]['data']['title'],
'Deleting folder Public')
self.assertEqual(notifs[0]['data']['message'], 'Done')
self.assertEqual(notifs[0]['data']['total'], 3)
self.assertEqual(notifs[0]['data']['current'], 3)
self.assertTrue(notifs[0]['expires'] < datetime.datetime.utcnow() +
datetime.timedelta(minutes=1))
# Make sure our event handler was called with expected args
self.assertTrue('kwargs' in cbInfo)
self.assertTrue('doc' in cbInfo)
self.assertTrue('progress' in cbInfo['kwargs'])
self.assertEqual(cbInfo['doc']['_id'], folderResp['_id'])
def testCleanFolder(self):
folder = six.next(self.model('folder').childFolders(
parent=self.admin, parentType='user', user=self.admin, limit=1,
sort=[('name', SortDir.DESCENDING)]))
# Add some data under the folder
subfolder = self.model('folder').createFolder(
folder, 'sub', parentType='folder', creator=self.admin)
item = self.model('item').createItem(
'item', creator=self.admin, folder=folder)
subitem = self.model('item').createItem(
'item', creator=self.admin, folder=subfolder)
# Clean the folder contents
resp = self.request(path='/folder/%s/contents' % folder['_id'],
method='DELETE', user=self.admin, params={
'progress': 'true'
})
self.assertStatusOk(resp)
# Make sure the subfolder and items were deleted, but that the top
# folder still exists.
old, folder = folder, self.model('folder').load(folder['_id'],
force=True)
subfolder = self.model('folder').load(subfolder['_id'], force=True)
item = self.model('item').load(item['_id'])
subitem = self.model('item').load(subitem['_id'])
self.assertTrue('_id' in folder)
self.assertEqual(folder, old)
self.assertEqual(subfolder, None)
self.assertEqual(item, None)
self.assertEqual(subitem, None)
def testLazyFieldComputation(self):
"""
Demonstrate that a folder that is saved in the database without
derived fields (like lowerName or baseParentId) get those values
computed at load() time.
"""
folder = self.model('folder').createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name=' My Folder Name')
self.assertEqual(folder['lowerName'], 'my folder name')
self.assertEqual(folder['baseParentType'], 'user')
# Force the item to be saved without lowerName and baseParentType
# fields
del folder['lowerName']
del folder['baseParentType']
folder = self.model('folder').save(folder, validate=False)
folder = self.model('folder').find({'_id': folder['_id']})[0]
self.assertNotHasKeys(folder, ('lowerName', 'baseParentType'))
# Now ensure that calling load() actually populates those fields and
# saves the results persistently
self.model('folder').load(folder['_id'], force=True)
folder = self.model('folder').find({'_id': folder['_id']})[0]
self.assertHasKeys(folder, ('lowerName', 'baseParentType'))
self.assertEqual(folder['lowerName'], 'my folder name')
self.assertEqual(folder['baseParentType'], 'user')
self.assertEqual(folder['baseParentId'], self.admin['_id'])
def testParentsToRoot(self):
"""
Demonstrate that forcing parentsToRoot will cause it to skip the
filtering process.
"""
userFolder = self.model('folder').createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name=' My Folder Name')
# Filtering adds the _accessLevel key to the object
# So forcing should result in an absence of that key
parents = self.model('folder').parentsToRoot(userFolder, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = self.model('folder').parentsToRoot(userFolder)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
# The logic is a bit different for user/collection parents,
# so we need to handle the other case
subFolder = self.model('folder').createFolder(
parent=userFolder, parentType='folder', creator=self.admin,
name=' My Subfolder Name')
parents = self.model('folder').parentsToRoot(subFolder, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = self.model('folder').parentsToRoot(subFolder, user=self.admin)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
def testFolderAccessAndDetails(self):
# create a folder to work with
folder = self.model('folder').createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name='Folder')
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='GET',
user=self.admin)
self.assertStatusOk(resp)
access = resp.json
self.assertEqual(access, {
'users': [{
'login': self.admin['login'],
'level': AccessType.ADMIN,
'id': str(self.admin['_id']),
'flags': [],
'name': '%s %s' % (
self.admin['firstName'], self.admin['lastName'])}],
'groups': []
})
self.assertTrue(not folder.get('public'))
# Setting the access list with bad json should throw an error
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='PUT',
user=self.admin, params={'access': 'badJSON'})
self.assertStatus(resp, 400)
# Change the access to public
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='PUT',
user=self.admin,
params={'access': json.dumps(access), 'public': True})
self.assertStatusOk(resp)
resp = self.request(
path='/folder/%s' % folder['_id'], method='GET',
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['public'], True)
# Create an item in the folder
self.model('item').createItem(
folder=folder, creator=self.admin, name='Item')
# Create a public and private folder within the folder
self.model('folder').createFolder(
parent=folder, parentType='folder', creator=self.admin,
name='Public', public=True)
self.model('folder').createFolder(
parent=folder, parentType='folder', creator=self.admin,
name='Private', public=False)
# Test folder details as anonymous
resp = self.request(
path='/folder/%s/details' % str(folder['_id']))
self.assertStatusOk(resp)
self.assertEqual(resp.json['nItems'], 1)
self.assertEqual(resp.json['nFolders'], 1)
# Test folder details as admin
resp = self.request(
path='/folder/%s/details' % str(folder['_id']), user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['nItems'], 1)
self.assertEqual(resp.json['nFolders'], 2)
def testFolderCopy(self):
# create a folder with a subfolder, items, and metadata
mainFolder = self.model('folder').createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name='Main Folder')
subFolder = self.model('folder').createFolder(
parent=mainFolder, parentType='folder', creator=self.admin,
name='Sub Folder')
mainItem = self.model('item').createItem(
'Main Item', creator=self.admin, folder=mainFolder)
subItem = self.model('item').createItem(
'Sub Item', creator=self.admin, folder=subFolder)
metadata = {'key': 'value'}
resp = self.request(
path='/folder/%s/metadata' % mainFolder['_id'], method='PUT',
user=self.admin, body=json.dumps(metadata),
type='application/json')
self.assertStatusOk(resp)
# Add a file under the main item to test size reporting
size = 5
self.uploadFile(
name='test.txt', contents='.' * size, user=self.admin,
parent=mainItem, parentType='item')
mainFolder = self.model('folder').load(mainFolder['_id'], force=True)
self.assertEqual(mainFolder['size'], size)
# Now copy the folder alongside itself
resp = self.request(
path='/folder/%s/copy' % mainFolder['_id'], method='POST',
user=self.admin)
self.assertStatusOk(resp)
# Check our new folder information
newFolder = resp.json
self.assertEqual(newFolder['name'], 'Main Folder (1)')
self.assertEqual(newFolder['size'], size)
# Check the copied item inside the new folder
resp = self.request('/item', user=self.admin, params={
'folderId': newFolder['_id']})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['name'], 'Main Item')
self.assertEqual(resp.json[0]['size'], size)
# Check copied folder metadata
resp = self.request(
path='/folder/%s' % newFolder['_id'], method='GET',
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], metadata)
# Check for the item, subfolder, and subfolder item
resp = self.request(
path='/folder', method='GET',
params={'parentType': 'folder', 'parentId': str(newFolder['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newSub = resp.json[0]
self.assertEqual(newSub['name'], subFolder['name'])
self.assertNotEqual(str(newSub['_id']), str(subFolder['_id']))
resp = self.request(
path='/item', method='GET',
params={'folderId': str(newFolder['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newItem = resp.json[0]
self.assertEqual(newItem['name'], mainItem['name'])
self.assertNotEqual(str(newItem['_id']), str(mainItem['_id']))
resp = self.request(
path='/item', method='GET',
params={'folderId': str(newSub['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newSubItem = resp.json[0]
self.assertEqual(newSubItem['name'], subItem['name'])
self.assertNotEqual(str(newSubItem['_id']), str(subItem['_id']))
# Test copying the subFolder
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'public': 'original', 'progress': True})
self.assertStatusOk(resp)
# Check our new folder name
newSubFolder = resp.json
self.assertEqual(newSubFolder['name'], 'Sub Folder (1)')
# Test that a bogus parentType throws an error
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'parentType': 'badValue'})
self.assertStatus(resp, 400)
# Test that when we copy a folder into itself we don't recurse
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={
'progress': True,
'parentType': 'folder',
'parentId': str(subFolder['_id'])})
self.assertStatusOk(resp)
# Test copying with public set to False
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'public': 'false', 'progress': True})
self.assertStatusOk(resp)
import sys
import numpy as np
import ctypes
from numba import jit, literal_unroll, njit, typeof
from numba.core import types
from numba.core.compiler import compile_isolated
from numba.core.itanium_mangler import mangle_type
from numba.core.config import IS_WIN32
from numba.core.errors import TypingError
from numba.np.numpy_support import numpy_version
import unittest
from numba.np import numpy_support
from numba.tests.support import TestCase, skip_ppc64le_issue6465
_FS = ('e', 'f')
def get_a(ary, i):
return ary[i].a
def get_b(ary, i):
return ary[i].b
def get_c(ary, i):
return ary[i].c
def make_getitem(item):
# This also exercises constant lookup from a closure variable
def get_xx(ary, i):
return ary[i][item]
return get_xx
# Issue #1664: constant index lookup should fall back to regular getitem
def get_zero_a(ary, _unused):
return ary[0].a
getitem_a = make_getitem('a')
getitem_b = make_getitem('b')
getitem_c = make_getitem('c')
def get_a_subarray(ary, i):
return ary.a[i]
def get_b_subarray(ary, i):
return ary.b[i]
def get_c_subarray(ary, i):
return ary.c[i]
def get_a_zero(ary, _unused):
return ary.a[0]
def make_getitem_subarray(item):
# This also exercises constant lookup from a closure variable
def get_xx_subarray(ary, i):
return ary[item][i]
return get_xx_subarray
getitem_a_subarray = make_getitem_subarray('a')
getitem_b_subarray = make_getitem_subarray('b')
getitem_c_subarray = make_getitem_subarray('c')
def get_two_arrays_a(ary1, ary2, i):
return ary1[i].a + ary2[i].a
def get_two_arrays_b(ary1, ary2, i):
return ary1[i].b + ary2[i].b
def get_two_arrays_c(ary1, ary2, i):
return ary1[i].c + ary2[i].c
def get_two_arrays_distinct(ary1, ary2, i):
return ary1[i].a + ary2[i].f
def set_a(ary, i, v):
ary[i].a = v
def set_b(ary, i, v):
ary[i].b = v
def set_c(ary, i, v):
ary[i].c = v
def make_setitem(item):
def set_xx(ary, i, v):
ary[i][item] = v
return set_xx
setitem_a = make_setitem('a')
setitem_b = make_setitem('b')
setitem_c = make_setitem('c')
def set_a_subarray(ary, i, v):
ary.a[i] = v
def set_b_subarray(ary, i, v):
ary.b[i] = v
def set_c_subarray(ary, i, v):
ary.c[i] = v
def make_setitem_subarray(item):
def set_xx_subarray(ary, i, v):
ary[item][i] = v
return set_xx_subarray
setitem_a_subarray = make_setitem_subarray('a')
setitem_b_subarray = make_setitem_subarray('b')
setitem_c_subarray = make_setitem_subarray('c')
def set_record(ary, i, j):
ary[i] = ary[j]
def get_record_a(rec, val):
x = rec.a
rec.a = val
return x
def get_record_b(rec, val):
x = rec.b
rec.b = val
return x
def get_record_c(rec, val):
x = rec.c
rec.c = val
return x
def get_record_rev_a(val, rec):
x = rec.a
rec.a = val
return x
def get_record_rev_b(val, rec):
x = rec.b
rec.b = val
return x
def get_record_rev_c(val, rec):
x = rec.c
rec.c = val
return x
def get_two_records_a(rec1, rec2):
x = rec1.a + rec2.a
return x
def get_two_records_b(rec1, rec2):
x = rec1.b + rec2.b
return x
def get_two_records_c(rec1, rec2):
x = rec1.c + rec2.c
return x
def get_two_records_distinct(rec1, rec2):
x = rec1.a + rec2.f
return x
def record_return(ary, i):
return ary[i]
def record_write_array(ary):
ary.g = 2
ary.h[0] = 3.0
ary.h[1] = 4.0
def record_write_2d_array(ary):
ary.i = 3
ary.j[0, 0] = 5.0
ary.j[0, 1] = 6.0
ary.j[1, 0] = 7.0
ary.j[1, 1] = 8.0
ary.j[2, 0] = 9.0
ary.j[2, 1] = 10.0
def record_read_array0(ary):
return ary.h[0]
def record_read_array1(ary):
return ary.h[1]
def record_read_2d_array00(ary):
    return ary.j[0, 0]
def record_read_2d_array10(ary):
    return ary.j[1, 0]
def record_read_2d_array01(ary):
    return ary.j[0, 1]
def record_read_first_arr(ary):
return ary.k[2, 2]
def record_read_second_arr(ary):
return ary.l[2, 2]
def get_charseq(ary, i):
return ary[i].n
def set_charseq(ary, i, cs):
ary[i].n = cs
def get_charseq_tuple(ary, i):
return ary[i].m, ary[i].n
def get_field1(rec):
fs = ('e', 'f')
f = fs[1]
return rec[f]
def get_field2(rec):
fs = ('e', 'f')
out = 0
for f in literal_unroll(fs):
out += rec[f]
return out
def get_field3(rec):
f = _FS[1]
return rec[f]
def get_field4(rec):
out = 0
for f in literal_unroll(_FS):
out += rec[f]
return out
def set_field1(rec):
fs = ('e', 'f')
f = fs[1]
rec[f] = 10
return rec
def set_field2(rec):
fs = ('e', 'f')
for f in literal_unroll(fs):
rec[f] = 10
return rec
def set_field3(rec):
f = _FS[1]
rec[f] = 10
return rec
def set_field4(rec):
for f in literal_unroll(_FS):
rec[f] = 10
return rec
recordtype = np.dtype([('a', np.float64),
('b', np.int16),
('c', np.complex64),
('d', (np.str_, 5))])
recordtype2 = np.dtype([('e', np.int32),
('f', np.float64)], align=True)
recordtype3 = np.dtype([('first', np.float32),
('second', np.float64)])
recordwitharray = np.dtype([('g', np.int32),
('h', np.float32, 2)])
recordwith2darray = np.dtype([('i', np.int32),
('j', np.float32, (3, 2))])
recordwith2arrays = np.dtype([('k', np.int32, (10, 20)),
('l', np.int32, (6, 12))])
recordwithcharseq = np.dtype([('m', np.int32),
('n', 'S5')])
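# A quick, hypothetical illustration of what the tests below exercise:
# jit-compiling one of the plain accessors over `recordtype`.
#   arr = np.recarray(3, dtype=recordtype)
#   arr[0].a = 1.5
#   njit(get_a)(arr, 0)   # -> 1.5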
class TestRecordDtypeMakeCStruct(unittest.TestCase):
def test_two_scalars(self):
class Ref(ctypes.Structure):
_fields_ = [
('apple', ctypes.c_int32),
('orange', ctypes.c_float),
]
ty = types.Record.make_c_struct([
('apple', types.int32),
('orange', types.float32),
])
# Correct offsets
self.assertEqual(len(ty), 2)
self.assertEqual(ty.offset('apple'), Ref.apple.offset)
self.assertEqual(ty.offset('orange'), Ref.orange.offset)
# Correct size
self.assertEqual(ty.size, ctypes.sizeof(Ref))
# Is aligned
dtype = ty.dtype
self.assertTrue(dtype.isalignedstruct)
def test_three_scalars(self):
class Ref(ctypes.Structure):
_fields_ = [
('apple', ctypes.c_int32),
('mango', ctypes.c_int8),
('orange', ctypes.c_float),
]
ty = types.Record.make_c_struct([
('apple', types.int32),
('mango', types.int8),
('orange', types.float32),
])
# Correct offsets
self.assertEqual(len(ty), 3)
self.assertEqual(ty.offset('apple'), Ref.apple.offset)
self.assertEqual(ty.offset('mango'), Ref.mango.offset)
self.assertEqual(ty.offset('orange'), Ref.orange.offset)
# Correct size
self.assertEqual(ty.size, ctypes.sizeof(Ref))
# Is aligned
dtype = ty.dtype
self.assertTrue(dtype.isalignedstruct)
def test_complex_struct(self):
class Complex(ctypes.Structure):
_fields_ = [
('real', ctypes.c_double),
('imag', ctypes.c_double),
]
class Ref(ctypes.Structure):
_fields_ = [
('apple', ctypes.c_int32),
('mango', Complex),
]
ty = types.Record.make_c_struct([
('apple', types.intc),
('mango', types.complex128),
])
# Correct offsets
self.assertEqual(len(ty), 2)
self.assertEqual(ty.offset('apple'), Ref.apple.offset)
self.assertEqual(ty.offset('mango'), Ref.mango.offset)
# Correct size
self.assertEqual(ty.size, ctypes.sizeof(Ref))
# Is aligned?
# NumPy versions < 1.16 misalign complex128 to 16 bytes
# (though it seems to align correctly on Windows?!).
if numpy_version >= (1, 16) or IS_WIN32:
dtype = ty.dtype
self.assertTrue(dtype.isalignedstruct)
else:
with self.assertRaises(ValueError) as raises:
dtype = ty.dtype
# get numpy alignment
npalign = np.dtype(np.complex128).alignment
# llvm should align to alignment of double.
llalign = np.dtype(np.double).alignment
self.assertIn(
("NumPy is using a different alignment ({}) "
"than Numba/LLVM ({}) for complex128. "
"This is likely a NumPy bug.").format(npalign, llalign),
str(raises.exception),
)
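# The tests below run each accessor both as pure Python on NumPy recarrays
# and as compiled code, and compare the results.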
class TestRecordDtype(unittest.TestCase):
def _createSampleArrays(self):
'''
Set up the data structures to be used with the Numpy and Numba
versions of functions.
In this case, both accept recarrays.
'''
self.refsample1d = np.recarray(3, dtype=recordtype)
self.refsample1d2 = np.recarray(3, dtype=recordtype2)
self.refsample1d3 = np.recarray(3, dtype=recordtype)
self.nbsample1d = np.recarray(3, dtype=recordtype)
self.nbsample1d2 = np.recarray(3, dtype=recordtype2)
self.nbsample1d3 = np.recarray(3, dtype=recordtype)
def setUp(self):
self._createSampleArrays()
for ary in (self.refsample1d, self.nbsample1d):
for i in range(ary.size):
x = i + 1
ary[i]['a'] = x / 2
ary[i]['b'] = x
ary[i]['c'] = x * 1j
ary[i]['d'] = "%d" % x
for ary2 in (self.refsample1d2, self.nbsample1d2):
for i in range(ary2.size):
x = i + 5
ary2[i]['e'] = x
ary2[i]['f'] = x / 2
for ary3 in (self.refsample1d3, self.nbsample1d3):
for i in range(ary3.size):
x = i + 10
ary3[i]['a'] = x / 2
ary3[i]['b'] = x
ary3[i]['c'] = x * 1j
ary3[i]['d'] = "%d" % x
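# Compilation hook used by every test in this class; subclasses override it
# to stress the Dispatcher (see TestRecordDtypeWithDispatcher below) instead
# of compile_isolated.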
def get_cfunc(self, pyfunc, argspec):
cres = compile_isolated(pyfunc, argspec)
return cres.entry_point
def test_from_dtype(self):
rec = numpy_support.from_dtype(recordtype)
self.assertEqual(rec.typeof('a'), types.float64)
self.assertEqual(rec.typeof('b'), types.int16)
self.assertEqual(rec.typeof('c'), types.complex64)
self.assertEqual(rec.typeof('d'), types.UnicodeCharSeq(5))
self.assertEqual(rec.offset('a'), recordtype.fields['a'][1])
self.assertEqual(rec.offset('b'), recordtype.fields['b'][1])
self.assertEqual(rec.offset('c'), recordtype.fields['c'][1])
self.assertEqual(rec.offset('d'), recordtype.fields['d'][1])
self.assertEqual(recordtype.itemsize, rec.size)
def _test_get_equal(self, pyfunc):
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp))
for i in range(self.refsample1d.size):
self.assertEqual(pyfunc(self.refsample1d, i),
cfunc(self.nbsample1d, i))
def test_get_a(self):
self._test_get_equal(get_a)
self._test_get_equal(get_a_subarray)
self._test_get_equal(getitem_a)
self._test_get_equal(getitem_a_subarray)
self._test_get_equal(get_a_zero)
self._test_get_equal(get_zero_a)
def test_get_b(self):
self._test_get_equal(get_b)
self._test_get_equal(get_b_subarray)
self._test_get_equal(getitem_b)
self._test_get_equal(getitem_b_subarray)
def test_get_c(self):
self._test_get_equal(get_c)
self._test_get_equal(get_c_subarray)
self._test_get_equal(getitem_c)
self._test_get_equal(getitem_c_subarray)
def _test_get_two_equal(self, pyfunc):
'''
Test with two arrays of the same type
'''
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], rec[:], types.intp))
for i in range(self.refsample1d.size):
self.assertEqual(pyfunc(self.refsample1d, self.refsample1d3, i),
cfunc(self.nbsample1d, self.nbsample1d3, i))
def test_two_distinct_arrays(self):
'''
Test with two arrays of distinct record types
'''
pyfunc = get_two_arrays_distinct
rec1 = numpy_support.from_dtype(recordtype)
rec2 = numpy_support.from_dtype(recordtype2)
cfunc = self.get_cfunc(pyfunc, (rec1[:], rec2[:], types.intp))
for i in range(self.refsample1d.size):
pres = pyfunc(self.refsample1d, self.refsample1d2, i)
cres = cfunc(self.nbsample1d, self.nbsample1d2, i)
self.assertEqual(pres, cres)
def test_get_two_a(self):
self._test_get_two_equal(get_two_arrays_a)
def test_get_two_b(self):
self._test_get_two_equal(get_two_arrays_b)
def test_get_two_c(self):
self._test_get_two_equal(get_two_arrays_c)
def _test_set_equal(self, pyfunc, value, valuetype):
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, valuetype))
for i in range(self.refsample1d.size):
expect = self.refsample1d.copy()
pyfunc(expect, i, value)
got = self.nbsample1d.copy()
cfunc(got, i, value)
# Match the entire array to ensure no memory corruption
np.testing.assert_equal(expect, got)
def test_set_a(self):
def check(pyfunc):
self._test_set_equal(pyfunc, 3.1415, types.float64)
# Test again to check if coercion works
self._test_set_equal(pyfunc, 3., types.float32)
check(set_a)
check(set_a_subarray)
check(setitem_a)
check(setitem_a_subarray)
def test_set_b(self):
def check(pyfunc):
self._test_set_equal(pyfunc, 123, types.int32)
# Test again to check if coercion works
self._test_set_equal(pyfunc, 123, types.float64)
check(set_b)
check(set_b_subarray)
check(setitem_b)
check(setitem_b_subarray)
def test_set_c(self):
def check(pyfunc):
self._test_set_equal(pyfunc, 43j, types.complex64)
# Test again to check if coercion works
self._test_set_equal(pyfunc, 43j, types.complex128)
check(set_c)
check(set_c_subarray)
check(setitem_c)
check(setitem_c_subarray)
def test_set_record(self):
pyfunc = set_record
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, types.intp))
test_indices = [(0, 1), (1, 2), (0, 2)]
for i, j in test_indices:
expect = self.refsample1d.copy()
pyfunc(expect, i, j)
got = self.nbsample1d.copy()
cfunc(got, i, j)
# Match the entire array to ensure no memory corruption
self.assertEqual(expect[i], expect[j])
self.assertEqual(got[i], got[j])
np.testing.assert_equal(expect, got)
def _test_record_args(self, revargs):
"""
Testing scalar record value as argument
"""
npval = self.refsample1d.copy()[0]
nbval = self.nbsample1d.copy()[0]
attrs = 'abc'
valtypes = types.float64, types.int16, types.complex64
values = 1.23, 12345, 123 + 456j
old_refcnt = sys.getrefcount(nbval)
for attr, valtyp, val in zip(attrs, valtypes, values):
expected = getattr(npval, attr)
nbrecord = numpy_support.from_dtype(recordtype)
# Test with a record as either the first argument or the second
# argument (issue #870)
if revargs:
prefix = 'get_record_rev_'
argtypes = (valtyp, nbrecord)
args = (val, nbval)
else:
prefix = 'get_record_'
argtypes = (nbrecord, valtyp)
args = (nbval, val)
pyfunc = globals()[prefix + attr]
cfunc = self.get_cfunc(pyfunc, argtypes)
got = cfunc(*args)
try:
self.assertEqual(expected, got)
except AssertionError:
# On ARM, an LLVM misoptimization can produce buggy code,
# see https://llvm.org/bugs/show_bug.cgi?id=24669
import llvmlite.binding as ll
if attr != 'c':
raise
if ll.get_default_triple() != 'armv7l-unknown-linux-gnueabihf':
raise
self.assertEqual(val, got)
else:
self.assertEqual(nbval[attr], val)
del got, expected, args
# Check for potential leaks (issue #441)
self.assertEqual(sys.getrefcount(nbval), old_refcnt)
def test_record_args(self):
self._test_record_args(False)
def test_record_args_reverse(self):
self._test_record_args(True)
def test_two_records(self):
'''
Testing the use of two scalar records of the same type
'''
npval1 = self.refsample1d.copy()[0]
npval2 = self.refsample1d.copy()[1]
nbval1 = self.nbsample1d.copy()[0]
nbval2 = self.nbsample1d.copy()[1]
attrs = 'abc'
valtypes = types.float64, types.int32, types.complex64
for attr, valtyp in zip(attrs, valtypes):
expected = getattr(npval1, attr) + getattr(npval2, attr)
nbrecord = numpy_support.from_dtype(recordtype)
pyfunc = globals()['get_two_records_' + attr]
cfunc = self.get_cfunc(pyfunc, (nbrecord, nbrecord))
got = cfunc(nbval1, nbval2)
self.assertEqual(expected, got)
def test_two_distinct_records(self):
'''
Testing the use of two scalar records of differing type
'''
nbval1 = self.nbsample1d.copy()[0]
nbval2 = self.refsample1d2.copy()[0]
expected = nbval1['a'] + nbval2['f']
nbrecord1 = numpy_support.from_dtype(recordtype)
nbrecord2 = numpy_support.from_dtype(recordtype2)
cfunc = self.get_cfunc(get_two_records_distinct, (nbrecord1, nbrecord2))
got = cfunc(nbval1, nbval2)
self.assertEqual(expected, got)
def test_record_write_array(self):
'''
Testing writing to a 1D array within a structured type
'''
nbval = np.recarray(1, dtype=recordwitharray)
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_write_array, (nbrecord,))
cfunc(nbval[0])
expected = np.recarray(1, dtype=recordwitharray)
expected[0].g = 2
expected[0].h[0] = 3.0
expected[0].h[1] = 4.0
np.testing.assert_equal(expected, nbval)
def test_record_write_2d_array(self):
'''
Test writing to a 2D array within a structured type
'''
nbval = np.recarray(1, dtype=recordwith2darray)
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_write_2d_array, (nbrecord,))
cfunc(nbval[0])
expected = np.recarray(1, dtype=recordwith2darray)
expected[0].i = 3
expected[0].j[:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
np.testing.assert_equal(expected, nbval)
def test_record_read_array(self):
'''
Test reading from a 1D array within a structured type
'''
nbval = np.recarray(1, dtype=recordwitharray)
nbval[0].h[0] = 15.0
nbval[0].h[1] = 25.0
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_read_array0, (nbrecord,))
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].h[0])
cfunc = self.get_cfunc(record_read_array1, (nbrecord,))
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].h[1])
def test_record_read_2d_array(self):
'''
Test reading from a 2D array within a structured type
'''
nbval = np.recarray(1, dtype=recordwith2darray)
nbval[0].j = np.asarray([1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
np.float32).reshape(3, 2)
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_read_2d_array00, (nbrecord,))
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[0, 0])
cfunc = self.get_cfunc(record_read_2d_array01, (nbrecord,))
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[0, 1])
cfunc = self.get_cfunc(record_read_2d_array10, (nbrecord,))
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[1, 0])
def test_record_return(self):
"""
Testing scalar record value as return value.
We can only return a copy of the record.
"""
pyfunc = record_return
recty = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (recty[:], types.intp))
attrs = 'abc'
indices = [0, 1, 2]
for index, attr in zip(indices, attrs):
nbary = self.nbsample1d.copy()
old_refcnt = sys.getrefcount(nbary)
res = cfunc(nbary, index)
self.assertEqual(nbary[index], res)
# Prove that this is a by-value copy
setattr(res, attr, 0)
self.assertNotEqual(nbary[index], res)
del res
# Check for potential leaks
self.assertEqual(sys.getrefcount(nbary), old_refcnt)
def test_record_arg_transform(self):
"""
Testing that transforming the name of a record type argument to a
function does not result in the fields of the record being used to
uniquely identify them, and that no other condition results in the
transformed name being excessively long.
"""
rec = numpy_support.from_dtype(recordtype3)
transformed = mangle_type(rec)
self.assertNotIn('first', transformed)
self.assertNotIn('second', transformed)
# len(transformed) is generally 10, but could be longer if a large
# number of typecodes are in use. Checking <20 should provide enough
# tolerance.
self.assertLess(len(transformed), 20)
struct_arr = types.Array(rec, 1, 'C')
transformed = mangle_type(struct_arr)
self.assertIn('Array', transformed)
self.assertNotIn('first', transformed)
self.assertNotIn('second', transformed)
# Length is usually 45; 5 chars of tolerance as above keeps it < 50.
self.assertLess(len(transformed), 50)
def test_record_two_arrays(self):
"""
Tests that comparison of NestedArrays by key is working correctly. If
the two NestedArrays in recordwith2arrays compare equal (same length
and ndim but different shape) incorrect code will be generated for one
of the functions.
"""
nbrecord = numpy_support.from_dtype(recordwith2arrays)
rec = np.recarray(1, dtype=recordwith2arrays)[0]
rec.k[:] = np.arange(200).reshape(10,20)
rec.l[:] = np.arange(72).reshape(6,12)
pyfunc = record_read_first_arr
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
self.assertEqual(cfunc(rec), pyfunc(rec))
pyfunc = record_read_second_arr
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
self.assertEqual(cfunc(rec), pyfunc(rec))
def test_structure_dtype_with_titles(self):
# the following is the definition of int4 vector type from pyopencl
vecint4 = np.dtype([(('x', 's0'), 'i4'), (('y', 's1'), 'i4'),
(('z', 's2'), 'i4'), (('w', 's3'), 'i4')])
nbtype = numpy_support.from_dtype(vecint4)
self.assertEqual(len(nbtype.fields), len(vecint4.fields))
arr = np.zeros(10, dtype=vecint4)
def pyfunc(a):
for i in range(a.size):
j = i + 1
a[i]['s0'] = j * 2
a[i]['x'] += -1
a[i]['s1'] = j * 3
a[i]['y'] += -2
a[i]['s2'] = j * 4
a[i]['z'] += -3
a[i]['s3'] = j * 5
a[i]['w'] += -4
return a
expect = pyfunc(arr.copy())
cfunc = self.get_cfunc(pyfunc, (nbtype[:],))
got = cfunc(arr.copy())
np.testing.assert_equal(expect, got)
def test_record_dtype_with_titles_roundtrip(self):
recdtype = np.dtype([(("title a", 'a'), np.float_), ('b', np.float_)])
nbtype = numpy_support.from_dtype(recdtype)
self.assertTrue(nbtype.is_title('title a'))
self.assertFalse(nbtype.is_title('a'))
self.assertFalse(nbtype.is_title('b'))
got = numpy_support.as_dtype(nbtype)
self.assertEqual(got, recdtype)
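# Shared helper for the Dispatcher-based test subclasses below: compiling
# via jit(signature, nopython=True) routes calls through the Dispatcher's
# type-dispatch machinery instead of a pre-compiled entry point.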
def _get_cfunc_nopython(pyfunc, argspec):
return jit(argspec, nopython=True)(pyfunc)
class TestRecordDtypeWithDispatcher(TestRecordDtype):
'''
Same as TestRecordDtype, but stressing the Dispatcher's type dispatch
mechanism (issue #384). Note that this does not stress caching of ndarray
typecodes as the path that uses the cache is not taken with recarrays.
'''
def get_cfunc(self, pyfunc, argspec):
return _get_cfunc_nopython(pyfunc, argspec)
class TestRecordDtypeWithStructArrays(TestRecordDtype):
'''
Same as TestRecordDtype, but using structured arrays instead of recarrays.
'''
def _createSampleArrays(self):
'''
Two different versions of the data structures are required because Numba
supports attribute access on structured arrays, whereas Numpy does not.
However, the semantics of recarrays and structured arrays are equivalent
for these tests so Numpy with recarrays can be used for comparison with
Numba using structured arrays.
'''
self.refsample1d = np.recarray(3, dtype=recordtype)
self.refsample1d2 = np.recarray(3, dtype=recordtype2)
self.refsample1d3 = np.recarray(3, dtype=recordtype)
self.nbsample1d = np.zeros(3, dtype=recordtype)
self.nbsample1d2 = np.zeros(3, dtype=recordtype2)
self.nbsample1d3 = np.zeros(3, dtype=recordtype)
class TestRecordDtypeWithStructArraysAndDispatcher(TestRecordDtypeWithStructArrays): # noqa: E501
'''
Same as TestRecordDtypeWithStructArrays, stressing the Dispatcher's type
dispatch mechanism (issue #384) and caching of ndarray typecodes for void
types (which occur in structured arrays).
'''
def get_cfunc(self, pyfunc, argspec):
return _get_cfunc_nopython(pyfunc, argspec)
@skip_ppc64le_issue6465
class TestRecordDtypeWithCharSeq(unittest.TestCase):
def _createSampleArray(self):
self.refsample1d = np.recarray(3, dtype=recordwithcharseq)
self.nbsample1d = np.zeros(3, dtype=recordwithcharseq)
def _fillData(self, arr):
for i in range(arr.size):
arr[i]['m'] = i
arr[0]['n'] = 'abcde' # no null-byte
arr[1]['n'] = 'xyz' # null-byte
arr[2]['n'] = 'u\x00v\x00\x00' # null-byte at the middle and at the end
def setUp(self):
self._createSampleArray()
self._fillData(self.refsample1d)
self._fillData(self.nbsample1d)
def get_cfunc(self, pyfunc):
rectype = numpy_support.from_dtype(recordwithcharseq)
cres = compile_isolated(pyfunc, (rectype[:], types.intp))
return cres.entry_point
def test_return_charseq(self):
pyfunc = get_charseq
cfunc = self.get_cfunc(pyfunc)
for i in range(self.refsample1d.size):
expected = pyfunc(self.refsample1d, i)
got = cfunc(self.nbsample1d, i)
self.assertEqual(expected, got)
def test_npm_argument_charseq(self):
"""
Test CharSeq as NPM argument
"""
def pyfunc(arr, i):
return arr[i].n
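# The charseq is passed through a second jitted function so that it crosses
# a nopython-mode call boundary, which is what this test exercises
# (CharSeq as an NPM argument).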
identity = jit(lambda x: x) # an identity function
@jit(nopython=True)
def cfunc(arr, i):
return identity(arr[i].n)
for i in range(self.refsample1d.size):
expected = pyfunc(self.refsample1d, i)
got = cfunc(self.nbsample1d, i)
self.assertEqual(expected, got)
def test_py_argument_charseq(self):
"""
Test CharSeq as python wrapper argument
"""
pyfunc = set_charseq
# compile
rectype = numpy_support.from_dtype(recordwithcharseq)
cres = compile_isolated(pyfunc, (rectype[:], types.intp,
rectype.typeof('n')))
cfunc = cres.entry_point
for i in range(self.refsample1d.size):
chars = "{0}".format(hex(i + 10))
pyfunc(self.refsample1d, i, chars)
cfunc(self.nbsample1d, i, chars)
np.testing.assert_equal(self.refsample1d, self.nbsample1d)
def test_py_argument_char_seq_near_overflow(self):
"""
Test strings that are as long as the charseq capacity
"""
pyfunc = set_charseq
# compile
rectype = numpy_support.from_dtype(recordwithcharseq)
cres = compile_isolated(pyfunc, (rectype[:], types.intp,
rectype.typeof('n')))
cfunc = cres.entry_point
cs_near_overflow = "abcde"
self.assertEqual(len(cs_near_overflow),
recordwithcharseq['n'].itemsize)
cfunc(self.nbsample1d, 0, cs_near_overflow)
self.assertEqual(self.nbsample1d[0]['n'].decode('ascii'),
cs_near_overflow)
# Check that we didn't overwrite
np.testing.assert_equal(self.refsample1d[1:], self.nbsample1d[1:])
def test_py_argument_char_seq_truncate(self):
"""
NumPy silently truncates strings to fit inside the charseq
"""
pyfunc = set_charseq
# compile
rectype = numpy_support.from_dtype(recordwithcharseq)
cres = compile_isolated(pyfunc, (rectype[:], types.intp,
rectype.typeof('n')))
cfunc = cres.entry_point
cs_overflowed = "abcdef"
pyfunc(self.refsample1d, 1, cs_overflowed)
cfunc(self.nbsample1d, 1, cs_overflowed)
np.testing.assert_equal(self.refsample1d, self.nbsample1d)
self.assertEqual(self.refsample1d[1].n,
cs_overflowed[:-1].encode("ascii"))
def test_return_charseq_tuple(self):
pyfunc = get_charseq_tuple
cfunc = self.get_cfunc(pyfunc)
for i in range(self.refsample1d.size):
expected = pyfunc(self.refsample1d, i)
got = cfunc(self.nbsample1d, i)
self.assertEqual(expected, got)
class TestRecordArrayGetItem(unittest.TestCase):
"""
Test getitem when index is Literal[str]
"""
def test_literal_variable(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = get_field1
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0]), jitfunc(arr[0]))
def test_literal_unroll(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = get_field2
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0]), jitfunc(arr[0]))
def test_literal_variable_global_tuple(self):
"""
This tests the getitem of record array when the indexes come from a
global tuple. It tests getitem behaviour but also tests that a global
tuple is being typed as a tuple of constants.
"""
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = get_field3
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0]), jitfunc(arr[0]))
def test_literal_unroll_global_tuple(self):
"""
This tests the getitem of record array when the indexes come from a
global tuple and are being unrolled.
It tests getitem behaviour but also tests that literal_unroll accepts
a global tuple as argument
"""
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = get_field4
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0]), jitfunc(arr[0]))
def test_literal_unroll_free_var_tuple(self):
"""
This tests the getitem of record array when the indexes come from a
free variable tuple (not local, not global) and are being unrolled.
It tests getitem behaviour but also tests that literal_unroll accepts
a free variable tuple as argument
"""
fs = ('e', 'f')
arr = np.array([1, 2], dtype=recordtype2)
def get_field(rec):
out = 0
for f in literal_unroll(fs):
out += rec[f]
return out
jitfunc = njit(get_field)
self.assertEqual(get_field(arr[0]), jitfunc(arr[0]))
def test_error_w_invalid_field(self):
arr = np.array([1, 2], dtype=recordtype3)
jitfunc = njit(get_field1)
with self.assertRaises(TypingError) as raises:
jitfunc(arr[0])
self.assertIn("Field 'f' was not found in record with fields "
"('first', 'second')", str(raises.exception))
def test_literal_unroll_dynamic_to_static_getitem_transform(self):
# See issue #6634
keys = ('a', 'b', 'c')
n = 5
def pyfunc(rec):
x = np.zeros((n,))
for o in literal_unroll(keys):
x += rec[o]
return x
dt = np.float64
ldd = [np.arange(dt(n)) for x in keys]
ldk = [(x, np.float64,) for x in keys]
rec = np.rec.fromarrays(ldd, dtype=ldk)
expected = pyfunc(rec)
got = njit(pyfunc)(rec)
np.testing.assert_allclose(expected, got)
class TestRecordArraySetItem(unittest.TestCase):
"""
Test setitem when index is Literal[str]
"""
def test_literal_variable(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field1
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll(self):
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field2
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_variable_global_tuple(self):
"""
This tests the setitem of record array when the indexes come from a
global tuple. It tests getitem behaviour but also tests that a global
tuple is being typed as a tuple of constants.
"""
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field3
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll_global_tuple(self):
"""
This tests the setitem of record array when the indexes come from a
global tuple and are being unrolled.
It tests setitem behaviour but also tests that literal_unroll accepts
a global tuple as argument
"""
arr = np.array([1, 2], dtype=recordtype2)
pyfunc = set_field4
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(arr[0].copy()), jitfunc(arr[0].copy()))
def test_literal_unroll_free_var_tuple(self):
"""
This tests the setitem of record array when the indexes come from a
free variable tuple (not local, not global) and are being unrolled.
It tests setitem behaviour but also tests that literal_unroll accepts
a free variable tuple as argument
"""
arr = np.array([1, 2], dtype=recordtype2)
fs = arr.dtype.names
def set_field(rec):
for f in literal_unroll(fs):
rec[f] = 10
return rec
jitfunc = njit(set_field)
self.assertEqual(set_field(arr[0].copy()), jitfunc(arr[0].copy()))
def test_error_w_invalid_field(self):
arr = np.array([1, 2], dtype=recordtype3)
jitfunc = njit(set_field1)
with self.assertRaises(TypingError) as raises:
jitfunc(arr[0])
self.assertIn("Field 'f' was not found in record with fields "
"('first', 'second')", str(raises.exception))
class TestSubtyping(TestCase):
def setUp(self):
self.value = 2
a_dtype = np.dtype([('a', 'f8')])
ab_dtype = np.dtype([('a', 'f8'), ('b', 'f8')])
self.a_rec1 = np.array([1], dtype=a_dtype)[0]
self.a_rec2 = np.array([2], dtype=a_dtype)[0]
self.ab_rec1 = np.array([(self.value, 3)], dtype=ab_dtype)[0]
self.ab_rec2 = np.array([(self.value + 1, 3)], dtype=ab_dtype)[0]
self.func = lambda rec: rec['a']
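# ab_dtype extends a_dtype with an extra trailing field, so ab records are
# expected to be accepted wherever a records are (record subtyping).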
def test_common_field(self):
"""
Test that subtypes do not require new compilations
"""
njit_sig = njit(types.float64(typeof(self.a_rec1)))
functions = [
njit(self.func), # jitted function with open njit
njit_sig(self.func) # jitted fc with closed signature
]
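# After the first call, disable_compile() forces the subtype record to
# reuse the existing specialisation instead of triggering a recompile.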
for fc in functions:
fc(self.a_rec1)
fc.disable_compile()
y = fc(self.ab_rec1)
self.assertEqual(self.value, y)
def test_tuple_of_records(self):
@njit
def foo(rec_tup):
x = 0
for i in range(len(rec_tup)):
x += rec_tup[i]['a']
return x
foo((self.a_rec1, self.a_rec2))
foo.disable_compile()
y = foo((self.ab_rec1, self.ab_rec2))
self.assertEqual(2 * self.value + 1, y)
def test_array_field(self):
"""
Tests subtyping with array fields
"""
rec1 = np.empty(1, dtype=[('a', 'f8', (4,))])[0]
rec1['a'][0] = 1
rec2 = np.empty(1, dtype=[('a', 'f8', (4,)), ('b', 'f8')])[0]
rec2['a'][0] = self.value
@njit
def foo(rec):
return rec['a'][0]
foo(rec1)
foo.disable_compile()
y = foo(rec2)
self.assertEqual(self.value, y)
def test_no_subtyping1(self):
"""
test that conversion rules don't allow subtypes with different field
names
"""
c_dtype = np.dtype([('c', 'f8')])
c_rec1 = np.array([1], dtype=c_dtype)[0]
@njit
def foo(rec):
return rec['c']
foo(c_rec1)
foo.disable_compile()
with self.assertRaises(TypeError) as err:
foo(self.a_rec1)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_no_subtyping2(self):
"""
test that conversion rules don't allow smaller records as subtypes
"""
jit_fc = njit(self.func)
jit_fc(self.ab_rec1)
jit_fc.disable_compile()
with self.assertRaises(TypeError) as err:
jit_fc(self.a_rec1)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_no_subtyping3(self):
"""
test that conversion rules don't allow records with fields with same
name but incompatible type
"""
other_a_rec = np.array(['a'], dtype=np.dtype([('a', 'U25')]))[0]
jit_fc = njit(self.func)
jit_fc(self.a_rec1)
jit_fc.disable_compile()
with self.assertRaises(TypeError) as err:
jit_fc(other_a_rec)
self.assertIn("No matching definition for argument type(s) Record",
str(err.exception))
def test_branch_pruning(self):
"""
test subtyping behaviour in a case with a dead branch
"""
@njit
def foo(rec, flag=None):
n = 0
n += rec['a']
if flag is not None:
# Dead branch pruning will hide this branch
n += rec['b']
rec['b'] += 20
return n
self.assertEqual(foo(self.a_rec1), self.a_rec1[0])
# storing value because it will be mutated
k = self.ab_rec1[1]
self.assertEqual(foo(self.ab_rec1, flag=1), self.ab_rec1[0] + k)
self.assertEqual(self.ab_rec1[1], k + 20)
foo.disable_compile()
self.assertEqual(len(foo.nopython_signatures), 2)
self.assertEqual(foo(self.a_rec1) + 1, foo(self.ab_rec1))
self.assertEqual(foo(self.ab_rec1, flag=1), self.ab_rec1[0] + k + 20)
class TestRecordArrayExceptions(TestCase):
def test_nested_array_in_buffer_raises(self):
# see issue #6473
@njit()
def foo(x):
x["y"][0] = 1
dt = np.dtype([("y", (np.uint64, 5)),])
x = np.ones(1, dtype=dt)
with self.assertRaises(TypingError) as e:
foo(x)
ex1 = "The dtype of a Buffer type cannot itself be a Buffer type"
ex2 = "unsupported Buffer was: nestedarray(uint64, (5,))"
excstr = str(e.exception)
self.assertIn(ex1, excstr)
self.assertIn(ex2, excstr)
if __name__ == '__main__':
unittest.main()
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WMain.py
# Version : 1.01
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_AboutDialog import *
from IOST_WRun import *
from IOST_Basic import *
from IOST_LoadConfigFileDialog import *
import gtk
import gobject
import gtk.glade
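# Note: this module uses PyGTK (gtk/gobject/gtk.glade), i.e. GTK+ 2.x under
# Python 2.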
#======================================================================
class IOST_WMain(IOST_Config, IOST_AboutDialog, IOST_LoadConfigFileDialog, IOST_WRun):
def __init__(self, glade_filename, window_name, config_data, config_objs):
"""
"""
# self.IOST_WMain_Config = config_data
if os.path.isfile(glade_filename):
self.IOST_WMain_GladeFile = glade_filename
else:
self.IOST_WMain_GladeDir = os.path.dirname(sys.argv[0])
self.IOST_WMain_GladeFile = os.path.join(self.IOST_WMain_GladeDir, glade_filename)
IOST_Config.__init__(self)
self.ConfigData = config_data
self.ConfigObjs = config_objs
# Get WMain window from glade file
self.GetWMain_Obj(self.IOST_WMain_GladeFile, window_name)
self.GetStationInfo_Obj()
self.InitStationInfo_Obj()
self.GetLoadConfigFile_Obj()
self.GetI2C_Obj()
self.InitI2C_Obj()
self.GetSPI_Obj()
self.InitSPI_Obj()
self.GetUART_Obj()
self.InitUART_Obj()
self.GetUSB_Obj()
self.InitUSB_Obj()
self.GetSATA_Obj()
self.InitSATA_Obj()
self.GetPCIE_Obj()
self.InitPCIE_Obj()
self.GetETH_Obj()
self.InitETH_Obj()
self.GetAutoMail_Obj()
self.InitAutoMail_Obj()
self.IOST_WMain_Show()
#----------------------------------------------------------------------
def GetWMain_Obj(self, glade_filename, window_name):
"""
"""
self.IOST_WMain_Builder = gtk.Builder()
self.IOST_WMain_Builder.add_from_file(glade_filename)
self.IOST_WMain_Builder.connect_signals(self)
self.ConfigObjs["IOST_WMain"]["WName"] = self.IOST_WMain_Builder.get_object(window_name)
# self.ConfigObjs_all= self.IOST_WMain_Builder.get_objects()
# pprint (self.ConfigObjs_all)
#----------------------------------------------------------------------
def GetStationInfo_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["ConsoleIP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConsoleIP_EName"])
# self.ConfigObjs["IOST_WMain"]["ConsoleUser_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConsoleIP_EName"])
# self.ConfigObjs["IOST_WMain"]["ConsolePwd_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConsoleIP_EName"])
self.ConfigObjs["IOST_WMain"]["ConsolePort_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConsolePort_EName"])
self.ConfigObjs["IOST_WMain"]["SlimproIP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SlimproIP_EName"])
# self.ConfigObjs["IOST_WMain"]["SlimproUser_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SlimproUser_EName"])
# self.ConfigObjs["IOST_WMain"]["SlimproPwd_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SlimproPwd_EName"])
self.ConfigObjs["IOST_WMain"]["SlimproPort_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SlimproPort_EName"])
self.ConfigObjs["IOST_WMain"]["NPS_IP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["NPS_IP_EName"])
self.ConfigObjs["IOST_WMain"]["NPS_Port_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["NPS_Port_EName"])
self.ConfigObjs["IOST_WMain"]["ServerIP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ServerIP_EName"])
# self.ConfigObjs["IOST_WMain"]["ServerUser_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["ServerUser_EName"])
# self.ConfigObjs["IOST_WMain"]["ServerPwd_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["ServerPwd_EName"])
self.ConfigObjs["IOST_WMain"]["ThermalIP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ThermalIP_EName"])
# self.ConfigObjs["IOST_WMain"]["ThermalUser_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ThermalUser_EName"])
# self.ConfigObjs["IOST_WMain"]["ThermalPwd_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ThermalPwd_EName"])
self.ConfigObjs["IOST_WMain"]["ThermalPort_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ThermalPort_EName"])
self.ConfigObjs["IOST_WMain"]["Temperature_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["Temperature_EName"])
self.ConfigObjs["IOST_WMain"]["TimeRunHour_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["TimeRunHour_EName"])
self.ConfigObjs["IOST_WMain"]["OCD_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["OCD_Enable_CBName"])
self.ConfigObjs["IOST_WMain"]["OCD_IP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["OCD_IP_EName"])
# self.ConfigObjs["IOST_WMain"]["OCD_RootUser_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["ThermalPort_EName"])
# self.ConfigObjs["IOST_WMain"]["OCD_pwd_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["ThermalPort_EName"])
self.ConfigObjs["IOST_WMain"]["BDI_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["BDI_Enable_CBName"])
self.ConfigObjs["IOST_WMain"]["BDI_IP_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["BDI_IP_EName"])
#----------------------------------------------------------------------
def GetLoadConfigFile_Obj(self):
self.ConfigObjs["IOST_WMain"]["ConfigFilePath_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConfigFilePath_EName"])
self.ConfigObjs["IOST_WMain"]["CondifFileSelect_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["ConfigFileSelect_BName"])
#----------------------------------------------------------------------
def InitStationInfo_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["ConsoleIP_Obj"].set_text(self.ConfigData["StationInfo"]["ConsoleIP"])
self.ConfigObjs["IOST_WMain"]["ConsolePort_Obj"].set_text(self.ConfigData["StationInfo"]["ConsolePort"])
self.ConfigObjs["IOST_WMain"]["SlimproPort_Obj"].set_text(self.ConfigData["StationInfo"]["SlimproPort"])
self.ConfigObjs["IOST_WMain"]["NPS_IP_Obj"].set_text(self.ConfigData["StationInfo"]["NPS_IP"])
self.ConfigObjs["IOST_WMain"]["NPS_Port_Obj"].set_text(self.ConfigData["StationInfo"]["NPS_Port"])
self.ConfigObjs["IOST_WMain"]["ServerIP_Obj"].set_text(self.ConfigData["StationInfo"]["ServerIP"])
self.ConfigObjs["IOST_WMain"]["ThermalIP_Obj"].set_text(self.ConfigData["StationInfo"]["ThermalIP"])
self.ConfigObjs["IOST_WMain"]["ThermalPort_Obj"].set_text(self.ConfigData["StationInfo"]["ThermalPort"])
self.ConfigObjs["IOST_WMain"]["Temperature_Obj"].set_text(self.ConfigData["StationInfo"]["Temperature"])
self.ConfigObjs["IOST_WMain"]["TimeRunHour_Obj"].set_text(self.ConfigData["StationInfo"]["TimeRunHour"])
if self.ConfigData["StationInfo"]["OCD_Enable"] == "Enable":
self.ConfigObjs["IOST_WMain"]["OCD_IP_Obj"].set_text(self.ConfigData["StationInfo"]["OCD_IP"])
else:
# self.ConfigObjs["IOST_WMain"]["OCD_IP_Obj"].set_visibility(False)
self.ConfigObjs["IOST_WMain"]["OCD_Enable_Obj"].set_active(False)
self.ConfigObjs["IOST_WMain"]["OCD_IP_Obj"].set_editable(False)
if self.ConfigData["StationInfo"]["BDI_Enable"] == "Enable":
self.ConfigObjs["IOST_WMain"]["BDI_IP_Obj"].set_text(self.ConfigData["StationInfo"]["BDI_IP"])
else:
# self.ConfigObjs["IOST_WMain"]["OCD_IP_Obj"].set_visibility(False)
self.ConfigObjs["IOST_WMain"]["BDI_Enable_Obj"].set_active(False)
self.ConfigObjs["IOST_WMain"]["BDI_IP_Obj"].set_editable(False)
#----------------------------------------------------------------------
def GetI2C_Obj(self):
"""
Get all I2C objects on WMain Program
"""
# self.IOST_GetI2C0_Object = self.IOST_WMain_Builder.get_object("IOST_WMain_Config_I2C0_B")
self.ConfigObjs["IOST_WMain"]["I2C0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["I2C1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C1_Config_BName"])
self.ConfigObjs["IOST_WMain"]["I2C2_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C2_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C2_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C2_Config_BName"])
self.ConfigObjs["IOST_WMain"]["I2C3_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C3_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C3_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C3_Config_BName"])
self.ConfigObjs["IOST_WMain"]["I2C4_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C4_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C4_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C4_Config_BName"])
self.ConfigObjs["IOST_WMain"]["I2C5_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C5_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["I2C5_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["I2C5_Config_BName"])
#----------------------------------------------------------------------
def InitI2C_Obj(self):
"""
Init all I2C objects when start IOST Wmain program
"""
if self.ConfigData["I2C0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C0_Config_Obj"].set_sensitive(False)
if self.ConfigData["I2C1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C1_Config_Obj"].set_sensitive(False)
if self.ConfigData["I2C2"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C2_Config_Obj"].set_sensitive(False)
if self.ConfigData["I2C3"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C3_Config_Obj"].set_sensitive(False)
if self.ConfigData["I2C4"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C4_Config_Obj"].set_sensitive(False)
if self.ConfigData["I2C5"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["I2C5_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetSPI_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["SPI0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SPI0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["SPI1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SPI1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI1_Config_BName"])
self.ConfigObjs["IOST_WMain"]["SPI2_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI2_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SPI2_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SPI2_Config_BName"])
#----------------------------------------------------------------------
def InitSPI_Obj(self):
"""
"""
if self.ConfigData["SPI0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SPI0_Config_Obj"].set_sensitive(False)
if self.ConfigData["SPI1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SPI1_Config_Obj"].set_sensitive(False)
if self.ConfigData["SPI2"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SPI2_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetUART_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["UART0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["UART0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["UART1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["UART1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART1_Config_BName"])
self.ConfigObjs["IOST_WMain"]["UART2_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART2_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["UART2_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART2_Config_BName"])
self.ConfigObjs["IOST_WMain"]["UART3_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART3_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["UART3_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART3_Config_BName"])
self.ConfigObjs["IOST_WMain"]["UART4_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART4_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["UART4_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["UART4_Config_BName"])
#----------------------------------------------------------------------
def InitUART_Obj(self):
"""
"""
if self.ConfigData["UART0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["UART0_Config_Obj"].set_sensitive(False)
if self.ConfigData["UART1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["UART1_Config_Obj"].set_sensitive(False)
if self.ConfigData["UART2"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["UART2_Config_Obj"].set_sensitive(False)
if self.ConfigData["UART3"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["UART3_Config_Obj"].set_sensitive(False)
if self.ConfigData["UART4"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["UART4_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetUSB_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["USB0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["USB0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["USB0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["USB0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["USB1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["USB1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["USB1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["USB1_Config_BName"])
#----------------------------------------------------------------------
def InitUSB_Obj(self):
"""
"""
if self.ConfigData["USB0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["USB0_Config_Obj"].set_sensitive(False)
if self.ConfigData["USB1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["USB1_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetSATA_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["SATA0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SATA0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["SATA1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SATA1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA1_Config_BName"])
self.ConfigObjs["IOST_WMain"]["SATA2_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA2_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SATA2_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA2_Config_BName"])
self.ConfigObjs["IOST_WMain"]["SATA3_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA3_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["SATA3_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["SATA3_Config_BName"])
#----------------------------------------------------------------------
def InitSATA_Obj(self):
"""
"""
if self.ConfigData["SATA0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SATA0_Config_Obj"].set_sensitive(False)
if self.ConfigData["SATA1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SATA1_Config_Obj"].set_sensitive(False)
if self.ConfigData["SATA2"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SATA2_Config_Obj"].set_sensitive(False)
if self.ConfigData["SATA3"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["SATA3_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetPCIE_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["PCIE0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["PCIE0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE0_Config_BName"])
self.ConfigObjs["IOST_WMain"]["PCIE1_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE1_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["PCIE1_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE1_Config_BName"])
self.ConfigObjs["IOST_WMain"]["PCIE2_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE2_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["PCIE2_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE2_Config_BName"])
self.ConfigObjs["IOST_WMain"]["PCIE3_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE3_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["PCIE3_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["PCIE3_Config_BName"])
#----------------------------------------------------------------------
def InitPCIE_Obj(self):
"""
"""
if self.ConfigData["PCIE0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["PCIE0_Config_Obj"].set_sensitive(False)
if self.ConfigData["PCIE1"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["PCIE1_Config_Obj"].set_sensitive(False)
if self.ConfigData["PCIE2"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["PCIE2_Config_Obj"].set_sensitive(False)
if self.ConfigData["PCIE3"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["PCIE3_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetETH_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["RGMII0_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["RGMII0_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["RGMII0_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["RGMII0_Config_BName"])
#----------------------------------------------------------------------
def InitETH_Obj(self):
"""
"""
if self.ConfigData["RGMII0"][0] == "Disable":
self.ConfigObjs["IOST_WMain"]["RGMII0_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetAutoMail_Obj(self):
"""
"""
self.ConfigObjs["IOST_WMain"]["AutoMail_Enable_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["AutoMail_Enable_CName"])
self.ConfigObjs["IOST_WMain"]["AutoMail_Config_Obj"] = self.IOST_WMain_Builder.get_object(self.ConfigData["IOST_WMain"]["AutoMail_Config_BName"])
#----------------------------------------------------------------------
def InitAutoMail_Obj(self):
"""
"""
if self.ConfigData["AutoMail"] == "Disable":
self.ConfigObjs["IOST_WMain"]["AutoMail_Config_Obj"].set_sensitive(False)
#----------------------------------------------------------------------
def GetSaveConfigButton_Obj(self):
"""
"""
#----------------------------------------------------------------------
def GetCancelButton_Obj(self):
"""
"""
#----------------------------------------------------------------------
def GetRunButton_Obj(self):
"""
"""
#----------------------------------------------------------------------
def IOST_WMain_Show(self):
self.ConfigObjs["IOST_WMain"]["WName"].show()
#----------------------------------------------------------------------
def on_IOST_Wmain_MenuHelp_About_activate(self, object, data=None):
IOST_AboutDialog.__init__(self, self.IOST_WMain_GladeFile, self.ConfigData["IOST_WHelpAbout"]["WName"],
self.IOST_WMain_Builder)
IOST_AboutDialog.Run(self)
#----------------------------------------------------------------------
"""
def on_IOST_WMain_destroy(self, object, data=None):
# "Exit with detroy"
# # Basic = IOST_Basic()
# # Response = Basic.IOST_Basic_MsgConfirm('Exit the main window')
# # if Response == gtk.RESPONSE_OK:
# # gtk.main_quit()
# # return False
# # else:
# # return True
pass
"""
#----------------------------------------------------------------------
def IOST_WMain_Confirm_Exit(self, object, event, data=None):
"Display a Diaglog confirm to exit Main Window"
Basic = IOST_Basic()
# IOST_Basic=IOST_Basic()
Response = Basic.MsgConfirm('Close the IOST Main Window')
return Response
#----------------------------------------------------------------------
def on_IOST_WMain_MenuFile_Quit_activate(self, object, data=None):
"Exit with from File -> Exit"
Response = self.IOST_WMain_Confirm_Exit(object, object, data)
if Response == gtk.RESPONSE_OK:
gtk.main_quit()
#----------------------------------------------------------------------
def on_IOST_WMain_delete_event(self, object, event, data=None):
""
Response = self.IOST_WMain_Confirm_Exit(object, event, data)
if Response == gtk.RESPONSE_OK:
gtk.main_quit()
return False
else:
return True
#----------------------------------------------------------------------
def on_IOST_WMain_destroy_event(self, object, event=None, data=None):
"Confirm exit on a window destroy event"
Response = self.IOST_WMain_Confirm_Exit(object, event, data)
if Response == gtk.RESPONSE_OK:
gtk.main_quit()
#----------------------------------------------------------------------
def delete_event(self, widget, data=None):
"Exit by delete even"
pass
# return False
# gtk.main_quit()
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_CTRL_Run_B_clicked(self, object, data=None):
"Control to Run button"
IOST_WRun.__init__(self, self.IOST_WMain_GladeFile,
self.ConfigData["IOST_WRun"]["WName"],
None)
self.ConfigObjs["IOST_WMain"]["WMain_Obj"].hide()
#----------------------------------------------------------------------
# Run Button
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_Save_B_clicked(self, object, data=None):
"Control to Save Condfig button"
# --------------------
# Cancel Button
# --------------------
def on_IOST_Wmain_Config_CTRL_Cancel_B_clicked(self, object, data=None):
"Control to Cancel button"
#----------------------------------------------------------------------
# I2C
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C0_B_clicked(self, object, data=None):
"Control to ConfigI2C-0 button "
def on_IOST_WMain_Config_I2C0_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C0_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C0_Enable_Obj"].get_active())
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C1_B_clicked(self, object, data=None):
"Control to ConfigI2C-1 button "
def on_IOST_WMain_Config_I2C1_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C1_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C1_Enable_Obj"].get_active())
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C2_B_clicked(self, object, data=None):
"Control to ConfigI2C-2 button "
def on_IOST_WMain_Config_I2C2_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C2_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C2_Enable_Obj"].get_active())
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C3_B_clicked(self, object, data=None):
"Control to ConfigI2C-3 button "
def on_IOST_WMain_Config_I2C3_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C3_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C3_Enable_Obj"].get_active())
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C4_B_clicked(self, object, data=None):
"Control to ConfigI2C-4 button "
def on_IOST_WMain_Config_I2C4_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C4_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C4_Enable_Obj"].get_active())
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C5_B_clicked(self, object, data=None):
"Control to ConfigI2C-5 button "
def on_IOST_WMain_Config_I2C5_C_toggled(self, object, data=None):
# entry.set_editable(checkbutton.get_active())
self.ConfigObjs["IOST_WMain"]["I2C5_Config_Obj"].set_sensitive(self.ConfigObjs["IOST_WMain"]["I2C5_Enable_Obj"].get_active())
#----------------------------------------------------------------------
# USB
#----------------------------------------------------------------------
def on_IOST_WMain_Config_USB0_B_clicked(self, object, data=None):
"Control to ConfigUSB-0 button "
def on_IOST_WMain_Config_USB1_B_clicked(self, object, data=None):
"Control to ConfigUSB-1 button "
def on_IOST_WMain_Config_USB2_B_clicked(self, object, data=None):
"Control to ConfigUSB-2 button "
#----------------------------------------------------------------------
# SATA
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA0_B_clicked(self, object, data=None):
"Control to ConfigSATA-0 button "
def on_IOST_WMain_Config_SATA1_B_clicked(self, object, data=None):
"Control to ConfigSATA-1 button "
def on_IOST_WMain_Config_SATA2_B_clicked(self, object, data=None):
"Control to ConfigSATA-2 button "
def on_IOST_WMain_Config_SATA3_B_clicked(self, object, data=None):
"Control to ConfigSATA-3 button "
#----------------------------------------------------------------------
# UART
#----------------------------------------------------------------------
def on_IOST_WMain_Config_UART0_B_clicked(self, object, data=None):
"Control to ConfigUART-0 button "
def on_IOST_WMain_Config_UART1_B_clicked(self, object, data=None):
"Control to ConfigUART-1 button "
def on_IOST_WMain_Config_UART2_B_clicked(self, object, data=None):
"Control to ConfigUART-2 button "
def on_IOST_WMain_Config_UART3_B_clicked(self, object, data=None):
"Control to ConfigUART-3 button "
def on_IOST_WMain_Config_UART4_B_clicked(self, object, data=None):
"Control to ConfigUART-4 button "
#----------------------------------------------------------------------
# SPI
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SPI0_B_clicked(self, object, data=None):
"Control to ConfigSPI-0 button "
def on_IOST_WMain_Config_SPI1_B_clicked(self, object, data=None):
"Control to ConfigSPI-1 button "
def on_IOST_WMain_Config_SPI2_B_clicked(self, object, data=None):
"Control to ConfigSPI-2 button "
#----------------------------------------------------------------------
# PCIE
#----------------------------------------------------------------------
def on_IOST_WMain_Config_PCIE0_B_clicked(self, object, data=None):
"Control to ConfigPCIE-0 button "
def on_IOST_WMain_Config_PCIE1_B_clicked(self, object, data=None):
"Control to ConfigPCIE-1 button "
def on_IOST_WMain_Config_PCIE2_B_clicked(self, object, data=None):
"Control to ConfigPCIE-2 button "
def on_IOST_WMain_Config_PCIE3_B_clicked(self, object, data=None):
"Control to ConfigPCIE-3 button "
def on_IOST_WMain_Config_RGMII0_B_clicked(self, object, data=None):
"Control to Config Rgmii button "
def on_IOST_WMain_Config_Email_B_clicked(self, object, data=None):
"Control to Config SetupEmail button "
#===========================================================================================
def on_IOST_WMain_LoadConfigFile_Select_B_clicked(self, object, data=None):
IOST_LoadConfigFileDialog.__init__(self, self.IOST_WMain_GladeFile,
self.ConfigData["IOST_WLoafConfigFile"]["WName"],
self.IOST_WMain_Builder)
self.WindowRun()
file_path = self.ConfigData["IOST_WLoafConfigFile"]["ConfigFilePath"]
self.ConfigObjs["IOST_WMain"]["ConfigFilePath_Obj"].set_text(file_path)
# def on_button_LoadConfigFile_OK_clicked(self, object, data=None):
# "Get file name and exit dialog window"
# self.IOST_LoadConfigFile_Obj.ConfigFile = self.IOST_LoadConfigFile_Obj.get_filename()
# self.IOST_LoadConfigFile_Obj.entry_PathConfigFile.set_text(self.IOST_LoadConfigFile_Obj.ConfigFile)
# self.IOST_LoadConfigFile_Obj.hide()
# print "The file name choise is: %s " %self.IOST_LoadConfigFile_Obj.ConfigFile
# def on_button_LoadConfigFile_Cancel_clicked(self, object, data=None):
# self.IOST_LoadConfigFile_Obj.hide()
#======================================================================
import json
from datetime import timedelta
import arrow
import boto3
from moto import mock_sns, mock_sqs, mock_ses
from lemur.notifications import service
from lemur.certificates.schemas import certificate_notification_output_schema
from lemur.plugins.lemur_aws.sns import format_message
from lemur.plugins.lemur_aws.sns import publish
from lemur.tests.factories import NotificationFactory, CertificateFactory
from lemur.tests.test_messaging import verify_sender_email
@mock_sns()
def test_format_nonexpiration(certificate, endpoint):
data = [certificate_notification_output_schema.dump(certificate).data]
for certificate in data:
expected_message = {
"notification_type": "not-expiration",
"certificate_name": certificate["name"],
"expires": arrow.get(certificate["validityEnd"]).format("YYYY-MM-DDTHH:mm:ss"),
"issuer": certificate["issuer"],
"id": certificate["id"],
"endpoints_detected": 0,
"owner": certificate["owner"],
"details": "https://lemur.example.com/#/certificates/{name}".format(name=certificate["name"])
}
# We don't currently support any SNS notifications besides expiration;
# when we do, this test will probably need to be refactored.
# For now, this is a placeholder proving empty options works as long as it's not "expiration" type
assert expected_message == json.loads(format_message(certificate, "not-expiration", None))
@mock_sns()
def test_format_expiration(certificate, endpoint):
data = [certificate_notification_output_schema.dump(certificate).data]
options = get_options()
for certificate in data:
expected_message = {
"notification_type": "expiration",
"certificate_name": certificate["name"],
"expires": arrow.get(certificate["validityEnd"]).format("YYYY-MM-DDTHH:mm:ss"),
"issuer": certificate["issuer"],
"id": certificate["id"],
"endpoints_detected": 0,
"owner": certificate["owner"],
"details": "https://lemur.example.com/#/certificates/{name}".format(name=certificate["name"]),
"notification_interval_days": 10 # 10 days specified in options
}
assert expected_message == json.loads(format_message(certificate, "expiration", options))
@mock_sns()
@mock_sqs()
def create_and_subscribe_to_topic():
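    """Create an SNS topic with an SQS queue subscribed to it, so tests can
    read back the messages that were published to the topic."""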
sns_client = boto3.client("sns", region_name="us-east-1")
topic_arn = sns_client.create_topic(Name='lemursnstest')["TopicArn"]
sqs_client = boto3.client("sqs", region_name="us-east-1")
queue = sqs_client.create_queue(QueueName="lemursnstestqueue")
queue_url = queue["QueueUrl"]
queue_arn = sqs_client.get_queue_attributes(
QueueUrl=queue_url, AttributeNames=["All"])["Attributes"]["QueueArn"]
sns_client.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
return [topic_arn, sqs_client, queue_url]
@mock_sns()
@mock_sqs()
def test_publish(certificate, endpoint):
data = [certificate_notification_output_schema.dump(certificate).data]
topic_arn, sqs_client, queue_url = create_and_subscribe_to_topic()
message_ids = publish(topic_arn, data, "expiration", get_options(), region_name="us-east-1")
assert len(message_ids) == len(data)
received_messages = sqs_client.receive_message(QueueUrl=queue_url)["Messages"]
for certificate in data:
expected_message_id = message_ids[certificate["name"]]
actual_message = next(
(m for m in received_messages if json.loads(m["Body"])["MessageId"] == expected_message_id), None)
actual_json = json.loads(actual_message["Body"])
assert actual_json["Message"] == format_message(certificate, "expiration", get_options())
assert actual_json["Subject"] == "Lemur: Expiration Notification"
def get_options():
return [
{"name": "interval", "value": 10},
{"name": "unit", "value": "days"},
{"name": "region", "value": "us-east-1"},
{"name": "accountNumber", "value": "123456789012"},
{"name": "topicName", "value": "lemursnstest"},
]
@mock_sns()
@mock_sqs()
@mock_ses() # because email notifications are also sent
def test_send_expiration_notification():
from lemur.notifications.messaging import send_expiration_notifications
verify_sender_email() # emails are sent to owner and security; SNS only used for configured notification
topic_arn, sqs_client, queue_url = create_and_subscribe_to_topic()
notification = NotificationFactory(plugin_name="aws-sns")
notification.options = get_options()
now = arrow.utcnow()
in_ten_days = now + timedelta(days=10, hours=1) # a bit more than 10 days since we'll check in the future
certificate = CertificateFactory()
certificate.not_after = in_ten_days
certificate.notifications.append(notification)
assert send_expiration_notifications([], []) == (3, 0) # owner, SNS, and security
received_messages = sqs_client.receive_message(QueueUrl=queue_url)["Messages"]
assert len(received_messages) == 1
expected_message = format_message(certificate_notification_output_schema.dump(certificate).data, "expiration",
notification.options)
actual_message = json.loads(received_messages[0]["Body"])["Message"]
assert actual_message == expected_message
@mock_sns()
@mock_sqs()
@mock_ses()
def test_send_expiration_notification_sns_disabled():
from lemur.notifications.messaging import send_expiration_notifications
topic_arn, sqs_client, queue_url = create_and_subscribe_to_topic()
prepare_test()
# though email is not disabled, we don't send the owner/security notifications via email if
# the main notification's plugin is disabled
assert send_expiration_notifications([], ['aws-sns']) == (0, 0)
received_messages = sqs_client.receive_message(QueueUrl=queue_url)
assert "Messages" not in received_messages
@mock_sns()
@mock_sqs()
@mock_ses()
def test_send_expiration_notification_email_disabled():
from lemur.notifications.messaging import send_expiration_notifications
topic_arn, sqs_client, queue_url = create_and_subscribe_to_topic()
notification, certificate = prepare_test()
assert send_expiration_notifications([], ['email-notification']) == (1, 0) # SNS only
received_messages = sqs_client.receive_message(QueueUrl=queue_url)["Messages"]
assert len(received_messages) == 1
expected_message = format_message(certificate_notification_output_schema.dump(certificate).data, "expiration",
notification.options)
actual_message = json.loads(received_messages[0]["Body"])["Message"]
assert actual_message == expected_message
@mock_sns()
@mock_sqs()
@mock_ses()
def test_send_expiration_notification_both_disabled():
from lemur.notifications.messaging import send_expiration_notifications
topic_arn, sqs_client, queue_url = create_and_subscribe_to_topic()
prepare_test()
assert send_expiration_notifications([], ['aws-sns', 'email-notification']) == (0, 0)
received_messages = sqs_client.receive_message(QueueUrl=queue_url)
assert "Messages" not in received_messages
def prepare_test():
verify_sender_email() # emails are sent to owner and security; SNS only used for configured notification
# set all existing notifications to disabled so we don't have multiple conflicting in the tests
for prior_notification in service.get_all():
service.update(prior_notification.id, prior_notification.label, prior_notification.plugin_name,
prior_notification.options, prior_notification.description, False, [], [])
notification = NotificationFactory(plugin_name="aws-sns")
notification.options = get_options()
now = arrow.utcnow()
in_ten_days = now + timedelta(days=10, hours=1) # a bit more than 10 days since we'll check in the future
certificate = CertificateFactory()
certificate.not_after = in_ten_days
certificate.notifications.append(notification)
return notification, certificate
# Currently disabled as the SNS plugin doesn't support this type of notification
# def test_send_rotation_notification(endpoint, source_plugin):
# from lemur.notifications.messaging import send_rotation_notification
# from lemur.deployment.service import rotate_certificate
#
# notification = NotificationFactory(plugin_name="aws-sns")
# notification.options = get_options()
#
# new_certificate = CertificateFactory()
# rotate_certificate(endpoint, new_certificate)
# assert endpoint.certificate == new_certificate
#
# assert send_rotation_notification(new_certificate)
# Currently disabled as the SNS plugin doesn't support this type of notification
# def test_send_pending_failure_notification(user, pending_certificate, async_issuer_plugin):
# from lemur.notifications.messaging import send_pending_failure_notification
#
# assert send_pending_failure_notification(pending_certificate)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib import utils
def test_add():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
def check_c():
mhost = tvm.build(s, [A, B, C], "c", name="test_fadd")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fadd = m["test_fadd"]
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_c()
def test_add_pipeline():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
AA = te.compute((n,), lambda *i: A(*i), name="A")
BB = te.compute((n,), lambda *i: B(*i), name="B")
T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
C = te.compute(A.shape, lambda *i: T(*i), name="C")
s = te.create_schedule(C.op)
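    # Schedule note: split the output axis so the innermost 4 elements are
    # vectorized, split the outer loop again (factor 13) to get a parallel
    # loop, and use pragmas to mark the parallel launch point, a strided
    # work-partitioning pattern, and a barrier at the end of the region.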
xo, xi = s[C].split(C.op.axis[0], factor=4)
xo1, xo2 = s[C].split(xo, factor=13)
s[C].parallel(xo2)
s[C].pragma(xo1, "parallel_launch_point")
s[C].pragma(xo2, "parallel_stride_pattern")
s[C].pragma(xo2, "parallel_barrier_when_finish")
s[C].vectorize(xi)
def check_c():
# Specifically allow offset to test codepath when offset is available
Ab = tvm.tir.decl_buffer(
A.shape, A.dtype, elem_offset=te.size_var("Aoffset"), offset_factor=8, name="A"
)
binds = {A: Ab}
# BUILD and invoke the kernel.
f1 = tvm.lower(s, [A, B, C], name="test_fadd_pipeline")
mhost = tvm.build(f1, target="c")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fadd = m["test_fadd_pipeline"]
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_c()
def test_reinterpret():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="int32")
B = te.compute(
A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.reinterpret", 2 + A(*i)), name="B"
)
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_reinterpret")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fadd = m["test_reinterpret"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.randint(-(2 ** 30), 2 ** 30, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fadd(a, b)
tvm.testing.assert_allclose(b.numpy(), (2 + a.numpy()).view("float32"))
check_c()
def test_ceil():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.ceil", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_ceil")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fceil = m["test_ceil"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fceil(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.ceil(a.numpy()).view("float32")))
check_c()
def test_floor():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.floor", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_floor")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
ffloor = m["test_floor"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
ffloor(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.floor(a.numpy()).view("float32")))
check_c()
def test_round():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.round", A(*i)), name="B")
s = te.create_schedule(B.op)
def check_c():
mhost = tvm.build(s, [A, B], "c", name="test_round")
temp = utils.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
m = tvm.runtime.load_module(path_dso)
fround = m["test_round"]
dev = tvm.cpu(0)
n = nn
a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fround(a, b)
tvm.testing.assert_allclose(b.numpy(), (np.round(a.numpy()).view("float32")))
check_c()
def test_call_packed():
def fake_func(fname="fake.func"):
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
fake_func1 = tvm.tir.call_packed(fname, A[0])
ib.emit(fake_func1)
body = ib.get()
return A, body
def check_global_packed_func():
fname = "fake.func"
A, body = fake_func(fname)
func1 = tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "func1")
B, body = fake_func()
func2 = tvm.tir.PrimFunc([B], body).with_attr("global_symbol", "func2")
mod = tvm.IRModule({"fake_func1": func1, "fake_func2": func2})
fcode = tvm.build(mod, None, "c")
src = fcode.get_source()
# there are two locations calling the packed func
assert src.count(fname) == 2
suffix = "_packed"
packed_func_name = fname + suffix
        # the func name is standardized by GetUniqueName and no longer exists in the source
assert src.find(packed_func_name) == -1
packed_func_real_name = "_".join(fname.split(".")) + suffix
func_declaration = "static void* %s = NULL;" % packed_func_real_name
# src only has 1 valid declaration
assert src.count(func_declaration) == 1
check_global_packed_func()
if __name__ == "__main__":
test_add()
test_add_pipeline()
test_reinterpret()
test_ceil()
test_floor()
test_round()
test_call_packed()
from __future__ import absolute_import, print_function, division
import math
import os
import sys
import traceback
import urwid
from typing import Optional, Union # noqa
from mitmproxy import contentviews
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import models
from mitmproxy import utils
from mitmproxy.console import common
from mitmproxy.console import flowdetailview
from mitmproxy.console import grideditor
from mitmproxy.console import searchable
from mitmproxy.console import signals
from mitmproxy.console import tabs
from mitmproxy.flow import export
from netlib.http import Headers
from netlib.http import status_codes
class SearchError(Exception):
pass
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("b", "save request/response body"),
("C", "export flow to clipboard"),
("D", "duplicate flow"),
("d", "delete flow"),
("e", "edit request/response"),
("f", "load full body data"),
("m", "change body display mode for this entity\n(default mode can be changed in the options)"),
(None,
common.highlight_key("automatic", "a") +
[("text", ": automatic detection")]
),
(None,
common.highlight_key("hex", "e") +
[("text", ": Hex")]
),
(None,
common.highlight_key("html", "h") +
[("text", ": HTML")]
),
(None,
common.highlight_key("image", "i") +
[("text", ": Image")]
),
(None,
common.highlight_key("javascript", "j") +
[("text", ": JavaScript")]
),
(None,
common.highlight_key("json", "s") +
[("text", ": JSON")]
),
(None,
common.highlight_key("urlencoded", "u") +
[("text", ": URL-encoded data")]
),
(None,
common.highlight_key("raw", "r") +
[("text", ": raw data")]
),
(None,
common.highlight_key("xml", "x") +
[("text", ": XML")]
),
("E", "export flow to file"),
("r", "replay request"),
("V", "revert changes to request"),
("v", "view body in external viewer"),
("w", "save all flows matching current view filter"),
("W", "save this flow"),
("x", "delete body"),
("z", "encode/decode a request/response"),
("tab", "next tab"),
("h, l", "previous tab, next tab"),
("space", "next flow"),
("|", "run script on this flow"),
("/", "search (case sensitive)"),
("n", "repeat search forward"),
("N", "repeat search backwards"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
('heading_key', "q"), ":back ",
]
class FlowViewHeader(urwid.WidgetWrap):
def __init__(self, master, f):
self.master = master # type: "mitmproxy.console.master.ConsoleMaster"
self.flow = f # type: models.HTTPFlow
self._w = common.format_flow(
f,
False,
extended=True,
hostheader=self.master.options.showhost
)
signals.flow_change.connect(self.sig_flow_change)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self._w = common.format_flow(
flow,
False,
extended=True,
hostheader=self.master.options.showhost
)
cache = utils.LRUCache(200)
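# Rendered content views are cached (keyed on view mode, size limit and a
# hash of the message) so redraws don't re-run the content-view parsers.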
TAB_REQ = 0
TAB_RESP = 1
class FlowView(tabs.Tabs):
highlight_color = "focusfield"
def __init__(self, master, state, flow, tab_offset):
self.master, self.state, self.flow = master, state, flow
super(FlowView, self).__init__(
[
(self.tab_request, self.view_request),
(self.tab_response, self.view_response),
(self.tab_details, self.view_details),
],
tab_offset
)
self.show()
self.last_displayed_body = None
signals.flow_change.connect(self.sig_flow_change)
def tab_request(self):
if self.flow.intercepted and not self.flow.reply.acked and not self.flow.response:
return "Request intercepted"
else:
return "Request"
def tab_response(self):
if self.flow.intercepted and not self.flow.reply.acked and self.flow.response:
return "Response intercepted"
else:
return "Response"
def tab_details(self):
return "Detail"
def view_request(self):
return self.conn_text(self.flow.request)
def view_response(self):
return self.conn_text(self.flow.response)
def view_details(self):
return flowdetailview.flowdetails(self.state, self.flow)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self.show()
def content_view(self, viewmode, message):
if message.raw_content is None:
msg, body = "", [urwid.Text([("error", "[content missing]")])]
return msg, body
else:
full = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "fullcontents"),
False
)
if full:
limit = sys.maxsize
else:
limit = contentviews.VIEW_CUTOFF
flow_modify_cache_invalidation = hash((
message.raw_content,
message.headers.fields,
getattr(message, "path", None),
))
return cache.get(
# We move message into this partial function as it is not hashable.
lambda *args: self._get_content_view(message, *args),
viewmode,
limit,
flow_modify_cache_invalidation
)
def _get_content_view(self, message, viewmode, max_lines, _):
try:
content = message.content
if content != message.raw_content:
enc = "[decoded {}]".format(
message.headers.get("content-encoding")
)
else:
enc = None
except ValueError:
content = message.raw_content
enc = "[cannot decode]"
try:
query = None
if isinstance(message, models.HTTPRequest):
query = message.query
description, lines = contentviews.get_content_view(
viewmode, content, headers=message.headers, query=query
)
except exceptions.ContentViewException:
s = "Content viewer failed: \n" + traceback.format_exc()
signals.add_log(s, "error")
description, lines = contentviews.get_content_view(
contentviews.get("Raw"), content, headers=message.headers
)
description = description.replace("Raw", "Couldn't parse: falling back to Raw")
if enc:
description = " ".join([enc, description])
# Give hint that you have to tab for the response.
if description == "No content" and isinstance(message, models.HTTPRequest):
description = "No request content (press tab to view response)"
        # If the user has a wide terminal, they get fewer lines; this should not be an issue.
chars_per_line = 80
max_chars = max_lines * chars_per_line
total_chars = 0
text_objects = []
for line in lines:
txt = []
for (style, text) in line:
if total_chars + len(text) > max_chars:
text = text[:max_chars - total_chars]
txt.append((style, text))
total_chars += len(text)
if total_chars == max_chars:
break
# round up to the next line.
total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)
text_objects.append(urwid.Text(txt))
if total_chars == max_chars:
text_objects.append(urwid.Text([
("highlight", "Stopped displaying data after %d lines. Press " % max_lines),
("key", "f"),
("highlight", " to load all data.")
]))
break
return description, text_objects
def viewmode_get(self):
override = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "prettyview")
)
return self.state.default_body_view if override is None else override
def conn_text(self, conn):
if conn:
txt = common.format_keyvals(
[(h + ":", v) for (h, v) in conn.headers.items(multi=True)],
key = "header",
val = "text"
)
viewmode = self.viewmode_get()
msg, body = self.content_view(viewmode, conn)
cols = [
urwid.Text(
[
("heading", msg),
]
),
urwid.Text(
[
" ",
('heading', "["),
('heading_key', "m"),
('heading', (":%s]" % viewmode.name)),
],
align="right"
)
]
title = urwid.AttrWrap(urwid.Columns(cols), "heading")
txt.append(title)
txt.extend(body)
else:
txt = [
urwid.Text(""),
urwid.Text(
[
("highlight", "No response. Press "),
("key", "e"),
("highlight", " and edit any aspect to add one."),
]
)
]
return searchable.Searchable(self.state, txt)
def set_method_raw(self, m):
if m:
self.flow.request.method = m
signals.flow_change.send(self, flow = self.flow)
def edit_method(self, m):
if m == "e":
signals.status_prompt.send(
prompt = "Method",
text = self.flow.request.method,
callback = self.set_method_raw
)
else:
for i in common.METHOD_OPTIONS:
if i[1] == m:
self.flow.request.method = i[0].upper()
signals.flow_change.send(self, flow = self.flow)
def set_url(self, url):
request = self.flow.request
try:
request.url = str(url)
except ValueError:
return "Invalid URL."
signals.flow_change.send(self, flow = self.flow)
def set_resp_status_code(self, status_code):
try:
status_code = int(status_code)
except ValueError:
return None
self.flow.response.status_code = status_code
if status_code in status_codes.RESPONSES:
self.flow.response.reason = status_codes.RESPONSES[status_code]
signals.flow_change.send(self, flow = self.flow)
def set_resp_reason(self, reason):
self.flow.response.reason = reason
signals.flow_change.send(self, flow = self.flow)
def set_headers(self, fields, conn):
conn.headers = Headers(fields)
signals.flow_change.send(self, flow = self.flow)
def set_query(self, lst, conn):
conn.query = lst
signals.flow_change.send(self, flow = self.flow)
def set_path_components(self, lst, conn):
conn.path_components = lst
signals.flow_change.send(self, flow = self.flow)
def set_form(self, lst, conn):
conn.urlencoded_form = lst
signals.flow_change.send(self, flow = self.flow)
def edit_form(self, conn):
self.master.view_grideditor(
grideditor.URLEncodedFormEditor(
self.master,
conn.urlencoded_form.items(multi=True),
self.set_form,
conn
)
)
def edit_form_confirm(self, key, conn):
if key == "y":
self.edit_form(conn)
def set_cookies(self, lst, conn):
conn.cookies = lst
signals.flow_change.send(self, flow = self.flow)
def set_setcookies(self, data, conn):
conn.cookies = data
signals.flow_change.send(self, flow = self.flow)
def edit(self, part):
if self.tab_offset == TAB_REQ:
message = self.flow.request
else:
if not self.flow.response:
self.flow.response = models.HTTPResponse(
self.flow.request.http_version,
200, b"OK", Headers(), b""
)
self.flow.response.reply = controller.DummyReply()
message = self.flow.response
self.flow.backup()
if message == self.flow.request and part == "c":
self.master.view_grideditor(
grideditor.CookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_cookies,
message
)
)
if message == self.flow.response and part == "c":
self.master.view_grideditor(
grideditor.SetCookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_setcookies,
message
)
)
if part == "r":
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
c = self.master.spawn_editor(message.get_content(strict=False) or b"")
message.content = c.rstrip(b"\n")
elif part == "f":
if not message.urlencoded_form and message.raw_content:
signals.status_prompt_onekey.send(
prompt = "Existing body is not a URL-encoded form. Clear and edit?",
keys = [
("yes", "y"),
("no", "n"),
],
callback = self.edit_form_confirm,
args = (message,)
)
else:
self.edit_form(message)
elif part == "h":
self.master.view_grideditor(
grideditor.HeaderEditor(
self.master,
message.headers.fields,
self.set_headers,
message
)
)
elif part == "p":
p = message.path_components
self.master.view_grideditor(
grideditor.PathEditor(
self.master,
p,
self.set_path_components,
message
)
)
elif part == "q":
self.master.view_grideditor(
grideditor.QueryEditor(
self.master,
message.query.items(multi=True),
self.set_query, message
)
)
elif part == "u":
signals.status_prompt.send(
prompt = "URL",
text = message.url,
callback = self.set_url
)
elif part == "m" and message == self.flow.request:
signals.status_prompt_onekey.send(
prompt = "Method",
keys = common.METHOD_OPTIONS,
callback = self.edit_method
)
elif part == "o":
signals.status_prompt.send(
prompt = "Code",
text = str(message.status_code),
callback = self.set_resp_status_code
)
elif part == "m" and message == self.flow.response:
signals.status_prompt.send(
prompt = "Message",
text = message.reason,
callback = self.set_resp_reason
)
signals.flow_change.send(self, flow = self.flow)
def _view_nextprev_flow(self, np, flow):
try:
idx = self.state.view.index(flow)
        except (IndexError, ValueError):  # list.index raises ValueError when the flow is absent
return
if np == "next":
new_flow, new_idx = self.state.get_next(idx)
else:
new_flow, new_idx = self.state.get_prev(idx)
if new_flow is None:
signals.status_message.send(message="No more flows!")
else:
signals.pop_view_state.send(self)
self.master.view_flow(new_flow, self.tab_offset)
def view_next_flow(self, flow):
return self._view_nextprev_flow("next", flow)
def view_prev_flow(self, flow):
return self._view_nextprev_flow("prev", flow)
def change_this_display_mode(self, t):
self.state.add_flow_setting(
self.flow,
(self.tab_offset, "prettyview"),
contentviews.get_by_shortcut(t)
)
signals.flow_change.send(self, flow = self.flow)
def keypress(self, size, key):
conn = None # type: Optional[Union[models.HTTPRequest, models.HTTPResponse]]
if self.tab_offset == TAB_REQ:
conn = self.flow.request
elif self.tab_offset == TAB_RESP:
conn = self.flow.response
        key = super(FlowView, self).keypress(size, key)
# Special case: Space moves over to the next flow.
# We need to catch that before applying common.shortcuts()
if key == " ":
self.view_next_flow(self.flow)
return
key = common.shortcuts(key)
if key in ("up", "down", "page up", "page down"):
# Pass scroll events to the wrapped widget
self._w.keypress(size, key)
elif key == "a":
self.flow.accept_intercept(self.master)
signals.flow_change.send(self, flow = self.flow)
elif key == "A":
self.master.accept_all()
signals.flow_change.send(self, flow = self.flow)
elif key == "d":
if self.state.flow_count() == 1:
self.master.view_flowlist()
elif self.state.view.index(self.flow) == len(self.state.view) - 1:
self.view_prev_flow(self.flow)
else:
self.view_next_flow(self.flow)
f = self.flow
if not f.reply.acked:
f.kill(self.master)
self.state.delete_flow(f)
elif key == "D":
f = self.master.duplicate_flow(self.flow)
signals.pop_view_state.send(self)
self.master.view_flow(f)
signals.status_message.send(message="Duplicated.")
elif key == "p":
self.view_prev_flow(self.flow)
elif key == "r":
r = self.master.replay_request(self.flow)
if r:
signals.status_message.send(message=r)
signals.flow_change.send(self, flow = self.flow)
elif key == "V":
if self.flow.modified():
self.state.revert(self.flow)
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Reverted.")
else:
signals.status_message.send(message="Flow not modified.")
elif key == "W":
signals.status_prompt_path.send(
prompt = "Save this flow",
callback = self.master.save_one_flow,
args = (self.flow,)
)
elif key == "|":
signals.status_prompt_path.send(
prompt = "Send flow to script",
callback = self.master.run_script_once,
args = (self.flow,)
)
elif key == "e":
if self.tab_offset == TAB_REQ:
signals.status_prompt_onekey.send(
prompt="Edit request",
keys=(
("cookies", "c"),
("query", "q"),
("path", "p"),
("url", "u"),
("header", "h"),
("form", "f"),
("raw body", "r"),
("method", "m"),
),
callback=self.edit
)
elif self.tab_offset == TAB_RESP:
signals.status_prompt_onekey.send(
prompt="Edit response",
keys=(
("cookies", "c"),
("code", "o"),
("message", "m"),
("header", "h"),
("raw body", "r"),
),
callback=self.edit
)
else:
signals.status_message.send(
message="Tab to the request or response",
expire=1
)
elif key in set("bfgmxvzEC") and not conn:
signals.status_message.send(
message = "Tab to the request or response",
expire = 1
)
return
elif key == "b":
if self.tab_offset == TAB_REQ:
common.ask_save_body("q", self.flow)
else:
common.ask_save_body("s", self.flow)
elif key == "f":
signals.status_message.send(message="Loading all body data...")
self.state.add_flow_setting(
self.flow,
(self.tab_offset, "fullcontents"),
True
)
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="")
elif key == "m":
p = list(contentviews.view_prompts)
p.insert(0, ("Clear", "C"))
signals.status_prompt_onekey.send(
self,
prompt = "Display mode",
keys = p,
callback = self.change_this_display_mode
)
elif key == "E":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to file",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.ask_save_path)
)
elif key == "C":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to clipboard",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.copy_to_clipboard_or_prompt)
)
elif key == "x":
conn.content = None
signals.flow_change.send(self, flow=self.flow)
elif key == "v":
if conn.raw_content:
t = conn.headers.get("content-type")
if "EDITOR" in os.environ or "PAGER" in os.environ:
self.master.spawn_external_viewer(conn.get_content(strict=False), t)
else:
signals.status_message.send(
message = "Error! Set $EDITOR or $PAGER."
)
elif key == "z":
self.flow.backup()
e = conn.headers.get("content-encoding", "identity")
if e != "identity":
try:
conn.decode()
except ValueError:
signals.status_message.send(
message = "Could not decode - invalid data?"
)
else:
signals.status_prompt_onekey.send(
prompt = "Select encoding: ",
keys = (
("gzip", "z"),
("deflate", "d"),
("brotli", "b"),
),
callback = self.encode_callback,
args = (conn,)
)
signals.flow_change.send(self, flow = self.flow)
else:
# Key is not handled here.
return key
def encode_callback(self, key, conn):
encoding_map = {
"z": "gzip",
"d": "deflate",
"b": "brotli",
}
conn.encode(encoding_map[key])
signals.flow_change.send(self, flow = self.flow)
from flask import current_app
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import (
TimedJSONWebSignatureSerializer as Serializer,
BadSignature,
SignatureExpired,
)
from .. import db, login_manager
from . import Agency
class Permission:
GENERAL = 0x01
AGENCY_WORKER = 0x10
ADMINISTER = 0xff
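    # Permissions are bit flags; a role grants a permission when all of its
    # bits are set, e.g. ADMINISTER (0xff) satisfies any check because
    # (0xff & p) == p for every permission p defined above.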
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
# True if user is assigned this role by default
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (
Permission.GENERAL, 'main', True
),
'AgencyWorker': (
Permission.AGENCY_WORKER, 'main', False
),
'Administrator': (
Permission.ADMINISTER, 'admin', False # grants all permissions
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(64), index=True)
last_name = db.Column(db.String(64), index=True)
email = db.Column(db.String(64), unique=True, index=True)
phone_number = db.Column(db.String(16), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
incident_reports = db.relationship('IncidentReport',
backref='user',
lazy='select')
# also related to agencies via the agency_user_table
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.role.permissions == Permission.ADMINISTER
def is_worker(self):
return self.role.permissions == Permission.AGENCY_WORKER
def is_agency_worker(self):
return self.can(Permission.AGENCY_WORKER)
def is_general_user(self):
return self.can(Permission.GENERAL)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
Generate a password reset change token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
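    # Sketched usage of the verifier methods below: a token produced by one of
    # the generate_* helpers above is emailed to the user, and the matching
    # verifier loads it with the same SECRET_KEY, rejecting bad or expired
    # signatures and ids that don't belong to this user.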
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
db.session.commit()
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
@staticmethod
def generate_fake(count=10, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice, sample, randint
from faker import Faker
fake = Faker()
roles = Role.query.all()
agencies = Agency.query.all()
seed()
for i in range(count):
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
phone_number='+1{}'.format(''.join([str(randint(0, 9))
for _ in range(0, 10)])),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs
)
if u.role.name == 'AgencyWorker':
u.agencies = sample(agencies, randint(1, len(agencies)))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Conv2D op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"enable_layout_optimizer", False,
"If true, enables layout optimizer to update input data format for faster "
"execution of convolution ops.")
def build_graph(device, dtype, data_format, input_shape, filter_shape, strides,
padding, num_iters, warmup_iters):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
dtype: Data type for the convolution.
data_format: A string from: "NHWC" or "NCHW". Data format for input and
output data.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use.
num_iters: number of iterations to run conv2d.
warmup_iters: number of iterations for warmup runs.
Returns:
An array of tensors to run()
"""
with ops.device("/%s:0" % device):
inp = variables.Variable(
random_ops.truncated_normal(input_shape, dtype=dtype))
filt = variables.Variable(
random_ops.truncated_normal(filter_shape, dtype=dtype))
outputs = []
conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
outputs.append(conv2d_op)
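    # Chain each conv2d on the previous one via control dependencies so a
    # single session.run() executes num_iters convolutions back to back.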
for _ in range(1, num_iters):
with ops.control_dependencies([conv2d_op]):
conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
outputs.append(conv2d_op)
warmup_groups = []
warmup_conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
warmup_groups.append(warmup_conv2d_op)
for _ in range(1, warmup_iters):
with ops.control_dependencies([warmup_conv2d_op]):
warmup_conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
warmup_groups.append(warmup_conv2d_op)
return control_flow_ops.group(*warmup_groups), control_flow_ops.group(
*outputs)
class Conv2DBenchmark(test.Benchmark):
"""Benchmark conv2d!"""
def _run_graph(self, device, dtype, data_format, input_shape, filter_shape,
strides, padding, num_iters, warmup_iters):
"""runs the graph and print its execution time.
Args:
device: String, the device to run on.
dtype: Data type for the convolution.
data_format: A string from: "NHWC" or "NCHW". Data format for input and
output data.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
      padding: A string from: "SAME", "VALID". The type of padding
        algorithm to use.
num_iters: number of iterations to run conv2d.
warmup_iters: number of iterations for warmup runs.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
warmup_outputs, outputs = build_graph(device, dtype, data_format,
input_shape, filter_shape, strides,
padding, num_iters, warmup_iters)
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.opt_level = -1
rewrite_options = config.graph_options.rewrite_options
    # The layout optimizer may rewrite the input data_format; keep it off by
    # default and enable it only when --enable_layout_optimizer is set.
rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.ON if FLAGS.enable_layout_optimizer
else rewriter_config_pb2.RewriterConfig.OFF)
# Convolution ops are effectively noop in the test graph as we are not
# fetching the convolution outputs. Disable dependency optimizer to not
# remove the conv ops.
rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(graph=graph, config=config) as session:
# TODO(hinsu): Use run_op_benchmark method from test.Benchmark to run
# benchmark along with warmup.
variables.global_variables_initializer().run()
# warmup runs
session.run(warmup_outputs)
start_time = time.time()
session.run(outputs)
duration = (time.time() - start_time) / num_iters
print("%s %s %s inputshape:%s filtershape:%s strides:%s padding:%s "
"%d iters: %.8f sec" %
(device, str(dtype), data_format, str(input_shape).replace(
" ", ""), str(filter_shape).replace(" ", ""),
str(strides).replace(" ", ""), padding, num_iters, duration))
name_template = (
"conv2d_{device}_{datatype}_{data_format}_input_shape_{inputshape}_"
"filter_shape_{filtershape}_strides_{strides}_padding_{padding}")
self.report_benchmark(
name=name_template.format(
device=device,
datatype=str(dtype),
data_format=str(data_format),
inputshape=str(input_shape).replace(" ", ""),
filtershape=str(filter_shape).replace(" ", ""),
strides=str(strides).replace(" ", ""),
padding=padding).replace(" ", ""),
iters=num_iters,
wall_time=duration)
return duration
def benchmark_conv2d(self):
print("conv2d benchmark:")
data_types = [dtypes.float32, dtypes.float16]
data_formats = ["NHWC", "NCHW"]
in_channels = list(range(3, 16))
out_channels = [4, 16, 32]
hw_strides = [[2, 2]]
paddings = ["VALID", "SAME"]
args_lists = [
data_types, data_formats, in_channels, out_channels, hw_strides,
paddings
]
for args in itertools.product(*args_lists):
dtype, data_format, in_channel, out_channel, hw_stride, padding = args
# Keep batch size same as out channels just to reduce the number of
# different configurations to benchmark.
batch_size = out_channel
h, w, fh, fw = 500, 500, 3, 3
if data_format == "NHWC":
ishape = [batch_size, h, w, in_channel]
stride = [1] + hw_stride + [1]
elif data_format == "NCHW":
ishape = [batch_size, in_channel, h, w]
stride = [1, 1] + hw_stride
else:
raise ValueError("Unknown data_format: " + str(data_format))
fshape = [fh, fw, in_channel, out_channel]
num_iters = 80
warmup_iters = 2
self._run_graph("gpu", dtype, data_format, ishape, fshape, stride,
padding, num_iters, warmup_iters)
if __name__ == "__main__":
test.main()
# -*- Mode: Python -*-
# $Id: filesys.py,v 1.9 2003/12/24 16:10:56 akuchling Exp $
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#
# We want to provide a complete wrapper around any and all
# filesystem operations.
# this class is really just for documentation,
# identifying the API for a filesystem object.
# opening files for reading, and listing directories, should
# return a producer.
class abstract_filesystem:
def __init__ (self):
pass
def current_directory (self):
"Return a string representing the current directory."
pass
def listdir (self, path, long=0):
"""Return a listing of the directory at 'path' The empty string
indicates the current directory. If 'long' is set, instead
return a list of (name, stat_info) tuples
"""
pass
def open (self, path, mode):
"Return an open file object"
pass
def stat (self, path):
"Return the equivalent of os.stat() on the given path."
pass
def isdir (self, path):
"Does the path represent a directory?"
pass
def isfile (self, path):
"Does the path represent a plain file?"
pass
def cwd (self, path):
"Change the working directory."
pass
def cdup (self):
"Change to the parent of the current directory."
pass
def longify (self, path):
"""Return a 'long' representation of the filename
[for the output of the LIST command]"""
pass
# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.
# security considerations: can symbolic links be used to 'escape' the
# root? should we allow it? if not, then we could scan the
# filesystem on startup, but that would not help if they were added
# later. We will probably need to check for symlinks in the cwd method.
# what to do if wd is an invalid directory?
import os
import stat
import re
import string
import time
def safe_stat (path):
try:
return (path, os.stat (path))
except:
return None
import glob
class os_filesystem:
path_module = os.path
# set this to zero if you want to disable pathname globbing.
# [we currently don't glob, anyway]
do_globbing = 1
def __init__ (self, root, wd='/'):
self.root = root
self.wd = wd
def current_directory (self):
return self.wd
def isfile (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
return self.path_module.isfile (self.translate(p))
def isdir (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
return self.path_module.isdir (self.translate(p))
def cwd (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
translated_path = self.translate(p)
if not self.path_module.isdir (translated_path):
return 0
else:
old_dir = os.getcwd()
# temporarily change to that directory, in order
# to see if we have permission to do so.
try:
can = 0
try:
os.chdir (translated_path)
can = 1
self.wd = p
except:
pass
finally:
if can:
os.chdir (old_dir)
return can
def cdup (self):
return self.cwd ('..')
def listdir (self, path, long=0):
p = self.translate (path)
# I think we should glob, but limit it to the current
# directory only.
ld = os.listdir (p)
if not long:
return list_producer (ld, None)
else:
old_dir = os.getcwd()
try:
os.chdir (p)
# if os.stat fails we ignore that file.
result = filter (None, map (safe_stat, ld))
finally:
os.chdir (old_dir)
return list_producer (result, self.longify)
# TODO: implement a cache w/timeout for stat()
def stat (self, path):
p = self.translate (path)
return os.stat (p)
def open (self, path, mode):
p = self.translate (path)
return open (p, mode)
def unlink (self, path):
p = self.translate (path)
return os.unlink (p)
def mkdir (self, path):
p = self.translate (path)
return os.mkdir (p)
def rmdir (self, path):
p = self.translate (path)
return os.rmdir (p)
def rename(self, src, dst):
return os.rename(self.translate(src),self.translate(dst))
# utility methods
def normalize (self, path):
# watch for the ever-sneaky '/+' path element
path = re.sub('/+', '/', path)
p = self.path_module.normpath (path)
# remove 'dangling' cdup's.
if len(p) > 2 and p[:3] == '/..':
p = '/'
return p
def translate (self, path):
# we need to join together three separate
# path components, and do it safely.
# <real_root>/<current_directory>/<path>
# use the operating system's path separator.
path = string.join (string.split (path, '/'), os.sep)
p = self.normalize (self.path_module.join (self.wd, path))
p = self.normalize (self.path_module.join (self.root, p[1:]))
return p
def longify (self, (path, stat_info)):
return unix_longify (path, stat_info)
def __repr__ (self):
return '<unix-style fs root:%s wd:%s>' % (
self.root,
self.wd
)
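# A usage sketch (illustrative, not part of the original module): with a
# false root, every path is resolved inside the jail, e.g.
#
#     fs = os_filesystem('/home/ftp')
#     fs.translate('/pub/file.txt')    # -> '/home/ftp/pub/file.txt'
#     fs.translate('/../etc/passwd')   # -> '/home/ftp/etc/passwd'; the '..'
#                                      #    cannot climb out of the false root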
if os.name == 'posix':
class unix_filesystem (os_filesystem):
pass
class schizophrenic_unix_filesystem (os_filesystem):
PROCESS_UID = os.getuid()
PROCESS_EUID = os.geteuid()
PROCESS_GID = os.getgid()
PROCESS_EGID = os.getegid()
def __init__ (self, root, wd='/', persona=(None, None)):
os_filesystem.__init__ (self, root, wd)
self.persona = persona
def become_persona (self):
        if self.persona != (None, None):  # '!=', not 'is not': compare values, not identity
uid, gid = self.persona
# the order of these is important!
os.setegid (gid)
os.seteuid (uid)
def become_nobody (self):
        if self.persona != (None, None):
os.seteuid (self.PROCESS_UID)
os.setegid (self.PROCESS_GID)
# cwd, cdup, open, listdir
def cwd (self, path):
try:
self.become_persona()
return os_filesystem.cwd (self, path)
finally:
self.become_nobody()
    def cdup (self):
try:
self.become_persona()
return os_filesystem.cdup (self)
finally:
self.become_nobody()
def open (self, filename, mode):
try:
self.become_persona()
return os_filesystem.open (self, filename, mode)
finally:
self.become_nobody()
def listdir (self, path, long=0):
try:
self.become_persona()
return os_filesystem.listdir (self, path, long)
finally:
self.become_nobody()
# For the 'real' root, we could obtain a list of drives, and then
# use that. Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]
class msdos_filesystem (os_filesystem):
def longify (self, (path, stat_info)):
return msdos_longify (path, stat_info)
# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
# to be the most general idea. So you'd use a 'mount' method to place
# another filesystem somewhere in the hierarchy.
# Note: this is most likely how I will handle ~user directories
# with the http server.
class merged_filesystem:
def __init__ (self, *fsys):
pass
# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.
def msdos_longify (file, stat_info):
if stat.S_ISDIR (stat_info[stat.ST_MODE]):
dir = '<DIR>'
else:
dir = ' '
date = msdos_date (stat_info[stat.ST_MTIME])
return '%s %s %8d %s' % (
date,
dir,
stat_info[stat.ST_SIZE],
file
)
def msdos_date (t):
try:
info = time.gmtime (t)
except:
info = time.gmtime (0)
# year, month, day, hour, minute, second, ...
hour = info[3]
if hour > 11:
merid = 'PM'
hour = hour - 12
else:
merid = 'AM'
return '%02d-%02d-%02d %02d:%02d%s' % (
info[1],
info[2],
info[0]%100,
hour,
info[4],
merid
)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mode_table = {
'0':'---',
'1':'--x',
'2':'-w-',
'3':'-wx',
'4':'r--',
'5':'r-x',
'6':'rw-',
'7':'rwx'
}
def unix_longify (file, stat_info):
# for now, only pay attention to the lower bits
mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
mode = string.join (map (lambda x: mode_table[x], mode), '')
if stat.S_ISDIR (stat_info[stat.ST_MODE]):
dirchar = 'd'
else:
dirchar = '-'
date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
return '%s%s %3d %-8d %-8d %8d %s %s' % (
dirchar,
mode,
stat_info[stat.ST_NLINK],
stat_info[stat.ST_UID],
stat_info[stat.ST_GID],
stat_info[stat.ST_SIZE],
date,
file
)
# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
# Oct 19 1995
# otherwise, it looks like this:
# Oct 19 17:33
def ls_date (now, t):
try:
info = time.gmtime (t)
except:
info = time.gmtime (0)
    # 180 days in seconds: 86,400 * 180 == 15,552,000
    if (now - t) > 15552000:
return '%s %2d %d' % (
months[info[1]-1],
info[2],
info[0]
)
else:
return '%s %2d %02d:%02d' % (
months[info[1]-1],
info[2],
info[3],
info[4]
)
# ===========================================================================
# Producers
# ===========================================================================
class list_producer:
def __init__ (self, list, func=None):
self.list = list
self.func = func
# this should do a pushd/popd
def more (self):
if not self.list:
return ''
else:
# do a few at a time
bunch = self.list[:50]
if self.func is not None:
bunch = map (self.func, bunch)
self.list = self.list[50:]
return string.joinfields (bunch, '\r\n') + '\r\n'
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': 'ak-%s' % router.tenant_id
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
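# Worked example of the membership test above (editorial note, not part of
# the original module): if the external interface carries '172.16.77.10/24',
# then netaddr.IPAddress('172.16.77.10') in netaddr.IPNetwork('172.16.77.0/24')
# is True, so that subnet's gateway_ip is the one returned.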
def load_provider_rules(path):
    try:
        return jsonutils.load(open(path))
    except:  # pragma nocover
        LOG.exception('unable to open provider rules: %s', path)
        # callers (e.g. build_config) call .get() on the result,
        # so return an empty dict rather than None
        return {}
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
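if __name__ == '__main__':
    # Editorial sketch, not part of the original module: the hostname
    # derivation used by _allocation_config() replaces dots/colons in the
    # lowest fixed IP with dashes and appends '.local'.
    print(re.compile('[:.]').sub('-', '192.168.1.5') + '.local')
    # prints: 192-168-1-5.local (works for IPv6 too, since ':' matches)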
| |
# orm/state.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from .. import util
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF
from . import base
class InstanceState(interfaces._InspectionAttr):
"""tracks state information at the instance level."""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = ()
insert_order = None
_strong_obj = None
modified = False
expired = False
deleted = False
_load_pending = False
is_instance = True
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.callables = {}
self.committed_state = {}
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
"""
return util.ImmutableProperties(
dict(
(key, AttributeState(self, key))
for key in self.manager
)
)
@property
def transient(self):
"""Return true if the object is transient."""
return self.key is None and \
not self._attached
@property
def pending(self):
"""Return true if the object is pending."""
return self.key is None and \
self._attached
@property
def persistent(self):
"""Return true if the object is persistent."""
return self.key is not None and \
self._attached
@property
def detached(self):
"""Return true if the object is detached."""
return self.key is not None and \
not self._attached
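    # Editorial summary (not in the original source): the four properties
    # above form a two-by-two matrix over (key, _attached):
    #
    #                    not _attached    _attached
    #    key is None     transient        pending
    #    key is set      detached         persistent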
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return self.session_id is not None and \
self.session_id in sessionlib._sessions
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available."""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is transient or pending
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`.Mapper` used for this mapepd object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
def _detach(self):
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
instance_dict = self._instance_dict()
if instance_dict:
instance_dict.discard(self)
self.callables = {}
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:]
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
manager.dispatch.init_failure(self, args, kwargs)
raise
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {'instance': self.obj()}
state_dict.update(
(k, self.__dict__[k]) for k in (
'committed_state', '_pending_mutations', 'modified', 'expired',
'callables', 'key', 'parents', 'load_options',
'class_',
) if k in self.__dict__
)
if self.load_path:
state_dict['load_path'] = self.load_path.serialize()
state_dict['manager'] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
            # "instance" may be None here (generally new as of 0.7.4,
            # due to storage of state in "parents"); "class_" is
            # also new.
self.obj = None
self.class_ = state_dict['class_']
self.committed_state = state_dict.get('committed_state', {})
self._pending_mutations = state_dict.get('_pending_mutations', {})
self.parents = state_dict.get('parents', {})
self.modified = state_dict.get('modified', False)
self.expired = state_dict.get('expired', False)
self.callables = state_dict.get('callables', {})
self.__dict__.update([
(k, state_dict[k]) for k in (
'key', 'load_options',
) if k in state_dict
])
if 'load_path' in state_dict:
self.load_path = PathRegistry.\
deserialize(state_dict['load_path'])
state_dict['manager'](self, inst, state_dict)
def _initialize(self, key):
"""Set this attribute to an empty value or collection,
based on the AttributeImpl in use."""
self.manager.get_impl(key).initialize(self, self.dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.callables.pop(key, None)
def _expire_attribute_pre_commit(self, dict_, key):
"""a fast expire that can be called by column loaders during a load.
The additional bookkeeping is finished up in commit_all().
Should only be called for scalar attributes.
This method is actually called a lot with joined-table
loading, when the second table isn't present in the result.
"""
dict_.pop(key, None)
self.callables[key] = self
@classmethod
def _row_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.modified = False
self._strong_obj = None
self.committed_state.clear()
InstanceState._pending_mutations._reset(self)
# clear out 'parents' collection. not
# entirely clear how we can best determine
# which to remove, or not.
InstanceState.parents._reset(self)
for key in self.manager:
impl = self.manager[key].impl
if impl.accepts_scalar_loader and \
(impl.expire_missing or key in dict_):
self.callables[key] = self
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names):
pending = self.__dict__.get('_pending_mutations', None)
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
self.callables[key] = self
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def __call__(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.\
intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
for k in toload.intersection(self.callables):
del self.callables[k]
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
@property
def expired_attributes(self):
"""Return the set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs.
"""
return set([k for k, v in self.callables.items() if v is self])
def _instance_dict(self):
return None
def _modified_event(self, dict_, attr, previous, collection=False):
if not attr.send_modified_events:
return
if attr.key not in self.committed_state:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) \
or not self.modified:
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (
self.manager[attr.key],
base.state_class_str(self)
))
self.modified = True
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
for key in set(self.callables).\
intersection(keys).\
intersection(dict_):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers/callables for attributes loaded are removed.
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter, instance_dict=None):
"""Mass version of commit_all()."""
for state, dict_ in iter:
state.committed_state.clear()
InstanceState._pending_mutations._reset(state)
callables = state.callables
for key in list(callables):
if key in dict_ and callables[key] is state:
del callables[key]
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_)
@property
def history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key,
PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key,
PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
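if __name__ == '__main__':
    # Editorial sketch, not part of SQLAlchemy itself (run via
    # "python -m sqlalchemy.orm.state" so the relative imports resolve):
    # PendingCollection buffers membership changes made while the real
    # collection is still unloaded.
    pc = PendingCollection()
    item = object()
    pc.append(item)     # queued as a pending addition
    pc.remove(item)     # cancels the pending addition
    pc.remove(item)     # now recorded as a pending removal
    assert item not in pc.added_items
    assert item in pc.deleted_items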
| |
import random
import sys
import datetime
def subnet_calc():
try:
print("\n")
now = datetime.datetime.now()
print('Starting Subnet Validator at ' + now.strftime("%Y-%m-%d %H:%M"))
#Get IP address and check if valid
while True:
ip_address = input("Enter an IP address: ")
            #evaluate each octet (int() raises ValueError on junk input,
            #which previously crashed the script)
            a = ip_address.split('.')
            try:
                octets = [int(octet) for octet in a]
            except ValueError:
                print("\nINVALID IP address. Please check your values and retry!\n")
                continue
            if (len(octets) == 4) and (1 <= octets[0] <= 223) and (octets[0] != 127) and (octets[0] != 169 or octets[1] != 254) and (0 <= octets[1] <= 255 and 0 <= octets[2] <= 255 and 0 <= octets[3] <= 255):
                break
            else:
                print("\nINVALID IP address. Please check your values and retry!\n")
                continue
masks = [255, 254, 252, 248, 240, 224, 192, 128, 0]
#Validate subnet mask
while True:
subnet_mask = input("Enter a subnet mask: ")
            #Check octets (int() raises ValueError on junk input,
            #which previously crashed the script)
            b = subnet_mask.split('.')
            try:
                mask_octets = [int(octet) for octet in b]
            except ValueError:
                print("\nThe subnet mask is INVALID! Please use four numeric octets, e.g. 255.255.255.0.\n")
                continue
            if (len(mask_octets) == 4) and (mask_octets[0] == 255) and (mask_octets[1] in masks) and (mask_octets[2] in masks) and (mask_octets[3] in masks) and (mask_octets[0] >= mask_octets[1] >= mask_octets[2] >= mask_octets[3]):
                break
            else:
                prompt = ("\nThe subnet mask is INVALID! One sample is 255.255.255.0 for a common /24 subnet.")
                prompt += ("\nPlease ensure you have four octets in your subnet mask.")
                print(prompt)
                continue
#Subnet identification
#Convert mask to binary string
mask_octets_padded = []
mask_octets_decimal = subnet_mask.split(".")
#print mask_octets_decimal
for octet_index in range(0, len(mask_octets_decimal)):
#print bin(int(mask_octets_decimal[octet_index]))
binary_octet = bin(int(mask_octets_decimal[octet_index])).split("b")[1]
#print binary_octet
if len(binary_octet) == 8:
mask_octets_padded.append(binary_octet)
elif len(binary_octet) < 8:
binary_octet_padded = binary_octet.zfill(8)
mask_octets_padded.append(binary_octet_padded)
#print mask_octets_padded
        binary_mask = "".join(mask_octets_padded)
        print(binary_mask) #Example: for 255.255.255.0 => 11111111111111111111111100000000
        #Counting host bits in the mask and calculating number of hosts/subnet
        no_of_zeros = binary_mask.count("0")
no_of_ones = 32 - no_of_zeros
        no_of_hosts = abs(2 ** no_of_zeros - 2) #abs() keeps the /32 edge case non-negative (2 ** 0 - 2 == -1)
#print no_of_zeros
#print no_of_ones
#print no_of_hosts
#Obtaining wildcard mask
wildcard_octets = []
for w_octet in mask_octets_decimal:
wild_octet = 255 - int(w_octet)
wildcard_octets.append(str(wild_octet))
#print wildcard_octets
wildcard_mask = ".".join(wildcard_octets)
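        # e.g. mask 255.255.255.0 gives wildcard 0.0.0.255 (each octet is 255 - x)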
#print wildcard_mask
#Convert IP to binary string
ip_octets_padded = []
ip_octets_decimal = ip_address.split(".")
for octet_index in range(0, len(ip_octets_decimal)):
binary_octet = bin(int(ip_octets_decimal[octet_index])).split("b")[1]
if len(binary_octet) < 8:
binary_octet_padded = binary_octet.zfill(8)
ip_octets_padded.append(binary_octet_padded)
else:
ip_octets_padded.append(binary_octet)
#print ip_octets_padded
binary_ip = "".join(ip_octets_padded)
#print binary_ip #Example: for 192.168.2.100 => 11000000101010000000001001100100
#Obtain the network address and broadcast address from the binary strings obtained above
network_address_binary = binary_ip[:(no_of_ones)] + "0" * no_of_zeros
print("Binary address %s" % network_address_binary)
broadcast_address_binary = binary_ip[:(no_of_ones)] + "1" * no_of_zeros
#print broadcast_address_binary
net_ip_octets = []
for octet in range(0, len(network_address_binary), 8):
net_ip_octet = network_address_binary[octet:octet+8]
net_ip_octets.append(net_ip_octet)
#print net_ip_octets
net_ip_address = []
for each_octet in net_ip_octets:
net_ip_address.append(str(int(each_octet, 2)))
#print net_ip_address
network_address = ".".join(net_ip_address)
#print network_address
bst_ip_octets = []
for octet in range(0, len(broadcast_address_binary), 8):
bst_ip_octet = broadcast_address_binary[octet:octet+8]
bst_ip_octets.append(bst_ip_octet)
#print bst_ip_octets
bst_ip_address = []
for each_octet in bst_ip_octets:
bst_ip_address.append(str(int(each_octet, 2)))
#print bst_ip_address
broadcast_address = ".".join(bst_ip_address)
#print broadcast_address
#Results for selected IP/mask
print("\n")
print("Network address is: %s" % network_address)
print("Broadcast address is: %s" % broadcast_address)
print("Number of valid hosts per subnet: %s" % no_of_hosts)
print("Wildcard mask: %s" % wildcard_mask)
print("Mask bits: %s" % no_of_ones)
print("\n")
############# Application #1 - Part #4 #############
#Generation of random IP in subnet
while True:
generate = input("Generate random ip address from subnet? (y/n)")
if generate == "y":
generated_ip = []
#Obtain available IP address in range, based on the difference between octets in broadcast address and network address
for indexb, oct_bst in enumerate(bst_ip_address):
#print indexb, oct_bst
for indexn, oct_net in enumerate(net_ip_address):
#print indexn, oct_net
if indexb == indexn:
if oct_bst == oct_net:
#Add identical octets to the generated_ip list
generated_ip.append(oct_bst)
else:
#Generate random number(s) from within octet intervals and append to the list
generated_ip.append(str(random.randint(int(oct_net), int(oct_bst))))
#IP address generated from the subnet pool
#print generated_ip
y_iaddr = ".".join(generated_ip)
#print y_iaddr
print("Random IP address is: %s" % y_iaddr)
print("\n")
continue
else:
print("Thank you for checking the address!\n")
break
except KeyboardInterrupt:
print("\n\nProgram aborted by user. Exiting...\n")
sys.exit()
#Calling the function only when run as a script
if __name__ == '__main__':
    subnet_calc()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for writing summary data, for use in analysis and visualization.
See the [Summaries and
TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.framework.summary_pb2 import SummaryMetadata as _SummaryMetadata
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.distribute import summary_op_util as _distribute_summary_op_util
from tensorflow.python.eager import context as _context
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import gen_summary_ops as _gen_summary_ops # pylint: disable=unused-import
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['summary.scalar'])
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
@compatibility(TF2)
This API is not compatible with eager execution or `tf.function`. To migrate
to TF2, please use `tf.summary.scalar` instead. Please check
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x) for concrete
steps for migration. `tf.summary.scalar` can also log training metrics in
Keras, you can check [Logging training metrics in
  Keras](https://www.tensorflow.org/tensorboard/scalars_and_keras) for details.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :------------ | :-------------- | :------------------------------------- |
| `name` | `name` | - |
| `tensor` | `data` | - |
| - | `step` | Explicit int64-castable monotonic step |
: : : value. If omitted, this defaults to :
: : : `tf.summary.experimental.get_step()`. :
| `collections` | Not Supported | - |
| `family` | Removed | Please use `tf.name_scope` instead to |
: : : manage summary name prefix. :
| - | `description` | Optional long-form `str` description |
: : : for the summary. Markdown is supported.:
: : : Defaults to empty. :
@end_compatibility
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
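# Editorial usage sketch for scalar() above (not part of the original file),
# for TF1 graph mode:
#
#   loss = tf.compat.v1.placeholder(tf.float32)
#   loss_summary = tf.compat.v1.summary.scalar('loss', loss)
#   # later: summ = sess.run(loss_summary, feed_dict={loss: 0.1})
#   #        writer.add_summary(summ, global_step=step)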
@tf_export(v1=['summary.image'])
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
@compatibility(TF2)
This API is not compatible with eager execution and `tf.function`. To migrate
to TF2, please use `tf.summary.image` instead. Please check
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x) for concrete
steps for migration.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :------------ | :-------------- | :------------------------------------- |
| `name` | `name` | - |
| `tensor` | `data` | - |
| - | `step` | Explicit int64-castable monotonic step |
: : : value. If omitted, this defaults to :
: : : `tf.summary.experimental.get_step()`. :
| `max_outputs` | `max_outputs` | - |
| `collections` | Not Supported | - |
| `family` | Removed | Please use `tf.name_scope` instead |
: : : to manage summary name prefix. :
| - | `description` | Optional long-form `str` description |
: : : for the summary. Markdown is supported.:
: : : Defaults to empty. :
@end_compatibility
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
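# Editorial illustration of the float normalization described in image()
# above (not part of the original file): an all-positive batch like
# [0.5, 1.0] is rescaled so the largest value hits 255 (-> roughly
# [127, 255]); a batch containing negatives like [-1.0, 1.0] is shifted so
# 0.0 sits at 127 and then rescaled, landing roughly at [0, 255].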
@tf_export(v1=['summary.histogram'])
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
@compatibility(TF2)
This API is not compatible with eager execution and `tf.function`. To migrate
to TF2, please use `tf.summary.histogram` instead. Please check
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x) for concrete
steps for migration.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :------------ | :-------------- | :------------------------------------- |
| `name` | `name` | - |
| `values` | `data` | - |
| - | `step` | Explicit int64-castable monotonic step |
: : : value. If omitted, this defaults to :
: : : `tf.summary.experimental.get_step()` :
| - | `buckets` | Optional positive `int` specifying |
: : : the histogram bucket number. :
| `collections` | Not Supported | - |
| `family` | Removed | Please use `tf.name_scope` instead |
: : : to manage summary name prefix. :
| - | `description` | Optional long-form `str` description |
: : : for the summary. Markdown is supported.:
: : : Defaults to empty. :
@end_compatibility
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
val = _gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.audio'])
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
    generated sequentially as '*name*/audio/0', '*name*/audio/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
@compatibility(TF2)
This API is not compatible with eager execution or `tf.function`. To migrate
to TF2, please use `tf.summary.audio` instead. Please check
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x) for concrete
steps for migration.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :------------ | :-------------- | :------------------------------------- |
| `name` | `name` | - |
| `tensor` | `data` | Input for this argument now must be |
: : : three-dimensional `[k, t, c]`, where :
: : : `k` is the number of audio clips, `t` :
: : : is the number of frames, and `c` is :
: : : the number of channels. Two-dimensional:
: : : input is no longer supported. :
| `sample_rate` | `sample_rate` | - |
| - | `step` | Explicit int64-castable monotonic step |
: : : value. If omitted, this defaults to :
: : : `tf.summary.experimental.get_step()`. :
| `max_outputs` | `max_outputs` | - |
| `collections` | Not Supported | - |
| `family` | Removed | Please use `tf.name_scope` instead to |
: : : manage summary name prefix. :
| - | `encoding` | Optional constant str for the desired |
: : : encoding. Check the docs for :
: : : `tf.summary.audio` for latest supported:
: : : audio formats. :
| - | `description` | Optional long-form `str` description |
: : : for the summary. Markdown is supported.:
: : : Defaults to empty. :
@end_compatibility
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops.audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.text'])
def text(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary api, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
@compatibility(TF2)
This API is not compatible with eager execution or `tf.function`. To migrate
to TF2, please use `tf.summary.text` instead. Please check
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x) for concrete
steps for migration.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :------------ | :-------------- | :------------------------------------- |
| `name` | `name` | - |
| `tensor` | `data` | - |
| - | `step` | Explicit int64-castable monotonic step |
: : : value. If omitted, this defaults to :
: : : `tf.summary.experimental.get_step()`. :
| `collections` | Not Supported | - |
| - | `description` | Optional long-form `str` description |
: : : for the summary. Markdown is supported.:
: : : Defaults to empty. :
@end_compatibility
"""
if tensor.dtype != _dtypes.string:
raise ValueError('Expected tensor %s to have dtype string, got %s' %
(tensor.name, tensor.dtype))
summary_metadata = _SummaryMetadata(
plugin_data=_SummaryMetadata.PluginData(plugin_name='text'))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
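# Editorial usage sketch for text() above (not part of the original file):
#
#   greeting = tf.constat('hello **world**')  # markdown is rendered
#   text_op = tf.compat.v1.summary.text('greeting', greeting)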
@tf_export(v1=['summary.tensor_summary'])
def tensor_summary(name,
tensor,
summary_description=None,
collections=None,
summary_metadata=None,
family=None,
display_name=None):
"""Outputs a `Summary` protocol buffer with a serialized tensor.proto.
Args:
name: A name for the generated node. If display_name is not set, it will
also serve as the tag name in TensorBoard. (In that case, the tag
name will inherit tf name scopes.)
tensor: A tensor of any type and shape to serialize.
summary_description: A long description of the summary sequence. Markdown
is supported.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
summary_metadata: Optional SummaryMetadata proto (which describes which
plugins may use the summary value).
family: Optional; if provided, used as the prefix of the summary tag,
which controls the name used for display on TensorBoard when
display_name is not set.
display_name: A string used to name this data in TensorBoard. If this is
not set, then the node name will be used instead.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if summary_metadata is None:
summary_metadata = _SummaryMetadata()
if summary_description is not None:
summary_metadata.summary_description = summary_description
if display_name is not None:
summary_metadata.display_name = display_name
serialized_summary_metadata = summary_metadata.SerializeToString()
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.tensor_summary_v2(
tensor=tensor,
tag=tag,
name=scope,
serialized_summary_metadata=serialized_summary_metadata)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.merge'])
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager mode enabled.
@compatibility(TF2)
This API is not compatible with eager execution or `tf.function`. To migrate
to TF2, this API can be omitted entirely, because in TF2 individual summary
ops, like `tf.summary.scalar()`, write directly to the default summary writer
if one is active. Thus, it's not necessary to merge summaries or to manually
add the resulting merged summary output to the writer. See the usage example
shown below.
For a comprehensive `tf.summary` migration guide, please follow
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x).
#### TF1 & TF2 Usage Example
TF1:
```python
dist = tf.compat.v1.placeholder(tf.float32, [100])
tf.compat.v1.summary.histogram(name="distribution", values=dist)
writer = tf.compat.v1.summary.FileWriter("/tmp/tf1_summary_example")
summaries = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
for step in range(100):
mean_moving_normal = np.random.normal(loc=step, scale=1, size=[100])
summ = sess.run(summaries, feed_dict={dist: mean_moving_normal})
writer.add_summary(summ, global_step=step)
```
TF2:
```python
writer = tf.summary.create_file_writer("/tmp/tf2_summary_example")
for step in range(100):
mean_moving_normal = np.random.normal(loc=step, scale=1, size=[100])
with writer.as_default(step=step):
tf.summary.histogram(name='distribution', data=mean_moving_normal)
```
@end_compatibility
"""
# pylint: enable=line-too-long
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
@tf_export(v1=['summary.merge_all'])
def merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None, name=None):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
scope: Optional scope used to filter the summary ops, using `re.match`.
name: A name for the operation (optional).
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(TF2)
This API is not compatible with eager execution or `tf.function`. To migrate
to TF2, this API can be omitted entirely, because in TF2 individual summary
ops, like `tf.summary.scalar()`, write directly to the default summary writer
if one is active. Thus, it's not necessary to merge summaries or to manually
add the resulting merged summary output to the writer. See the usage example
shown below.
For a comprehensive `tf.summary` migration guide, please follow
[Migrating tf.summary usage to
TF 2.0](https://www.tensorflow.org/tensorboard/migrate#in_tf_1x).
#### TF1 & TF2 Usage Example
TF1:
```python
dist = tf.compat.v1.placeholder(tf.float32, [100])
tf.compat.v1.summary.histogram(name="distribution", values=dist)
writer = tf.compat.v1.summary.FileWriter("/tmp/tf1_summary_example")
summaries = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
for step in range(100):
mean_moving_normal = np.random.normal(loc=step, scale=1, size=[100])
summ = sess.run(summaries, feed_dict={dist: mean_moving_normal})
writer.add_summary(summ, global_step=step)
```
TF2:
```python
writer = tf.summary.create_file_writer("/tmp/tf2_summary_example")
for step in range(100):
mean_moving_normal = np.random.normal(loc=step, scale=1, size=[100])
with writer.as_default(step=step):
tf.summary.histogram(name='distribution', data=mean_moving_normal)
```
@end_compatibility
"""
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
summary_ops = _ops.get_collection(key, scope=scope)
if not summary_ops:
return None
else:
return merge(summary_ops, name=name)
@tf_export(v1=['summary.get_summary_description'])
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
| |
# -*- coding: utf-8 -*-
#
# last tinkered with by korylprince at gmail.com on 2012-07-12
#
import sys
import logging
try:
import ldap
import ldap.filter
ldap.set_option(ldap.OPT_REFERRALS, 0)
except Exception, e:
logging.error('missing ldap, try "easy_install python-ldap"')
raise e
def ldap_auth(server='ldap', port=None,
base_dn='ou=users,dc=domain,dc=com',
mode='uid', secure=False, cert_path=None, cert_file=None,
bind_dn=None, bind_pw=None, filterstr='objectClass=*',
username_attrib='uid',
custom_scope='subtree',
allowed_groups=None,
manage_user=False,
user_firstname_attrib='cn:1',
user_lastname_attrib='cn:2',
user_mail_attrib='mail',
manage_groups=False,
db=None,
group_dn=None,
group_name_attrib='cn',
group_member_attrib='memberUid',
group_filterstr='objectClass=*',
logging_level='error'):
"""
to use ldap login with MS Active Directory:
from gluon.contrib.login_methods.ldap_auth import ldap_auth
auth.settings.login_methods.append(ldap_auth(
mode='ad', server='my.domain.controller',
base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with Notes Domino:
auth.settings.login_methods.append(ldap_auth(
mode='domino',server='my.domino.server'))
to use ldap login with OpenLDAP:
auth.settings.login_methods.append(ldap_auth(
server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with OpenLDAP and subtree search and (optionally)
multiple DNs:
auth.settings.login_methods.append(ldap_auth(
mode='uid_r', server='my.ldap.server',
base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com']))
or (if using CN):
auth.settings.login_methods.append(ldap_auth(
mode='cn', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com'))
    or you can fully customize the search for the user:
auth.settings.login_methods.append(ldap_auth(
mode='custom', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com',
username_attrib='uid',
custom_scope='subtree'))
the custom_scope can be: base, onelevel, subtree.
If using secure ldaps:// pass secure=True and cert_path="..."
    If your ldap library uses GnuTLS then you need cert_file="..." instead
    of cert_path, because cert_path isn't implemented in GnuTLS :(
If you need to bind to the directory with an admin account in order to
search it then specify bind_dn & bind_pw to use for this.
- currently only implemented for Active Directory
If you need to restrict the set of allowed users (e.g. to members of a
department) then specify an rfc4515 search filter string.
- currently only implemented for mode in ['ad', 'company', 'uid_r']
You can manage user attributes first name, last name, email from ldap:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_user=True,
user_firstname_attrib='cn:1',
user_lastname_attrib='cn:2',
user_mail_attrib='mail'
))
Where:
manage_user - let web2py handle user data from ldap
user_firstname_attrib - the attribute containing the user's first name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:1'='John'
user_lastname_attrib - the attribute containing the user's last name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:2'='Smith'
user_mail_attrib - the attribute containing the user's email address
If you need group control from ldap to web2py app's database feel free
to set:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_groups=True,
db=db,
group_dn='ou=Groups,dc=domain,dc=com',
group_name_attrib='cn',
group_member_attrib='memberUid',
group_filterstr='objectClass=*'
))
Where:
        manage_groups - let web2py handle the groups from ldap
db - is the database object (need to have auth_user, auth_group,
auth_membership)
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attribute containing the group members name
group_filterstr - as the filterstr but for group select
You can restrict login access to specific groups if you specify:
auth.settings.login_methods.append(ldap_auth(...as usual...,
allowed_groups=[...],
group_dn='ou=Groups,dc=domain,dc=com',
group_name_attrib='cn',
group_member_attrib='memberUid',#use 'member' for Active Directory
group_filterstr='objectClass=*'
))
Where:
allowed_groups - a list with allowed ldap group names
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attribute containing the group members name
group_filterstr - as the filterstr but for group select
If using Active Directory you must specify bind_dn and bind_pw for
allowed_groups unless anonymous bind works.
You can set the logging level with the "logging_level" parameter, default
is "error" and can be set to error, warning, info, debug.
"""
logger = logging.getLogger('web2py.auth.ldap_auth')
if logging_level == 'error':
logger.setLevel(logging.ERROR)
elif logging_level == 'warning':
logger.setLevel(logging.WARNING)
elif logging_level == 'info':
logger.setLevel(logging.INFO)
elif logging_level == 'debug':
logger.setLevel(logging.DEBUG)
def ldap_auth_aux(username,
password,
ldap_server=server,
ldap_port=port,
ldap_basedn=base_dn,
ldap_mode=mode,
ldap_binddn=bind_dn,
ldap_bindpw=bind_pw,
secure=secure,
cert_path=cert_path,
cert_file=cert_file,
filterstr=filterstr,
username_attrib=username_attrib,
custom_scope=custom_scope,
manage_user=manage_user,
user_firstname_attrib=user_firstname_attrib,
user_lastname_attrib=user_lastname_attrib,
user_mail_attrib=user_mail_attrib,
manage_groups=manage_groups,
allowed_groups=allowed_groups,
db=db):
if password == '': # http://tools.ietf.org/html/rfc4513#section-5.1.2
logger.warning('blank password not allowed')
return False
logger.debug('mode: [%s] manage_user: [%s] custom_scope: [%s]'
' manage_groups: [%s]' % (str(mode), str(manage_user),
str(custom_scope), str(manage_groups)))
if manage_user:
if user_firstname_attrib.count(':') > 0:
(user_firstname_attrib,
user_firstname_part) = user_firstname_attrib.split(':', 1)
user_firstname_part = (int(user_firstname_part) - 1)
else:
user_firstname_part = None
if user_lastname_attrib.count(':') > 0:
(user_lastname_attrib,
user_lastname_part) = user_lastname_attrib.split(':', 1)
user_lastname_part = (int(user_lastname_part) - 1)
else:
user_lastname_part = None
user_firstname_attrib = ldap.filter.escape_filter_chars(
user_firstname_attrib)
user_lastname_attrib = ldap.filter.escape_filter_chars(
user_lastname_attrib)
user_mail_attrib = ldap.filter.escape_filter_chars(
user_mail_attrib)
try:
if allowed_groups:
if not is_user_in_allowed_groups(username, password):
return False
con = init_ldap()
if ldap_mode == 'ad':
# Microsoft Active Directory
if '@' not in username:
domain = []
for x in ldap_basedn.split(','):
if "DC=" in x.upper():
domain.append(x.split('=')[-1])
username = "%s@%s" % (username, '.'.join(domain))
username_bare = username.split("@")[0]
con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
# In cases where ForestDnsZones and DomainDnsZones are found,
# result will look like the following:
# ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
# DC=domain,DC=com']
if ldap_binddn:
# need to search directory with an admin account 1st
con.simple_bind_s(ldap_binddn, ldap_bindpw)
else:
# credentials should be in the form of username@domain.tld
con.simple_bind_s(username, password)
# this will throw an index error if the account is not found
# in the ldap_basedn
requested_attrs = ['sAMAccountName']
if manage_user:
requested_attrs.extend([user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])
result = con.search_ext_s(
ldap_basedn, ldap.SCOPE_SUBTREE,
"(&(sAMAccountName=%s)(%s))" % (
ldap.filter.escape_filter_chars(username_bare),
filterstr),
requested_attrs)[0][1]
if not isinstance(result, dict):
# result should be a dict in the form
# {'sAMAccountName': [username_bare]}
logger.warning('User [%s] not found!' % username)
return False
if ldap_binddn:
# We know the user exists & is in the correct OU
# so now we just check the password
con.simple_bind_s(username, password)
username = username_bare
if ldap_mode == 'domino':
# Notes Domino
if "@" in username:
username = username.split("@")[0]
con.simple_bind_s(username, password)
if manage_user:
# TODO: sorry I have no clue how to query attrs in domino
result = {user_firstname_attrib: username,
user_lastname_attrib: None,
user_mail_attrib: None}
if ldap_mode == 'cn':
# OpenLDAP (CN)
dn = "cn=" + username + "," + ldap_basedn
con.simple_bind_s(dn, password)
if manage_user:
result = con.search_s(dn, ldap.SCOPE_BASE,
"(objectClass=*)",
[user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])[0][1]
if ldap_mode == 'uid':
# OpenLDAP (UID)
dn = "uid=" + username + "," + ldap_basedn
con.simple_bind_s(dn, password)
if manage_user:
result = con.search_s(dn, ldap.SCOPE_BASE,
"(objectClass=*)",
[user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])[0][1]
if ldap_mode == 'company':
# no DNs or password needed to search directory
dn = ""
pw = ""
# bind anonymously
con.simple_bind_s(dn, pw)
# search by e-mail address
filter = '(&(mail=%s)(%s))' % (
ldap.filter.escape_filter_chars(username),
filterstr)
# find the uid
attrs = ['uid']
if manage_user:
attrs.extend([user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])
# perform the actual search
company_search_result = con.search_s(ldap_basedn,
ldap.SCOPE_SUBTREE,
filter, attrs)
dn = company_search_result[0][0]
result = company_search_result[0][1]
# perform the real authentication test
con.simple_bind_s(dn, password)
if ldap_mode == 'uid_r':
# OpenLDAP (UID) with subtree search and multiple DNs
if isinstance(ldap_basedn, list):
basedns = ldap_basedn
else:
basedns = [ldap_basedn]
filter = '(&(uid=%s)(%s))' % (
ldap.filter.escape_filter_chars(username), filterstr)
found = False
for basedn in basedns:
try:
result = con.search_s(basedn, ldap.SCOPE_SUBTREE,
filter)
if result:
user_dn = result[0][0]
# Check the password
con.simple_bind_s(user_dn, password)
found = True
break
                    except ldap.LDAPError:
(exc_type, exc_value) = sys.exc_info()[:2]
logger.warning(
"ldap_auth: searching %s for %s resulted in %s: %s\n" %
(basedn, filter, exc_type, exc_value)
)
if not found:
logger.warning('User [%s] not found!' % username)
return False
result = result[0][1]
if ldap_mode == 'custom':
# OpenLDAP (username_attrs) with subtree search and
# multiple DNs
if isinstance(ldap_basedn, list):
basedns = ldap_basedn
else:
basedns = [ldap_basedn]
filter = '(&(%s=%s)(%s))' % (username_attrib,
ldap.filter.escape_filter_chars(
username),
filterstr)
if custom_scope == 'subtree':
ldap_scope = ldap.SCOPE_SUBTREE
elif custom_scope == 'base':
ldap_scope = ldap.SCOPE_BASE
                elif custom_scope == 'onelevel':
                    ldap_scope = ldap.SCOPE_ONELEVEL
                else:
                    # fall back to a subtree search so ldap_scope is
                    # always bound for unexpected custom_scope values
                    ldap_scope = ldap.SCOPE_SUBTREE
found = False
for basedn in basedns:
try:
result = con.search_s(basedn, ldap_scope, filter)
if result:
user_dn = result[0][0]
# Check the password
con.simple_bind_s(user_dn, password)
found = True
break
                    except ldap.LDAPError:
(exc_type, exc_value) = sys.exc_info()[:2]
logger.warning(
"ldap_auth: searching %s for %s resulted in %s: %s\n" %
(basedn, filter, exc_type, exc_value)
)
if not found:
logger.warning('User [%s] not found!' % username)
return False
result = result[0][1]
if manage_user:
logger.info('[%s] Manage user data' % str(username))
try:
if user_firstname_part is not None:
store_user_firstname = result[user_firstname_attrib][
0].split(' ', 1)[user_firstname_part]
else:
store_user_firstname = result[user_firstname_attrib][0]
                except KeyError:
store_user_firstname = None
try:
if user_lastname_part is not None:
store_user_lastname = result[user_lastname_attrib][
0].split(' ', 1)[user_lastname_part]
else:
store_user_lastname = result[user_lastname_attrib][0]
                except KeyError:
store_user_lastname = None
try:
store_user_mail = result[user_mail_attrib][0]
                except KeyError:
store_user_mail = None
try:
#
# user as username
# #################
user_in_db = db(db.auth_user.username == username)
if user_in_db.count() > 0:
user_in_db.update(first_name=store_user_firstname,
last_name=store_user_lastname,
email=store_user_mail)
else:
db.auth_user.insert(first_name=store_user_firstname,
last_name=store_user_lastname,
email=store_user_mail,
username=username)
                except Exception:
#
# user as email
# ##############
user_in_db = db(db.auth_user.email == username)
if user_in_db.count() > 0:
user_in_db.update(first_name=store_user_firstname,
last_name=store_user_lastname)
else:
db.auth_user.insert(first_name=store_user_firstname,
last_name=store_user_lastname,
email=username)
con.unbind()
if manage_groups:
if not do_manage_groups(username, password):
return False
return True
        except ldap.INVALID_CREDENTIALS:
return False
        except ldap.LDAPError:
import traceback
logger.warning('[%s] Error in ldap processing' % str(username))
logger.debug(traceback.format_exc())
return False
        except IndexError:  # for AD membership test
import traceback
logger.warning('[%s] Ldap result indexing error' % str(username))
logger.debug(traceback.format_exc())
return False
def is_user_in_allowed_groups(username,
password=None,
allowed_groups=allowed_groups):
"""
Figure out if the username is a member of an allowed group
in ldap or not
"""
        #
        # Get all group names the user currently belongs to in LDAP
        # ##########################################################
ldap_groups_of_the_user = get_user_groups_from_ldap(username, password)
# search for allowed group names
        if not isinstance(allowed_groups, list):
allowed_groups = [allowed_groups]
for group in allowed_groups:
            if group in ldap_groups_of_the_user:
# Match
return True
# No match
return False
def do_manage_groups(username,
password=None,
db=db):
"""
Manage user groups
Get all user's group from ldap and refresh the already stored
ones in web2py's application database or create new groups
according to ldap.
"""
logger.info('[%s] Manage user groups' % str(username))
try:
            #
            # Get all group names the user currently belongs to in LDAP
            # ##########################################################
ldap_groups_of_the_user = get_user_groups_from_ldap(
username, password)
            #
            # Get all group names the user currently belongs to in the local db
            # ##################################################################
try:
db_user_id = db(db.auth_user.username == username).select(
db.auth_user.id).first().id
            except AttributeError:
try:
db_user_id = db(db.auth_user.email == username).select(
db.auth_user.id).first().id
                except AttributeError:
#
# There is no user in local db
# We create one
# ##############################
try:
db_user_id = db.auth_user.insert(username=username,
first_name=username)
                    except AttributeError:
db_user_id = db.auth_user.insert(email=username,
first_name=username)
if not db_user_id:
                logger.error(
                    'There is no username or email for %s!' % username)
raise
db_group_search = db((db.auth_membership.user_id == db_user_id) &
(db.auth_user.id == db.auth_membership.user_id) &
(db.auth_group.id == db.auth_membership.group_id))
db_groups_of_the_user = list()
db_group_id = dict()
if db_group_search.count() > 0:
for group in db_group_search.select(db.auth_group.id,
db.auth_group.role,
distinct=True):
db_group_id[group.role] = group.id
db_groups_of_the_user.append(group.role)
            logger.debug('db groups of user %s: %s' %
                         (username, str(db_groups_of_the_user)))
#
# Delete user membership from groups where user is not anymore
# #############################################################
for group_to_del in db_groups_of_the_user:
                if group_to_del not in ldap_groups_of_the_user:
db((db.auth_membership.user_id == db_user_id) &
                       (db.auth_membership.group_id ==
db_group_id[group_to_del])).delete()
#
# Create user membership in groups where user is not in already
# ##############################################################
for group_to_add in ldap_groups_of_the_user:
                if group_to_add not in db_groups_of_the_user:
if db(db.auth_group.role == group_to_add).count() == 0:
gid = db.auth_group.insert(role=group_to_add,
description='Generated from LDAP')
else:
gid = db(db.auth_group.role == group_to_add).select(
db.auth_group.id).first().id
db.auth_membership.insert(user_id=db_user_id,
group_id=gid)
        except Exception:
logger.warning("[%s] Groups are not managed successfully!" %
str(username))
import traceback
logger.debug(traceback.format_exc())
return False
return True
def init_ldap(ldap_server=server,
ldap_port=port,
ldap_basedn=base_dn,
ldap_mode=mode,
secure=secure,
cert_path=cert_path,
cert_file=cert_file):
"""
        Initialize LDAP connection.
"""
logger.info('[%s] Initialize ldap connection' % str(ldap_server))
if secure:
if not ldap_port:
ldap_port = 636
con = ldap.initialize(
"ldaps://" + ldap_server + ":" + str(ldap_port))
if cert_path:
con.set_option(ldap.OPT_X_TLS_CACERTDIR, cert_path)
if cert_file:
con.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
else:
if not ldap_port:
ldap_port = 389
con = ldap.initialize(
"ldap://" + ldap_server + ":" + str(ldap_port))
return con
def get_user_groups_from_ldap(username,
password=None,
base_dn=base_dn,
ldap_binddn=bind_dn,
ldap_bindpw=bind_pw,
group_dn=group_dn,
group_name_attrib=group_name_attrib,
group_member_attrib=group_member_attrib,
group_filterstr=group_filterstr,
ldap_mode=mode):
"""
Get all group names from ldap where the user is in
"""
logger.info('[%s] Get user groups from ldap' % str(username))
        #
        # Get all group names the user currently belongs to in LDAP
        # ##########################################################
# Initialize ldap
if not group_dn:
group_dn = base_dn
con = init_ldap()
logger.debug('Username init: [%s]' % username)
if ldap_mode == 'ad':
#
# Get the AD username
# ####################
if '@' not in username:
domain = []
for x in base_dn.split(','):
if "DC=" in x.upper():
domain.append(x.split('=')[-1])
username = "%s@%s" % (username, '.'.join(domain))
username_bare = username.split("@")[0]
con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
# In cases where ForestDnsZones and DomainDnsZones are found,
# result will look like the following:
# ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
# DC=domain,DC=com']
if ldap_binddn:
# need to search directory with an admin account 1st
con.simple_bind_s(ldap_binddn, ldap_bindpw)
logger.debug('Ldap bind connect...')
else:
# credentials should be in the form of username@domain.tld
con.simple_bind_s(username, password)
logger.debug('Ldap username connect...')
        # group membership entries reference the user's full DN, so
        # look it up and use that instead of the bare username
username = con.search_ext_s(base_dn, ldap.SCOPE_SUBTREE,
"(&(sAMAccountName=%s)(%s))" %
(ldap.filter.escape_filter_chars(username_bare),
filterstr), ["cn"])[0][0]
else:
if ldap_binddn:
            # need to search the directory with the bind_dn account first
con.simple_bind_s(ldap_binddn, ldap_bindpw)
else:
# bind as anonymous
con.simple_bind_s('', '')
# if username is None, return empty list
if username is None:
return list()
        # search for groups the user belongs to
filter = '(&(%s=%s)(%s))' % (ldap.filter.escape_filter_chars(
group_member_attrib
),
ldap.filter.escape_filter_chars(username),
group_filterstr)
group_search_result = con.search_s(group_dn,
ldap.SCOPE_SUBTREE,
filter, [group_name_attrib])
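        # each result row is a (dn, attrs) tuple; collect every value of
        # the group-name attribute across all matching groups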
ldap_groups_of_the_user = list()
for group_row in group_search_result:
group = group_row[1]
            if isinstance(group, dict) and group_name_attrib in group:
ldap_groups_of_the_user.extend(group[group_name_attrib])
con.unbind()
logger.debug('User groups: %s' % ldap_groups_of_the_user)
return list(ldap_groups_of_the_user)
if filterstr[0] == '(' and filterstr[-1] == ')': # rfc4515 syntax
filterstr = filterstr[1:-1] # parens added again where used
return ldap_auth_aux
"""Support to embed Plex."""
import asyncio
from functools import partial
import logging
import plexapi.exceptions
from plexapi.gdm import GDM
from plexwebsocket import (
SIGNAL_CONNECTION_STATE,
STATE_CONNECTED,
STATE_DISCONNECTED,
STATE_STOPPED,
PlexWebsocket,
)
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from homeassistant.const import CONF_URL, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import device_registry as dev_reg, entity_registry as ent_reg
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from .const import (
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
GDM_DEBOUNCER,
GDM_SCANNER,
PLATFORMS,
PLATFORMS_COMPLETED,
PLEX_SERVER_CONFIG,
PLEX_UPDATE_LIBRARY_SIGNAL,
PLEX_UPDATE_PLATFORMS_SIGNAL,
SERVERS,
WEBSOCKETS,
)
from .errors import ShouldUpdateConfigEntry
from .server import PlexServer
from .services import async_setup_services
_LOGGER = logging.getLogger(__package__)
async def async_setup(hass, config):
"""Set up the Plex component."""
hass.data.setdefault(
PLEX_DOMAIN,
{SERVERS: {}, DISPATCHERS: {}, WEBSOCKETS: {}, PLATFORMS_COMPLETED: {}},
)
await async_setup_services(hass)
gdm = hass.data[PLEX_DOMAIN][GDM_SCANNER] = GDM()
def gdm_scan():
_LOGGER.debug("Scanning for GDM clients")
gdm.scan(scan_for_clients=True)
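    # debounce discovery: at most one GDM scan per 10-second cooldown,
    # with the first request firing immediately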
hass.data[PLEX_DOMAIN][GDM_DEBOUNCER] = Debouncer(
hass,
_LOGGER,
cooldown=10,
immediate=True,
function=gdm_scan,
).async_call
return True
async def async_setup_entry(hass, entry):
"""Set up Plex from a config entry."""
server_config = entry.data[PLEX_SERVER_CONFIG]
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry, unique_id=entry.data[CONF_SERVER_IDENTIFIER]
)
if MP_DOMAIN not in entry.options:
options = dict(entry.options)
options.setdefault(MP_DOMAIN, {})
hass.config_entries.async_update_entry(entry, options=options)
plex_server = PlexServer(
hass,
server_config,
entry.data[CONF_SERVER_IDENTIFIER],
entry.options,
entry.entry_id,
)
try:
await hass.async_add_executor_job(plex_server.connect)
except ShouldUpdateConfigEntry:
new_server_data = {
**entry.data[PLEX_SERVER_CONFIG],
CONF_URL: plex_server.url_in_use,
CONF_SERVER: plex_server.friendly_name,
}
hass.config_entries.async_update_entry(
entry, data={**entry.data, PLEX_SERVER_CONFIG: new_server_data}
)
except requests.exceptions.ConnectionError as error:
if entry.state != ENTRY_STATE_SETUP_RETRY:
_LOGGER.error(
"Plex server (%s) could not be reached: [%s]",
server_config[CONF_URL],
error,
)
raise ConfigEntryNotReady from error
except plexapi.exceptions.Unauthorized as ex:
raise ConfigEntryAuthFailed(
f"Token not accepted, please reauthenticate Plex server '{entry.data[CONF_SERVER]}'"
) from ex
except (
plexapi.exceptions.BadRequest,
plexapi.exceptions.NotFound,
) as error:
_LOGGER.error(
"Login to %s failed, verify token and SSL settings: [%s]",
entry.data[CONF_SERVER],
error,
)
return False
_LOGGER.debug(
"Connected to: %s (%s)", plex_server.friendly_name, plex_server.url_in_use
)
server_id = plex_server.machine_identifier
hass.data[PLEX_DOMAIN][SERVERS][server_id] = plex_server
hass.data[PLEX_DOMAIN][PLATFORMS_COMPLETED][server_id] = set()
entry.add_update_listener(async_options_updated)
unsub = async_dispatcher_connect(
hass,
PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id),
plex_server.async_update_platforms,
)
hass.data[PLEX_DOMAIN][DISPATCHERS].setdefault(server_id, [])
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
@callback
def plex_websocket_callback(msgtype, data, error):
"""Handle callbacks from plexwebsocket library."""
if msgtype == SIGNAL_CONNECTION_STATE:
if data == STATE_CONNECTED:
_LOGGER.debug("Websocket to %s successful", entry.data[CONF_SERVER])
hass.async_create_task(plex_server.async_update_platforms())
elif data == STATE_DISCONNECTED:
_LOGGER.debug(
"Websocket to %s disconnected, retrying", entry.data[CONF_SERVER]
)
# Stopped websockets without errors are expected during shutdown and ignored
elif data == STATE_STOPPED and error:
_LOGGER.error(
"Websocket to %s failed, aborting [Error: %s]",
entry.data[CONF_SERVER],
error,
)
hass.async_create_task(hass.config_entries.async_reload(entry.entry_id))
elif msgtype == "playing":
hass.async_create_task(plex_server.async_update_session(data))
elif msgtype == "status":
if data["StatusNotification"][0]["title"] == "Library scan complete":
async_dispatcher_send(
hass,
PLEX_UPDATE_LIBRARY_SIGNAL.format(server_id),
)
session = async_get_clientsession(hass)
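    # the callback above handles only "playing" (session) and "status"
    # (server notification) messages, so subscribe to just those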
subscriptions = ["playing", "status"]
verify_ssl = server_config.get(CONF_VERIFY_SSL)
websocket = PlexWebsocket(
plex_server.plex_server,
plex_websocket_callback,
subscriptions=subscriptions,
session=session,
verify_ssl=verify_ssl,
)
hass.data[PLEX_DOMAIN][WEBSOCKETS][server_id] = websocket
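    # start listening on the websocket only once every platform has
    # completed its setup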
def start_websocket_session(platform, _):
hass.data[PLEX_DOMAIN][PLATFORMS_COMPLETED][server_id].add(platform)
if hass.data[PLEX_DOMAIN][PLATFORMS_COMPLETED][server_id] == PLATFORMS:
hass.loop.create_task(websocket.listen())
def close_websocket_session(_):
websocket.close()
unsub = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, close_websocket_session
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
for platform in PLATFORMS:
task = hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
task.add_done_callback(partial(start_websocket_session, platform))
async_cleanup_plex_devices(hass, entry)
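    # fetch the owner account off the event loop; auth/request errors
    # are tolerated and simply yield no account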
def get_plex_account(plex_server):
try:
return plex_server.account
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
return None
await hass.async_add_executor_job(get_plex_account, plex_server)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
server_id = entry.data[CONF_SERVER_IDENTIFIER]
websocket = hass.data[PLEX_DOMAIN][WEBSOCKETS].pop(server_id)
websocket.close()
dispatchers = hass.data[PLEX_DOMAIN][DISPATCHERS].pop(server_id)
for unsub in dispatchers:
unsub()
tasks = [
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
await asyncio.gather(*tasks)
hass.data[PLEX_DOMAIN][SERVERS].pop(server_id)
return True
async def async_options_updated(hass, entry):
"""Triggered by config entry options updates."""
server_id = entry.data[CONF_SERVER_IDENTIFIER]
# Guard incomplete setup during reauth flows
if server_id in hass.data[PLEX_DOMAIN][SERVERS]:
hass.data[PLEX_DOMAIN][SERVERS][server_id].options = entry.options
@callback
def async_cleanup_plex_devices(hass, entry):
"""Clean up old and invalid devices from the registry."""
device_registry = dev_reg.async_get(hass)
entity_registry = ent_reg.async_get(hass)
device_entries = hass.helpers.device_registry.async_entries_for_config_entry(
device_registry, entry.entry_id
)
for device_entry in device_entries:
if (
len(
hass.helpers.entity_registry.async_entries_for_device(
entity_registry, device_entry.id, include_disabled_entities=True
)
)
== 0
):
_LOGGER.debug(
"Removing orphaned device: %s / %s",
device_entry.name,
device_entry.identifiers,
)
device_registry.async_remove_device(device_entry.id)