#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import itertools
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import yaql
from yaql.language import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
opts = [
cfg.IntOpt('limit_iterators',
default=200,
help=_('The maximum number of elements a collection '
       'expression can take for its evaluation.')),
cfg.IntOpt('memory_quota',
default=10000,
help=_('The maximum size of memory in bytes that an '
       'expression can take for its evaluation.'))
]
cfg.CONF.register_opts(opts, group='yaql')
class GetParam(function.Function):
"""A function for resolving parameter references.
Takes the form::
get_param: <param_name>
or::
get_param:
- <param_name>
- <path1>
- ...
"""
def __init__(self, stack, fn_name, args):
super(GetParam, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
args = function.resolve(self.args)
if not args:
raise ValueError(_('Function "%s" must have arguments') %
self.fn_name)
if isinstance(args, six.string_types):
param_name = args
path_components = []
elif isinstance(args, collections.Sequence):
param_name = args[0]
path_components = args[1:]
else:
raise TypeError(_('Argument to "%s" must be string or list') %
self.fn_name)
if not isinstance(param_name, six.string_types):
raise TypeError(_('Parameter name in "%s" must be string') %
self.fn_name)
try:
parameter = self.parameters[param_name]
except KeyError:
raise exception.UserParameterMissing(key=param_name)
def get_path_component(collection, key):
if not isinstance(collection, (collections.Mapping,
collections.Sequence)):
raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)
if not isinstance(key, (six.string_types, int)):
raise TypeError(_('Path components in "%s" '
'must be strings') % self.fn_name)
if isinstance(collection, collections.Sequence
) and isinstance(key, six.string_types):
try:
key = int(key)
except ValueError:
raise TypeError(_("Path components in '%s' "
"must be a string that can be "
"parsed into an "
"integer.") % self.fn_name)
return collection[key]
try:
return six.moves.reduce(get_path_component, path_components,
parameter)
except (KeyError, IndexError, TypeError):
return ''
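# A minimal, self-contained sketch of the path traversal performed above:
# reduce() walks the path components into the parameter value one key at a
# time, indexing lists by int and maps by key. Sample data and names are
# illustrative only, not part of the Heat API.
def _example_get_param_traversal():
    from functools import reduce
    parameter = {'db': {'hosts': ['10.0.0.1', '10.0.0.2']}}

    def get_component(collection, key):
        if isinstance(collection, list) and isinstance(key, str):
            key = int(key)  # string indices into lists are coerced, as above
        return collection[key]

    # Equivalent of: get_param: [<param_name>, 'db', 'hosts', '1']
    return reduce(get_component, ['db', 'hosts', '1'], parameter)  # '10.0.0.2'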
class GetAttThenSelect(cfn_funcs.GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def _parse_args(self):
if (not isinstance(self.args, collections.Sequence) or
isinstance(self.args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
if len(self.args) < 2:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute, (path), ...]') %
self.fn_name)
self._path_components = self.args[2:]
return tuple(self.args[:2])
def result(self):
attribute = super(GetAttThenSelect, self).result()
if attribute is None:
return None
path_components = function.resolve(self._path_components)
return attributes.select_from_attribute(attribute, path_components)
def dep_attrs(self, resource_name):
if self._resource().name == resource_name:
path = function.resolve(self._path_components)
attr = [function.resolve(self._attribute)]
if path:
attrs = [tuple(attr + path)]
else:
attrs = attr
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args, resource_name),
attrs)
class GetAtt(GetAttThenSelect):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def result(self):
path_components = function.resolve(self._path_components)
attribute = function.resolve(self._attribute)
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtt(attribute, *path_components)
else:
return None
class GetAttAllAttributes(GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attributes_name>
- <path1>
- ...
where <attributes_name> and <path1>, ... are optional arguments. If
<attributes_name> is not specified, the result is a dict of all of the
resource's attributes. Otherwise, the function returns the resolved
resource attribute.
"""
def _parse_args(self):
if not self.args:
raise ValueError(_('Arguments to "%s" must be in one of the '
'following forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
) % self.fn_name)
elif isinstance(self.args, collections.Sequence):
if len(self.args) > 1:
return super(GetAttAllAttributes, self)._parse_args()
else:
return self.args[0], None
else:
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
def dep_attrs(self, resource_name):
"""Check if there is no attribute_name defined, return empty chain."""
if self._attribute is not None:
return super(GetAttAllAttributes, self).dep_attrs(resource_name)
elif self._resource().name == resource_name:
res = self._resource()
attrs = six.iterkeys(res.attributes_schema)
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args,
resource_name), attrs)
def result(self):
if self._attribute is None:
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtts()
else:
return None
else:
return super(GetAttAllAttributes, self).result()
def _allow_without_attribute_name(self):
return True
class Replace(cfn_funcs.Replace):
"""A function for performing string substitutions.
Takes the form::
str_replace:
template: <key_1> <key_2>
params:
<key_1>: <value_1>
<key_2>: <value_2>
...
And resolves to::
"<value_1> <value_2>"
This is implemented using Python's str.replace on each key. The order in
which replacements are performed is undefined.
"""
def _parse_args(self):
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
try:
mapping = self.args['params']
string = self.args['template']
except (KeyError, TypeError):
example = ('''str_replace:
template: This is var1 template var2
params:
var1: a
var2: string''')
raise KeyError(_('"str_replace" syntax should be %s') %
example)
else:
return mapping, string
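# An illustrative reduction of the substitution described in the docstring
# above: each params key is substituted with str.replace. Dict iteration
# order decides which key is substituted first, which is why the docstring
# calls the replacement order undefined. Sample data is illustrative.
def _example_str_replace():
    from functools import reduce
    template = 'This is var1 template var2'
    params = {'var1': 'a', 'var2': 'string'}
    return reduce(lambda s, kv: s.replace(kv[0], kv[1]),
                  params.items(), template)  # -> 'This is a template string'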
class ReplaceJson(Replace):
"""A function for performing string substitutions.
Behaves the same as Replace, but tolerates non-string parameter
values, e.g. maps or lists: these are serialized as JSON before
performing the string substitution.
"""
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, six.string_types):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, six.string_types):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value,
(six.string_types, six.integer_types,
float, bool)):
if isinstance(value,
(collections.Mapping, collections.Sequence)):
try:
value = jsonutils.dumps(value, default=None)
except TypeError:
raise TypeError(_('"%(name)s" params must be strings, '
'numbers, list or map. '
'Failed to json serialize %(value)s'
) % {'name': self.fn_name,
'value': value})
else:
raise TypeError(_('"%s" params must be strings, numbers, '
'list or map.') % self.fn_name)
return string.replace(placeholder, six.text_type(value))
return six.moves.reduce(replace, six.iteritems(mapping), template)
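# Sketch of the non-string handling above: map and list values are JSON
# serialized before substitution. The stdlib json module stands in for
# oslo's jsonutils here to keep the example self-contained.
def _example_replace_json_value():
    import json
    template = 'config=%cfg%'
    value = {'a': [1, 2]}
    return template.replace('%cfg%', json.dumps(value))
    # -> 'config={"a": [1, 2]}'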
class GetFile(function.Function):
"""A function for including a file inline.
Takes the form::
get_file: <file_key>
And resolves to the content stored in the files dictionary under the given
key.
"""
def __init__(self, stack, fn_name, args):
super(GetFile, self).__init__(stack, fn_name, args)
self.files = self.stack.t.files
def result(self):
args = function.resolve(self.args)
if not (isinstance(args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a string') %
self.fn_name)
f = self.files.get(args)
if f is None:
fmt_data = {'fn_name': self.fn_name,
'file_key': args}
raise ValueError(_('No content found in the "files" section for '
'%(fn_name)s path: %(file_key)s') % fmt_data)
return f
class Join(cfn_funcs.Join):
"""A function for joining strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
"""
class JoinMultiple(function.Function):
"""A function for joining one or more lists of strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
Optionally multiple lists may be specified, which will also be joined.
"""
def __init__(self, stack, fn_name, args):
super(JoinMultiple, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"] ...]' % fn_name
fmt_data = {'fn_name': fn_name,
'example': example}
if not isinstance(args, list):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim = args[0]
self._joinlists = args[1:]
if len(self._joinlists) < 1:
raise ValueError
except (IndexError, ValueError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
r_joinlists = function.resolve(self._joinlists)
strings = []
for jl in r_joinlists:
if jl:
if (isinstance(jl, six.string_types) or
not isinstance(jl, collections.Sequence)):
raise TypeError(_('"%s" must operate on '
'a list') % self.fn_name)
strings += jl
delim = function.resolve(self._delim)
if not isinstance(delim, six.string_types):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
msg = _('Items to join must be string, map or list, '
'not %s') % (repr(s)[:200])
if s is None:
return ''
elif isinstance(s, six.string_types):
return s
elif isinstance(s, (collections.Mapping, collections.Sequence)):
try:
return jsonutils.dumps(s, default=None)
except TypeError:
msg = _('Items to join must be string, map or list. '
'%s failed json serialization'
) % (repr(s)[:200])
raise TypeError(msg)
return delim.join(ensure_string(s) for s in strings)
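# A small sketch of list_join with multiple lists, as handled above: the
# lists are concatenated first, then joined with the delimiter. Sample
# data is illustrative.
def _example_list_join_multiple():
    delim = ', '
    joinlists = [['one', 'two'], ['three']]
    strings = [s for jl in joinlists for s in jl]
    return delim.join(strings)  # -> 'one, two, three'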
class MapMerge(function.Function):
"""A function for merging maps.
Takes the form::
{ "map_merge" : [{'k1': 'v1', 'k2': 'v2'}, {'k1': 'v2'}] }
And resolves to::
{'k1': 'v2', 'k2': 'v2'}
"""
def __init__(self, stack, fn_name, args):
super(MapMerge, self).__init__(stack, fn_name, args)
example = (_('"%s" : [ { "key1": "val1" }, { "key2": "val2" } ]')
% fn_name)
self.fmt_data = {'fn_name': fn_name, 'example': example}
def result(self):
args = function.resolve(self.args)
if not isinstance(args, collections.Sequence):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def ensure_map(m):
if m is None:
return {}
elif isinstance(m, collections.Mapping):
return m
else:
msg = _('Incorrect arguments: Items to merge must be maps.')
raise TypeError(msg)
ret_map = {}
for m in args:
ret_map.update(ensure_map(m))
return ret_map
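# map_merge lets later maps win because dict.update is applied left to
# right; this sketch mirrors the docstring example above.
def _example_map_merge():
    args = [{'k1': 'v1', 'k2': 'v2'}, {'k1': 'v2'}]
    ret_map = {}
    for m in args:
        ret_map.update(m)
    return ret_map  # -> {'k1': 'v2', 'k2': 'v2'}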
class ResourceFacade(cfn_funcs.ResourceFacade):
"""A function for retrieving data in a parent provider template.
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
resource_facade: <attribute_type>
where the valid attribute types are "metadata", "deletion_policy" and
"update_policy".
"""
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'metadata', 'deletion_policy', 'update_policy'
)
class Removed(function.Function):
"""This function existed in previous versions of HOT, but has been removed.
Check the HOT guide for an equivalent native function.
"""
def validate(self):
exp = (_("The function %s is not supported in this version of HOT.") %
self.fn_name)
raise exception.InvalidTemplateVersion(explanation=exp)
def result(self):
return super(Removed, self).result()
class Repeat(function.Function):
"""A function for iterating over a list of items.
Takes the form::
repeat:
template:
<body>
for_each:
<var>: <list>
The result is a new list of the same size as <list>, where each element
is a copy of <body> with any occurrences of <var> replaced with the
corresponding item of <list>.
"""
def __init__(self, stack, fn_name, args):
super(Repeat, self).__init__(stack, fn_name, args)
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
# We don't check for invalid keys appearing here, which is wrong but
# it's probably too late to change
try:
self._for_each = self.args['for_each']
self._template = self.args['template']
except KeyError:
example = ('''repeat:
template: This is %var%
for_each:
%var%: ['a', 'b', 'c']''')
raise KeyError(_('"repeat" syntax should be %s') % example)
def validate(self):
super(Repeat, self).validate()
if not isinstance(self._for_each, function.Function):
if not isinstance(self._for_each, collections.Mapping):
raise TypeError(_('The "for_each" argument to "%s" must '
'contain a map') % self.fn_name)
if not all(self._valid_list(v) for v in self._for_each.values()):
raise TypeError(_('The values of the "for_each" argument '
'to "%s" must be lists') % self.fn_name)
@staticmethod
def _valid_list(arg):
return (isinstance(arg, (collections.Sequence,
function.Function)) and
not isinstance(arg, six.string_types))
def _do_replacement(self, keys, values, template):
if isinstance(template, six.string_types):
for (key, value) in zip(keys, values):
template = template.replace(key, value)
return template
elif isinstance(template, collections.Sequence):
return [self._do_replacement(keys, values, elem)
for elem in template]
elif isinstance(template, collections.Mapping):
return dict((self._do_replacement(keys, values, k),
self._do_replacement(keys, values, v))
for (k, v) in template.items())
def result(self):
for_each = function.resolve(self._for_each)
if not all(self._valid_list(l) for l in for_each.values()):
raise TypeError(_('The values of the "for_each" argument to '
'"%s" must be lists') % self.fn_name)
template = function.resolve(self._template)
keys, lists = six.moves.zip(*for_each.items())
return [self._do_replacement(keys, replacements, template)
for replacements in itertools.product(*lists)]
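# The repeat expansion above is the cross product of all for_each lists; a
# self-contained sketch of that behavior, mirroring the docstring example
# (names and sample data are illustrative):
def _example_repeat_expansion():
    for_each = {'%var%': ['a', 'b', 'c']}
    template = 'This is %var%'
    keys, lists = zip(*for_each.items())
    result = []
    for values in itertools.product(*lists):
        expanded = template
        for key, value in zip(keys, values):
            expanded = expanded.replace(key, value)
        result.append(expanded)
    return result  # -> ['This is a', 'This is b', 'This is c']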
class Digest(function.Function):
"""A function for performing digest operations.
Takes the form::
digest:
- <algorithm>
- <value>
Valid algorithms are the ones provided natively by hashlib (md5, sha1,
sha224, sha256, sha384, and sha512) or any one provided by OpenSSL.
"""
def validate_usage(self, args):
if not (isinstance(args, list) and
all([isinstance(a, six.string_types) for a in args])):
msg = _('Argument to function "%s" must be a list of strings')
raise TypeError(msg % self.fn_name)
if len(args) != 2:
msg = _('Function "%s" usage: ["<algorithm>", "<value>"]')
raise ValueError(msg % self.fn_name)
if six.PY3:
algorithms = hashlib.algorithms_available
else:
algorithms = hashlib.algorithms
if args[0].lower() not in algorithms:
msg = _('Algorithm must be one of %s')
raise ValueError(msg % six.text_type(algorithms))
def digest(self, algorithm, value):
_hash = hashlib.new(algorithm)
_hash.update(six.b(value))
return _hash.hexdigest()
def result(self):
args = function.resolve(self.args)
self.validate_usage(args)
return self.digest(*args)
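# Direct hashlib usage equivalent to the digest() method above; the
# algorithm name must be among hashlib's supported algorithms at runtime.
# The sample value is illustrative.
def _example_digest():
    h = hashlib.new('sha256')
    h.update(b'some secret value')
    return h.hexdigest()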
class StrSplit(function.Function):
"""A function for splitting delimited strings into a list.
Optionally, a specific list member can be extracted by index.
Takes the form::
str_split: [delimiter, string, <index> ]
or::
str_split:
- delimiter
- string
- <index>
If <index> is specified, that list item is returned; otherwise, the
whole list is returned, similar to get_attr with path-based attribute
access on lists.
"""
def __init__(self, stack, fn_name, args):
super(StrSplit, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "apples,pears", <index>]' % fn_name
self.fmt_data = {'fn_name': fn_name,
'example': example}
self.fn_name = fn_name
if isinstance(args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def result(self):
args = function.resolve(self.args)
try:
delim = args.pop(0)
str_to_split = args.pop(0)
except (AttributeError, IndexError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
split_list = str_to_split.split(delim)
# Optionally allow an index to be specified
if args:
try:
index = int(args.pop(0))
except ValueError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
else:
try:
res = split_list[index]
except IndexError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be between 0 and '
'%(max_index)s')
% {'fn_name': self.fn_name,
'max_index': len(split_list) - 1})
else:
res = split_list
return res
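# str_split reduces to str.split plus an optional index lookup; this sketch
# mirrors the example given in __init__ above.
def _example_str_split():
    delim, value, index = ',', 'apples,pears', 1
    split_list = value.split(delim)
    return split_list[index]  # -> 'pears'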
class Yaql(function.Function):
"""A function for executing a yaql expression.
Takes the form::
yaql:
expression:
<body>
data:
<var>: <list>
Evaluates expression <body> on the given data.
"""
_parser = None
@classmethod
def get_yaql_parser(cls):
if cls._parser is None:
global_options = {
'yaql.limitIterators': cfg.CONF.yaql.limit_iterators,
'yaql.memoryQuota': cfg.CONF.yaql.memory_quota
}
cls._parser = yaql.YaqlFactory().create(global_options)
return cls._parser
def __init__(self, stack, fn_name, args):
super(Yaql, self).__init__(stack, fn_name, args)
self.parser = self.get_yaql_parser()
self.context = yaql.create_context()
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map.') %
self.fn_name)
try:
self._expression = self.args['expression']
self._data = self.args.get('data', {})
for arg in six.iterkeys(self.args):
if arg not in ['expression', 'data']:
raise KeyError
except (KeyError, TypeError):
example = ('''%s:
expression: $.data.var1.sum()
data:
var1: [3, 2, 1]''') % self.fn_name
raise KeyError(_('"%(name)s" syntax should be %(example)s') % {
'name': self.fn_name, 'example': example})
def validate_expression(self, expression):
try:
self.parser(expression)
except exceptions.YaqlException as yex:
raise ValueError(_('Bad expression %s.') % yex)
def validate(self):
super(Yaql, self).validate()
if not isinstance(self._data,
(collections.Mapping, function.Function)):
raise TypeError(_('The "data" argument to "%s" must contain '
'a map.') % self.fn_name)
if not isinstance(self._expression,
(six.string_types, function.Function)):
raise TypeError(_('The "expression" argument to %s must '
'contain a string or a '
'function.') % self.fn_name)
if isinstance(self._expression, six.string_types):
self.validate_expression(self._expression)
def result(self):
data = function.resolve(self._data)
if not isinstance(data, collections.Mapping):
raise TypeError(_('The "data" argument to "%s" must contain '
'a map.') % self.fn_name)
ctxt = {'data': data}
self.context['$'] = ctxt
if not isinstance(self._expression, six.string_types):
self._expression = function.resolve(self._expression)
self.validate_expression(self._expression)
return self.parser(self._expression).evaluate(context=self.context)
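# End-to-end sketch of the evaluation performed above, using only the yaql
# calls already made in this module and the syntax example from __init__:
def _example_yaql_eval():
    parser = yaql.YaqlFactory().create()
    context = yaql.create_context()
    context['$'] = {'data': {'var1': [3, 2, 1]}}
    return parser('$.data.var1.sum()').evaluate(context=context)  # -> 6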
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import librosa
import numpy as np
from scipy.spatial.distance import cdist
import pytest
from test_core import srand
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_1d_input():
X = np.array([[1], [3], [3], [8], [1]])
Y = np.array([[2], [0], [0], [8], [7], [2]])
librosa.sequence.dtw(X=X, Y=Y)
def test_dtw_global():
# Example taken from:
# Meinard Mueller, Fundamentals of Music Processing
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
gt_D = np.array([[1., 2., 3., 10., 16., 17.],
[2., 4., 5., 8., 12., 13.],
[3., 5., 7., 10., 12., 13.],
[9., 11., 13., 7., 8., 14.],
[10, 10., 11., 14., 13., 9.]])
mut_D, _ = librosa.sequence.dtw(X, Y)
assert np.array_equal(gt_D, mut_D)
# Check that it works without backtracking
mut_D2 = librosa.sequence.dtw(X, Y, backtrack=False)
assert np.array_equal(mut_D, mut_D2)
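# For reference, the accumulated cost checked above follows the classic DTW
# recurrence D[i, j] = C[i, j] + min(D[i-1, j], D[i, j-1], D[i-1, j-1]) with
# the default step sizes and unit weights. A naive NumPy sketch of that
# recurrence (not part of librosa's API):
def _naive_dtw_cost(C):
    D = np.full((C.shape[0] + 1, C.shape[1] + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, D.shape[0]):
        for j in range(1, D.shape[1]):
            D[i, j] = C[i - 1, j - 1] + min(D[i - 1, j],
                                            D[i, j - 1],
                                            D[i - 1, j - 1])
    return D[1:, 1:]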
def test_dtw_global_constrained():
# Example taken from:
# Meinard Mueller, Fundamentals of Music Processing
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
# With band_rad = 0.5, the GT distance array is
gt_D = np.array([[1., 2., 3., np.inf, np.inf, np.inf],
[2., 4., 5., 8., np.inf, np.inf],
[np.inf, 5., 7., 10., 12., np.inf],
[np.inf, np.inf, 13., 7., 8., 14.],
[np.inf, np.inf, np.inf, 14., 13., 9.]])
mut_D = librosa.sequence.dtw(X, Y, backtrack=False,
global_constraints=True, band_rad=0.5)
assert np.array_equal(gt_D, mut_D)
def test_dtw_global_supplied_distance_matrix():
# Example taken from:
# Meinard Mueller, Fundamentals of Music Processing
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
# Precompute distance matrix.
C = cdist(X.T, Y.T, metric='euclidean')
gt_D = np.array([[1., 2., 3., 10., 16., 17.],
[2., 4., 5., 8., 12., 13.],
[3., 5., 7., 10., 12., 13.],
[9., 11., 13., 7., 8., 14.],
[10, 10., 11., 14., 13., 9.]])
# Supply precomputed distance matrix and specify an invalid distance
# metric to verify that it isn't used.
mut_D, _ = librosa.sequence.dtw(C=C, metric='invalid')
assert np.array_equal(gt_D, mut_D)
def test_dtw_global_boundary():
# Verify that boundary condition is fulfilled for subseq=False.
# See https://github.com/librosa/librosa/pull/920
X = np.array([1, 2, 3, 4, 5])
Y = np.array([1, 1, 1, 2, 4, 5, 6, 5, 5])
gt_wp = np.array([[0, 0], [0, 1], [0, 2], [1, 3], [2, 3], [3, 4], [4, 5],
[4, 6], [4, 7], [4, 8]])
D, wp = librosa.sequence.dtw(X, Y, subseq=False)
wp = wp[::-1]
assert np.array_equal(gt_wp, wp)
def test_dtw_subseq_boundary():
# Verify that boundary condition doesn't have to be fulfilled for
# subseq=True.
# See https://github.com/librosa/librosa/pull/920
X = np.array([1, 2, 3, 4, 5])
Y = np.array([1, 1, 1, 2, 4, 5, 6, 5, 5])
gt_wp = np.array([[0, 2], [1, 3], [2, 3], [3, 4], [4, 5]])
D, wp = librosa.sequence.dtw(X, Y, subseq=True)
wp = wp[::-1]
assert np.array_equal(gt_wp, wp)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_incompatible_args_01():
librosa.sequence.dtw(C=1, X=1, Y=1)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_incompatible_args_02():
librosa.sequence.dtw(C=None, X=None, Y=None)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_incompatible_sigma_add():
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
librosa.sequence.dtw(X=X, Y=Y, weights_add=np.arange(10))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_incompatible_sigma_mul():
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
librosa.sequence.dtw(X=X, Y=Y, weights_mul=np.arange(10))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_incompatible_sigma_diag():
X = np.array([[1, 3, 3, 8, 1, 2]])
Y = np.array([[2, 0, 0, 8, 7]])
librosa.sequence.dtw(X=X, Y=Y, step_sizes_sigma=np.ones((1, 2), dtype=int))
def test_dtw_global_diagonal():
# query is a linear ramp
X = np.linspace(0.1, 1, 10)
Y = X
gt_wp = list(zip(list(range(10)), list(range(10))))[::-1]
mut_D, mut_wp = librosa.sequence.dtw(X, Y, subseq=True, metric='cosine',
step_sizes_sigma=np.array([[1, 1]]),
weights_mul=np.array([1, ]))
assert np.array_equal(np.asarray(gt_wp), np.asarray(mut_wp))
def test_dtw_subseq():
srand()
# query is a linear ramp
X = np.linspace(0, 1, 100)
# database is query surrounded by noise
noise_len = 200
noise = np.random.rand(noise_len)
Y = np.concatenate((noise, noise, X, noise))
_, mut_wp = librosa.sequence.dtw(X, Y, subseq=True)
# estimated sequence has to match original sequence
# note the +1 because Python slice end indices are exclusive
mut_X = Y[mut_wp[-1][1]:mut_wp[0][1] + 1]
assert np.array_equal(X, mut_X)
def test_dtw_subseq_supplied_distance_matrix():
X = np.array([[0], [1], [2]])
Y = np.array([[1], [2], [3], [4]])
C = cdist(X, Y)
costs0, path0 = librosa.sequence.dtw(X.T, Y.T, subseq=True)
costs1, path1 = librosa.sequence.dtw(C=C, subseq=True)
assert np.array_equal(costs0, costs1)
assert np.array_equal(path0, path1)
def test_dtw_subseq_sym():
Y = np.array([10., 10., 0., 1., 2., 3., 10., 10.])
X = np.arange(4)
gt_wp_XY = np.array([[3, 5], [2, 4], [1, 3], [0, 2]])
gt_wp_YX = np.array([[5, 3], [4, 2], [3, 1], [2, 0]])
_, mut_wp_XY = librosa.sequence.dtw(X, Y, subseq=True)
_, mut_wp_YX = librosa.sequence.dtw(Y, X, subseq=True)
assert np.array_equal(gt_wp_XY, mut_wp_XY)
assert np.array_equal(gt_wp_YX, mut_wp_YX)
def test_dtw_global_constraint_destructive():
# Issue #1029, dtw with global constraints inserts nans
# into the cost matrix. This is fine when the cost is computed
# locally, but if passed by reference, it's destructive.
# This test checks that the cost matrix is unmodified.
C1 = np.ones((20, 20))
C2 = np.copy(C1)
librosa.sequence.dtw(C=C1, global_constraints=True)
assert np.array_equal(C1, C2)
def test_dtw_global_inf():
# What should we do if backtracking fails in full sequence mode?
# This will happen if the inner loop of bt aborts prematurely
# by walking off the edge of the cost array instead of
# path-following to (0, 0)
# Construct a cost matrix where full alignment is impossible
C = np.zeros((4, 4), dtype=float)
C[-1, -1] = np.inf
with pytest.raises(librosa.ParameterError):
librosa.sequence.dtw(C=C, subseq=False)
def test_dtw_subseq_inf():
# Construct a cost matrix where partial alignment is impossible
C = np.zeros((4, 4), dtype=float)
C[-1, :] = np.inf
with pytest.raises(librosa.ParameterError):
librosa.sequence.dtw(C=C, subseq=True)
def test_dtw_subseq_pass():
# Construct a cost matrix where partial alignment is possible
C = np.zeros((4, 4), dtype=float)
C[-1, 2:] = np.inf
librosa.sequence.dtw(C=C, subseq=True)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_dtw_nan_fail():
C = np.ones((10, 10))
C[4, 6] = np.nan
librosa.sequence.dtw(C=C)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize('steps', [np.array([[1, -1]]), np.array([[-1, 1]]), np.array([[-1, -1]])])
def test_dtw_negative_steps(steps):
C = np.ones((10, 10))
librosa.sequence.dtw(C=C, step_sizes_sigma=steps)
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic handling job execution.
In a typical case, each job class has its own executor.
"""
import abc
import atexit
import os
import select
import signal
import subprocess
import threading
import time
import traceback
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
from pinball.config.utils import timestamp_to_str
from pinball.persistence.token_data import TokenData
from pinball.workflow import log_saver
from pinball.workflow.job import ShellConditionJob
from pinball.workflow.job import ShellJob
from pinball.workflow.buffered_line_reader import BufferedLineReader
from pinball.workflow.utils import get_logs_dir
__author__ = 'Pawel Garbacki, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.workflow.job_executor')
class ExecutionRecord(TokenData):
"""A data object holding information about a single job execution."""
def __init__(self, info=None, instance=None, start_time=None,
end_time=None, exit_code=None, logs=None):
self.info = info
self.instance = instance
self.start_time = start_time
self.end_time = end_time
self.exit_code = exit_code
self.events = []
# TODO(pawel): rename this to attributes for naming consistency.
self.properties = {}
self.cleanup_exit_code = None
# Setting logs={} in the argument list is a bad idea.
# See http://effbot.org/zone/default-values.htm for explanation.
self.logs = logs if logs is not None else {}
@property
def _COMPATIBILITY_ATTRIBUTES(self):
result = super(ExecutionRecord, self)._COMPATIBILITY_ATTRIBUTES
result['properties'] = {}
result['instance'] = None
result['cleanup_exit_code'] = None
result['events'] = []
return result
def get_event_attributes(self):
ATTRIBUTE_PREFIX = 'EVENT_ATTR:'
result = {}
for key, value in self.properties.items():
if key.startswith(ATTRIBUTE_PREFIX):
result[key.split(':', 1)[1]] = value
return result
def __str__(self):
if self.start_time:
start_time = timestamp_to_str(self.start_time)
else:
start_time = str(self.start_time)
if self.end_time:
end_time = timestamp_to_str(self.end_time)
else:
end_time = str(self.end_time)
return ('ExecutionRecord(info=%s, instance=%s, start_time=%s, '
'end_time=%s, exit_code=%s, events=%s, properties=%s, '
'logs=%s)' %
(self.info, self.instance, start_time, end_time,
self.exit_code, self.events, self.properties, self.logs))
def __repr__(self):
return self.__str__()
class JobExecutor(object):
"""Interface of a client communicating with token master."""
__metaclass__ = abc.ABCMeta
def __init__(self, workflow, instance, job_name, job, data_builder,
emailer):
self._workflow = workflow
self._instance = instance
self._job_name = job_name
self.job = job
self._data_builder = data_builder
self._emailer = emailer
# A map from log type to the log saver storing job output of this type.
self._log_savers = {}
_cleaners = set()
@staticmethod
@atexit.register
def _call_cleaners():
for cleaner in JobExecutor._cleaners:
cleaner()
@staticmethod
def from_job(workflow, instance, job_name, job, data_builder, emailer):
"""Create an executor capable of running a given job."""
if type(job) == ShellJob or type(job) == ShellConditionJob:
return ShellJobExecutor(workflow, instance, job_name, job,
data_builder, emailer)
@abc.abstractmethod
def prepare(self):
return
@abc.abstractmethod
def execute(self):
return
@abc.abstractmethod
def abort(self):
return
class ShellJobExecutor(JobExecutor):
def __init__(self, workflow, instance, job_name, job, data_builder,
emailer):
super(ShellJobExecutor, self).__init__(workflow, instance, job_name,
job, data_builder, emailer)
# Indicates that job data has changed but has not yet been recorded in
# the master.
self.job_dirty = False
self._process = None
self._aborted = False
self._abort_timeout_reached = False
self._warn_timeout_reached = False
self._lock = threading.Lock()
self._log_pipe_readers = {}
def _get_logs_dir(self, log_directory):
"""Generate name of directory where job logs are stored.
Returns:
Name of the job logs directory.
"""
return get_logs_dir(self._workflow, self._instance, log_directory)
def _get_log_filename(self, log_type, timestamp):
"""Generate log file name.
Args:
log_type: Type of the log stored in the file.
timestamp: The execution timestamp.
"""
# TODO(pawel): the file name should contain more context, in particular
# the workflow and instance names.
filename = '%s.%d.%s' % (self.job.name, timestamp, log_type)
log_directory = PinballConfig.S3_LOGS_DIR \
if PinballConfig.S3_LOGS_DIR else PinballConfig.LOCAL_LOGS_DIR
return os.path.join(self._get_logs_dir(log_directory), filename)
def _get_last_execution_record(self):
"""Retrieve the most recent job execution record.
Returns:
The most recent job execution record.
"""
assert self.job.history
return self.job.history[-1]
def _check_token_lost(self):
"""Check if the ownership of the job token has been lost.
Returns:
True iff the job token ownership has been lost.
"""
if not self.job.history:
return False
execution_record = self.job.history[-1]
assert execution_record.start_time
if not execution_record.end_time:
execution_record.end_time = time.time()
execution_record.exit_code = 1
message = 'executor failed to renew job ownership on time\n'
self._append_to_pinlog(message)
return True
return False
def prepare(self):
"""Prepare the execution.
As a side effect, this method appends an execution record to the job
history.
Returns:
True iff the preparations succeeded. If False, the job should not
be executed.
"""
if self._check_token_lost():
return False
execution_record = ExecutionRecord(instance=self._instance,
start_time=time.time())
self.job.history.append(execution_record)
self.job.truncate_history()
execution_record.events = self.job.events
self.job.events = []
if self.job.disabled:
execution_record.info = 'DISABLED'
return True
else:
execution_record.info = self.job.command
try:
logs_dir = self._get_logs_dir(PinballConfig.LOCAL_LOGS_DIR)
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
except Exception:
LOG.exception('')
execution_record.end_time = time.time()
execution_record.exit_code = 1
return False
for log_type in ['stdout', 'stderr']:
execution_record.logs[log_type] = self._get_log_filename(
log_type, execution_record.start_time)
self._log_savers[log_type] = \
log_saver.FileLogSaver.from_path(
execution_record.logs[log_type])
self._log_savers[log_type].open()
return True
def _set_log_pipe_reader(self, process):
""" Sets up buffered line reader to read the log pipes
Args:
process: the process to set up the log piper reader for
"""
self._log_pipe_readers = {
process.stdout: BufferedLineReader(process.stdout),
process.stderr: BufferedLineReader(process.stderr)}
def _process_log_line(self, line):
"""Process a log line extracting properties.
Args:
line: The log line to process.
"""
# A magic value marking log lines with key=value pairs.
PINBALL_MAGIC = 'PINBALL:'
if line.startswith(PINBALL_MAGIC):
if not line.endswith('\n'):
LOG.error('PINBALL magic string is not properly terminated:')
LOG.error(line)
line = line.strip()
line = line[len(PINBALL_MAGIC):]
delimiter_index = line.find('=')
key, value = (line[:delimiter_index], line[delimiter_index + 1:])
if key:
execution_record = self._get_last_execution_record()
# Python guarantees that this operation is atomic so we don't
# need locks around it.
execution_record.properties[key] = value
self.job_dirty = True
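# For reference, a properties line in the job's output looks like this
# (sample values are made up):
#     PINBALL:EVENT_ATTR:run_id=12345
# Only the first '=' delimits the key, so values may themselves contain
# '=' characters; the key above becomes 'EVENT_ATTR:run_id'.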
def _consume_logs(self, process):
"""Process logs produced by the specified process.
Args:
process: The process whose logs we want to consume.
Returns:
True iff any data was read.
"""
TIMEOUT_SEC = 60. # 1 minute
streams = []
if not process.stdout.closed:
streams.append(process.stdout)
if not process.stderr.closed:
streams.append(process.stderr)
if not streams:
return False
ready_to_read = select.select(streams,
[],
[],
TIMEOUT_SEC)[0]
if not ready_to_read:
LOG.info('select timeout reached while reading output of command '
'%s', self.job.command)
for source in ready_to_read:
lines = self._log_pipe_readers[source].readlines()
if self._log_pipe_readers[source].eof():
source.close()
for line in lines:
if source == process.stdout:
self._log_savers['stdout'].write(line)
else:
assert source == process.stderr
self._log_savers['stderr'].write(line)
self._process_log_line(line)
return ready_to_read != []
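# The select() call above multiplexes stdout and stderr on a single thread.
# A self-contained equivalent using a plain OS pipe (illustrative only):
#     r, w = os.pipe()
#     os.write(w, b'hello\n')
#     readable, _, _ = select.select([r], [], [], 1.0)
#     data = os.read(readable[0], 1024) if readable else b''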
def _write_separator_to_logs(self, flag):
msg = '\n<<<<<<<<<<%s of cleanup code logs>>>>>>>>>>\n' % flag
for log_type in ['stdout', 'stderr']:
if log_type in self._log_savers:
self._log_savers[log_type].write(msg)
def _execute_cleanup(self):
execution_record = self._get_last_execution_record()
if not self.job.cleanup_template:
return None
try:
cleanup_command = (self.job.cleanup_template %
execution_record.properties)
except KeyError:
message = ('Could not customize cleanup command %s with '
'properties %s' % (self.job.cleanup_template,
execution_record.properties))
self._append_to_pinlog(message)
return 1
env = os.environ.copy()
env.pop('DJANGO_SETTINGS_MODULE', None)
cleanup_process = subprocess.Popen(
cleanup_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
preexec_fn=os.setsid)
self._set_log_pipe_reader(cleanup_process)
self._write_separator_to_logs('Start')
while cleanup_process.poll() is None:
self._consume_logs(cleanup_process)
# Check again to catch anything after the process
# exits.
while self._consume_logs(cleanup_process):
pass
self._write_separator_to_logs('End')
return cleanup_process.wait()
def _append_to_pinlog(self, message):
"""Append message to pinlog.
PINLOG means pinball log, which contains log lines that pinball itself
generates for a particular job.
Args:
message: The log message to append.
"""
execution_record = self._get_last_execution_record()
pinlog = execution_record.logs.get('pinlog')
try:
if not pinlog:
#TODO(Mao): Move the logic to create local dirs to log saver
logs_dir = self._get_logs_dir(PinballConfig.LOCAL_LOGS_DIR)
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
pinlog = self._get_log_filename('pinlog',
execution_record.start_time)
self._log_savers['pinlog'] = log_saver.FileLogSaver.from_path(
pinlog)
self._log_savers['pinlog'].open()
execution_record.logs['pinlog'] = pinlog
self._log_savers['pinlog'].write(message)
except Exception:
LOG.exception('')
def _get_emails(self):
"""Get notification emails for the currently running job.
Returns:
List of job's notification email addresses.
"""
emails = set(self.job.emails)
schedule_data = self._data_builder.get_schedule(self._workflow)
if schedule_data:
emails.update(schedule_data.emails)
return list(emails)
def _check_timeouts(self):
"""Check if timeouts have been reached."""
if self._abort_timeout_reached:
return
execution_record = self._get_last_execution_record()
start_time = execution_record.start_time
now = time.time()
if (self.job.abort_timeout_sec and
start_time + self.job.abort_timeout_sec < now):
self._abort_timeout_reached = True
self.abort()
return
if (not self._warn_timeout_reached and self.job.warn_timeout_sec and
start_time + self.job.warn_timeout_sec < now):
self._warn_timeout_reached = True
emails = self._get_emails()
if not emails:
return
execution = len(self.job.history) - 1
job_execution_data = self._data_builder.get_execution(
self._workflow, self._instance, self._job_name, execution)
self._emailer.send_job_timeout_warning_message(emails,
job_execution_data)
def execute(self):
"""Execute the job.
It is assumed that the prepare method has been called and returned
True.
Returns:
True iff the execution succeeded.
"""
execution_record = self._get_last_execution_record()
assert not execution_record.end_time
if self.job.disabled:
execution_record.end_time = execution_record.start_time
execution_record.exit_code = 0
return True
try:
assert not self._process
with self._lock:
# We need the lock to prevent a situation where the job
# executes even though it got aborted (by a different
# thread).
aborted = self._aborted
if not aborted:
command = self.job.customize_command()
LOG.info('executing command: %s', command)
env = os.environ.copy()
# Pinball sets Django module path which may interfere
# with the command being executed.
env.pop('DJANGO_SETTINGS_MODULE', None)
# The os.setsid() is passed in the argument preexec_fn
# so it's run after the fork() and before exec() to
# run the shell. It attaches a session id of the child
# process to the parent process which is a shell in our
# case. This will make it the group leader. So when a
# signal is sent to the process group leader, it's
# transmitted to all of the child processes.
self._process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
preexec_fn=os.setsid)
self._set_log_pipe_reader(self._process)
JobExecutor._cleaners.add(self.abort)
if aborted:
# TODO(pawel): we should have an explicit indicator that
# the job was aborted.
execution_record.exit_code = 1
else:
while self._process.poll() is None:
self._consume_logs(self._process)
self._check_timeouts()
# Check again to catch anything after the process
# exits.
while self._consume_logs(self._process):
pass
execution_record.exit_code = self._process.wait()
JobExecutor._cleaners.remove(self.abort)
with self._lock:
self._process = None
if execution_record.exit_code != 0:
execution_record.cleanup_exit_code = \
self._execute_cleanup()
except Exception:
LOG.exception('')
execution_record.exit_code = 1
self._append_to_pinlog(traceback.format_exc())
finally:
execution_record.end_time = time.time()
# Make sure we've saved all the logs
try:
for log_type in self._log_savers:
self._log_savers[log_type].close()
except Exception:
LOG.exception('')
return execution_record.exit_code == 0
def abort(self):
"""Abort the currently running job."""
with self._lock:
self._aborted = True
if self._process:
os.killpg(self._process.pid, signal.SIGKILL)
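# Why preexec_fn=os.setsid matters for abort() above: the shell child
# becomes its own process-group leader, so os.killpg() reaches every
# descendant of the shell, not just the shell itself. A minimal POSIX-only
# sketch; the 'sleep 60' command is illustrative.
def _example_kill_process_group():
    proc = subprocess.Popen('sleep 60', shell=True, preexec_fn=os.setsid)
    # With setsid, the child's pid equals its process-group id.
    os.killpg(proc.pid, signal.SIGKILL)
    return proc.wait()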
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
class Block:
def __init__(self, ll, ur):
self.ll = ll
self.ur = ur
def is_overlap(self, p, scale=1):
    return (self.ll.x * scale <= p.x <= self.ur.x * scale and
            self.ll.y * scale <= p.y <= self.ur.y * scale)
def is_line_intersect(self, line, scale=1):
x_len = abs(self.ur.x - self.ll.x)
y_len = abs(self.ur.y - self.ll.y)
corner1 = Point(self.ll.x, self.ll.y)
corner2 = Point(corner1.x + x_len, corner1.y)
corner3 = Point(corner1.x, corner1.y + y_len)
corner4 = Point(self.ur.x, self.ur.y)
ls = []
lines = [Line(corner1, corner2), Line(corner1, corner3),
Line(corner2, corner4), Line(corner3, corner4)]
for edge in lines:
x = line.is_line_intersect(edge, scale)
if x:
ls.append(x)
if not ls:
return False
final = []
for x, y in ls:
final.append(((line.start.x * scale - x) ** 2 +
(line.start.y * scale - y) ** 2) ** 0.5)
return ls[final.index(min(final))], min(final)
class Floor(Block):
def __init__(self, ll, ur, color=(255, 255, 255)):
Block.__init__(self, ll, ur)
self._color = color
def get_color(self):
return self._color
def set_color(self, color):
self._color = color
color = property(get_color, set_color)
class Wall(Block):
def __init__(self, ll, ur, height=10):
Block.__init__(self, ll, ur)
self._height = height
def get_height(self):
return self._height
def set_height(self, height):
self._height = height
height = property(get_height, set_height)
class World:
def __init__(self, blocks, init_pos=None, init_heading=0):
self.blocks = blocks
self.init_heading = init_heading
if init_pos is not None:
self.init_pos = init_pos
else:
ll, ur = self.get_world_boundaries()
self.init_pos = Point(abs(ur.x - ll.x) // 2, abs(ur.y - ll.y) // 2)
def get_init_pos(self):
return self.init_pos
def get_init_heading(self):
return self.init_heading
def is_overlap(self, p, scale=1):
for b in self.blocks:
if b.is_overlap(p, scale) and isinstance(b, Wall):
return True
return False
def get_centre_world(self):
ll, ur = self.get_world_boundaries()
xc = abs(ur.x - ll.x) // 2
yc = abs(ur.y - ll.y) // 2
return Point(xc, yc)
def get_world_boundaries(self):
min_x = self.blocks[0].ll.x
min_y = self.blocks[0].ll.y
max_x = self.blocks[0].ll.x
max_y = self.blocks[0].ll.y
for b in self.blocks:
ll_x = b.ll.x
ll_y = b.ll.y
ur_x = b.ur.x
ur_y = b.ur.y
min_x = min(min_x, ll_x, ur_x)
min_y = min(min_y, ll_y, ur_y)
max_x = max(max_x, ll_x, ur_x)
max_y = max(max_y, ll_y, ur_y)
ll = Point(min_x, min_y)
ur = Point(max_x, max_y)
return ll, ur
def draw_world(self, t, scale=1):
for b in self.blocks:
self.draw_block(t, b)
def draw_block(self, t, b):
t.penup()
t.setposition(b.ll.x, b.ll.y)
t.pendown()
t.setx(b.ur.x)
t.sety(b.ur.y)
t.setx(b.ll.x)
t.sety(b.ll.y)
t.penup()
class PGWorld(World):
def draw_world(self, robot, scale=1):
for b in self.blocks:
self.draw_block(robot, b, scale)
def draw_block(self, robot, block, scale=1):
assert(block.ll.x <= block.ur.x and block.ll.y <= block.ur.y)
length = abs(float(block.ur.x) - float(block.ll.x)) * scale
width = abs(float(block.ur.y) - float(block.ll.y)) * scale
ul_x = block.ll.x * scale
ul_y = (robot.window.size[1] - block.ll.y * scale - width)
ll, ur = self.get_world_boundaries()
if isinstance(block, Floor):
robot.window.screen.fill(block.color,
rect=[ul_x, ul_y, length, width])
else:
robot.window.screen.fill([0, 0, 0],
rect=[ul_x, ul_y, length, width])
class Line:
def __init__(self, start, end):
self.start = start
self.end = end
if self.end.x != self.start.x:
self.grad = (self.end.y -
self.start.y) / (self.end.x - self.start.x)
self.c = self.start.y - self.grad * self.start.x
else:
self.grad = None
self.c = None
def is_line_intersect(self, line, scale=1):
eps = 1e-7
if self.grad is not None and line.grad is None:
y = (self.grad * line.start.x + self.c) * scale
x = line.start.x * scale
if (self.start.x * scale - eps <= x <= self.end.x * scale + eps or
self.start.x * scale + eps >= x >= self.end.x * scale - eps) and \
(line.start.y * scale - eps <= y <= line.end.y * scale + eps or
line.start.y * scale + eps >= y >= line.end.y * scale - eps):
return (x, y)
else:
return False
elif line.grad is not None and self.grad is None:
y = (line.grad * self.start.x + line.c) * scale
x = self.start.x * scale
if (line.start.x * scale - eps <= x <= line.end.x * scale + eps or
line.start.x * scale + eps >= x >= line.end.x * scale - eps) and \
(self.start.y * scale - eps <= y <= self.end.y * scale + eps or
self.start.y * scale + eps >= y >= self.end.y * scale - eps):
return (x, y)
else:
return False
if self.grad == line.grad:
return False
x = (self.c - line.c) / (line.grad - self.grad)
y = (self.grad * x + self.c)
x *= scale
y *= scale
if (self.start.x * scale - eps <= x <= self.end.x * scale + eps or
self.start.x * scale + eps >= x >= self.end.x * scale - eps) and \
(self.start.y * scale - eps <= y <= self.end.y * scale + eps or
self.start.y * scale + eps >= y >= self.end.y * scale - eps) and \
(line.start.x * scale - eps <= x <= line.end.x * scale + eps or
line.start.x * scale + eps >= x >= line.end.x * scale - eps) and \
(line.start.y * scale - eps <= y <= line.end.y * scale + eps or
line.start.y * scale + eps >= y >= line.end.y * scale - eps):
return (x, y)
return False
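# Worked example of the intersection math above: two non-parallel segments
# y = m1*x + c1 and y = m2*x + c2 cross at x = (c1 - c2) / (m2 - m1).
# The coordinates below are illustrative only.
def _example_segment_intersection():
    a = Line(Point(0, 0), Point(4, 4))  # y = x
    b = Line(Point(0, 4), Point(4, 0))  # y = -x + 4
    return a.is_line_intersect(b)       # -> (2.0, 2.0)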
# -*- coding: utf-8 -*-
"""Main module."""
import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patheffects import RendererBase
from matplotlib import transforms
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import FormatStrFormatter
from .utils import despine
from .colorschemes import default_colorschemes
from matplotlib.lines import Line2D
from .format_utils import process_data
class Scale(RendererBase):
"""Scale alphabets using affine transformation"""
def __init__(self, sx, sy=None):
self._sx = sx
self._sy = sy
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
affine = affine.identity().scale(self._sx, self._sy) + affine
renderer.draw_path(gc, tpath, affine, rgbFace)
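# Typical use of the Scale path effect above: stretch a glyph vertically to
# encode its score while keeping its width. A minimal sketch; the text and
# scale factor are illustrative.
def _example_scale_usage():
    fig, ax = plt.subplots()
    txt = ax.text(0.5, 0.1, 'A', fontsize=48, ha='center')
    txt.set_path_effects([Scale(1.0, 2.5)])  # same width, 2.5x taller
    return fig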
def _setup_plt():
plt.rcParams['savefig.dpi'] = 120
plt.rcParams['figure.dpi'] = 120
plt.rcParams['figure.autolayout'] = False
plt.rcParams['figure.figsize'] = 12, 8
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
def _setup_font(fontfamily='Arial', fontsize=12):
"""Setup font properties"""
#_setup_plt()
font = FontProperties()
font.set_size(fontsize)
font.set_weight('bold')
font.set_family(fontfamily)
return font
def setup_axis(ax,
axis='x',
majorticks=10,
minorticks=5,
xrotation=0,
yrotation=0):
"""Setup axes defaults"""
major_locator = MultipleLocator(majorticks)
major_formatter = FormatStrFormatter('%d')
minor_locator = MultipleLocator(minorticks)
if axis == 'x':
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(major_formatter)
ax.xaxis.set_minor_locator(minor_locator)
elif axis == 'y':
ax.yaxis.set_major_locator(major_locator)
ax.yaxis.set_major_formatter(major_formatter)
ax.yaxis.set_minor_locator(minor_locator)
elif axis == 'both':
    ax.xaxis.set_major_locator(major_locator)
    ax.xaxis.set_major_formatter(major_formatter)
    ax.xaxis.set_minor_locator(minor_locator)
    ax.yaxis.set_major_locator(major_locator)
    ax.yaxis.set_major_formatter(major_formatter)
    ax.yaxis.set_minor_locator(minor_locator)
ax.tick_params(which='major', width=2, length=10)
ax.tick_params(which='minor', width=1, length=6)
def _draw_text_data_coord(height_matrix,
ax,
fontfamily,
colorscheme='classic',
scalex=1,
draw_axis=False,
debug=False):
fig = ax.get_figure()
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width *= fig.dpi
height *= fig.dpi
fontsize = (height / 1.7) * 72.0 / fig.dpi  # convert pixels to points
font = _setup_font(fontsize=fontsize, fontfamily=fontfamily)
trans_offset = transforms.offset_copy(
ax.transData, fig=fig, x=1, y=0, units='points')
if not isinstance(colorscheme, dict):
colorscheme = default_colorschemes[colorscheme]
for xindex, xcol in enumerate(height_matrix):
yshift = 0
total_shift = 0
total_score = 0
for basechar, basescore in xcol:
txt = ax.text(
xindex + 1,
0,
basechar,
transform=trans_offset,
fontsize=fontsize,
color=colorscheme[basechar],
ha='center',
va='baseline',
family='monospace',
#va='baseline',
fontproperties=font, )
txt.set_path_effects([Scale(1.0, basescore)])
fig.canvas.draw()
window_ext = txt.get_window_extent(txt._renderer)
yshift = window_ext.height * basescore
total_score += basescore
if debug:
ax.axhline(
    y=total_score, color='r', linestyle='dashed', linewidth=1)
trans_offset = transforms.offset_copy(
txt._transform, fig=fig, y=yshift, units='dots')
trans_offset = transforms.offset_copy(
ax.transData, fig=fig, x=1, y=0, units='dots')
if not draw_axis:
ax.axis('off')
def _draw_text_display_coord(height_matrix,
ax,
fontfamily,
colorscheme='classic',
scalex=1,
is_protein=False,
draw_axis=False):
xshifts_list = []
fig = ax.get_figure()
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width *= fig.dpi
height *= fig.dpi
fontsize = (height / 1.7) * 72.0 / fig.dpi
font = _setup_font(fontsize=fontsize, fontfamily=fontfamily)
xshift = 160
xshifts_list.append(xshift)
trans_offset = transforms.offset_copy(
ax.transData, fig=fig, x=xshift, y=0, units='dots')
ax.trans_offsets = [trans_offset]
if not isinstance(colorscheme, dict):
colorscheme = default_colorschemes[colorscheme]
for xindex, xcol in enumerate(height_matrix):
yshift = 0
total_shift = 0
total_score = 0
for basechar, basescore in xcol:
txt = ax.text(
0,
0,
basechar,
transform=trans_offset,
fontsize=fontsize,
color=colorscheme[basechar],
va='baseline',
family='monospace',
ha='center',
fontproperties=font)
txt.set_path_effects([Scale(scalex, basescore)])
fig.canvas.draw()
window_ext = txt.get_window_extent(txt._renderer)
yshift = window_ext.height * basescore
total_score += basescore
trans_offset = transforms.offset_copy( #ax.transData,
txt._transform,
fig=fig,
y=yshift,
#x=xshift,
units='dots')
if is_protein:
trans_offset1 = transforms.offset_copy(
ax.transData,
fig=fig,
x=xshift + (window_ext.width * (scalex - 2) + 10 *
(scalex - 2)),
y=0,
units='dots')
else:
trans_offset1 = transforms.offset_copy(
ax.transData, fig=fig, x=xshift, y=0, units='dots')
xshift += window_ext.width * scalex + 10 * scalex
xshifts_list.append(xshift)
trans_offset = transforms.offset_copy(
ax.transData, fig=fig, x=xshift, y=0, units='dots')
if draw_axis:
line = Line2D(
[0, 0], [0, -0.1],
transform=trans_offset1,
linewidth=2,
clip_on=False,
color='black')
ax.add_line(line)
if xindex < len(height_matrix) - 1:
line = Line2D(
[0, 1], [0, 0],
transform=trans_offset1,
linewidth=0.5,
clip_on=False,
color='black')
ax.add_line(line)
ax.text(
0, -0.2, str(xindex + 1), transform=trans_offset1, ha='center')
ax.trans_offsets.append(trans_offset1)
ax.set_xticks([])
ax.set_xticklabels([])
return xshifts_list
def draw_protein(data,
ax,
data_type='bits',
seq_type='dna',
yaxis='bits',
colorscheme='classic',
fontfamily='Arial',
scalex=3):
#ax.set_xticks(range(1, len(data)*3 + 1))
if yaxis == 'probability':
ax.set_yticks(list(range(0, 2)))
elif yaxis == 'bits':
ax.set_yticks(list(range(0, 3)))
#ax.set_xticklabels(range(1, len(data) + 1), rotation=90)
setup_axis(ax, 'y', majorticks=1, minorticks=0.1)
_draw_text_display_coord(
data, ax, fontfamily, colorscheme, scalex=scalex, is_protein=True)
def draw_logo(data,
data_type='bits',
seq_type='dna',
yaxis='bits',
colorscheme='classic',
nrow=1,
ncol=1,
padding=0,
draw_range=None,
coordinate_type='data',
draw_axis=False,
fontfamily='Arial',
debug=False,
ax=None):
"""Draw sequence logo
Parameters
----------
data : str or dict or matrix
data_type : str
Options : 'msa', 'meme', 'jaspar', 'counts', 'pwm', 'pfm', 'ic'
yaxis : str
Type of plot. Options : 'probability', 'bits'
colorscheme : str
Colorschemes. Options for DNA : basepairing/classic.
AA : hydrophobicity, chemistry
nrow : int
Total number of rows in the figure. This doesn't work with 'data' type dict
ncol : int
Total number of columns in the figure. This doesn't work with 'data' type dict
"""
if yaxis not in ['probability', 'bits']:
sys.stderr.write(
    'yaxis must be one of {}; got {}\n'.format(['probability', 'bits'], yaxis))
sys.exit(1)
if not ax:
fig, axarr = plt.subplots(nrow, ncol, squeeze=False)
fig.set_size_inches(((len(data) + 1) * ncol, 3 * nrow))
ax = axarr[0, 0]
else:
fig = ax.get_figure()
axarr = np.array([[ax]])
ax.set_xticks(list(range(1, len(data) + 1)))
ax.set_xticklabels(list(range(1, len(data) + 1)), rotation=90)
pfm, ic = process_data(data, data_type=data_type, seq_type=seq_type)
if yaxis == 'probability':
xshifts_list = _draw_text_data_coord(
pfm, ax, fontfamily, colorscheme, draw_axis=draw_axis)
else:
xshifts_list = _draw_text_data_coord(
ic, ax, fontfamily, colorscheme, draw_axis=draw_axis)
#ax.axis('off')
#despine(
# ax=ax,
# trim=False,
# top=True,
# right=True,
# bottom=True,
# offset=0)
return fig, axarr
#ax.set_xticks(range(1, len(data) + 1))
if draw_axis:
ax.set_xticks(list(range(len(data))))
if yaxis == 'probability':
ax.set_yticks(list(range(0, 2)))
elif yaxis == 'bits':
ax.set_yticks(list(range(0, 3)))
#ax.set_xticklabels(range(1, len(data) + 1), rotation=90)
setup_axis(ax, 'y', majorticks=1, minorticks=0.1)
else:
ax.axis('off')
if data_type != 'bits':
pfm, ic = process_data(data, data_type=data_type, seq_type=seq_type)
else:
ic = data
pfm = data
if draw_range:
# ic = np.array(ic)
ic = ic[draw_range[0]:draw_range[1]]
pfm = pfm[draw_range[0]:draw_range[1]]
if coordinate_type == 'data':
ax = axarr[0, 0]
ax.set_xticks(list(range(1, len(data) + 1)))
ax.set_xticklabels(list(range(1, len(data) + 1)), rotation=90)
if yaxis == 'probability':
xshifts_list = _draw_text_data_coord(
pfm, ax, fontfamily, colorscheme, draw_axis=draw_axis)
else:
xshifts_list = _draw_text_data_coord(
ic, ax, fontfamily, colorscheme, draw_axis=draw_axis)
else:
if yaxis == 'probability':
xshifts_list = _draw_text_display_coord(
pfm, ax, fontfamily, colorscheme, draw_axis=draw_axis)
else:
xshifts_list = _draw_text_display_coord(
ic, ax, fontfamily, colorscheme, draw_axis=draw_axis)
for i in range(nrow):
for j in range(ncol):
if i == j == 0:
despine(
ax=axarr[i, j],
trim=False,
top=True,
right=True,
bottom=True)
continue
despine(
ax=axarr[i, j],
trim=False,
top=True,
right=True,
bottom=True,
offset=0)
axi = axarr[i, j]
#axi.get_shared_x_axes().join(axi, ax)
#axi.set_xticklabels([])
#ax.set_xticklabels(range(1, len(data) + 1), rotation=90)
return fig, axarr
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.mllib.common import inherit_doc
__all__ = ['ALS', 'ALSModel']
@inherit_doc
class ALS(JavaEstimator, HasCheckpointInterval, HasMaxIter, HasPredictionCol, HasRegParam, HasSeed):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communication by only sending
one copy of each user vector to each product block on each
iteration, and only for the product blocks that need that user's
feature vector. This is achieved by pre-computing some information
about the ratings matrix to determine the "out-links" of each user
(which blocks of products it will contribute to) and "in-link"
information for each product (which of the feature vectors it
receives from each user block it will depend on). This allows us to
send only an array of feature vectors between each user block and
product block, and have the product block find the users' ratings
and update the products based on these messages.
For implicit preference data, the algorithm used is based on
"Collaborative Filtering for Implicit Feedback Datasets", available
at `http://dx.doi.org/10.1109/ICDM.2008.22`, adapted for the blocked
approach used here.
Essentially instead of finding the low-rank approximations to the
rating matrix `R`, this finds the approximations for a preference
matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
The ratings then act as 'confidence' values related to strength of
indicated user preferences rather than explicit ratings given to
items.
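    For example, in the linear confidence model of that paper, an observed
    rating `r` is treated as preference 1 held with confidence
    `1 + alpha * r`; this is the role of the `alpha` parameter below.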
>>> df = sqlContext.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
>>> als = ALS(rank=10, maxIter=5)
>>> model = als.fit(df)
>>> model.rank
10
>>> model.userFactors.orderBy("id").collect()
[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
>>> test = sqlContext.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
>>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
>>> predictions[0]
Row(user=0, item=2, prediction=0.39...)
>>> predictions[1]
Row(user=1, item=0, prediction=3.19...)
>>> predictions[2]
Row(user=2, item=0, prediction=-1.15...)
.. versionadded:: 1.4.0
"""
# a placeholder to make it appear in the generated doc
rank = Param(Params._dummy(), "rank", "rank of the factorization")
numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks")
numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks")
implicitPrefs = Param(Params._dummy(), "implicitPrefs", "whether to use implicit preference")
alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference")
userCol = Param(Params._dummy(), "userCol", "column name for user ids")
itemCol = Param(Params._dummy(), "itemCol", "column name for item ids")
ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings")
nonnegative = Param(Params._dummy(), "nonnegative",
"whether to use nonnegative constraint for least squares")
@keyword_only
def __init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10):
"""
        __init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
                 implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
                 ratingCol="rating", nonnegative=False, checkpointInterval=10)
"""
super(ALS, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.ALS", self.uid)
self.rank = Param(self, "rank", "rank of the factorization")
self.numUserBlocks = Param(self, "numUserBlocks", "number of user blocks")
self.numItemBlocks = Param(self, "numItemBlocks", "number of item blocks")
self.implicitPrefs = Param(self, "implicitPrefs", "whether to use implicit preference")
self.alpha = Param(self, "alpha", "alpha for implicit preference")
self.userCol = Param(self, "userCol", "column name for user ids")
self.itemCol = Param(self, "itemCol", "column name for item ids")
self.ratingCol = Param(self, "ratingCol", "column name for ratings")
self.nonnegative = Param(self, "nonnegative",
"whether to use nonnegative constraint for least squares")
self._setDefault(rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10):
"""
setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
ratingCol="rating", nonnegative=False, checkpointInterval=10)
Sets params for ALS.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ALSModel(java_model)
@since("1.4.0")
def setRank(self, value):
"""
Sets the value of :py:attr:`rank`.
"""
self._paramMap[self.rank] = value
return self
@since("1.4.0")
def getRank(self):
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def setNumUserBlocks(self, value):
"""
Sets the value of :py:attr:`numUserBlocks`.
"""
self._paramMap[self.numUserBlocks] = value
return self
@since("1.4.0")
def getNumUserBlocks(self):
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def setNumItemBlocks(self, value):
"""
Sets the value of :py:attr:`numItemBlocks`.
"""
self._paramMap[self.numItemBlocks] = value
return self
@since("1.4.0")
def getNumItemBlocks(self):
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def setNumBlocks(self, value):
"""
Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
"""
        self._paramMap[self.numUserBlocks] = value
        self._paramMap[self.numItemBlocks] = value
        return self
@since("1.4.0")
def setImplicitPrefs(self, value):
"""
Sets the value of :py:attr:`implicitPrefs`.
"""
self._paramMap[self.implicitPrefs] = value
return self
@since("1.4.0")
def getImplicitPrefs(self):
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def setAlpha(self, value):
"""
Sets the value of :py:attr:`alpha`.
"""
self._paramMap[self.alpha] = value
return self
@since("1.4.0")
def getAlpha(self):
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def setUserCol(self, value):
"""
Sets the value of :py:attr:`userCol`.
"""
self._paramMap[self.userCol] = value
return self
@since("1.4.0")
def getUserCol(self):
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def setItemCol(self, value):
"""
Sets the value of :py:attr:`itemCol`.
"""
self._paramMap[self.itemCol] = value
return self
@since("1.4.0")
def getItemCol(self):
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("1.4.0")
def setRatingCol(self, value):
"""
Sets the value of :py:attr:`ratingCol`.
"""
self._paramMap[self.ratingCol] = value
return self
@since("1.4.0")
def getRatingCol(self):
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def setNonnegative(self, value):
"""
Sets the value of :py:attr:`nonnegative`.
"""
self._paramMap[self.nonnegative] = value
return self
@since("1.4.0")
def getNonnegative(self):
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
class ALSModel(JavaModel):
"""
Model fitted by ALS.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def rank(self):
"""rank of the matrix factorization model"""
return self._call_java("rank")
@property
@since("1.4.0")
def userFactors(self):
"""
a DataFrame that stores user factors in two columns: `id` and
`features`
"""
return self._call_java("userFactors")
@property
@since("1.4.0")
def itemFactors(self):
"""
a DataFrame that stores item factors in two columns: `id` and
`features`
"""
return self._call_java("itemFactors")
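# Minimal usage sketch for implicit-feedback data (illustrative only; assumes
# a DataFrame `df` of (user, item, count) rows like the doctest above):
#
#     als = ALS(rank=10, maxIter=5, implicitPrefs=True, alpha=1.0)
#     model = als.fit(df)
#     recommendations = model.transform(test)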
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
sc = SparkContext("local[2]", "ml.recommendation tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
|
|
from django.conf.urls import url
import lfs.manage
import lfs.manage.actions.views
import lfs.manage.categories.category
import lfs.manage.categories.portlet
import lfs.manage.categories.products
import lfs.manage.categories.view
import lfs.manage.customer_tax.views
import lfs.manage.discounts.views
import lfs.manage.images.views
import lfs.manage.information.views
import lfs.manage.pages.views
import lfs.manage.product
import lfs.manage.product.accessories
import lfs.manage.product.attachments
import lfs.manage.product.categories
import lfs.manage.product.images
import lfs.manage.product.properties
import lfs.manage.product.related_products
import lfs.manage.product.variants
import lfs.manage.product_taxes.views
import lfs.manage.property.views
import lfs.manage.property_groups.views
import lfs.manage.shipping_methods.views
import lfs.manage.static_blocks.views
import lfs.manage.views.carts
import lfs.manage.views.customer
import lfs.manage.views.criteria
import lfs.manage.views.dashboard
import lfs.manage.views.export
import lfs.manage.views.marketing.marketing
import lfs.manage.views.marketing.featured
import lfs.manage.views.marketing.rating_mails
import lfs.manage.views.marketing.topseller
import lfs.manage.views.orders
import lfs.manage.views.payment
import lfs.manage.views.review
import lfs.manage.views.shop
import lfs.manage.views.utils
from lfs.catalog.models import Product
from lfs.catalog.models import Category
from lfs.core.models import Shop
from lfs.manage.product.seo import SEOForm as ProductSEOForm
from lfs.manage.pages.views import PageSEOView
from lfs.manage.views.shop import ShopSEOView
from lfs.manage.seo.views import SEOView
from lfs.manage.delivery_times import views as delivery_times_views
from lfs.manage.manufacturers import views as manufacturers_views
from lfs.manage.manufacturers import products as manufacturers_products_views
from lfs.manage.voucher import views as voucher_views
from lfs.manage.views import lfs_portlets
from lfs.manage.product import product
from lfs.manufacturer.models import Manufacturer
from lfs.page.models import Page
urlpatterns = [
url(r'^$', lfs.manage.views.dashboard.dashboard, name="lfs_manage_dashboard"),
# Delivery Times
url(r'^delivery_times$', delivery_times_views.manage_delivery_times, name="lfs_manage_delivery_times"),
url(r'^delivery_time/(?P<id>\d*)$', delivery_times_views.manage_delivery_time, name="lfs_manage_delivery_time"),
url(r'^add-delivery-time$', delivery_times_views.add_delivery_time, name="lfs_manage_add_delivery_time"),
url(r'^delete-delivery-time/(?P<id>\d*)$', delivery_times_views.delete_delivery_time, name="lfs_delete_delivery_time"),
url(r'^no-times$', delivery_times_views.no_delivery_times, name="lfs_no_delivery_times"),
# Manufacturer
url(r'^manufacturer-dispatcher$', manufacturers_views.manufacturer_dispatcher, name="lfs_manufacturer_dispatcher"),
url(r'^manufacturer/(?P<manufacturer_id>\d*)$', manufacturers_views.manage_manufacturer, name="lfs_manage_manufacturer"),
url(r'^update-manufacturer-data/(?P<manufacturer_id>\d*)$', manufacturers_views.update_data, name="lfs_manufacturer_update_manufacturer_data"),
url(r'^add-manufacturer$', manufacturers_views.add_manufacturer, name="lfs_manufacturer_add_manufacturer"),
url(r'^delete-manufacturer/(?P<manufacturer_id>\d*)$', manufacturers_views.delete_manufacturer, name="lfs_manufacturer_delete_manufacturer"),
url(r'^edit-category-manufacturer/(?P<manufacturer_id>\d*)/(?P<category_id>\d*)$', manufacturers_views.edit_category, name="lfs_manufacturer_edit_category"),
url(r'^edit-product-manufacturer/(?P<manufacturer_id>\d*)/(?P<product_id>\d*)$', manufacturers_views.edit_product, name="lfs_manufacturer_edit_product"),
url(r'^category-state-manufacturer/(?P<manufacturer_id>\d*)/(?P<category_id>\d*)$', manufacturers_views.category_state, name="lfs_manufacturer_category_state"),
url(r'^manufacturer-inline/(?P<manufacturer_id>\d*)/(?P<category_id>\d*)$', manufacturers_views.manufacturer_inline, name="lfs_manufacturer_inline"),
url(r'^manufacturers-ajax/$', manufacturers_views.manufacturers_ajax, name="lfs_manufacturers_ajax"),
url(r'^no-manufacturers$', manufacturers_views.no_manufacturers, name="lfs_manage_no_manufacturers"),
url(r'^edit-manufacturer-view/(?P<manufacturer_id>\d*)$', manufacturers_views.manufacturer_view, name="lfs_manage_manufacturer_view"),
url(r'^manufacturer-products-inline/(?P<manufacturer_id>\d*)$', manufacturers_products_views.products_inline, name="lfs_manage_manufacturer_products_inline"),
url(r'^manufacturer-selected-products/(?P<manufacturer_id>\d*)$', manufacturers_products_views.selected_products, name="lfs_manage_manufacturer_selected_products"),
url(r'^manufacturer-add-products/(?P<manufacturer_id>\d*)$', manufacturers_products_views.add_products, name="lfs_manage_manufacturer_add_products"),
url(r'^manufacturer-remove-products/(?P<manufacturer_id>\d*)$', manufacturers_products_views.remove_products, name="lfs_manage_manufacturer_remove_products"),
url(r'^manufacturer-load-products-tab/(?P<manufacturer_id>\d*)$', manufacturers_products_views.products_tab, name="lfs_manufacturer_load_products_tab"),
# Marketing
url(r'^featured$', lfs.manage.views.marketing.marketing.manage_featured_page, name="lfs_manage_featured"),
url(r'^marketing$', lfs.manage.views.marketing.marketing.manage_marketing, name="lfs_manage_marketing"),
url(r'^add-featured$', lfs.manage.views.marketing.featured.add_featured, name="lfs_manage_add_featured"),
url(r'^update-featured$', lfs.manage.views.marketing.featured.update_featured, name="lfs_manage_update_featured"),
url(r'^featured-inline$', lfs.manage.views.marketing.featured.manage_featured_inline, name="lfs_manage_featured_inline"),
url(r'^manage-rating-mails$', lfs.manage.views.marketing.rating_mails.manage_rating_mails, name="lfs_manage_rating_mails"),
url(r'^send-rating-mails$', lfs.manage.views.marketing.rating_mails.send_rating_mails, name="lfs_send_rating_mails"),
url(r'^add-topseller$', lfs.manage.views.marketing.topseller.add_topseller, name="lfs_manage_add_topseller"),
url(r'^update-topseller$', lfs.manage.views.marketing.topseller.update_topseller, name="lfs_manage_update_topseller"),
url(r'^topseller-inline$', lfs.manage.views.marketing.topseller.manage_topseller_inline, name="lfs_manage_topseller_inline"),
# Voucher
url(r'^vouchers$', voucher_views.manage_vouchers, name="lfs_manage_vouchers"),
url(r'^no-vouchers$', voucher_views.no_vouchers, name="lfs_no_vouchers"),
url(r'^add-voucher-group$', voucher_views.add_voucher_group, name="lfs_manage_add_voucher_group"),
url(r'^voucher-group/(?P<id>\d+)$', voucher_views.voucher_group, name="lfs_manage_voucher_group"),
url(r'^delete-voucher-group/(?P<id>\d+)$', voucher_views.delete_voucher_group, name="lfs_delete_voucher_group"),
url(r'^save-voucher-group-data/(?P<id>\d+)$', voucher_views.save_voucher_group_data, name="lfs_manage_save_voucher_group_data"),
url(r'^save-voucher-options$', voucher_views.save_voucher_options, name="lfs_manage_save_voucher_options"),
url(r'^add-vouchers/(?P<group_id>\d+)$', voucher_views.add_vouchers, name="lfs_manage_add_vouchers"),
url(r'^delete-vouchers/(?P<group_id>\d+)$', voucher_views.delete_vouchers, name="lfs_manage_delete_vouchers"),
url(r'^set-vouchers-page$', voucher_views.set_vouchers_page, name="lfs_set_vouchers_page"),
# Portlets
url(r'^add-portlet/(?P<object_type_id>\d+)/(?P<object_id>\d+)$', lfs_portlets.add_portlet, name="lfs_add_portlet"),
url(r'^update-portlets/(?P<object_type_id>\d+)/(?P<object_id>\d+)$', lfs_portlets.update_portlets, name="lfs_update_portlets"),
url(r'^delete-portlet/(?P<portletassignment_id>\d+)$', lfs_portlets.delete_portlet, name="lfs_delete_portlet"),
url(r'^edit-portlet/(?P<portletassignment_id>\d+)$', lfs_portlets.edit_portlet, name="lfs_edit_portlet"),
url(r'^move-portlet/(?P<portletassignment_id>\d+)$', lfs_portlets.move_portlet, name="lfs_move_portlet"),
# Product
url(r'^product-dispatcher$', product.product_dispatcher, name="lfs_manage_product_dispatcher"),
url(r'^product-by-id/(?P<product_id>\d*)$', product.product_by_id, name="lfs_manage_product_by_id"),
url(r'^product/(?P<product_id>\d*)$', product.manage_product, name="lfs_manage_product"),
url(r'^product-data-form/(?P<product_id>\d*)$', product.product_data_form),
url(r'^add-product$', product.add_product, name="lfs_manage_add_product"),
url(r'^edit-product-data/(?P<product_id>\d*)$', product.edit_product_data, name="lfs_manage_edit_product_data"),
url(r'^delete-product/(?P<product_id>\d*)$', product.delete_product, name="lfs_manage_delete_product"),
url(r'^selectable-products-inline$', product.selectable_products_inline, name="lfs_manage_selectable_products_inline"),
url(r'^save-product-stock/(?P<product_id>\d*)$', product.stock, name="lfs_save_product_stock"),
url(r'^change-product-subtype/(?P<product_id>\d*)$', product.change_subtype, name="lfs_change_product_subtype"),
url(r'^products$', product.products, name="lfs_manage_products"),
url(r'^products-inline$', product.products_inline, name="lfs_products_inline"),
url(r'^save-products$', product.save_products, name="lfs_manage_save_products"),
url(r'^set-product-filters$', product.set_filters, name="lfs_set_product_filters"),
url(r'^set-product-name-filter$', product.set_name_filter, name="lfs_set_product_name_filter"),
url(r'^reset-product-filters$', product.reset_filters, name="lfs_reset_product_filters"),
url(r'^set-products-page$', product.set_products_page, name="lfs_set_products_page"),
url(r'^no-products$', product.no_products, name="lfs_manage_no_products"),
url(r'^product-categories-tab/(?P<product_id>\d*)$', lfs.manage.product.categories.manage_categories, name="lfs_product_categories_tab"),
url(r'^product-accessories-tab/(?P<product_id>\d*)$', lfs.manage.product.accessories.load_tab, name="lfs_manage_product_accessories_tab"),
url(r'^product-relateds-tab/(?P<product_id>\d*)$', lfs.manage.product.related_products.load_tab, name="lfs_manage_product_related_products_tab"),
url(r'^product-variants-tab/(?P<product_id>\d*)$', lfs.manage.product.variants.manage_variants, name="lfs_manage_product_variants_tab"),
url(r'^change-product-categories/(?P<product_id>\d*)$', lfs.manage.product.categories.change_categories, name="lfs_manage_product_categories"),
# Product Images
url(r'^add-image/(?P<product_id>\d*)$', lfs.manage.product.images.add_image, name="lfs_manage_add_image"),
url(r'^update-images/(?P<product_id>\d*)$', lfs.manage.product.images.update_images, name="lfs_manage_update_images"),
url(r'^product-images/(?P<product_id>\d*)$', lfs.manage.product.images.list_images, name="lfs_manage_images"),
url(r'^update-active-images/(?P<product_id>\d*)$', lfs.manage.product.images.update_active_images, name="lfs_manage_update_active_images"),
url(r'^move-image/(?P<id>\d+)$', lfs.manage.product.images.move_image, name="lfs_move_image"),
# Product Attachments
url(r'^add-attachment/(?P<product_id>\d*)$', lfs.manage.product.attachments.add_attachment, name="lfs_manage_add_attachment"),
url(r'^update-attachments/(?P<product_id>\d*)$', lfs.manage.product.attachments.update_attachments, name="lfs_manage_update_attachments"),
url(r'^product-attachments/(?P<product_id>\d*)$', lfs.manage.product.attachments.list_attachments, name="lfs_manage_attachments"),
url(r'^move-product-attachments/(?P<id>\d+)$', lfs.manage.product.attachments.move_attachment, name="lfs_move_product_attachment"),
# Product variants
url(r'^properties/(?P<product_id>\d*)$', lfs.manage.product.variants.manage_variants, name="lfs_manage_variants"),
url(r'^add-property/(?P<product_id>\d*)$', lfs.manage.product.variants.add_property, name="lfs_manage_add_property"),
url(r'^add-property-option/(?P<product_id>\d*)$', lfs.manage.product.variants.add_property_option, name="lfs_manage_add_property_option"),
url(r'^delete-property/(?P<product_id>\d*)/(?P<property_id>\d*)$', lfs.manage.product.variants.delete_property, name="lfs_manage_delete_property"),
url(r'^delete-property-option/(?P<product_id>\d*)/(?P<option_id>\d*)$', lfs.manage.product.variants.delete_property_option, name="lfs_manage_delete_property_option"),
url(r'^change-property-position$', lfs.manage.product.variants.change_property_position, name="lfs_manage_change_property_position"),
url(r'^update-variants/(?P<product_id>\d*)$', lfs.manage.product.variants.update_variants, name="lfs_manage_update_variants"),
url(r'^add-variants/(?P<product_id>\d*)$', lfs.manage.product.variants.add_variants, name="lfs_manage_add_variants"),
url(r'^edit-sub-type/(?P<product_id>\d*)$', lfs.manage.product.variants.edit_sub_type, name="lfs_manage_edit_sub_type"),
url(r'^update-category-variant/(?P<product_id>\d*)$', lfs.manage.product.variants.update_category_variant, name="lfs_update_category_variant"),
# Global Images
url(r'^imagebrowser$', lfs.manage.images.views.imagebrowser, name="lfs_manage_imagebrowser"),
url(r'^global-images$', lfs.manage.images.views.images, name="lfs_manage_global_images"),
url(r'^global-images-list$', lfs.manage.images.views.images_list, name="lfs_manage_global_images_list"),
url(r'^delete-global-images$', lfs.manage.images.views.delete_images, name="lfs_manage_delete_images"),
url(r'^add-global-images$', lfs.manage.images.views.add_images, name="lfs_manage_add_global_image"),
# Property Groups
url(r'^property-groups', lfs.manage.property_groups.views.manage_property_groups, name="lfs_manage_property_groups"),
url(r'^property-group/(?P<id>\d*)', lfs.manage.property_groups.views.manage_property_group, name="lfs_manage_property_group"),
url(r'^add-property-group', lfs.manage.property_groups.views.add_property_group, name="lfs_manage_add_property_group"),
url(r'^delete-property-group/(?P<id>\d*)', lfs.manage.property_groups.views.delete_property_group, name="lfs_delete_property_group"),
url(r'^assign-properties/(?P<group_id>\d*)', lfs.manage.property_groups.views.assign_properties, name="lfs_assign_properties"),
url(r'^update-properties/(?P<group_id>\d*)', lfs.manage.property_groups.views.update_properties, name="lfs_update_properties"),
url(r'^no-property-groups$', lfs.manage.property_groups.views.no_property_groups, name="lfs_manage_no_property_groups"),
url(r'^sort-property-groups$', lfs.manage.property_groups.views.sort_property_groups, name="lfs_manage_sort_property_groups"),
# Property Groups / Products
url(r'^assign-products-to-property-group/(?P<group_id>\d*)', lfs.manage.property_groups.views.assign_products, name="lfs_assign_products_to_property_group"),
url(r'^remove-products-from-property-group/(?P<group_id>\d*)', lfs.manage.property_groups.views.remove_products, name="lfs_pg_remove_products"),
url(r'^pg-products-inline/(?P<product_group_id>\d*)', lfs.manage.property_groups.views.products_inline, name="lfs_pg_products_inline"),
# Shop Properties
url(r'^shop-properties$', lfs.manage.property.views.manage_properties, name="lfs_manage_shop_properties"),
url(r'^shop-property/(?P<id>\d*)', lfs.manage.property.views.manage_property, name="lfs_manage_shop_property"),
url(r'^update-shop-property-type/(?P<id>\d*)', lfs.manage.property.views.update_property_type, name="lfs_update_shop_property_type"),
url(r'^add-shop-property$', lfs.manage.property.views.add_property, name="lfs_add_shop_property"),
url(r'^delete-shop-property/(?P<id>\d*)', lfs.manage.property.views.delete_property, name="lfs_delete_shop_property"),
url(r'^add-shop-property-option/(?P<property_id>\d*)', lfs.manage.property.views.add_option, name="lfs_add_shop_property_option"),
url(r'^add-shop-property-step/(?P<property_id>\d*)', lfs.manage.property.views.add_step, name="lfs_add_shop_property_step"),
url(r'^save-shop-property-step/(?P<property_id>\d*)', lfs.manage.property.views.save_step_range, name="lfs_save_shop_property_step_range"),
url(r'^save-shop-property-step-type/(?P<property_id>\d*)', lfs.manage.property.views.save_step_type, name="lfs_save_shop_property_step_type"),
url(r'^delete-shop-property-option/(?P<id>\d*)', lfs.manage.property.views.delete_option, name="lfs_delete_shop_property_option"),
url(r'^delete-shop-property-step/(?P<id>\d*)', lfs.manage.property.views.delete_step, name="lfs_delete_shop_property_step"),
url(r'^save-number-field-validators/(?P<property_id>\d*)', lfs.manage.property.views.save_number_field_validators, name="lfs_save_number_field_validators"),
url(r'^save-select-field/(?P<property_id>\d*)', lfs.manage.property.views.save_select_field, name="lfs_save_select_field"),
url(r'^no-properties$', lfs.manage.property.views.no_properties, name="lfs_manage_no_shop_properties"),
url(r'^set-property-name-filter$', lfs.manage.property.views.set_name_filter, name="lfs_set_property_name_filter"),
url(r'^set-property-page$', lfs.manage.property.views.set_properties_page, name="lfs_set_properties_page"),
# Product properties
url(r'^update-product-properties/(?P<product_id>\d*)$', lfs.manage.product.properties.update_properties, name="lfs_update_product_properties"),
url(r'^update-product-property-groups/(?P<product_id>\d*)$', lfs.manage.product.properties.update_property_groups, name="lfs_update_product_property_groups"),
    # Accessories
url(r'^accessories/(?P<product_id>\d*)$', lfs.manage.product.accessories.manage_accessories, name="lfs_manage_accessories"),
url(r'^accessories-inline/(?P<product_id>\d*)$', lfs.manage.product.accessories.manage_accessories_inline, name="lfs_manage_accessories_inline"),
url(r'^add-accessories/(?P<product_id>\d*)$', lfs.manage.product.accessories.add_accessories, name="lfs_manage_add_accessories"),
url(r'^remove-accessories/(?P<product_id>\d*)$', lfs.manage.product.accessories.remove_accessories, name="lfs_manage_remove_accessories"),
url(r'^update-accessories/(?P<product_id>\d*)$', lfs.manage.product.accessories.update_accessories, name="lfs_manage_update_accessories"),
# Related Products
url(r'^related-products/(?P<product_id>\d*)$', lfs.manage.product.related_products.manage_related_products, name="lfs_manage_related_products"),
url(r'^related-products-inline/(?P<product_id>\d*)$', lfs.manage.product.related_products.manage_related_products_inline, name="lfs_manage_related_products_inline"),
url(r'^add-related-products/(?P<product_id>\d*)$', lfs.manage.product.related_products.add_related_products, name="lfs_manage_add_related_products"),
url(r'^remove-related-products/(?P<product_id>\d*)$', lfs.manage.product.related_products.remove_related_products, name="lfs_manage_remove_related_products"),
url(r'^manage-related-products/(?P<product_id>\d*)$', lfs.manage.product.related_products.update_related_products, name="lfs_manage_update_related_products"),
# Carts
url(r'^carts$', lfs.manage.views.carts.carts_view, name="lfs_manage_carts"),
url(r'^carts-inline$', lfs.manage.views.carts.carts_inline, name="lfs_carts_inline"),
url(r'^cart-inline/(?P<cart_id>\d*)$', lfs.manage.views.carts.cart_inline, name="lfs_cart_inline"),
url(r'^cart/(?P<cart_id>\d*)$', lfs.manage.views.carts.cart_view, name="lfs_manage_cart"),
url(r'^selectable-carts-inline$', lfs.manage.views.carts.selectable_carts_inline, name="lfs_selectable_carts_inline"),
url(r'^set-cart-filters$', lfs.manage.views.carts.set_cart_filters, name="lfs_set_cart_filters"),
url(r'^set-cart-filters-date$', lfs.manage.views.carts.set_cart_filters_date, name="lfs_set_cart_filters_date"),
url(r'^reset-cart-filters$', lfs.manage.views.carts.reset_cart_filters, name="lfs_reset_cart_filters"),
url(r'^set-carts-page$', lfs.manage.views.carts.set_carts_page, name="lfs_set_carts_page"),
url(r'^set-cart-page$', lfs.manage.views.carts.set_cart_page, name="lfs_set_cart_page"),
# Categories
url(r'^categories$', lfs.manage.categories.category.manage_categories, name="lfs_manage_categories"),
url(r'^category/(?P<category_id>\d*)$', lfs.manage.categories.category.manage_category, name="lfs_manage_category"),
url(r'^category-by-id/(?P<category_id>\d*)$', lfs.manage.categories.category.category_by_id, name="lfs_category_by_id"),
url(r'^add-products/(?P<category_id>\d*)$', lfs.manage.categories.products.add_products, name="lfs_manage_category_add_products"),
url(r'^remove-products/(?P<category_id>\d*)$', lfs.manage.categories.products.remove_products, name="lfs_manage_category_remove_products"),
url(r'^add-top-category$', lfs.manage.categories.category.add_category, name="lfs_manage_add_top_category"),
url(r'^add-category/(?P<category_id>\d*)$', lfs.manage.categories.category.add_category, name="lfs_manage_add_category"),
url(r'^delete-category/(?P<id>[-\w]*)$', lfs.manage.categories.category.delete_category, name="lfs_delete_category"),
url(r'^products-inline/(?P<category_id>\d*)$', lfs.manage.categories.products.products_inline, name="lfs_manage_category_products_inline"),
url(r'^edit-category-data/(?P<category_id>\d*)$', lfs.manage.categories.category.edit_category_data, name="lfs_manage_category_edit_data"),
url(r'^edit-category-view/(?P<category_id>\d*)$', lfs.manage.categories.category.category_view, name="lfs_manage_category_view"),
url(r'^selected-products/(?P<category_id>\d*)$', lfs.manage.categories.products.selected_products, name="lfs_selected_products"),
url(r'^load-products-tab/(?P<category_id>\d*)$', lfs.manage.categories.products.products_tab, name="lfs_load_products_tab"),
url(r'^sort-categories$', lfs.manage.categories.category.sort_categories, name="lfs_sort_categories"),
url(r'^no-categories$', lfs.manage.categories.view.no_categories, name="lfs_manage_no_categories"),
# Customers
url(r'^customers$', lfs.manage.views.customer.customers, name="lfs_manage_customers"),
url(r'^customers-inline$', lfs.manage.views.customer.customers_inline, name="lfs_customers_inline"),
url(r'^customer/(?P<customer_id>\d*)$', lfs.manage.views.customer.customer, name="lfs_manage_customer"),
url(r'^customer-inline/(?P<customer_id>\d*)$', lfs.manage.views.customer.customer_inline, name="lfs_customer_inline"),
url(r'^set-customer-filters$', lfs.manage.views.customer.set_customer_filters, name="lfs_set_customer_filters"),
url(r'^reset-customer-filters$', lfs.manage.views.customer.reset_customer_filters, name="lfs_reset_customer_filters"),
url(r'^set-customer-ordering/(?P<ordering>\w*)$', lfs.manage.views.customer.set_ordering, name="lfs_set_customer_ordering"),
url(r'^selectable-customers-inline$', lfs.manage.views.customer.selectable_customers_inline, name="lfs_selectable_customers_inline"),
url(r'^set-selectable-customers-page$', lfs.manage.views.customer.set_selectable_customers_page, name="lfs_set_selectable_customers_page"),
url(r'^set-customers-page$', lfs.manage.views.customer.set_customers_page, name="lfs_set_customers_page"),
    # Export
url(r'^export-dispatcher$', lfs.manage.views.export.export_dispatcher, name="lfs_export_dispatcher"),
url(r'^export/(?P<export_id>\d*)$', lfs.manage.views.export.manage_export, name="lfs_export"),
url(r'^export-inline/(?P<export_id>\d*)/(?P<category_id>\d*)$', lfs.manage.views.export.export_inline, name="lfs_export_inline"),
url(r'^edit-category/(?P<export_id>\d*)/(?P<category_id>\d*)$', lfs.manage.views.export.edit_category, name="lfs_export_edit_category"),
url(r'^edit-product/(?P<export_id>\d*)/(?P<product_id>\d*)$', lfs.manage.views.export.edit_product, name="lfs_export_edit_product"),
url(r'^category-state/(?P<export_id>\d*)/(?P<category_id>\d*)$', lfs.manage.views.export.category_state, name="lfs_export_category_state"),
url(r'^update-export-data/(?P<export_id>\d*)$', lfs.manage.views.export.update_data, name="lfs_export_update_export_data"),
url(r'^add-export$', lfs.manage.views.export.add_export, name="lfs_export_add_export"),
url(r'^delete-export/(?P<export_id>\d*)$', lfs.manage.views.export.delete_export, name="lfs_export_delete_export"),
url(r'^export-export/(?P<slug>[-\w]*)$', lfs.manage.views.export.export, name="lfs_export_export"),
url(r'^update-category-variants-option/(?P<export_id>\d*)/(?P<category_id>\d*)$', lfs.manage.views.export.update_category_variants_option, name="lfs_export_update_category_variants_option"),
# Shipping Methods
url(r'^shipping$', lfs.manage.shipping_methods.views.manage_shipping, name="lfs_manage_shipping"),
url(r'^shipping-method/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.manage_shipping_method, name="lfs_manage_shipping_method"),
url(r'^add-shipping-method', lfs.manage.shipping_methods.views.add_shipping_method, name="lfs_manage_add_shipping_method"),
url(r'^save-shipping-data/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.save_shipping_method_data, name="lfs_manage_save_shipping_method_data"),
url(r'^delete-shipping-method/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.delete_shipping_method, name="lfs_manage_delete_shipping_method"),
url(r'^add-shipping-price/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.add_shipping_price, name="lfs_manage_add_shipping_price"),
url(r'^update-shipping-prices/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.update_shipping_prices, name="lfs_manage_update_shipping_prices"),
url(r'^shipping-price-criteria/(?P<shipping_price_id>\d*)$', lfs.manage.shipping_methods.views.shipping_price_criteria, name="lfs_manage_shipping_price_criteria"),
url(r'^save-shipping-price-criteria/(?P<shipping_price_id>\d*)$', lfs.manage.shipping_methods.views.save_shipping_price_criteria, name="lfs_manage_save_shipping_price_criteria"),
url(r'^save-shipping-method-criteria/(?P<shipping_method_id>\d*)$', lfs.manage.shipping_methods.views.save_shipping_method_criteria, name="lfs_manage_save_shipping_method_criteria"),
url(r'^sort-shipping-methods$', lfs.manage.shipping_methods.views.sort_shipping_methods, name="lfs_sort_shipping_methods"),
url(r'^no-shipping-methods$', lfs.manage.shipping_methods.views.no_shipping_methods, name="lfs_manage_no_shipping_methods"),
# Discounts
url(r'^discounts$', lfs.manage.discounts.views.manage_discounts, name="lfs_manage_discounts"),
url(r'^discount/(?P<id>\d*)$', lfs.manage.discounts.views.manage_discount, name="lfs_manage_discount"),
url(r'^add-discount', lfs.manage.discounts.views.add_discount, name="lfs_manage_add_discount"),
url(r'^save-discount-data/(?P<id>\d*)$', lfs.manage.discounts.views.save_discount_data, name="lfs_manage_save_discount_data"),
url(r'^delete-discount/(?P<id>\d*)$', lfs.manage.discounts.views.delete_discount, name="lfs_manage_delete_discount"),
url(r'^save-discount-criteria/(?P<id>\d*)$', lfs.manage.discounts.views.save_discount_criteria, name="lfs_manage_save_discount_criteria"),
url(r'^no-discounts$', lfs.manage.discounts.views.no_discounts, name="lfs_manage_no_discounts"),
# Discounts / Products
url(r'^assign-products-to-discount/(?P<discount_id>\d*)', lfs.manage.discounts.views.assign_products, name="lfs_assign_products_to_discount"),
url(r'^remove-products-from-discount/(?P<discount_id>\d*)', lfs.manage.discounts.views.remove_products, name="lfs_discount_remove_products"),
url(r'^discount-products-inline/(?P<discount_id>\d*)', lfs.manage.discounts.views.products_inline, name="lfs_discount_products_inline"),
# Pages
url(r'^add-page$', lfs.manage.pages.views.add_page, name="lfs_add_page"),
url(r'^delete-page/(?P<id>\d*)$', lfs.manage.pages.views.delete_page, name="lfs_delete_page"),
url(r'^manage-pages$', lfs.manage.pages.views.manage_pages, name="lfs_manage_pages"),
url(r'^manage-page/(?P<id>\d*)$', lfs.manage.pages.views.manage_page, name="lfs_manage_page"),
url(r'^page-by-id/(?P<id>\d*)$', lfs.manage.pages.views.page_view_by_id, name="lfs_page_view_by_id"),
url(r'^sort-pages$', lfs.manage.pages.views.sort_pages, name="lfs_sort_pages"),
url(r'^save-page-data-tab/(?P<id>\d*)$', lfs.manage.pages.views.save_data_tab, name="lfs_save_page_data_tab"),
# Payment
url(r'^payment$', lfs.manage.views.payment.manage_payment, name="lfs_manage_payment"),
url(r'^payment-method/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.manage_payment_method, name="lfs_manage_payment_method"),
url(r'^add-payment-method', lfs.manage.views.payment.add_payment_method, name="lfs_add_payment_method"),
url(r'^save-payment-data/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.save_payment_method_data, name="lfs_manage_save_payment_method_data"),
url(r'^delete-payment-method/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.delete_payment_method, name="lfs_delete_payment_method"),
url(r'^add-payment-price/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.add_payment_price, name="lfs_manage_add_payment_price"),
url(r'^update-payment-prices/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.update_payment_prices, name="lfs_manage_update_payment_prices"),
url(r'^payment-price-criteria/(?P<payment_price_id>\d*)$', lfs.manage.views.payment.payment_price_criteria, name="lfs_manage_payment_price_criteria"),
url(r'^save-payment-price-criteria/(?P<payment_price_id>\d*)$', lfs.manage.views.payment.save_payment_price_criteria, name="lfs_manage_save_payment_price_criteria"),
url(r'^save-payment-method-criteria/(?P<payment_method_id>\d*)$', lfs.manage.views.payment.save_payment_method_criteria, name="lfs_manage_save_payment_method_criteria"),
url(r'^sort-payment-methods$', lfs.manage.views.payment.sort_payment_methods, name="lfs_sort_payment_methods"),
# Orders
url(r'^manage-orders$', lfs.manage.views.orders.manage_orders, name="lfs_manage_orders"),
url(r'^orders$', lfs.manage.views.orders.orders_view, name="lfs_orders"),
url(r'^orders-inline$', lfs.manage.views.orders.orders_inline, name="lfs_orders_inline"),
url(r'^order/(?P<order_id>\d*)$', lfs.manage.views.orders.order_view, name="lfs_manage_order"),
url(r'^delete-order/(?P<order_id>\d*)$', lfs.manage.views.orders.delete_order, name="lfs_delete_order"),
url(r'^send-order/(?P<order_id>\d*)$', lfs.manage.views.orders.send_order, name="lfs_send_order"),
url(r'^set-orders-filter$', lfs.manage.views.orders.set_order_filters, name="lfs_set_order_filter"),
url(r'^set-orders-filter-date$', lfs.manage.views.orders.set_order_filters_date, name="lfs_set_order_filters_date"),
url(r'^reset-order-filter$', lfs.manage.views.orders.reset_order_filters, name="lfs_reset_order_filters"),
url(r'^set-selectable-orders-page$', lfs.manage.views.orders.set_selectable_orders_page, name="lfs_set_selectable_orders_page"),
url(r'^set-orders-page$', lfs.manage.views.orders.set_orders_page, name="lfs_set_orders_page"),
url(r'^change-order-state$', lfs.manage.views.orders.change_order_state, name="lfs_change_order_state"),
# Order numbers
url(r'^save-order-numbers-tab$', lfs.manage.views.shop.save_order_numbers_tab, name="lfs_save_order_numbers_tab"),
# Criteria
url(r'^add-criterion', lfs.manage.views.criteria.add_criterion, name="lfs_add_criterion"),
url(r'^change-criterion', lfs.manage.views.criteria.change_criterion_form, name="lfs_manage_criteria_change_criterion_form"),
# Static blocks
url(r'^add-static-block$', lfs.manage.static_blocks.views.add_static_block, name="lfs_manage_add_static_block"),
url(r'^delete-static-block/(?P<id>\d*)$', lfs.manage.static_blocks.views.delete_static_block, name="lfs_delete_static_block"),
url(r'^preview-static-block/(?P<id>\d*)$', lfs.manage.static_blocks.views.preview_static_block, name="lfs_preview_static_block"),
url(r'^static-blocks$', lfs.manage.static_blocks.views.manage_static_blocks, name="lfs_manage_static_blocks"),
url(r'^static-block/(?P<id>\d*)$', lfs.manage.static_blocks.views.manage_static_block, name="lfs_manage_static_block"),
url(r'^add_files/(?P<id>[-\w]*)', lfs.manage.static_blocks.views.add_files, name="lfs_add_files_to_static_block"),
url(r'^update_files/(?P<id>[-\w]*)', lfs.manage.static_blocks.views.update_files, name="lfs_manage_update_files_sb"),
url(r'^reload_files/(?P<id>[-\w]*)', lfs.manage.static_blocks.views.reload_files, name="lfs_reload_files"),
url(r'^sort-static-blocks$', lfs.manage.static_blocks.views.sort_static_blocks, name="lfs_sort_static_blocks"),
url(r'^no-static-blocks$', lfs.manage.static_blocks.views.no_static_blocks, name="lfs_manage_no_static_blocks"),
# Reviews
url(r'^reviews$', lfs.manage.views.review.reviews, name="lfs_manage_reviews"),
url(r'^review/(?P<review_id>\d*)$', lfs.manage.views.review.review, name="lfs_manage_review"),
url(r'^set-review-filters$', lfs.manage.views.review.set_review_filters, name="lfs_set_review_filters"),
url(r'^reset-review-filters$', lfs.manage.views.review.reset_review_filters, name="lfs_reset_review_filters"),
url(r'^set-review-ordering/(?P<ordering>\w*)$', lfs.manage.views.review.set_ordering, name="lfs_set_review_ordering"),
url(r'^set-review-state/(?P<review_id>\d*)$', lfs.manage.views.review.set_review_state, name="lfs_set_review_state"),
url(r'^delete-review/(?P<review_id>\d*)$', lfs.manage.views.review.delete_review, name="lfs_delete_review"),
url(r'^set-reviews-page$', lfs.manage.views.review.set_reviews_page, name="lfs_set_reviews_page"),
url(r'^set-selectable-reviews-page$', lfs.manage.views.review.set_selectable_reviews_page, name="lfs_set_selectable_reviews_page"),
# Shop
url(r'^shop$', lfs.manage.views.shop.manage_shop, name="lfs_manage_shop"),
url(r'^save-shop-data-tab$', lfs.manage.views.shop.save_data_tab, name="lfs_save_shop_data_tab"),
url(r'^save-shop-default-values-tab$', lfs.manage.views.shop.save_default_values_tab, name="lfs_save_shop_default_values_tab"),
# Actions
url(r'^actions$', lfs.manage.actions.views.manage_actions, name="lfs_manage_actions"),
url(r'^action/(?P<id>\d*)$', lfs.manage.actions.views.manage_action, name="lfs_manage_action"),
url(r'^no-actions$', lfs.manage.actions.views.no_actions, name="lfs_no_actions"),
url(r'^add-action$', lfs.manage.actions.views.add_action, name="lfs_add_action"),
url(r'^delete-action/(?P<id>\d*)$', lfs.manage.actions.views.delete_action, name="lfs_delete_action"),
url(r'^save-action/(?P<id>\d*)$', lfs.manage.actions.views.save_action, name="lfs_save_action"),
url(r'^sort-actions$', lfs.manage.actions.views.sort_actions, name="lfs_sort_actions"),
# Product Taxes
url(r'^add-product-tax$', lfs.manage.product_taxes.views.add_tax, name="lfs_manage_add_tax"),
url(r'^delete-product-tax/(?P<id>\d*)$', lfs.manage.product_taxes.views.delete_tax, name="lfs_delete_tax"),
url(r'^product-taxes$', lfs.manage.product_taxes.views.manage_taxes, name="lfs_manage_taxes"),
url(r'^product-tax/(?P<id>\d*)$', lfs.manage.product_taxes.views.manage_tax, name="lfs_manage_tax"),
url(r'^no-product-taxes$', lfs.manage.product_taxes.views.no_taxes, name="lfs_manage_no_taxes"),
# Customer tax
url(r'^add-customer-tax$', lfs.manage.customer_tax.views.add_customer_tax, name="lfs_add_customer_tax"),
url(r'^delete-customer-tax/(?P<id>\d*)$', lfs.manage.customer_tax.views.delete_customer_tax, name="lfs_delete_customer_tax"),
url(r'^customer-taxes$', lfs.manage.customer_tax.views.manage_customer_taxes, name="lfs_manage_customer_taxes"),
url(r'^customer-tax/(?P<id>\d*)$', lfs.manage.customer_tax.views.manage_customer_tax, name="lfs_manage_customer_tax"),
url(r'^no-customer-taxes$', lfs.manage.customer_tax.views.no_customer_taxes, name="lfs_manage_no_customer_taxes"),
url(r'^save-customer-tax-criteria/(?P<id>\d*)$', lfs.manage.customer_tax.views.save_criteria, name="lfs_manage_save_customer_tax_criteria"),
url(r'^save-customer-tax-data/(?P<id>\d*)$', lfs.manage.customer_tax.views.save_data, name="lfs_manage_save_customer_tax_data"),
# Utils
url(r'^utilities$', lfs.manage.views.utils.utilities, name="lfs_manage_utils"),
url(r'^clear-cache$', lfs.manage.views.utils.clear_cache, name="lfs_clear_cache"),
url(r'^set-category-levels$', lfs.manage.views.utils.set_category_levels, name="lfs_set_category_levels"),
url(r'^update-effective-price$', lfs.manage.views.utils.update_effective_price, name="lfs_update_effective_price"),
url(r'^reindex-topseller$', lfs.manage.views.utils.reindex_topseller, name="lfs_reindex_topseller"),
# Information
url(r'^environment$', lfs.manage.information.views.environment, name="lfs_manage_environment"),
]
# Manufacturer / SEO
urlpatterns += SEOView.get_seo_urlpattern(Manufacturer)
urlpatterns += ShopSEOView.get_seo_urlpattern(Shop)
urlpatterns += PageSEOView.get_seo_urlpattern(Page)
urlpatterns += SEOView.get_seo_urlpattern(Product, form_klass=ProductSEOForm, template_name='manage/product/seo.html')
urlpatterns += SEOView.get_seo_urlpattern(Category)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class BibTexParser(object):
"""
A parser for bibtex files.
    By default (i.e. without customizations), each value in an entry is
    treated as a string.
:param fileobj: a filehandler
:param customization: a function
Example:
>>> from bibtexparser.bparser import BibTexParser
>>> filehandler = open('bibtex', 'r')
>>> parser = BibTexParser(filehandler)
>>> record_list = parser.get_entry_list()
>>> records_dict = parser.get_entry_dict()
"""
def __init__(self, fileobj, customization=None):
data = fileobj.read()
# On some sample data files, the character encoding detection simply hangs
# We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# Some files have Byte-order marks inserted at the start
if data[:3] == '\xef\xbb\xbf':
data = data[3:]
self.fileobj = StringIO(data)
# set which bibjson schema this parser parses to
self.has_metadata = False
self.persons = []
        # if the bibtex file has substitution strings, they are stored here,
        # then the values are checked for those substitutions in _add_val
self.replace_dict = {}
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.records = self._parse_records(customization=customization)
self.entries_hash = {}
def get_entry_list(self):
"""Get a list of bibtex entries.
        :returns: list -- entries
"""
return self.records
def get_entry_dict(self):
"""Get a dictionnary of bibtex entries.
The dict key is the bibtex entry key
:retuns: dict -- entries
"""
# If the hash has never been made, make it
if not self.entries_hash:
for entry in self.records:
self.entries_hash[entry['id']] = entry
return self.entries_hash
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
:returns: list -- records
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.fileobj):
logger.debug('Inspect line %s', linenumber)
if '--BREAK--' in line:
logger.debug('--BREAK-- encountered')
break
else:
if line.strip().startswith('@'):
logger.debug('Line starts with @')
_add_parsed_record(record, records)
logger.debug('The record is set to empty')
record = ""
if len(line.strip()) > 0:
logger.debug('The line is not empty, add it to record')
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Return the result')
return records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
record, rubbish = record.replace('\r\n', '\n').replace('\r', '\n').rsplit('}\n', 1)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('"').strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
self.replace_dict[key] = val
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record of its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
# put author names into persons list
if 'author_data' in d:
self.persons = [i for i in d['author_data'].split('\n')]
del d['author_data']
d['type'] = bibtype
d['id'] = id
if not self.has_metadata and 'type' in d:
if d['type'] == 'personal bibliography' or d['type'] == 'comment':
self.has_metadata = True
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
        val = val.strip()
if val.startswith('{') and val.endswith('}'):
return val[1:-1]
return val
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
if not val:
return ''
for k in list(self.replace_dict.keys()):
if val == k:
val = self.replace_dict[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogeneize alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
return key
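if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; assumes a file 'refs.bib'
    # exists). A customization is any callable that receives the parsed
    # record dict and returns it, possibly modified.
    def lowercase_authors(record):
        """Example customization: lowercase the author field if present."""
        if 'author' in record:
            record['author'] = record['author'].lower()
        return record
    with open('refs.bib', 'r') as bibfile:
        parser = BibTexParser(bibfile, customization=lowercase_authors)
        print(parser.get_entry_list())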
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.eventarc_v1.types import channel
from google.cloud.eventarc_v1.types import channel_connection
from google.cloud.eventarc_v1.types import eventarc
from google.cloud.eventarc_v1.types import trigger
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-eventarc",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class EventarcTransport(abc.ABC):
"""Abstract transport class for Eventarc."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "eventarc.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get_trigger: gapic_v1.method.wrap_method(
self.get_trigger, default_timeout=None, client_info=client_info,
),
self.list_triggers: gapic_v1.method.wrap_method(
self.list_triggers, default_timeout=None, client_info=client_info,
),
self.create_trigger: gapic_v1.method.wrap_method(
self.create_trigger, default_timeout=None, client_info=client_info,
),
self.update_trigger: gapic_v1.method.wrap_method(
self.update_trigger, default_timeout=None, client_info=client_info,
),
self.delete_trigger: gapic_v1.method.wrap_method(
self.delete_trigger, default_timeout=None, client_info=client_info,
),
self.get_channel: gapic_v1.method.wrap_method(
self.get_channel, default_timeout=None, client_info=client_info,
),
self.list_channels: gapic_v1.method.wrap_method(
self.list_channels, default_timeout=None, client_info=client_info,
),
self.create_channel_: gapic_v1.method.wrap_method(
self.create_channel_, default_timeout=None, client_info=client_info,
),
self.update_channel: gapic_v1.method.wrap_method(
self.update_channel, default_timeout=None, client_info=client_info,
),
self.delete_channel: gapic_v1.method.wrap_method(
self.delete_channel, default_timeout=None, client_info=client_info,
),
self.get_channel_connection: gapic_v1.method.wrap_method(
self.get_channel_connection,
default_timeout=None,
client_info=client_info,
),
self.list_channel_connections: gapic_v1.method.wrap_method(
self.list_channel_connections,
default_timeout=None,
client_info=client_info,
),
self.create_channel_connection: gapic_v1.method.wrap_method(
self.create_channel_connection,
default_timeout=None,
client_info=client_info,
),
self.delete_channel_connection: gapic_v1.method.wrap_method(
self.delete_channel_connection,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def get_trigger(
self,
) -> Callable[
[eventarc.GetTriggerRequest], Union[trigger.Trigger, Awaitable[trigger.Trigger]]
]:
raise NotImplementedError()
@property
def list_triggers(
self,
) -> Callable[
[eventarc.ListTriggersRequest],
Union[eventarc.ListTriggersResponse, Awaitable[eventarc.ListTriggersResponse]],
]:
raise NotImplementedError()
@property
def create_trigger(
self,
) -> Callable[
[eventarc.CreateTriggerRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_trigger(
self,
) -> Callable[
[eventarc.UpdateTriggerRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_trigger(
self,
) -> Callable[
[eventarc.DeleteTriggerRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_channel(
self,
) -> Callable[
[eventarc.GetChannelRequest], Union[channel.Channel, Awaitable[channel.Channel]]
]:
raise NotImplementedError()
@property
def list_channels(
self,
) -> Callable[
[eventarc.ListChannelsRequest],
Union[eventarc.ListChannelsResponse, Awaitable[eventarc.ListChannelsResponse]],
]:
raise NotImplementedError()
@property
def create_channel_(
self,
) -> Callable[
[eventarc.CreateChannelRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_channel(
self,
) -> Callable[
[eventarc.UpdateChannelRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_channel(
self,
) -> Callable[
[eventarc.DeleteChannelRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_channel_connection(
self,
) -> Callable[
[eventarc.GetChannelConnectionRequest],
Union[
channel_connection.ChannelConnection,
Awaitable[channel_connection.ChannelConnection],
],
]:
raise NotImplementedError()
@property
def list_channel_connections(
self,
) -> Callable[
[eventarc.ListChannelConnectionsRequest],
Union[
eventarc.ListChannelConnectionsResponse,
Awaitable[eventarc.ListChannelConnectionsResponse],
],
]:
raise NotImplementedError()
@property
def create_channel_connection(
self,
) -> Callable[
[eventarc.CreateChannelConnectionRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_channel_connection(
self,
) -> Callable[
[eventarc.DeleteChannelConnectionRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("EventarcTransport",)
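# Illustrative sketch (assumption, not part of this module): a concrete
# subclass such as the package's gRPC transport (assumed here to be named
# EventarcGrpcTransport, following the usual GAPIC naming) implements the
# abstract properties above. Construction typically looks like:
#
#   transport = EventarcGrpcTransport(
#       host="eventarc.googleapis.com",   # ":443" is appended automatically
#       credentials=None,                 # falls back to google.auth.default()
#       always_use_jwt_access=True,       # self-signed JWT for service accounts
#   )
#
# Passing both ``credentials`` and ``credentials_file`` raises
# DuplicateCredentialArgs, as enforced in ``__init__`` above.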
|
|
"""The tests for the automation component."""
import asyncio
import pytest
from homeassistant.components import logbook
import homeassistant.components.automation as automation
from homeassistant.components.automation import (
ATTR_SOURCE,
DOMAIN,
EVENT_AUTOMATION_RELOADED,
EVENT_AUTOMATION_TRIGGERED,
SERVICE_TRIGGER,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
EVENT_HOMEASSISTANT_STARTED,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, CoreState, State, callback
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import assert_setup_component, async_mock_service, mock_restore_cache
from tests.components.logbook.test_init import MockLazyEventPartialState
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
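# async_mock_service registers a stub "test.automation" service and returns a
# list that records every call made to it; the tests below assert on its
# length and on the data of individual calls.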
async def test_service_data_not_a_dict(hass, calls):
"""Test service data not dict."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "data": 100},
}
},
)
async def test_service_specify_data(hass, calls):
"""Test service data."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.platform }} - "
"{{ trigger.event.event_type }}"
},
},
}
},
)
time = dt_util.utcnow()
with patch("homeassistant.helpers.script.utcnow", return_value=time):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event"
state = hass.states.get("automation.hello")
assert state is not None
assert state.attributes.get("last_triggered") == time
async def test_service_specify_entity_id(hass, calls):
"""Test service data."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID)
async def test_service_specify_entity_id_list(hass, calls):
"""Test service data."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"entity_id": ["hello.world", "hello.world2"],
},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert ["hello.world", "hello.world2"] == calls[0].data.get(ATTR_ENTITY_ID)
async def test_two_triggers(hass, calls):
"""Test triggers."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": [
{"platform": "event", "event_type": "test_event"},
{"platform": "state", "entity_id": "test.entity"},
],
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_trigger_service_ignoring_condition(hass, calls):
"""Test triggers."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "test",
"trigger": [{"platform": "event", "event_type": "test_event"}],
"condition": {
"condition": "state",
"entity_id": "non.existing",
"state": "beer",
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
await hass.services.async_call(
"automation", "trigger", {"entity_id": "automation.test"}, blocking=True
)
assert len(calls) == 1
await hass.services.async_call(
"automation",
"trigger",
{"entity_id": "automation.test", "skip_condition": True},
blocking=True,
)
assert len(calls) == 2
await hass.services.async_call(
"automation",
"trigger",
{"entity_id": "automation.test", "skip_condition": False},
blocking=True,
)
assert len(calls) == 2
async def test_two_conditions_with_and(hass, calls):
"""Test two and conditions."""
entity_id = "test.entity"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": [{"platform": "event", "event_type": "test_event"}],
"condition": [
{"condition": "state", "entity_id": entity_id, "state": "100"},
{
"condition": "numeric_state",
"entity_id": entity_id,
"below": 150,
},
],
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(entity_id, 100)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 101)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 151)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_shorthand_conditions_template(hass, calls):
"""Test shorthand nation form in conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": [{"platform": "event", "event_type": "test_event"}],
"condition": "{{ is_state('test.entity', 'hello') }}",
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "goodbye")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_automation_list_setting(hass, calls):
"""Event is not a valid condition."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
{
"trigger": {"platform": "event", "event_type": "test_event_2"},
"action": {"service": "test.automation"},
},
]
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire("test_event_2")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_automation_calling_two_actions(hass, calls):
"""Test if we can call two actions from automation async definition."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": [
{"service": "test.automation", "data": {"position": 0}},
{"service": "test.automation", "data": {"position": 1}},
],
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data["position"] == 0
assert calls[1].data["position"] == 1
async def test_shared_context(hass, calls):
"""Test that the shared context is passed down the chain."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"event": "test_event2"},
},
{
"alias": "bye",
"trigger": {"platform": "event", "event_type": "test_event2"},
"action": {"service": "test.automation"},
},
]
},
)
context = Context()
first_automation_listener = Mock()
event_mock = Mock()
hass.bus.async_listen("test_event2", first_automation_listener)
hass.bus.async_listen(EVENT_AUTOMATION_TRIGGERED, event_mock)
hass.bus.async_fire("test_event", context=context)
await hass.async_block_till_done()
    # Ensure events were fired
assert first_automation_listener.call_count == 1
assert event_mock.call_count == 2
    # Verify automation triggered event for 'hello' automation
args, _ = event_mock.call_args_list[0]
first_trigger_context = args[0].context
assert first_trigger_context.parent_id == context.id
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) is not None
assert args[0].data.get(ATTR_ENTITY_ID) is not None
assert args[0].data.get(ATTR_SOURCE) is not None
# Ensure context set correctly for event fired by 'hello' automation
args, _ = first_automation_listener.call_args
assert args[0].context is first_trigger_context
# Ensure the 'hello' automation state has the right context
state = hass.states.get("automation.hello")
assert state is not None
assert state.context is first_trigger_context
    # Verify automation triggered event for 'bye' automation
args, _ = event_mock.call_args_list[1]
second_trigger_context = args[0].context
assert second_trigger_context.parent_id == first_trigger_context.id
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) is not None
assert args[0].data.get(ATTR_ENTITY_ID) is not None
assert args[0].data.get(ATTR_SOURCE) is not None
# Ensure the service call from the second automation
# shares the same context
assert len(calls) == 1
assert calls[0].context is second_trigger_context
async def test_services(hass, calls):
"""Test the automation services for turning entities on/off."""
entity_id = "automation.hello"
assert hass.states.get(entity_id) is None
assert not automation.is_on(hass, entity_id)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
}
},
)
assert hass.states.get(entity_id) is not None
assert automation.is_on(hass, entity_id)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{
ATTR_ENTITY_ID: entity_id,
},
blocking=True,
)
assert not automation.is_on(hass, entity_id)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
await hass.services.async_call(
automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
assert automation.is_on(hass, entity_id)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TOGGLE,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
assert not automation.is_on(hass, entity_id)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
await hass.services.async_call(
automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
await hass.services.async_call(
automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
assert len(calls) == 3
await hass.services.async_call(
automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
await hass.services.async_call(
automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
assert len(calls) == 4
await hass.services.async_call(
automation.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id}, blocking=True
)
assert automation.is_on(hass, entity_id)
async def test_reload_config_service(hass, calls, hass_admin_user, hass_read_only_user):
"""Test the reload config service."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"data_template": {"event": "{{ trigger.event.event_type }}"},
},
}
},
)
assert hass.states.get("automation.hello") is not None
assert hass.states.get("automation.bye") is None
listeners = hass.bus.async_listeners()
assert listeners.get("test_event") == 1
assert listeners.get("test_event2") is None
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data.get("event") == "test_event"
test_reload_event = []
hass.bus.async_listen(
EVENT_AUTOMATION_RELOADED, lambda event: test_reload_event.append(event)
)
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value={
automation.DOMAIN: {
"alias": "bye",
"trigger": {"platform": "event", "event_type": "test_event2"},
"action": {
"service": "test.automation",
"data_template": {"event": "{{ trigger.event.event_type }}"},
},
}
},
):
with pytest.raises(Unauthorized):
await hass.services.async_call(
automation.DOMAIN,
SERVICE_RELOAD,
context=Context(user_id=hass_read_only_user.id),
blocking=True,
)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_RELOAD,
context=Context(user_id=hass_admin_user.id),
blocking=True,
)
# De-flake ?!
await hass.async_block_till_done()
assert len(test_reload_event) == 1
assert hass.states.get("automation.hello") is None
assert hass.states.get("automation.bye") is not None
listeners = hass.bus.async_listeners()
assert listeners.get("test_event") is None
assert listeners.get("test_event2") == 1
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data.get("event") == "test_event2"
async def test_reload_config_when_invalid_config(hass, calls):
"""Test the reload config service handling invalid config."""
with assert_setup_component(1, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"data_template": {"event": "{{ trigger.event.event_type }}"},
},
}
},
)
assert hass.states.get("automation.hello") is not None
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data.get("event") == "test_event"
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value={automation.DOMAIN: "not valid"},
):
await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)
assert hass.states.get("automation.hello") is None
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_reload_config_handles_load_fails(hass, calls):
"""Test the reload config service."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"data_template": {"event": "{{ trigger.event.event_type }}"},
},
}
},
)
assert hass.states.get("automation.hello") is not None
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data.get("event") == "test_event"
with patch(
"homeassistant.config.load_yaml_config_file",
side_effect=HomeAssistantError("bla"),
):
await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)
assert hass.states.get("automation.hello") is not None
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize("service", ["turn_off_stop", "turn_off_no_stop", "reload"])
async def test_automation_stops(hass, calls, service):
"""Test that turning off / reloading stops any running actions as appropriate."""
entity_id = "automation.hello"
test_entity = "test.entity"
config = {
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": [
{"event": "running"},
{"wait_template": "{{ is_state('test.entity', 'goodbye') }}"},
{"service": "test.automation"},
],
}
}
assert await async_setup_component(hass, automation.DOMAIN, config)
running = asyncio.Event()
@callback
def running_cb(event):
running.set()
hass.bus.async_listen_once("running", running_cb)
hass.states.async_set(test_entity, "hello")
hass.bus.async_fire("test_event")
await running.wait()
if service == "turn_off_stop":
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
elif service == "turn_off_no_stop":
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity_id, automation.CONF_STOP_ACTIONS: False},
blocking=True,
)
else:
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value=config,
):
await hass.services.async_call(
automation.DOMAIN, SERVICE_RELOAD, blocking=True
)
hass.states.async_set(test_entity, "goodbye")
await hass.async_block_till_done()
assert len(calls) == (1 if service == "turn_off_no_stop" else 0)
async def test_automation_restore_state(hass):
"""Ensure states are restored on startup."""
time = dt_util.utcnow()
mock_restore_cache(
hass,
(
State("automation.hello", STATE_ON),
State("automation.bye", STATE_OFF, {"last_triggered": time}),
),
)
config = {
automation.DOMAIN: [
{
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event_hello"},
"action": {"service": "test.automation"},
},
{
"alias": "bye",
"trigger": {"platform": "event", "event_type": "test_event_bye"},
"action": {"service": "test.automation"},
},
]
}
assert await async_setup_component(hass, automation.DOMAIN, config)
state = hass.states.get("automation.hello")
assert state
assert state.state == STATE_ON
assert state.attributes["last_triggered"] is None
state = hass.states.get("automation.bye")
assert state
assert state.state == STATE_OFF
assert state.attributes["last_triggered"] == time
calls = async_mock_service(hass, "test", "automation")
assert automation.is_on(hass, "automation.bye") is False
hass.bus.async_fire("test_event_bye")
await hass.async_block_till_done()
assert len(calls) == 0
assert automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event_hello")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_initial_value_off(hass):
"""Test initial value off."""
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"initial_state": "off",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert not automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_initial_value_on(hass):
"""Test initial value on."""
hass.state = CoreState.not_running
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"initial_state": "on",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"entity_id": ["hello.world", "hello.world2"],
},
}
},
)
assert automation.is_on(hass, "automation.hello")
await hass.async_start()
await hass.async_block_till_done()
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_initial_value_off_but_restore_on(hass):
"""Test initial value off and restored state is turned on."""
hass.state = CoreState.not_running
calls = async_mock_service(hass, "test", "automation")
mock_restore_cache(hass, (State("automation.hello", STATE_ON),))
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"initial_state": "off",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert not automation.is_on(hass, "automation.hello")
await hass.async_start()
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_initial_value_on_but_restore_off(hass):
"""Test initial value on and restored state is turned off."""
calls = async_mock_service(hass, "test", "automation")
mock_restore_cache(hass, (State("automation.hello", STATE_OFF),))
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"initial_state": "on",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_no_initial_value_and_restore_off(hass):
"""Test initial value off and restored state is turned on."""
calls = async_mock_service(hass, "test", "automation")
mock_restore_cache(hass, (State("automation.hello", STATE_OFF),))
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert not automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_automation_is_on_if_no_initial_state_or_restore(hass):
"""Test initial value is on when no initial state or restored state."""
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_automation_not_trigger_on_bootstrap(hass):
"""Test if automation is not trigger on bootstrap."""
hass.state = CoreState.not_running
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert automation.is_on(hass, "automation.hello")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID)
async def test_automation_bad_trigger(hass, caplog):
"""Test bad trigger configuration."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "automation"},
"action": [],
}
},
)
assert "Integration 'automation' does not provide trigger support." in caplog.text
async def test_automation_with_error_in_script(hass, caplog):
"""Test automation with an error in script."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert "Service not found" in caplog.text
async def test_automation_with_error_in_script_2(hass, caplog):
"""Test automation with an error in script."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": None, "entity_id": "hello.world"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert "string value is None" in caplog.text
async def test_automation_restore_last_triggered_with_initial_state(hass):
"""Ensure last_triggered is restored, even when initial state is set."""
time = dt_util.utcnow()
mock_restore_cache(
hass,
(
State("automation.hello", STATE_ON),
State("automation.bye", STATE_ON, {"last_triggered": time}),
State("automation.solong", STATE_OFF, {"last_triggered": time}),
),
)
config = {
automation.DOMAIN: [
{
"alias": "hello",
"initial_state": "off",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
{
"alias": "bye",
"initial_state": "off",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
{
"alias": "solong",
"initial_state": "on",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
]
}
await async_setup_component(hass, automation.DOMAIN, config)
state = hass.states.get("automation.hello")
assert state
assert state.state == STATE_OFF
assert state.attributes["last_triggered"] is None
state = hass.states.get("automation.bye")
assert state
assert state.state == STATE_OFF
assert state.attributes["last_triggered"] == time
state = hass.states.get("automation.solong")
assert state
assert state.state == STATE_ON
assert state.attributes["last_triggered"] == time
async def test_extraction_functions(hass):
"""Test extraction functions."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: [
{
"alias": "test1",
"trigger": {"platform": "state", "entity_id": "sensor.trigger_1"},
"condition": {
"condition": "state",
"entity_id": "light.condition_state",
"state": "on",
},
"action": [
{
"service": "test.script",
"data": {"entity_id": "light.in_both"},
},
{
"service": "test.script",
"data": {"entity_id": "light.in_first"},
},
{
"domain": "light",
"device_id": "device-in-both",
"entity_id": "light.bla",
"type": "turn_on",
},
],
},
{
"alias": "test2",
"trigger": {
"platform": "device",
"domain": "light",
"type": "turned_on",
"entity_id": "light.trigger_2",
"device_id": "trigger-device-2",
},
"condition": {
"condition": "device",
"device_id": "condition-device",
"domain": "light",
"type": "is_on",
"entity_id": "light.bla",
},
"action": [
{
"service": "test.script",
"data": {"entity_id": "light.in_both"},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"scene": "scene.hello"},
{
"domain": "light",
"device_id": "device-in-both",
"entity_id": "light.bla",
"type": "turn_on",
},
{
"domain": "light",
"device_id": "device-in-last",
"entity_id": "light.bla",
"type": "turn_on",
},
],
},
]
},
)
assert set(automation.automations_with_entity(hass, "light.in_both")) == {
"automation.test1",
"automation.test2",
}
assert set(automation.entities_in_automation(hass, "automation.test1")) == {
"sensor.trigger_1",
"light.condition_state",
"light.in_both",
"light.in_first",
}
assert set(automation.automations_with_device(hass, "device-in-both")) == {
"automation.test1",
"automation.test2",
}
assert set(automation.devices_in_automation(hass, "automation.test2")) == {
"trigger-device-2",
"condition-device",
"device-in-both",
"device-in-last",
}
async def test_logbook_humanify_automation_triggered_event(hass):
"""Test humanifying Automation Trigger event."""
hass.config.components.add("recorder")
await async_setup_component(hass, automation.DOMAIN, {})
await async_setup_component(hass, "logbook", {})
entity_attr_cache = logbook.EntityAttributeCache(hass)
event1, event2 = list(
logbook.humanify(
hass,
[
MockLazyEventPartialState(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_ENTITY_ID: "automation.hello", ATTR_NAME: "Hello Automation"},
),
MockLazyEventPartialState(
EVENT_AUTOMATION_TRIGGERED,
{
ATTR_ENTITY_ID: "automation.bye",
ATTR_NAME: "Bye Automation",
ATTR_SOURCE: "source of trigger",
},
),
],
entity_attr_cache,
{},
)
)
assert event1["name"] == "Hello Automation"
assert event1["domain"] == "automation"
assert event1["message"] == "has been triggered"
assert event1["entity_id"] == "automation.hello"
assert event2["name"] == "Bye Automation"
assert event2["domain"] == "automation"
assert event2["message"] == "has been triggered by source of trigger"
assert event2["entity_id"] == "automation.bye"
async def test_automation_variables(hass, caplog):
"""Test automation variables."""
calls = async_mock_service(hass, "test", "automation")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"variables": {
"test_var": "defined_in_config",
"event_type": "{{ trigger.event.event_type }}",
},
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"service": "test.automation",
"data": {
"value": "{{ test_var }}",
"event_type": "{{ event_type }}",
},
},
},
{
"variables": {
"test_var": "defined_in_config",
},
"trigger": {"platform": "event", "event_type": "test_event_2"},
"condition": {
"condition": "template",
"value_template": "{{ trigger.event.data.pass_condition }}",
},
"action": {
"service": "test.automation",
},
},
{
"variables": {
"test_var": "{{ trigger.event.data.break + 1 }}",
},
"trigger": {"platform": "event", "event_type": "test_event_3"},
"action": {
"service": "test.automation",
},
},
]
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["value"] == "defined_in_config"
assert calls[0].data["event_type"] == "test_event"
hass.bus.async_fire("test_event_2")
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire("test_event_2", {"pass_condition": True})
await hass.async_block_till_done()
assert len(calls) == 2
assert "Error rendering variables" not in caplog.text
hass.bus.async_fire("test_event_3")
await hass.async_block_till_done()
assert len(calls) == 2
assert "Error rendering variables" in caplog.text
hass.bus.async_fire("test_event_3", {"break": 0})
await hass.async_block_till_done()
assert len(calls) == 3
async def test_blueprint_automation(hass, calls):
"""Test blueprint automation."""
assert await async_setup_component(
hass,
"automation",
{
"automation": {
"use_blueprint": {
"path": "test_event_service.yaml",
"input": {
"trigger_event": "blueprint_event",
"service_to_call": "test.automation",
},
}
}
},
)
hass.bus.async_fire("blueprint_event")
await hass.async_block_till_done()
assert len(calls) == 1
assert automation.entities_in_automation(hass, "automation.automation_0") == [
"light.kitchen"
]
|
|
import threading
from logging import getLogger
from os import urandom
from hashlib import sha1
from redis import StrictRedis
from redis.exceptions import NoScriptError
__version__ = "2.2.0"
logger = getLogger(__name__)
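# Lua script executed atomically on release: it deletes the lock key only if
# it is still owned by this instance (value matches ARGV[1]), and pushes a
# token onto the signal list (KEYS[2]) so that a waiter blocked in BLPOP can
# wake up immediately.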
UNLOCK_SCRIPT = b"""
if redis.call("get", KEYS[1]) == ARGV[1] then
redis.call("del", KEYS[2])
redis.call("lpush", KEYS[2], 1)
return redis.call("del", KEYS[1])
else
return 0
end
"""
UNLOCK_SCRIPT_HASH = sha1(UNLOCK_SCRIPT).hexdigest()
class AlreadyAcquired(RuntimeError):
pass
class NotAcquired(RuntimeError):
pass
class AlreadyStarted(RuntimeError):
pass
class Lock(object):
"""
A Lock context manager implemented via redis SETNX/BLPOP.
"""
def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False):
"""
:param redis_client:
An instance of :class:`~StrictRedis`.
:param name:
The name (redis key) the lock should have.
:param expire:
The lock expiry time in seconds. If left at the default (None)
the lock will not expire.
:param id:
The ID (redis value) the lock should have. A random value is
generated when left at the default.
:param auto_renewal:
If set to True, Lock will automatically renew the lock so that it
doesn't expire for as long as the lock is held (acquire() called
or running in a context manager).
Implementation note: Renewal will happen using a daemon thread with
an interval of expire*2/3. If wishing to use a different renewal
time, subclass Lock, call super().__init__() then set
self._lock_renewal_interval to your desired interval.
"""
assert isinstance(redis_client, StrictRedis)
if auto_renewal and expire is None:
raise ValueError("Expire may not be None when auto_renewal is set")
self._client = redis_client
self._expire = expire if expire is None else int(expire)
self._id = urandom(16) if id is None else id
self._held = False
self._name = 'lock:'+name
self._signal = 'lock-signal:'+name
self._lock_renewal_interval = expire*2/3 if auto_renewal else None
self._lock_renewal_thread = None
def reset(self):
"""
Forcibly deletes the lock. Use this with care.
"""
self._client.delete(self._name)
self._client.delete(self._signal)
@property
def id(self):
return self._id
def get_owner_id(self):
return self._client.get(self._name)
def acquire(self, blocking=True):
logger.debug("Getting %r ...", self._name)
if self._held:
            raise AlreadyAcquired("Already acquired from this Lock instance.")
busy = True
while busy:
busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
if busy:
if blocking:
self._client.blpop(self._signal, self._expire or 0)
else:
logger.debug("Failed to get %r.", self._name)
return False
logger.debug("Got lock for %r.", self._name)
self._held = True
if self._lock_renewal_interval is not None:
self._start_lock_renewer()
return True
def _lock_renewer(self, interval):
"""
Renew the lock key in redis every `interval` seconds for as long
as `self._lock_renewal_thread.should_exit` is False.
"""
log = getLogger("%s.lock_refresher" % __name__)
while not self._lock_renewal_thread.wait_for_exit_request(timeout=interval):
log.debug("Refreshing lock")
self._client.set(self._name, self._id, xx=True, ex=self._expire)
log.debug("Exit requested, stopping lock refreshing")
def _start_lock_renewer(self):
"""Start the lock refresher"""
if self._lock_renewal_thread is not None:
raise AlreadyStarted("Lock refresh thread already started")
logger.debug(
"Starting thread to refresh lock every %s seconds",
self._lock_renewal_interval
)
self._lock_renewal_thread = InterruptableThread(
group=None,
target=self._lock_renewer,
kwargs={'interval': self._lock_renewal_interval}
)
        self._lock_renewal_thread.daemon = True
self._lock_renewal_thread.start()
def _stop_lock_renewer(self):
"""Stop the lock refresher"""
if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
return
logger.debug("Signalling the lock refresher to stop")
self._lock_renewal_thread.request_exit()
self._lock_renewal_thread.join()
self._lock_renewal_thread = None
logger.debug("Lock refresher has stopped")
def __enter__(self):
assert self.acquire(blocking=True)
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None, force=False):
if not (self._held or force):
raise NotAcquired("This Lock instance didn't acquire the lock.")
if self._lock_renewal_thread is not None:
self._stop_lock_renewer()
logger.debug("Releasing %r.", self._name)
try:
self._client.evalsha(UNLOCK_SCRIPT_HASH, 2, self._name, self._signal, self._id)
except NoScriptError:
            logger.warning("UNLOCK_SCRIPT not cached.")
self._client.eval(UNLOCK_SCRIPT, 2, self._name, self._signal, self._id)
self._held = False
release = __exit__
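# Example usage (illustrative sketch; assumes a Redis server on localhost and
# a hypothetical generate_reports() function):
#
#   conn = StrictRedis()
#   with Lock(conn, "reports", expire=60, auto_renewal=True):
#       generate_reports()   # lock is renewed in the background every 40 seconds
#
# or, managed explicitly without blocking:
#
#   lock = Lock(conn, "reports", expire=60)
#   if lock.acquire(blocking=False):
#       try:
#           generate_reports()
#       finally:
#           lock.release()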
class InterruptableThread(threading.Thread):
"""
A Python thread that can be requested to stop by calling request_exit()
on it.
Code running inside this thread should periodically check the
`should_exit` property (or use wait_for_exit_request) on the thread
object and stop further processing once it returns True.
"""
def __init__(self, *args, **kwargs):
self._should_exit = threading.Event()
super(InterruptableThread, self).__init__(*args, **kwargs)
def request_exit(self):
"""
Signal the thread that it should stop performing more work and exit.
"""
self._should_exit.set()
@property
def should_exit(self):
        return self._should_exit.is_set()
def wait_for_exit_request(self, timeout=None):
"""
Wait until the thread has been signalled to exit.
If timeout is specified (as a float of seconds to wait) then wait
up to this many seconds before returning the value of `should_exit`.
"""
should_exit = self._should_exit.wait(timeout)
if should_exit is None:
            # Python 2.6 compatibility: Event.wait() there does not return
            # the flag value, so fall back to checking should_exit directly.
should_exit = self.should_exit
return should_exit
def reset_all(redis_client):
"""
    Forcibly deletes all remaining locks (e.g. left over after a crash). Use this with care.
"""
for lock_key in redis_client.keys('lock:*'):
redis_client.delete(lock_key)
for lock_key in redis_client.keys('lock-signal:*'):
redis_client.delete(lock_key)
|
|
from __future__ import print_function
import grtrans_batch as gr
import pickle
import numpy as np
import copy
import sys
def load_pickle(file):
with open(file, 'rb') as f:
if sys.version_info.major > 2:
data = pickle.load(f, encoding='latin1')
else:
data = pickle.load(f)
return data
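# Test harness pattern used below: each problem is rendered with grtrans (or
# pgrtrans when pgrtrans=1), then compared against reference data pickled by a
# previous run with save=1. A problem passes when its relative error terr
# (e.g. sum(|computed - reference|) / sum(|reference|)) is below the tolerance
# chosen for that problem.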
def run_test_problems(save=0,pgrtrans=0,nosphacc=0,compile=0):
# run grtrans test problems
tol=1e-2; failed=[]; xlist=[]
xlist.append(gr.grtrans())
if compile > 0:
if pgrtrans==0:
xlist[-1].compile_grtrans()
else:
xlist[-1].compile_pgrtrans()
passed=0; max_passed=0
if nosphacc <= 0:
# sphacc
# xlist[-1].write_grtrans_inputs('inputs.in',fname='SPHACC',nfreq=15,nmu=1,fmin=2.41e10,fmax=6.31e14,ename='POLSYNCHTH',nvals=4,spin=0.,mbh=10.,standard=1,uout=.003,nn=[100,100,100")
# New tests of 1d intensity profile & full spectrum 12/14/2012
xlist[-1].write_grtrans_inputs('inputs.in',fname='SPHACC',nfreq=25,nmu=1,fmin=1e8,fmax=1e15,ename='SYNCHTHAV',nvals=1,spin=0.,mbh=1.,standard=1,nn=[10000,1,100],gridvals=[0.,400.,0.,0.],uout=.0025,oname='sphacc_abs.out')
if pgrtrans==0:
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='SPHACC',nfreq=25,nmu=1,fmin=1e8,fmax=1e15,ename='SYNCHTHAV',nvals=1,spin=0.,mbh=1.,standard=1,nn=[10000,1,100],gridvals=[0.,400.,0.,0.],uout=.0025,oname='sphacc_abs.out')
xlist[-1].calc_spec_pgrtrans((np.shape(xlist[-1].ivals))[2])
if save==0:
i = load_pickle('test_grtrans_sphacc_intensity.p')
if pgrtrans==0:
terr = np.sum(np.abs(xlist[-1].ivals[:,0,14]-i))/np.sum(np.abs(i))
else:
terr = np.sum(np.abs(xlist[-1].ivals[0,:,14]-i))/np.sum(np.abs(i))
print('terr: ',terr)
if terr < (10*tol): passed+=1
else: failed.append('sphacc intensity')
max_passed+=1
i = load_pickle('test_grtrans_sphacc_spectrum.p')
terr = np.sum(np.abs(xlist[-1].spec-i))/np.sum(np.abs(i))
print('terr: ',terr)
if terr < (10*tol): passed+=1
else: failed.append('sphacc spectrum')
max_passed+=1
else:
pickle.dump(xlist[-1].ivals[:,0,14],open('test_grtrans_sphacc_intensity.p','wb'))
pickle.dump(xlist[-1].spec,open('test_grtrans_sphacc_spectrum.p','wb'))
# ffjet
xlist.append(gr.grtrans())
if pgrtrans==0:
xlist[-1].write_grtrans_inputs('inputs.in',fname='FFJET',jdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,400],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],ntscl=2.,nrscl=70.)
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='FFJET',fdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,400],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],ntscl=2.,nrscl=70.)
xlist[-1].calc_spec_pgrtrans((np.shape(xlist[-1].ivals))[2])
if save==0:
i = load_pickle('test_grtrans_ffjet.p')
if pgrtrans==0:
terr = np.sum(np.abs(xlist[-1].ivals-i))/np.sum(np.abs(i))
else:
terr = np.sum(np.abs(xlist[-1].ivals.transpose([1,0,2])-i))/np.sum(np.abs(i))
print('terr: ',terr)
if terr < tol: passed+=1
else: failed.append('ffjet')
max_passed+=1
else:
pickle.dump(xlist[-1].ivals,open('test_grtrans_ffjet.p','wb'))
# ffjet with delo integrator
x2=gr.grtrans()
if pgrtrans==0:
x2.write_grtrans_inputs('inputs.in',fname='FFJET',jdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,1600],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],iname='delo',ntscl=2.,nrscl=70.)
x2.run_grtrans()
x2.read_grtrans_output()
else:
x2.run_pgrtrans(fname='FFJET',fdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,1600],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],iname='delo',ntscl=2.,nrscl=70.)
x2.calc_spec_pgrtrans((np.shape(x2.ivals))[2])
terr=10.
terr = np.max(np.abs(x2.spec - xlist[-1].spec)/xlist[-1].spec)
print('terr: ',terr)
if terr < 0.05: passed += 1
else: failed.append('delo')
max_passed+=1
# ffjet with formal rad trans solution from Degl'Innocenti (1985):
x3=gr.grtrans()
if pgrtrans==0:
x3.write_grtrans_inputs('inputs.in',fname='FFJET',jdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,1600],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],iname='formal',ntscl=2.,nrscl=70.)
x3.run_grtrans()
x3.read_grtrans_output()
else:
x3.run_pgrtrans(fname='FFJET',fdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHPL',nvals=4,spin=0.998,standard=1,nn=[100,100,1600],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],iname='formal',ntscl=2.,nrscl=70.)
x3.calc_spec_pgrtrans((np.shape(x3.ivals))[2])
terr=10.
terr = np.max(np.abs(x3.spec - xlist[-1].spec)/xlist[-1].spec)
print('terr: ',terr)
if terr < 0.05: passed += 1
else: failed.append('formal')
max_passed+=1
x2=0; x3=0
# thindisk
xlist.append(gr.grtrans())
xlist[-1].write_grtrans_inputs('inputs.in',fname='THINDISK',nfreq=25,nmu=1,fmin=2.41e16,fmax=6.31e18,ename='BBPOL',nvals=4,spin=0.9,standard=2,nn=[100,100,1],uout=0.01,mbh=10, mumin=.26,mumax=.26,gridvals=[-21,21,-21,21])
if pgrtrans==0:
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='THINDISK',nfreq=25,nmu=1,fmin=2.41e16,fmax=6.31e18,ename='BBPOL',nvals=4,spin=0.9,standard=2,nn=[100,100,1],uout=0.01,mbh=10, mumin=.26,mumax=.26,gridvals=[-21,21,-21,21])
xlist[-1].calc_spec_pgrtrans(xlist[-1].nx)
if save==0:
i = load_pickle('test_grtrans_thindisk.p')
if pgrtrans==0:
terr = np.sum(np.abs(xlist[-1].ivals-i))/np.sum(np.abs(i))
else:
terr = np.sum(np.abs(xlist[-1].ivals.transpose([1,0,2])-i))/np.sum(np.abs(i))
print('terr: ',terr)
if terr < tol: passed+=1
else: failed.append('thindisk')
max_passed+=1
else:
pickle.dump(xlist[-1].ivals,open('test_grtrans_thindisk.p','wb'))
# total I w/, w/o pol
xlist.append(gr.grtrans())
xlist[-1].write_grtrans_inputs('inputs.in',fname='FFJET',jdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='SYNCHPL',nvals=1,spin=0.998,standard=1,nn=[100,100,400],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],ntscl=2.,nrscl=70.)
if pgrtrans==0:
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='FFJET',fdfile='m87bl09rfp10xi5a998fluidvars.bin',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='SYNCHPL',nvals=1,spin=0.998,standard=1,nn=[100,100,400],uout=0.01,mbh=3.4e9, mumin=.906,mumax=.906,gridvals=[-40,20,-20,40],ntscl=2.,nrscl=70.)
xlist[-1].calc_spec_pgrtrans((np.shape(xlist[-1].ivals))[2])
if save==0:
i = load_pickle('test_grtrans_ffjet.p')
if pgrtrans == 0:
terr = np.sum(np.abs(xlist[-1].ivals[:,0,0]-i[:,0,0]))/np.sum(abs(i[:,0,0]))
else:
terr = np.sum(np.abs(xlist[-1].ivals[0,:,0]-i[:,0,0]))/np.sum(abs(i[:,0,0]))
print('terr: ',terr)
if terr < tol: passed+=1
else: failed.append('polunpol')
max_passed+=1
# harm
xlist.append(gr.grtrans())
xlist[-1].write_grtrans_inputs('inputs.in',fname='HARM',nfreq=1,nmu=1,fmin=2.3e11,fmax=2.3e11,ename='POLSYNCHTH',nvals=1,spin=0.9375,standard=1,nn=[150,150,400],uout=0.04,mbh=4e6, mdotmin=1.57e15,mdotmax=1.57e15,nmdot=1,mumin=.6428,mumax=.6428,gridvals=[-13,13,-13,13],hhfile='dump040',hdfile='dump',hindf=40,hnt=1,muval=1./4.,gmin=1.)
if pgrtrans==0:
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='HARM',nfreq=1,nmu=1,fmin=2.3e11,fmax=2.3e11,ename='POLSYNCHTH',nvals=1,spin=0.9375,standard=1,nn=[150,150,400],uout=0.04,mbh=4e6, mdotmin=1.57e15,mdotmax=1.57e15,nmdot=1,mumin=.6428,mumax=.6428,gridvals=[-13.,13.,-13.,13.],fhfile='dump040',fdfile='dump',findf=40,fnt=1,muval=1./4.,gmin=1.)
xlist[-1].calc_spec_pgrtrans((np.shape(xlist[-1].ivals))[2])
if save==0:
i = load_pickle('test_grtrans_harm.p')
xlist[-1].ivals = np.where(xlist[-1].ivals==xlist[-1].ivals,xlist[-1].ivals,np.zeros(np.shape(xlist[-1].ivals)))
i = np.where(i==i,i,np.zeros(np.shape(i)))
if pgrtrans==0:
terr = np.sum(np.abs(xlist[-1].ivals[:,0,0]-i[:,0,0]))/np.sum(abs(i[:,0,0]))
else:
terr = np.sum(np.abs(xlist[-1].ivals[0,:,0]-i[:,0,0]))/np.sum(abs(i[:,0,0]))
print('terr: ',terr)
if terr < tol: passed+=1
else: failed.append('harm')
max_passed+=1
else:
pickle.dump(xlist[-1].ivals,open('test_grtrans_harm.p','wb'))
xlist.append(gr.grtrans())
xlist[-1].write_grtrans_inputs('inputs.in',fname='POWERLAW',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHTH',nvals=4,spin=0.,standard=1,nn=[200,200,1600],uout=0.00005,mbh=4e6, mumin=0.5,mumax=0.5,nrotype=1,gridvals=[1200.,4000.,0.,2.*np.pi],iname='lsoda',srin=3200.,srout=3300.,ntscl=5e11,sthin=-0.02,sthout=0.02,rcut=4000.,snscl=1e5,phi0=-0.5,sphiin=0.,gmin=1.)
if pgrtrans==0:
xlist[-1].run_grtrans()
xlist[-1].read_grtrans_output()
else:
xlist[-1].run_pgrtrans(fname='POWERLAW',nfreq=1,nmu=1,fmin=3.45e11,fmax=3.45e11,ename='POLSYNCHTH',nvals=4,spin=0.,standard=1,nn=[200,200,1600],uout=0.00005,mbh=4e6, mumin=0.5,mumax=0.5,nrotype=1,gridvals=[1200.,4000.,0.,2.*np.pi],iname='lsoda',srin=3200.,srout=3300.,ntscl=5e11,sthin=-0.02,sthout=0.02,rcut=4000.,snscl=1e5,phi0=-0.5,sphiin=0.,gmin=1.)
xlist[-1].calc_spec_pgrtrans((np.shape(xlist[-1].ivals))[2])
if save==0:
i = load_pickle('test_toroidalfield.p')
xlist[-1].ivals = np.where(xlist[-1].ivals==xlist[-1].ivals,xlist[-1].ivals,np.zeros(np.shape(xlist[-1].ivals)))
i = np.where(i==i,i,np.zeros(np.shape(i)))
# if pgrtrans==0:
# terr = np.sum(np.abs(xlist[-1].ivals[:,0,0]-i[0,:,0]))/np.sum(abs(i[0,:,0]))
# else:
# terr = np.sum(np.abs(xlist[-1].ivals[0,:,0]-i[0,:,0]))/np.sum(abs(i[0,:,0]))
if pgrtrans==0:
terr = np.sum(np.abs(xlist[-1].ivals-i))/np.sum(np.abs(i))
else:
terr = np.sum(np.abs(xlist[-1].ivals.transpose([1,0,2])-i))/np.sum(np.abs(i))
print('terr: ',terr)
if terr < (2*tol): passed+=1
else: failed.append('toroidal')
max_passed+=1
else:
pickle.dump(xlist[-1].ivals,open('test_toroidalfield.p','wb'))
print('tests total: ', max_passed)
print('tests passed: ', passed)
print('tests failed: ',failed)
return passed, max_passed, failed, xlist
|
|
# Author: Alexander M. Terp
# Date created: January, 2016
# Description: Contains code responsible for the functions used in the
# calculations for SVA.
import csv
from math import ceil
# Used to help user in case they enter information incorrectly.
from window import status
# Define global macro values.
SINGLE_HARVEST_CROPS_CSV_FILE = "csv_files/single_harvest_crops.csv"
REGENERATIVE_CROPS_CSV_FILE = "csv_files/regenerative_crops.csv"
DAYS_IN_SEASON = 28
DEBUG = 0
class Crop():
"""
Defines a class for a general crop. Contains properties shared by all
    crops, whether they are a one-time harvest or a multiple-time harvest.
    However, it contains all properties of a single-harvest crop.
- name: Name for the crop as referred to in-game.
- buy_price: The cheapest cost of an individual seed.
- sell_price: The regular sales price of one unit of produce.
- growth_time: The number of days it takes for the crop to
become harvestable.
- harvest_yield: The average amount of produce per harvest.
"""
def __init__(self, name, buy_price, sell_price, growth_time, harvest_yield):
self.name = name
self.buy_price = buy_price
        self.sell_price = sell_price
        self.growth_time = growth_time
self.harvest_yield = harvest_yield
# Regenerative crops are defined using the RegenerativeCrop class,
# which will set this property as True instead.
self.regenerative = False
# Initialize net gold income per day (ngid) property. Will be changed
# with future functions.
self.ngid = None
class RegenerativeCrop(Crop):
"""
Defines a class for a regenerative crop, such as a strawberry crop.
- regrowth_time: The number of days it takes for a mature crop to
regrow and become harvestable again after being
harvested.
- max_harvests: The number of times a crop can be harvested in a
season.
"""
def __init__(self, name, buy_price, sell_price, growth_time,
harvest_yield, regrowth_time, max_harvests):
Crop.__init__(self, name, buy_price, sell_price, growth_time,
harvest_yield)
self.regrowth_time = regrowth_time
self.max_harvests = max_harvests
self.regenerative = True
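# Illustrative sketch (values are made up, not game data): a single-harvest
# crop and a regenerative crop would be constructed as
#
#   parsnip = Crop("Parsnip", buy_price=20, sell_price=35,
#                  growth_time=4, harvest_yield=1.0)
#   strawberry = RegenerativeCrop("Strawberry", buy_price=100, sell_price=120,
#                                 growth_time=8, harvest_yield=1.02,
#                                 regrowth_time=4, max_harvests=-1)
#
# where max_harvests == -1 means unlimited harvests. In practice both are
# built by import_crops() below from the CSV files.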
class data:
""" Define an object to hold widely used information. Will allow for easy
argument-passing for functions. """
crops = {}
def import_crops(data):
""" Reads in data from csv files and populates the data object with crop
objects. """
with open(SINGLE_HARVEST_CROPS_CSV_FILE) as crop_values:
crop_values = csv.DictReader(crop_values)
for row in crop_values:
info = [row["name"], int(row["buy_price"]), int(row["sell_price"]),
int(row["growth_time"]), float(row["harvest_yield"])]
crop = Crop(*info)
crop.seasons = ([row["season1"], row["season2"]] if
row["season2"] else [row["season1"]])
data.crops[row["name"]] = crop
with open(REGENERATIVE_CROPS_CSV_FILE) as crop_values:
crop_values = csv.DictReader(crop_values)
for row in crop_values:
info = [row["name"], int(row["buy_price"]), int(row["sell_price"]),
int(row["growth_time"]), float(row["harvest_yield"]),
int(row["regrowth_time"]), int(row["max_harvests"])]
crop = RegenerativeCrop(*info)
crop.seasons = ([row["season1"], row["season2"]] if
row["season2"] else [row["season1"]])
data.crops[row["name"]] = crop
def get_net_income(season, date, budget, max_num_seeds, data, recursive=0):
""" Given inputs taken from the GUI, returns all possible crops that can be
bought and also calculates the amount of gold that the player would have at
the end of the season if only that crop were harvested and returns that
number too. Recursive is equal to 1 when this instance of the function is
called recursively one layer lower on the stack. This occurs at the end of
the first call to calculate final gold for crops that can span two seasons.
"""
    # Ensure given arguments are valid. ---------------------------------------
    # Check that season is valid.
    if season not in ["spring", "summer", "fall"]:
        status.config(text="Error, invalid input (season)")
        return
    # Check valid date.
    if not (str(date).isdigit() and 1 <= date <= 28):
        status.config(text="Error, invalid input (date)")
        return
    # Check valid budget.
    if not (str(budget).isdigit() and budget >= 1):
        status.config(text="Error, invalid input (budget)")
        return
    # Check valid max_num_seeds.
    if not (str(max_num_seeds).isdigit() and max_num_seeds >= 1):
        status.config(text="Error, invalid input (Max # seeds)")
        return
# Argument checking finished. ---------------------------------------------
if not recursive:
# Must be first call of the function.
num_days = DAYS_IN_SEASON - date
available_crops = [crop for crop in list(data.crops.values()) if
season in crop.seasons and budget >= crop.buy_price]
else:
# Must be calculating for crops that span two seasons.
num_days = DAYS_IN_SEASON * 2 - date
# Eliminate crops that do not span two seasons.
available_crops = [crop for crop in list(data.crops.values()) if
len(crop.seasons) == 2 and budget > crop.buy_price]
possible_paths = []
for crop in available_crops:
if DEBUG: print(crop.name)
if not crop.regenerative:
# Crop is single-harvest.
# Calculate the number of harvesting cycles there's time for.
num_cycles = num_days // crop.growth_time
gold = budget
for i in range(num_cycles):
buy_amount = gold // crop.buy_price
# Make sure we're not buying more than we have room for.
if (buy_amount > max_num_seeds):
buy_amount = max_num_seeds
gold -= buy_amount * crop.buy_price
gold += buy_amount * crop.sell_price * crop.harvest_yield
            # Only add this crop if it's profitable, i.e. the player ends with
            # more gold than they started with.
if gold > budget:
possible_paths.append( [crop.name, gold - budget] )
elif crop.regenerative:
# Crop is regenerative.
# Prepare an array that will contain the amount of harvests for each
# day.
planner = [0] * num_days
total_crops = 0
gold = budget
for i in range(num_days):
gold += planner[i] * crop.sell_price * crop.harvest_yield
if i <= (num_days - (crop.growth_time + crop.regrowth_time *
ceil(crop.buy_price / crop.sell_price)) ):
# If pass conditional, must still have time to plant and
# harvest a crop such that it's profitable.
# Calculate how many can be bought.
buy_amount = gold // crop.buy_price
# Make sure we're not buying more than we have room for.
if (total_crops + buy_amount > max_num_seeds):
buy_amount = max_num_seeds - total_crops
# Do logistics (delta gold, total amount of crops).
total_crops += buy_amount
gold -= buy_amount * crop.buy_price
# Calculate which days this crop will be harvestable.
num_harvests = 0
for j in range(i + crop.growth_time, num_days,
crop.regrowth_time):
planner[j] += int(buy_amount)
num_harvests += 1
# If the crop has a maximum number of harvests and we
# have passed it, stop the loop as it can't be harvested
# more.
if (crop.max_harvests != -1 and num_harvests >=
crop.max_harvests):
break
if DEBUG == 2: print(planner)
            # Only add this crop if it's profitable, i.e. the player ends with
            # more gold than they started with.
if gold > budget:
possible_paths.append( [crop.name, gold - budget] )
# Now check if there are crops that can span two seasons, including the
# current season.
if season == "summer" and not recursive:
long_possible_paths = get_net_income("summer", date, budget,
max_num_seeds, data, 1)
if DEBUG == 2:
print(possible_paths)
print(long_possible_paths)
print("")
# Merge long-term crops' final golds with their short-term. Put long
# term gold in front of short term gold.
for path in long_possible_paths:
path_names = [short_path[0] for short_path in possible_paths]
try:
index = path_names.index(path[0])
possible_paths[index].insert(1, path[1])
except ValueError:
# Occurs when crop is in long_possible_paths but not (short)
# possible_paths. E.g. Corn in summer with date=12, budget=222,
# max seeds=12. Corn is not profitable if grown just for the
# rest of summer, so not added to possible_paths. However, if
# grown in spring too, it's profitable and so is added to the
# list.
possible_paths.append(path)
    possible_paths = sorted(possible_paths, key=lambda x: x[1], reverse=True)
    # Return an array of arrays where each inner array contains:
    #   0: The name of the crop.
    #   1: The gold gained if the player were to plant this for the current and
    #      next season, where applicable; otherwise the gold gained for this
    #      season only.
    #   2: The gold gained for this season only. Present only when the crop
    #      also has a two-season value at index 1.
    # The array is sorted by descending values of index 1.
return possible_paths
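# Example usage (a sketch; assumes the Tkinter `status` label and the
# DAYS_IN_SEASON / DEBUG / *_CSV_FILE globals defined elsewhere in this file):
#
#   store = data()
#   import_crops(store)
#   paths = get_net_income("summer", 1, 500, 20, store)
#   # paths -> e.g. [["Crop A", 1500, 900], ["Crop B", 640], ...], sorted by
#   # index 1 (gold gained) in descending order.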
|
|
#!/usr/bin/env python
"""
Command line interface to interact with a VNC Server
(c) 2010 Marc Sibson
MIT License
"""
import getpass
import optparse
import sys
import os
import shlex
import logging
import logging.handlers
from twisted.python.log import PythonLoggingObserver
from twisted.internet import reactor, protocol
from twisted.python.failure import Failure
from vncdotool.client import VNCDoToolFactory, VNCDoToolClient
from vncdotool.loggingproxy import VNCLoggingServerFactory
log = logging.getLogger()
SUPPORTED_FORMATS = ('png', 'jpg', 'jpeg', 'gif', 'bmp')
class TimeoutError(RuntimeError):
pass
def log_exceptions(type_, value, tb):
log.critical('Unhandled exception:', exc_info=(type_, value, tb))
def log_connected(pcol):
log.info('connected to %s' % pcol.name)
return pcol
def error(reason):
log.critical(reason.getErrorMessage())
reactor.exit_status = 10
reactor.callLater(0.1, reactor.stop)
def stop(pcol):
reactor.exit_status = 0
pcol.transport.loseConnection()
# XXX delay
reactor.callLater(0.1, reactor.stop)
class VNCDoCLIClient(VNCDoToolClient):
def vncRequestPassword(self):
if self.factory.password is None:
self.factory.password = getpass.getpass('VNC password:')
self.sendPassword(self.factory.password)
class ExitingProcess(protocol.ProcessProtocol):
def processExited(self, reason):
reactor.callLater(0.1, reactor.stop)
def errReceived(self, data):
print data
class VNCDoToolOptionParser(optparse.OptionParser):
def format_help(self, **kwargs):
result = optparse.OptionParser.format_help(self, **kwargs)
result += '\n'.join(
['',
'Common Commands (CMD):',
' key KEY\t\tsend KEY to server, alphanumeric or keysym: ctrl-c, del',
' type TEXT\t\tsend alphanumeric string of TEXT',
' move X Y\t\tmove the mouse cursor to position X,Y',
' click BUTTON\t\tsend a mouse BUTTON click',
' capture FILE\t\tsave current screen as FILE',
' expect FILE FUZZ\twait until screen matches FILE',
' pause SECONDS\t\twait SECONDS before sending next command',
'',
'Other Commands (CMD):',
' keyup KEY\t\tsend KEY released',
' keydown KEY\t\tsend KEY pressed',
' mousedown BUTTON\tsend BUTTON down',
' mousemove X Y\t\talias for move',
' mouseup BUTTON\tsend BUTTON up',
' drag X Y\t\tmove the mouse to X,Y in small steps',
' rcapture FILE X Y W H\tcapture a region of the screen',
             ' rexpect FILE X Y\texpect FILE to match the screen region at X,Y',
             '',
             'If a filename is given, commands will be read from it; use `-` for stdin',
'',
])
return result
def build_command_list(factory, args, delay=None, warp=1.0):
client = VNCDoCLIClient
if delay:
delay = float(delay) / 1000.0
while args:
cmd = args.pop(0)
if cmd == 'key':
key = args.pop(0)
factory.deferred.addCallback(client.keyPress, key)
elif cmd in ('kdown', 'keydown'):
key = args.pop(0)
factory.deferred.addCallback(client.keyDown, key)
elif cmd in ('kup', 'keyup'):
key = args.pop(0)
factory.deferred.addCallback(client.keyUp, key)
elif cmd in ('move', 'mousemove'):
x, y = int(args.pop(0)), int(args.pop(0))
factory.deferred.addCallback(client.mouseMove, x, y)
elif cmd == 'click':
button = int(args.pop(0))
factory.deferred.addCallback(client.mousePress, button)
elif cmd in ('mdown', 'mousedown'):
button = int(args.pop(0))
factory.deferred.addCallback(client.mouseDown, button)
elif cmd in ('mup', 'mouseup'):
button = int(args.pop(0))
factory.deferred.addCallback(client.mouseUp, button)
elif cmd == 'type':
for key in args.pop(0):
factory.deferred.addCallback(client.keyPress, key)
if delay:
factory.deferred.addCallback(client.pause, delay)
elif cmd == 'capture':
filename = args.pop(0)
imgformat = os.path.splitext(filename)[1][1:]
if imgformat not in SUPPORTED_FORMATS:
print 'unsupported image format "%s", choose one of %s' % (
imgformat, SUPPORTED_FORMATS)
else:
factory.deferred.addCallback(client.captureScreen, filename)
elif cmd == 'expect':
filename = args.pop(0)
rms = int(args.pop(0))
factory.deferred.addCallback(client.expectScreen, filename, rms)
elif cmd == 'rcapture':
filename = args.pop(0)
x = int(args.pop(0))
y = int(args.pop(0))
w = int(args.pop(0))
h = int(args.pop(0))
imgformat = os.path.splitext(filename)[1][1:]
if imgformat not in SUPPORTED_FORMATS:
print 'unsupported image format "%s", choose one of %s' % (
imgformat, SUPPORTED_FORMATS)
else:
factory.deferred.addCallback(client.captureRegion, filename, x, y, w, h)
elif cmd == 'rexpect':
filename = args.pop(0)
x = int(args.pop(0))
y = int(args.pop(0))
rms = int(args.pop(0))
factory.deferred.addCallback(client.expectRegion, filename, x, y, rms)
elif cmd in ('pause', 'sleep'):
duration = float(args.pop(0)) / warp
factory.deferred.addCallback(client.pause, duration)
        elif cmd == 'drag':
x, y = int(args.pop(0)), int(args.pop(0))
factory.deferred.addCallback(client.mouseDrag, x, y)
elif os.path.isfile(cmd):
lex = shlex.shlex(open(cmd), posix=True)
lex.whitespace_split = True
args = list(lex) + args
else:
print 'unknown cmd "%s"' % cmd
if delay and args:
factory.deferred.addCallback(client.pause, delay)
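# Example (a sketch; `factory` would come from VNCDoToolFactory as in
# build_tool below):
#
#   build_command_list(factory, ['move', '100', '200', 'click', '1',
#                                'capture', 'screen.png'])
#
# Each recognized command appends a callback to factory.deferred, so the
# actions run in order once the client connects.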
def build_tool(options, args):
factory = VNCDoToolFactory()
factory.protocol = VNCDoCLIClient
if options.verbose:
factory.deferred.addCallbacks(log_connected)
if args == ['-']:
lex = shlex.shlex(posix=True)
lex.whitespace_split = True
args = list(lex)
build_command_list(factory, args, options.delay, options.warp)
factory.deferred.addCallback(stop)
factory.deferred.addErrback(error)
reactor.connectTCP(options.host, int(options.port), factory)
reactor.exit_status = 1
return factory
def build_proxy(options):
factory = VNCLoggingServerFactory(options.host, int(options.port))
port = reactor.listenTCP(options.listen, factory)
reactor.exit_status = 0
factory.listen_port = port.getHost().port
return factory
def add_standard_options(parser):
parser.disable_interspersed_args()
parser.add_option('-p', '--password', action='store', metavar='PASSWORD',
help='use password to access server')
parser.add_option('-s', '--server', action='store', metavar='SERVER',
default='127.0.0.1',
                      help='connect to VNC server at SERVER[:DISPLAY|::PORT] [%default]')
parser.add_option('--logfile', action='store', metavar='FILE',
help='output logging information to FILE')
parser.add_option('-v', '--verbose', action='count',
                      help='increase verbosity, use multiple times')
return parser
def setup_logging(options):
# route Twisted log messages via stdlib logging
if options.logfile:
handler = logging.handlers.RotatingFileHandler(options.logfile,
maxBytes=5*1024*1024, backupCount=5)
logging.getLogger().addHandler(handler)
sys.excepthook = log_exceptions
logging.basicConfig()
if options.verbose > 1:
logging.getLogger().setLevel(logging.DEBUG)
elif options.verbose:
logging.getLogger().setLevel(logging.INFO)
PythonLoggingObserver().start()
def parse_host(server):
split = server.split(':')
if not split[0]:
host = '127.0.0.1'
else:
host = split[0]
if len(split) == 3: # ::port
port = int(split[2])
elif len(split) == 2: # :display
port = int(split[1]) + 5900
else:
port = 5900
return host, port
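# parse_host accepts the --server option in the forms shown below:
#   parse_host('10.0.0.5')       -> ('10.0.0.5', 5900)
#   parse_host('10.0.0.5:2')     -> ('10.0.0.5', 5902)   # display number
#   parse_host('10.0.0.5::5901') -> ('10.0.0.5', 5901)   # explicit port
#   parse_host(':1')             -> ('127.0.0.1', 5901)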
def vnclog():
usage = '%prog [options] OUTPUT'
description = 'Capture user interactions with a VNC Server'
op = optparse.OptionParser(usage=usage, description=description)
add_standard_options(op)
op.add_option('--listen', metavar='PORT', type='int',
help='listen for client connections on PORT [%default]')
op.set_defaults(listen=5902)
op.add_option('--forever', action='store_true',
help='continually accept new connections')
op.add_option('--viewer', action='store', metavar='CMD',
help='launch an interactive client using CMD [%default]')
options, args = op.parse_args()
setup_logging(options)
options.host, options.port = parse_host(options.server)
if len(args) != 1:
op.error('incorrect number of arguments')
output = args[0]
factory = build_proxy(options)
if options.forever and os.path.isdir(output):
factory.output = output
elif options.forever:
op.error('--forever requires OUTPUT to be a directory')
elif output == '-':
factory.output = sys.stdout
else:
factory.output = open(output, 'w')
if options.listen == 0:
log.info('accepting connections on ::%d', factory.listen_port)
factory.password = options.password
if options.viewer:
cmdline = '%s localhost::%s' % (options.viewer, factory.listen_port)
proc = reactor.spawnProcess(ExitingProcess(),
options.viewer, cmdline.split(),
env=os.environ)
reactor.run()
sys.exit(reactor.exit_status)
def vncdo():
usage = '%prog [options] CMD CMDARGS|-|filename'
description = 'Command line control of a VNC server'
op = VNCDoToolOptionParser(usage=usage, description=description)
add_standard_options(op)
op.add_option('--delay', action='store', metavar='MILLISECONDS',
default=os.environ.get('VNCDOTOOL_DELAY', 10), type='int',
help='delay MILLISECONDS between actions [%defaultms]')
op.add_option('--force-caps', action='store_true',
help='for non-compliant servers, send shift-LETTER, ensures capitalization works')
op.add_option('--localcursor', action='store_true',
help='mouse pointer drawn client-side, useful when server does not include cursor')
op.add_option('--nocursor', action='store_true',
help='no mouse pointer in screen captures')
op.add_option('-t', '--timeout', action='store', type='int', metavar='TIMEOUT',
help='abort if unable to complete all actions within TIMEOUT seconds')
op.add_option('-w', '--warp', action='store', type='float',
metavar='FACTOR', default=1.0,
help='pause time is accelerated by FACTOR [x%default]')
options, args = op.parse_args()
if not len(args):
op.error('no command provided')
setup_logging(options)
options.host, options.port = parse_host(options.server)
log.info('connecting to %s:%s', options.host, options.port)
factory = build_tool(options, args)
factory.password = options.password
if options.localcursor:
factory.pseudocursor = True
if options.nocursor:
factory.nocursor = True
if options.force_caps:
factory.force_caps = True
if options.timeout:
message = 'TIMEOUT Exceeded (%ss)' % options.timeout
failure = Failure(TimeoutError(message))
reactor.callLater(options.timeout, error, failure)
reactor.run()
sys.exit(reactor.exit_status)
if __name__ == '__main__':
vncdo()
|
|
# drizzle/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2010-2011 Monty Taylor <mordred@inaugust.com>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: drizzle
:name: Drizzle
Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine
is InnoDB (transactions, foreign keys) rather than MyISAM. For more on the
differences, see the `Notable Differences <http://docs.drizzle.org/mysql_differences.html>`_
page in the `Drizzle Documentation <http://docs.drizzle.org/index.html>`_.
The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of
the :doc:`SQLAlchemy MySQL <mysql>` documentation is also relevant.
"""
from sqlalchemy import exc
from sqlalchemy import log
from sqlalchemy import types as sqltypes
from sqlalchemy.engine import reflection
from sqlalchemy.dialects.mysql import base as mysql_dialect
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
class _NumericType(object):
"""Base for Drizzle numeric types."""
def __init__(self, **kw):
super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and \
(
(precision is None and scale is not None) or
(precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
super(_FloatType, self).__init__(precision=precision,
asdecimal=asdecimal, **kw)
self.scale = scale
class _StringType(mysql_dialect._StringType):
"""Base for Drizzle string types."""
def __init__(self, collation=None, binary=False, **kw):
kw['national'] = False
super(_StringType, self).__init__(collation=collation, binary=binary,
**kw)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""Drizzle NUMERIC type."""
__visit_name__ = 'NUMERIC'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
"""
super(NUMERIC, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
"""Drizzle DECIMAL type."""
__visit_name__ = 'DECIMAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
"""
super(DECIMAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
"""Drizzle DOUBLE type."""
__visit_name__ = 'DOUBLE'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
"""
super(DOUBLE, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
"""Drizzle REAL type."""
__visit_name__ = 'REAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
"""
super(REAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""Drizzle FLOAT type."""
__visit_name__ = 'FLOAT'
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
"""
super(FLOAT, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
def bind_processor(self, dialect):
return None
class INTEGER(sqltypes.INTEGER):
"""Drizzle INTEGER type."""
__visit_name__ = 'INTEGER'
def __init__(self, **kw):
"""Construct an INTEGER."""
super(INTEGER, self).__init__(**kw)
class BIGINT(sqltypes.BIGINT):
"""Drizzle BIGINTEGER type."""
__visit_name__ = 'BIGINT'
def __init__(self, **kw):
"""Construct a BIGINTEGER."""
super(BIGINT, self).__init__(**kw)
class TIME(mysql_dialect.TIME):
"""Drizzle TIME type."""
class TIMESTAMP(sqltypes.TIMESTAMP):
"""Drizzle TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
class TEXT(_StringType, sqltypes.TEXT):
"""Drizzle TEXT type, for text up to 2^16 characters."""
__visit_name__ = 'TEXT'
def __init__(self, length=None, **kw):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over the 'binary' shorthand.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Drizzle VARCHAR type, for variable-length character data."""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over the 'binary' shorthand.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Drizzle CHAR type, for fixed-length character data."""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
class ENUM(mysql_dialect.ENUM):
"""Drizzle ENUM type."""
def __init__(self, *enums, **kw):
"""Construct an ENUM.
Example:
Column('myenum', ENUM("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below).
:param strict: Defaults to False: ensure that a given value is in this
ENUM's range of permissible values when inserting or updating rows.
Note that Drizzle will not raise a fatal error if you attempt to
          store an out-of-range value; an alternate value will be stored
          instead.
(See Drizzle ENUM documentation.)
        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over the 'binary' shorthand.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
"""
super(ENUM, self).__init__(*enums, **kw)
class _DrizzleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMERIC
colspecs = {
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
sqltypes.Boolean: _DrizzleBoolean,
}
# All the types we have in Drizzle
ischema_names = {
'BIGINT': BIGINT,
'BINARY': BINARY,
'BLOB': BLOB,
'BOOLEAN': BOOLEAN,
'CHAR': CHAR,
'DATE': DATE,
'DATETIME': DATETIME,
'DECIMAL': DECIMAL,
'DOUBLE': DOUBLE,
'ENUM': ENUM,
'FLOAT': FLOAT,
'INT': INTEGER,
'INTEGER': INTEGER,
'NUMERIC': NUMERIC,
'TEXT': TEXT,
'TIME': TIME,
'TIMESTAMP': TIMESTAMP,
'VARBINARY': VARBINARY,
'VARCHAR': VARCHAR,
}
class DrizzleCompiler(mysql_dialect.MySQLCompiler):
def visit_typeclause(self, typeclause):
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.Integer):
return 'INTEGER'
else:
return super(DrizzleCompiler, self).visit_typeclause(typeclause)
def visit_cast(self, cast, **kwargs):
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause)
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler):
pass
class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler):
def _extend_numeric(self, type_, spec):
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL
COLLATE annotations and Drizzle specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr('collation'):
collation = 'COLLATE %s' % type_.collation
elif attr('binary'):
collation = 'BINARY'
else:
collation = None
return ' '.join([c for c in (spec, collation)
if c is not None])
    def visit_NCHAR(self, type_):
        raise NotImplementedError("Drizzle does not support NCHAR")
    def visit_NVARCHAR(self, type_):
        raise NotImplementedError("Drizzle does not support NVARCHAR")
def visit_FLOAT(self, type_):
if type_.scale is not None and type_.precision is not None:
return "FLOAT(%s, %s)" % (type_.precision, type_.scale)
else:
return "FLOAT"
def visit_BOOLEAN(self, type_):
return "BOOLEAN"
def visit_BLOB(self, type_):
return "BLOB"
class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext):
pass
class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer):
pass
@log.class_logger
class DrizzleDialect(mysql_dialect.MySQLDialect):
"""Details of the Drizzle dialect.
Not used directly in application code.
"""
name = 'drizzle'
_supports_cast = True
supports_sequences = False
supports_native_boolean = True
supports_views = False
default_paramstyle = 'format'
colspecs = colspecs
statement_compiler = DrizzleCompiler
ddl_compiler = DrizzleDDLCompiler
type_compiler = DrizzleTypeCompiler
ischema_names = ischema_names
preparer = DrizzleIdentifierPreparer
def on_connect(self):
"""Force autocommit - Drizzle Bug#707842 doesn't set this properly"""
def connect(conn):
conn.autocommit(False)
return connect
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = 'utf8'
rp = connection.execute("SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0] for row in self._compat_fetchall(rp, charset=charset)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
raise NotImplementedError
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
return 0
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
charset = self._connection_charset
rs = connection.execute(
'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM'
' data_dictionary.COLLATIONS')
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
self._server_ansiquotes = False
self._backslash_escapes = False
|
|
# -*- coding: utf-8 -*-
'''
Test module for syslog_ng
'''
# Import Python modules
from __future__ import absolute_import
from textwrap import dedent
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')
# Import Salt libs
import salt.utils
from salt.modules import syslog_ng
syslog_ng.__salt__ = {}
syslog_ng.__opts__ = {}
_VERSION = "3.6.0alpha0"
_MODULES = ("syslogformat,json-plugin,basicfuncs,afstomp,afsocket,cryptofuncs,"
"afmongodb,dbparser,system-source,affile,pseudofile,afamqp,"
"afsocket-notls,csvparser,linux-kmsg-format,afuser,confgen,afprog")
VERSION_OUTPUT = """syslog-ng {0}
Installer-Version: {0}
Revision:
Compile-Date: Apr 4 2014 20:26:18
Error opening plugin module; module='afsocket-tls', error='/home/tibi/install/syslog-ng/lib/syslog-ng/libafsocket-tls.so: undefined symbol: tls_context_setup_session'
Available-Modules: {1}
Enable-Debug: on
Enable-GProf: off
Enable-Memtrace: off
Enable-IPv6: on
Enable-Spoof-Source: off
Enable-TCP-Wrapper: off
Enable-Linux-Caps: off""".format(_VERSION, _MODULES)
STATS_OUTPUT = """SourceName;SourceId;SourceInstance;State;Type;Number
center;;received;a;processed;0
destination;#anon-destination0;;a;processed;0
destination;#anon-destination1;;a;processed;0
source;s_gsoc2014;;a;processed;0
center;;queued;a;processed;0
global;payload_reallocs;;a;processed;0
global;sdata_updates;;a;processed;0
global;msg_clones;;a;processed;0"""
_SYSLOG_NG_NOT_INSTALLED_RETURN_VALUE = {
"retcode": -1, "stderr":
"Unable to execute the command 'syslog-ng'. It is not in the PATH."
}
_SYSLOG_NG_CTL_NOT_INSTALLED_RETURN_VALUE = {
"retcode": -1, "stderr":
"Unable to execute the command 'syslog-ng-ctl'. It is not in the PATH."
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SyslogNGTestCase(TestCase):
def test_statement_without_options(self):
s = syslog_ng.Statement("source", "s_local", options=[])
b = s.build()
self.assertEqual(dedent(
"""\
source s_local {
};
"""), b)
def test_non_empty_statement(self):
o1 = syslog_ng.Option("file")
o2 = syslog_ng.Option("tcp")
s = syslog_ng.Statement("source", "s_local", options=[o1, o2])
b = s.build()
self.assertEqual(dedent(
"""\
source s_local {
file(
);
tcp(
);
};
"""), b)
def test_option_with_parameters(self):
o1 = syslog_ng.Option("file")
p1 = syslog_ng.SimpleParameter('"/var/log/messages"')
p2 = syslog_ng.SimpleParameter()
p3 = syslog_ng.TypedParameter()
p3.type = "tls"
p2.value = '"/var/log/syslog"'
o1.add_parameter(p1)
o1.add_parameter(p2)
o1.add_parameter(p3)
b = o1.build()
self.assertEqual(dedent(
"""\
file(
"/var/log/messages",
"/var/log/syslog",
tls(
)
);
"""), b)
def test_parameter_with_values(self):
p = syslog_ng.TypedParameter()
p.type = "tls"
v1 = syslog_ng.TypedParameterValue()
v1.type = 'key_file'
v2 = syslog_ng.TypedParameterValue()
v2.type = 'cert_file'
p.add_value(v1)
p.add_value(v2)
b = p.build()
self.assertEqual(dedent(
"""\
tls(
key_file(
),
cert_file(
)
)"""), b)
def test_value_with_arguments(self):
t = syslog_ng.TypedParameterValue()
t.type = 'key_file'
a1 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"')
a2 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"')
t.add_argument(a1)
t.add_argument(a2)
b = t.build()
self.assertEqual(dedent(
'''\
key_file(
"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"
"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"
)'''), b)
def test_end_to_end_statement_generation(self):
s = syslog_ng.Statement('source', 's_tls')
o = syslog_ng.Option('tcp')
ip = syslog_ng.TypedParameter('ip')
ip.add_value(syslog_ng.SimpleParameterValue("'192.168.42.2'"))
o.add_parameter(ip)
port = syslog_ng.TypedParameter('port')
port.add_value(syslog_ng.SimpleParameterValue(514))
o.add_parameter(port)
tls = syslog_ng.TypedParameter('tls')
key_file = syslog_ng.TypedParameterValue('key_file')
key_file.add_argument(syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"'))
cert_file = syslog_ng.TypedParameterValue('cert_file')
cert_file.add_argument(syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert"'))
peer_verify = syslog_ng.TypedParameterValue('peer_verify')
peer_verify.add_argument(syslog_ng.Argument('optional-untrusted'))
tls.add_value(key_file)
tls.add_value(cert_file)
tls.add_value(peer_verify)
o.add_parameter(tls)
s.add_child(o)
b = s.build()
self.assertEqual(dedent(
'''\
source s_tls {
tcp(
ip(
'192.168.42.2'
),
port(
514
),
tls(
key_file(
"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"
),
cert_file(
"/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert"
),
peer_verify(
optional-untrusted
)
)
);
};
'''), b)
def test_version(self):
mock_return_value = {"retcode": 0, 'stdout': VERSION_OUTPUT}
expected_output = {"retcode": 0, "stdout": "3.6.0alpha0"}
mock_args = "syslog-ng -V"
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.version,
expected_output=expected_output)
def test_stats(self):
mock_return_value = {"retcode": 0, 'stdout': STATS_OUTPUT}
expected_output = {"retcode": 0, "stdout": STATS_OUTPUT}
mock_args = "syslog-ng-ctl stats"
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.stats,
expected_output=expected_output)
def test_modules(self):
mock_return_value = {"retcode": 0, 'stdout': VERSION_OUTPUT}
expected_output = {"retcode": 0, "stdout": _MODULES}
mock_args = "syslog-ng -V"
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.modules,
expected_output=expected_output)
def test_config_test_ok(self):
mock_return_value = {"retcode": 0, "stderr": "", "stdout": "Syslog-ng startup text..."}
mock_args = "syslog-ng --syntax-only"
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.config_test,
expected_output=mock_return_value)
def test_config_test_fails(self):
mock_return_value = {"retcode": 1, 'stderr': "Syntax error...", "stdout": ""}
mock_args = "syslog-ng --syntax-only"
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.config_test,
expected_output=mock_return_value)
def test_config_test_cfgfile(self):
cfgfile = "/path/to/syslog-ng.conf"
mock_return_value = {"retcode": 1, 'stderr': "Syntax error...", "stdout": ""}
mock_args = "syslog-ng --syntax-only --cfgfile={0}".format(cfgfile)
self._assert_template(mock_args,
mock_return_value,
function_to_call=syslog_ng.config_test,
function_args={"cfgfile": cfgfile},
expected_output=mock_return_value)
def _assert_template(self,
mock_function_args,
mock_return_value,
function_to_call,
expected_output,
function_args=None):
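        """Call ``function_to_call`` with ``cmd.run_all`` mocked out, then
        assert both the returned value and that the command string passed to
        the mock ends with ``mock_function_args``. When syslog-ng is not
        installed locally, the expected output is swapped for the matching
        "not installed" return value defined above.
        """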
if function_args is None:
function_args = {}
installed = True
if not salt.utils.which("syslog-ng"):
installed = False
if "syslog-ng-ctl" in mock_function_args:
expected_output = _SYSLOG_NG_CTL_NOT_INSTALLED_RETURN_VALUE
else:
expected_output = _SYSLOG_NG_NOT_INSTALLED_RETURN_VALUE
mock_function = MagicMock(return_value=mock_return_value)
with patch.dict(syslog_ng.__salt__, {'cmd.run_all': mock_function}):
got = function_to_call(**function_args)
self.assertEqual(expected_output, got)
if installed:
self.assertTrue(mock_function.called)
self.assertEqual(len(mock_function.call_args), 2)
mock_param = mock_function.call_args
self.assertTrue(mock_param[0][0].endswith(mock_function_args))
if __name__ == '__main__':
from integration import run_tests
run_tests(SyslogNGTestCase, needs_daemon=False)
|
|
import os
import json
import gzip
import math
from . import Interpolate
from . import Extrapolate
from . import vehicles
from . import log
class EmissionsJsonParser:
def __init__(self, vehicle, pollutants, filename="roadTransport.json.gz"):
self._filename = filename
self._data = None
self._parsed_data = {}
self._vehicle = vehicle
# TODO: Don't need this?
self._slope = 0
self._pollutants = pollutants
self._read_data()
self._parse_data()
def _read_data(self):
gzip_json = os.path.join(os.path.dirname(__file__), self._filename)
if os.path.isfile(gzip_json):
with gzip.open(gzip_json, "rb") as data_file:
self._data = json.loads(data_file.read())
else:
raise IOError("Can't find file {}".format(self._filename))
@staticmethod
def get_fuel_type(fuel_id):
for k, v in vehicles.FuelTypes.__dict__.items():
if v == fuel_id:
return v
return None
def get_subsegments(self):
"""Util method to list all subsegments for the vehicle
passed when constructing this object
"""
subsegments = set()
categories = self._data.get("Type", None)
if not categories:
raise AttributeError("Missing 'Type' in JSON file. Inspect file!")
for c in categories:
cat_id = c.get("Id")
vehicle_type = vehicles.Vehicle.get_type_for_category(cat_id)
if vehicle_type != self._vehicle.type:
continue
# print("cat_id: {}".format(cat_id))
fuel = c.get("SSC_NAME")
for f in fuel:
fuel_id = f.get("Id")
fuel_type = EmissionsJsonParser.get_fuel_type(fuel_id)
if not fuel_type:
raise ValueError("BAD FUEL TYPE!")
if fuel_type != self._vehicle.fuel_type:
continue
            # Use a distinct name for the JSON list so the accumulator set
            # declared above is not clobbered before .add() is called.
            subsegment_list = f.get("Subsegment")
            for s in subsegment_list:
                subsegment_id = s.get("Id").encode("utf-8")
                subsegments.add(subsegment_id)
return subsegments
def get_euro_standards(self):
"""Util method to list all euro standards for the vehicle
passed when constructing this object
"""
euro_standards = set()
categories = self._data.get("Type", None)
for c in categories:
cat_id = c.get("Id")
vehicle_type = vehicles.Vehicle.get_type_for_category(cat_id)
if vehicle_type != self._vehicle.type:
continue
# print("cat_id: {}".format(cat_id))
fuel = c.get("SSC_NAME")
for f in fuel:
fuel_id = f.get("Id")
fuel_type = EmissionsJsonParser.get_fuel_type(fuel_id)
if not fuel_type:
raise ValueError("BAD FUEL TYPE!")
if fuel_type != self._vehicle.fuel_type:
continue
subsegments = f.get("Subsegment")
for s in subsegments:
subsegment_id = s.get("Id").encode("utf-8")
if self._vehicle.segment != subsegment_id:
continue
euro_standard = s.get("TEC_NAME")
for es in euro_standard:
es_id = es.get("Id")
euro_standards.add(es_id)
return euro_standards
def get_modes(self):
modes = set()
categories = self._data.get("Type", None)
for c in categories:
cat_id = c.get("Id")
vehicle_type = vehicles.Vehicle.get_type_for_category(cat_id)
if vehicle_type != self._vehicle.type:
continue
fuel = c.get("SSC_NAME")
for f in fuel:
fuel_id = f.get("Id")
fuel_type = EmissionsJsonParser.get_fuel_type(fuel_id)
if not fuel_type:
raise ValueError("BAD FUEL TYPE!")
if fuel_type != self._vehicle.fuel_type:
continue
subsegments = f.get("Subsegment")
for s in subsegments:
subsegment_id = s.get("Id").encode("utf-8")
if self._vehicle.segment != subsegment_id:
continue
euro_standard = s.get("TEC_NAME")
for es in euro_standard:
es_id = es.get("Id")
if self._vehicle.euro_std != es_id:
continue
                        # Same naming fix as in get_subsegments: keep the JSON
                        # list separate from the accumulator set.
                        mode_list = es.get("Mode")
                        for m in mode_list:
                            m_id = m.get("Id")
                            modes.add(m_id)
return modes
def _parse_data(self):
if not self._data:
raise ValueError("No data to parse.. Something went wrong trying to read input data..")
categories = self._data.get("Type", None)
if not categories:
raise AttributeError("Missing 'Type' in JSON file. Inspect file!")
mapping = {
vehicles.VehicleTypes.CAR: vehicles.Car,
vehicles.VehicleTypes.VAN: vehicles.Van,
vehicles.VehicleTypes.BUS: vehicles.Bus,
vehicles.VehicleTypes.LCATEGORY: vehicles.LCategory,
vehicles.VehicleTypes.TRUCK: vehicles.Truck
}
        # TODO: Refactor - this was primarily done for testing the JSON structure
for c in categories:
cat_id = c.get("Id")
vehicle_type = vehicles.Vehicle.get_type_for_category(cat_id)
if vehicle_type != self._vehicle.type:
continue
# print("cat_id: {}".format(cat_id))
fuel = c.get("SSC_NAME")
for f in fuel:
fuel_id = f.get("Id")
fuel_type = EmissionsJsonParser.get_fuel_type(fuel_id)
if not fuel_type:
raise ValueError("BAD FUEL TYPE!")
if fuel_type != self._vehicle.fuel_type:
continue
subsegments = f.get("Subsegment")
for s in subsegments:
subsegment_id = s.get("Id").encode("utf-8")
if self._vehicle.segment != subsegment_id:
continue
log.debug("subsegment_id: {}".format(subsegment_id))
euro_standard = s.get("TEC_NAME")
for es in euro_standard:
es_id = es.get("Id")
if self._vehicle.euro_std != es_id:
continue
log.debug("es_id: {}".format(es_id))
# continue
modes = es.get("Mode")
for m in modes:
m_id = m.get("Id")
if self._vehicle.mode != m_id:
continue
log.debug("mode_id: {}".format(m_id.encode("utf-8")))
                            slopes = m.get("Slope")
                            # Loop variables renamed so they do not shadow the
                            # subsegment loop's `s` above.
                            for sl in slopes:
                                slope_id = sl.get("Id")
                                #if self._vehicle.slope != slope_id:
                                #    continue
                                log.debug("slope_id: {}".format(slope_id.encode("utf-8")))
                                loads = sl.get("Load")
                                for ld in loads:
                                    l_id = ld.get("Id")
                                    if self._vehicle.load > -1:
                                        if self._vehicle.load != float(l_id):
                                            continue
                                    log.debug("load id: {}".format(l_id.encode("utf-8")))
                                    pollutants = ld.get("Pollutant")
for p in pollutants:
p_id = p.get("Id")
if p_id in self._pollutants:
new_obj = {
"category": cat_id,
"subsegment": subsegment_id,
"euro_standard": es_id,
"slope": float(slope_id) if slope_id != "" else 0.0,
}
new_obj.update(p)
if not self._pollutants.get(p_id, None):
self._pollutants[p_id] = []
self._pollutants[p_id].append(new_obj)
# print("Pollutant: {}".format(p.get("Id")))
# print(" new_obj: {}".format(new_obj))
def get_for_pollutant(self, pollutant_id, slope=None):
if pollutant_id not in self._pollutants:
raise ValueError("Pollutant ID not in list of pollutations to search for..")
log.debug("== POLLUTANT_ID = {}".format(pollutant_id))
pollutant = None
if len(self._pollutants[pollutant_id]) > 1:
positive_slopes = [0, 0.02, 0.04, 0.06]
negative_slopes = [-0.06, -0.04, -0.02, 0]
# Multiple items in list, meaning we have
# various slope values
x = [x for x in self._pollutants[pollutant_id] if x['slope'] == slope]
            if x:
log.debug("FOUND MATCH: {}".format(slope))
pollutant = x[0]
log.debug(" pollutant: {}".format(pollutant))
else:
# No match was found. Need to Extrapolate / Interpolate the
# emission value
log.debug("NO MATCH: {}".format(slope))
slopes_for_pollutant = []
if slope > 0.0:
tmp_pollutants = [x for x in self._pollutants[pollutant_id] if x['slope'] in positive_slopes]
slopes_for_pollutant = map(EmissionsJsonParser.calculate, tmp_pollutants)
extrapolate = Extrapolate(positive_slopes, slopes_for_pollutant)
tmp = extrapolate[slope]
log.debug("Extrapolated value: {}".format(tmp))
return tmp
else:
tmp_pollutants = [x for x in self._pollutants[pollutant_id] if x['slope'] in negative_slopes]
slopes_for_pollutant = map(EmissionsJsonParser.calculate, tmp_pollutants)
interpolate = Interpolate(negative_slopes, slopes_for_pollutant)
tmp = interpolate[slope]
log.debug("Interpolated value: {}".format(tmp))
return tmp
else:
pollutant = self._pollutants[pollutant_id][0]
tmp = EmissionsJsonParser.calculate(pollutant)
log.debug("Regular value: {}".format(tmp))
return tmp
@staticmethod
def calculate(pollutant):
"""
this calculation is taken from the EU spreadsheet!
"""
alpha = float(pollutant.get("Alpha"))
beta = float(pollutant.get("Beta"))
delta = float(pollutant.get("Delta"))
epsilon = float(pollutant.get("Epsilon"))
gamma = float(pollutant.get("Gamma"))
hta = float(pollutant.get("Hta"))
reduct_fact = float(pollutant.get("Reduction Factor [%]"))
speed = float(pollutant.get("Speed"))
v_max = float(pollutant.get("Vmax"))
v_min = float(pollutant.get("Vmin"))
zita = float(pollutant.get("Zita"))
""" ((alpha*speed^2) + (beta*speed) + gamma + (delta/speed))/((epsilon*speed^2) * (zita * speed + htz))"""
result = (alpha * math.pow(speed, 2)) + (beta * speed) + gamma + (delta / speed)
result /= (epsilon * math.pow(speed, 2)) + ((zita * speed) + hta)
result *= (1 - reduct_fact)
return result
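# Example usage (a hedged sketch; constructing a vehicle depends on the
# accompanying `vehicles` module and is illustrative only):
#
#   pollutants = {"CO": []}   # keys select which pollutant ids to keep
#   parser = EmissionsJsonParser(vehicle, pollutants)
#   value = parser.get_for_pollutant("CO", slope=0.01)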
|
|
"""Provides SeriesLoader object and helpers, used to read Series data from disk or other filesystems.
"""
from collections import namedtuple
import json
from numpy import array, arange, frombuffer, load, ndarray, unravel_index, vstack
from numpy import dtype as dtypeFunc
from scipy.io import loadmat
from cStringIO import StringIO
import itertools
import struct
import urlparse
import math
from thunder.rdds.fileio.writers import getParallelWriterForPath
from thunder.rdds.keys import Dimensions
from thunder.rdds.fileio.readers import getFileReaderForPath, FileNotFoundError, appendExtensionToPathSpec
from thunder.rdds.imgblocks.blocks import SimpleBlocks
from thunder.rdds.series import Series
from thunder.utils.common import parseMemoryString, smallestFloatType
class SeriesLoader(object):
"""Loader object used to instantiate Series data stored in a variety of formats.
"""
def __init__(self, sparkContext, minPartitions=None):
"""Initialize a new SeriesLoader object.
Parameters
----------
        sparkContext: SparkContext
The pyspark SparkContext object used by the current Thunder environment.
minPartitions: int
minimum number of partitions to use when loading data. (Used by fromText, fromMatLocal, and fromNpyLocal)
"""
from thunder.utils.aws import AWSCredentials
self.sc = sparkContext
self.minPartitions = minPartitions
self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)
def _checkOverwrite(self, outputDirPath):
from thunder.utils.common import raiseErrorIfPathExists
raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self.awsCredentialsOverride)
def fromArrays(self, arrays, npartitions=None):
"""
Create a Series object from a sequence of 1d numpy arrays on the driver.
"""
# recast singleton
if isinstance(arrays, ndarray):
arrays = [arrays]
# check shape and dtype
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# generate linear keys
keys = map(lambda k: (k,), xrange(0, len(arrays)))
return Series(self.sc.parallelize(zip(keys, arrays), npartitions), dtype=str(dtype))
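    # Example (a sketch; assumes an active SparkContext `sc`):
    #   loader = SeriesLoader(sc)
    #   series = loader.fromArrays([array([1.0, 2.0]), array([3.0, 4.0])])
    # yields records ((0,), array([1., 2.])) and ((1,), array([3., 4.])).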
def fromArraysAsImages(self, arrays):
"""Create a Series object from a sequence of numpy ndarrays resident in memory on the driver.
The arrays will be interpreted as though each represents a single time point - effectively the same
as if converting Images to a Series, with each array representing a volume image at a particular
point in time. Thus in the resulting Series, the value of the record with key (0,0,0) will be
        array([arrays[0][0,0,0], arrays[1][0,0,0], ..., arrays[n][0,0,0]]).
The dimensions of the resulting Series will be *opposite* that of the passed numpy array. Their dtype will not
be changed.
"""
# if passed a single array, cast it to a sequence of length 1
if isinstance(arrays, ndarray):
arrays = [arrays]
# check that shapes of passed arrays are consistent
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# get indices so that fastest index changes first
shapeiters = (xrange(n) for n in shape)
keys = [idx[::-1] for idx in itertools.product(*shapeiters)]
values = vstack([ary.ravel() for ary in arrays]).T
dims = Dimensions.fromTuple(shape[::-1])
return Series(self.sc.parallelize(zip(keys, values), self.minPartitions), dims=dims, dtype=str(dtype))
@staticmethod
def __normalizeDatafilePattern(dataPath, ext):
dataPath = appendExtensionToPathSpec(dataPath, ext)
# we do need to prepend a scheme here, b/c otherwise the Hadoop based readers
# will adopt their default behavior and start looking on hdfs://.
parseResult = urlparse.urlparse(dataPath)
if parseResult.scheme:
# this appears to already be a fully-qualified URI
return dataPath
else:
# this looks like a local path spec
# check whether we look like an absolute or a relative path
import os
dirComponent, fileComponent = os.path.split(dataPath)
if not os.path.isabs(dirComponent):
# need to make relative local paths absolute; our file scheme parsing isn't all that it could be.
dirComponent = os.path.abspath(dirComponent)
dataPath = os.path.join(dirComponent, fileComponent)
return "file://" + dataPath
def fromText(self, dataPath, nkeys=None, ext="txt", dtype='float64'):
"""
Loads Series data from text files.
Parameters
----------
dataPath : string
Specifies the file or files to be loaded. dataPath may be either a URI (with scheme specified) or a path
on the local filesystem.
If a path is passed (determined by the absence of a scheme component when attempting to parse as a URI),
and it is not already a wildcard expression and does not end in <ext>, then it will be converted into a
wildcard pattern by appending '/*.ext'. This conversion can be avoided by passing a "file://" URI.
dtype: dtype or dtype specifier, default 'float64'
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
def parse(line, nkeys_):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[nkeys_:], dtype=dtype)
keys = tuple(int(x) for x in vec[:nkeys_])
return keys, ts
lines = self.sc.textFile(dataPath, self.minPartitions)
data = lines.map(lambda x: parse(x, nkeys))
return Series(data, dtype=str(dtype))
# keytype, valuetype here violate camelCasing convention for consistence with JSON conf file format
BinaryLoadParameters = namedtuple('BinaryLoadParameters', 'nkeys nvalues keytype valuetype')
BinaryLoadParameters.__new__.__defaults__ = (None, None, 'int16', 'int16')
def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):
"""Collects parameters to use for binary series loading.
Priority order is as follows:
1. parameters specified as keyword arguments;
2. parameters specified in a conf.json file on the local filesystem;
3. default parameters
Returns
-------
BinaryLoadParameters instance
"""
params = self.loadConf(dataPath, confFilename=confFilename)
# filter dict to include only recognized field names:
for k in params.keys():
if k not in SeriesLoader.BinaryLoadParameters._fields:
del params[k]
keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}
for k, v in keywordParams.items():
if not v:
del keywordParams[k]
params.update(keywordParams)
return SeriesLoader.BinaryLoadParameters(**params)
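    # A conf.json file supplying these parameters might look like this
    # (values illustrative):
    #   {"nkeys": 3, "nvalues": 100, "keytype": "int16", "valuetype": "int16"}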
@staticmethod
def __checkBinaryParametersAreSpecified(paramsObj):
"""Throws ValueError if any of the field values in the passed namedtuple instance evaluate to False.
        Note this is okay only so long as zero is not a valid parameter value.
"""
missing = []
for paramName, paramVal in paramsObj._asdict().iteritems():
if not paramVal:
missing.append(paramName)
if missing:
raise ValueError("Missing parameters to load binary series files - " +
"these must be given either as arguments or in a configuration file: " +
str(tuple(missing)))
def fromBinary(self, dataPath, ext='bin', confFilename='conf.json',
nkeys=None, nvalues=None, keyType=None, valueType=None,
newDtype='smallfloat', casting='safe', maxPartitionSize='32mb'):
"""
Load a Series object from a directory of binary files.
Parameters
----------
dataPath : string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://", or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
newDtype : dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting : 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
maxPartitionSize : str, optional, default = '32mb'
Maximum size of partitions as Java-style memory, will indirectly control the number of partitions
"""
paramsObj = self.__loadParametersAndDefaults(dataPath, confFilename, nkeys, nvalues, keyType, valueType)
self.__checkBinaryParametersAreSpecified(paramsObj)
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
keyDtype = dtypeFunc(paramsObj.keytype)
valDtype = dtypeFunc(paramsObj.valuetype)
keySize = paramsObj.nkeys * keyDtype.itemsize
recordSize = keySize + paramsObj.nvalues * valDtype.itemsize
        if isinstance(maxPartitionSize, basestring):
            size = parseMemoryString(maxPartitionSize)
        else:
            raise ValueError("maxPartitionSize must be a Java-style memory string such as '32mb', got: %r"
                             % (maxPartitionSize,))
hadoopConf = {'recordLength': str(recordSize), 'mapred.max.split.size': str(size)}
lines = self.sc.newAPIHadoopFile(dataPath, 'thunder.util.io.hadoop.FixedLengthBinaryInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.BytesWritable',
conf=hadoopConf)
data = lines.map(lambda (_, v):
(tuple(int(x) for x in frombuffer(buffer(v, 0, keySize), dtype=keyDtype)),
frombuffer(buffer(v, keySize), dtype=valDtype)))
return Series(data, dtype=str(valDtype), index=arange(paramsObj.nvalues)).astype(newDtype, casting)
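    # Example (a sketch; expects .bin files plus a conf.json like the one
    # shown above in the same directory):
    #   series = loader.fromBinary('s3n://bucket/series/')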
def _getSeriesBlocksFromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Create an RDD of <string blocklabel, (int k-tuple indices, array of datatype values)>
Parameters
----------
dataPath: string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://" or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the
requested `newdtype` - see numpy `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
Returns
---------
pair of (RDD, ntimepoints)
RDD: sequence of keys, values pairs
(call using flatMap)
RDD Key: tuple of int
            zero-based indices of position within original image volume
RDD Value: numpy array of datatype
series of values at position across loaded image volumes
ntimepoints: int
number of time points in returned series, determined from number of stack files found at dataPath
newDtype: string
string representation of numpy data type of returned blocks
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
totalDim = reduce(lambda x_, y_: x_*y_, dims)
dtype = dtypeFunc(dtype)
if newDtype is None or newDtype == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
dataSize = totalDim * len(filenames) * dtype.itemsize
nblocks = max(dataSize / blockSize, 1) # integer division
if len(dims) >= 3:
# for 3D stacks, do calculations to ensure that
# different planes appear in distinct files
blocksPerPlane = max(nblocks / dims[-1], 1)
pixPerPlane = reduce(lambda x_, y_: x_*y_, dims[:-1]) # all but last dimension
# get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane
# evenly. This will always be at least one.
kUpdated = [x for x in range(1, blocksPerPlane+1) if not pixPerPlane % x][-1]
nblocks = kUpdated * dims[-1]
blockSizePerStack = (totalDim / nblocks) * dtype.itemsize
else:
# otherwise just round to make contents divide into nearly even blocks
blockSizePerStack = int(math.ceil(totalDim / float(nblocks)))
nblocks = int(math.ceil(totalDim / float(blockSizePerStack)))
blockSizePerStack *= dtype.itemsize
fileSize = totalDim * dtype.itemsize
def readBlock(blockNum):
# copy size out from closure; will modify later:
blockSizePerStack_ = blockSizePerStack
# get start position for this block
position = blockNum * blockSizePerStack_
# adjust if at end of file
if (position + blockSizePerStack_) > fileSize:
blockSizePerStack_ = int(fileSize - position)
# loop over files, loading one block from each
bufs = []
for fname in filenames:
buf = reader.read(fname, startOffset=position, size=blockSizePerStack_)
bufs.append(frombuffer(buf, dtype=dtype))
buf = vstack(bufs).T # dimensions are now linindex x time (images)
del bufs
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
itemPosition = position / dtype.itemsize
itemBlocksize = blockSizePerStack_ / dtype.itemsize
linearIdx = arange(itemPosition, itemPosition + itemBlocksize) # zero-based
keys = zip(*map(tuple, unravel_index(linearIdx, dims, order='F')))
return zip(keys, buf)
# map over blocks
return (self.sc.parallelize(range(0, nblocks), nblocks).flatMap(lambda bn: readBlock(bn)),
len(filenames), newDtype)
@staticmethod
def __readMetadataFromFirstPageOfMultiTif(reader, filePath):
import thunder.rdds.fileio.multitif as multitif
# read first page of first file to get expected image size
tiffFP = reader.open(filePath)
tiffParser = multitif.TiffParser(tiffFP, debug=False)
tiffHeaders = multitif.TiffData()
tiffParser.parseFileHeader(destinationTiff=tiffHeaders)
firstIfd = tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders)
if not firstIfd.isLuminanceImage():
raise ValueError(("File %s does not appear to be a luminance " % filePath) +
"(greyscale or bilevel) TIF image, " +
"which are the only types currently supported")
# keep reading pages until we reach the end of the file, in order to get number of planes:
while tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders):
pass
# get dimensions
npages = len(tiffHeaders.ifds)
height = firstIfd.getImageHeight()
width = firstIfd.getImageWidth()
# get datatype
bitsPerSample = firstIfd.getBitsPerSample()
if not (bitsPerSample in (8, 16, 32, 64)):
raise ValueError("Only 8, 16, 32, or 64 bit per pixel TIF images are supported, got %d" % bitsPerSample)
sampleFormat = firstIfd.getSampleFormat()
if sampleFormat == multitif.SAMPLE_FORMAT_UINT:
dtStr = 'uint'
elif sampleFormat == multitif.SAMPLE_FORMAT_INT:
dtStr = 'int'
elif sampleFormat == multitif.SAMPLE_FORMAT_FLOAT:
dtStr = 'float'
else:
raise ValueError("Unknown TIF SampleFormat tag value %d, should be 1, 2, or 3 for uint, int, or float"
% sampleFormat)
dtype = dtStr+str(bitsPerSample)
return height, width, npages, dtype
def _getSeriesBlocksFromMultiTif(self, dataPath, ext="tif", blockSize="150M",
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None,
recursive=False):
import thunder.rdds.fileio.multitif as multitif
import itertools
from PIL import Image
import io
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
ntimepoints = len(filenames)
doMinimizeReads = dataPath.lower().startswith("s3") or dataPath.lower().startswith("gs")
# check PIL version to see whether it is actually pillow or indeed old PIL and choose
# conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array
# for more explanation.
isPillow = hasattr(Image, "PILLOW_VERSION")
if isPillow:
conversionFcn = array # use numpy's array() function
else:
from thunder.utils.common import pil_to_array
conversionFcn = pil_to_array # use our modified version of matplotlib's pil_to_array
height, width, npages, dtype = SeriesLoader.__readMetadataFromFirstPageOfMultiTif(reader, filenames[0])
if dtype.startswith('int'):
raise ValueError('Signed integer tiff images are not supported in SeriesLoader (shuffle=False);' +
' please try loading as Images (shuffle=True)')
pixelBytesize = dtypeFunc(dtype).itemsize
if newDtype is None or str(newDtype) == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
# initialize at one block per plane
bytesPerPlane = height * width * pixelBytesize * ntimepoints
bytesPerBlock = bytesPerPlane
blocksPerPlane = 1
# keep dividing while cutting our size in half still leaves us bigger than the requested size
# should end up no more than 2x blockSize.
while bytesPerBlock >= blockSize * 2:
bytesPerBlock /= 2
blocksPerPlane *= 2
blocklenPixels = max((height * width) / blocksPerPlane, 1) # integer division
while blocksPerPlane * blocklenPixels < height * width: # make sure we're reading the plane fully
blocksPerPlane += 1
# prevent bringing in self in closure:
awsCredentialsOverride = self.awsCredentialsOverride
# keys will be planeidx, blockidx:
keys = list(itertools.product(xrange(npages), xrange(blocksPerPlane)))
def readBlockFromTiff(planeIdxBlockIdx):
planeIdx, blockIdx = planeIdxBlockIdx
blocks = []
planeShape = None
blockStart = None
blockEnd = None
for fname in filenames:
reader_ = getFileReaderForPath(fname)(awsCredentialsOverride=awsCredentialsOverride)
fp = reader_.open(fname)
try:
if doMinimizeReads:
# use multitif module to generate a fake, in-memory
# one-page tif file. the advantage of this is that it
# cuts way down on the many small reads that PIL/pillow
# will make otherwise, which would be a problem for s3
# or Google Storage
tiffParser_ = multitif.TiffParser(fp, debug=False)
tiffFilebuffer = multitif.packSinglePage(tiffParser_, pageIdx=planeIdx)
byteBuf = io.BytesIO(tiffFilebuffer)
try:
pilImg = Image.open(byteBuf)
ary = conversionFcn(pilImg).T
finally:
byteBuf.close()
del tiffFilebuffer, tiffParser_, pilImg, byteBuf
else:
# read tif using PIL directly
pilImg = Image.open(fp)
pilImg.seek(planeIdx)
ary = conversionFcn(pilImg).T
del pilImg
if not planeShape:
planeShape = ary.shape[:]
blockStart = blockIdx * blocklenPixels
blockEnd = min(blockStart+blocklenPixels, planeShape[0]*planeShape[1])
blocks.append(ary.ravel(order='C')[blockStart:blockEnd])
del ary
finally:
fp.close()
buf = vstack(blocks).T # dimensions are now linindex x time (images)
del blocks
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
linearIdx = arange(blockStart, blockEnd) # zero-based
seriesKeys = zip(*map(tuple, unravel_index(linearIdx, planeShape, order='C')))
# add plane index to end of keys
if npages > 1:
seriesKeys = [tuple(list(keys_)[::-1]+[planeIdx]) for keys_ in seriesKeys]
else:
seriesKeys = [tuple(list(keys_)[::-1]) for keys_ in seriesKeys]
return zip(seriesKeys, buf)
# map over blocks
rdd = self.sc.parallelize(keys, len(keys)).flatMap(readBlockFromTiff)
if npages > 1:
dims = (npages, width, height)
else:
dims = (width, height)
metadata = (dims, ntimepoints, newDtype)
return rdd, metadata
def fromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object directly from binary image stack files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newDtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
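Example
-------
Illustrative sketch only; assumes a SeriesLoader built from a SparkContext, with binary
stack files laid out as described above (path, dims and dtype are placeholders):
    loader = SeriesLoader(sc)
    series = loader.fromStack('/data/stacks/*.stack', dims=(512, 512, 30), dtype='uint16')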
"""
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return Series(seriesBlocks, dims=dims, dtype=newDtype, index=arange(npointsInSeries))
def fromTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe',
startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object from multipage tiff files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newDtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
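Example
-------
Illustrative sketch only (path is a placeholder; assumes `loader` is a SeriesLoader instance):
    series = loader.fromTif('/data/tifs/*.tif', blockSize='100M', newDtype='smallfloat')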
"""
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
return Series(seriesBlocks, dims=Dimensions.fromTuple(dims[::-1]), dtype=dtype,
index=arange(npointsInSeries))
def __saveSeriesRdd(self, seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=False):
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def blockToBinarySeries(kvIter):
label = None
keyPacker = None
buf = StringIO()
for seriesKey, series in kvIter:
if keyPacker is None:
keyPacker = struct.Struct('h'*len(seriesKey))
label = SimpleBlocks.getBinarySeriesNameForKey(seriesKey) + ".bin"
buf.write(keyPacker.pack(*seriesKey))
buf.write(series.tostring())
val = buf.getvalue()
buf.close()
return [(label, val)]
seriesBlocks.mapPartitions(blockToBinarySeries).foreach(writer.writerFcn)
writeSeriesConfig(outputDirPath, len(dims), npointsInSeries, valueType=dtype, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def saveFromStack(self, dataPath, outputDirPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype=None, casting='safe', startIdx=None, stopIdx=None, overwrite=False, recursive=False):
"""Write out data from binary image stack files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputDirPath argument may be either a path
on the local file system or a URI-like format, as in dataPath.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newDtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputDirPath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputDirPath is found to already exist.
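Example
-------
Illustrative sketch only (paths, dims and dtype are placeholders; assumes `loader` is a
SeriesLoader instance):
    loader.saveFromStack('/data/stacks/*.stack', '/data/series-out', dims=(512, 512, 30),
                         dtype='uint16', overwrite=True)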
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, newDtype, overwrite=overwrite)
def saveFromTif(self, dataPath, outputDirPath, ext="tif", blockSize="150M",
newDtype=None, casting='safe', startIdx=None, stopIdx=None,
overwrite=False, recursive=False):
"""Write out data from multipage tif files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputDirPath argument may be either a path
on the local file system or a URI-like format, as in dataPath.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newDtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputDirPath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputDirPath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=overwrite)
def fromMatLocal(self, dataPath, varName, keyFile=None):
"""Loads Series data stored in a Matlab .mat file.
`dataPath` must refer to a path visible to all workers, such as on NFS or a similar mounted shared filesystem.
"""
data = loadmat(dataPath)[varName]
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), loadmat(keyFile)['keys'])
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def fromNpyLocal(self, dataPath, keyFile=None):
"""Loads Series data stored in the numpy save() .npy format.
`dataPath` must refer to a path visible to all workers, such as on NFS or a similar mounted shared filesystem.
"""
data = load(dataPath)
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), load(keyFile))
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def loadConf(self, dataPath, confFilename='conf.json'):
"""Returns a dict loaded from a json file.
Looks for a file named `confFilename` in the same directory as `dataPath`.
Returns {} if the file is not found.
"""
if not confFilename:
return {}
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
try:
jsonBuf = reader.read(dataPath, filename=confFilename)
except FileNotFoundError:
return {}
params = json.loads(jsonBuf)
if 'format' in params:
raise Exception("Numerical format of value should be specified as 'valuetype', not 'format'")
if 'keyformat' in params:
raise Exception("Numerical format of key should be specified as 'keytype', not 'keyformat'")
return params
def writeSeriesConfig(outputDirPath, nkeys, nvalues, keyType='int16', valueType='int16',
confFilename="conf.json", overwrite=True, awsCredentialsOverride=None):
"""
Helper function to write out a conf.json file with required information to load Series binary data.
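For example (values are illustrative), a call with nkeys=3, nvalues=240, valueType='float32'
writes a conf.json along the lines of:
    {
      "input": "<outputDirPath>",
      "nkeys": 3,
      "nvalues": 240,
      "valuetype": "float32",
      "keytype": "int16"
    }
and then touches an empty SUCCESS marker file in the same directory.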
"""
import json
from thunder.rdds.fileio.writers import getFileWriterForPath
filewriterClass = getFileWriterForPath(outputDirPath)
# write configuration file
# config JSON keys are lowercased "valuetype", "keytype", not valueType, keyType
conf = {'input': outputDirPath,
'nkeys': nkeys, 'nvalues': nvalues,
'valuetype': str(valueType), 'keytype': str(keyType)}
confWriter = filewriterClass(outputDirPath, confFilename, overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
confWriter.writeFile(json.dumps(conf, indent=2))
# touch "SUCCESS" file as final action
successWriter = filewriterClass(outputDirPath, "SUCCESS", overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
successWriter.writeFile('')
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like du command for cloud storage providers."""
from __future__ import absolute_import
import sys
from gslib.boto_translation import S3_DELETE_MARKER_GUID
from gslib.bucket_listing_ref import BucketListingObject
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.ls_helper import LsHelper
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.util import MakeHumanReadable
from gslib.util import NO_MAX
from gslib.util import UTF8
_SYNOPSIS = """
gsutil du url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The du command displays the amount of space (in bytes) being used by the
objects in the file or object hierarchy under a given URL. The syntax emulates
the Linux du command (which stands for disk usage). For example, the command:
gsutil du -s gs://your-bucket/dir
will report the total space used by all objects under gs://your-bucket/dir and
any sub-directories.
<B>OPTIONS</B>
-0 Ends each output line with a 0 byte rather than a newline. This
can be useful to make the output more easily machine-readable.
-a Includes non-current object versions / generations in the listing
(only useful with a versioning-enabled bucket). Also prints
generation and metageneration for each listed object.
-c Produce a grand total.
-e A pattern to exclude from reporting. Example: -e "*.o" would
exclude any object that ends in ".o". Can be specified multiple
times.
-h Prints object sizes in human-readable format (e.g., 1 KiB,
234 MiB, 2 GiB, etc.)
-s Display only a summary total for each argument.
-X Similar to -e, but excludes patterns from the given file. The
patterns to exclude should be one per line.
<B>EXAMPLES</B>
To list the size of all objects in a bucket:
gsutil du gs://bucketname
To list the size of all objects underneath a prefix:
gsutil du gs://bucketname/prefix/*
To print the total number of bytes in a bucket, in human-readable form:
gsutil du -ch gs://bucketname
To see a summary of the total bytes in the two given buckets:
gsutil du -s gs://bucket1 gs://bucket2
To list the size of all objects in a versioned bucket, including objects that
are not the latest:
gsutil du -a gs://bucketname
To list all objects in a bucket, except objects that end in ".bak",
with each object printed ending in a null byte:
gsutil du -e "*.bak" -0 gs://bucketname
To get a total of all buckets in a project with a grand total for an entire
project:
gsutil -o GSUtil:default_project_id=project-name du -shc
""")
class DuCommand(Command):
"""Implementation of gsutil du command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'du',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='0ace:hsX:',
file_url_ok=False,
provider_url_ok=True,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='du',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Display object size usage',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _PrintSummaryLine(self, num_bytes, name):
size_string = (MakeHumanReadable(num_bytes)
if self.human_readable else str(num_bytes))
sys.stdout.write('%(size)-10s %(name)s%(ending)s' % {
'size': size_string, 'name': name, 'ending': self.line_ending})
def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):
"""Print listing info for given bucket_listing_ref.
Args:
bucket_listing_ref: BucketListing being listed.
Returns:
Tuple (number of objects, object size)
Raises:
Exception: if a calling bug is encountered.
"""
obj = bucket_listing_ref.root_object
url_str = bucket_listing_ref.url_string
if (obj.metadata and S3_DELETE_MARKER_GUID in
obj.metadata.additionalProperties):
size_string = '0'
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
size_string = (MakeHumanReadable(obj.size)
if self.human_readable else str(obj.size))
num_bytes = obj.size
num_objs = 1
if not self.summary_only:
sys.stdout.write('%(size)-10s %(url)s%(ending)s' % {
'size': size_string,
'url': url_str.encode(UTF8),
'ending': self.line_ending})
return (num_objs, num_bytes)
def RunCommand(self):
"""Command entry point for the du command."""
self.line_ending = '\n'
self.all_versions = False
self.produce_total = False
self.human_readable = False
self.summary_only = False
self.exclude_patterns = []
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-0':
self.line_ending = '\0'
elif o == '-a':
self.all_versions = True
elif o == '-c':
self.produce_total = True
elif o == '-e':
self.exclude_patterns.append(a)
elif o == '-h':
self.human_readable = True
elif o == '-s':
self.summary_only = True
elif o == '-X':
if a == '-':
f = sys.stdin
else:
f = open(a, 'r')
try:
for line in f:
line = line.strip().decode(UTF8)
if line:
self.exclude_patterns.append(line)
finally:
f.close()
if not self.args:
# Default to listing all gs buckets.
self.args = ['gs://']
total_bytes = 0
got_nomatch_errors = False
def _PrintObjectLong(blr):
return self._PrintInfoAboutBucketListingRef(blr)
def _PrintNothing(unused_blr=None):
pass
def _PrintDirectory(num_bytes, blr):
if not self.summary_only:
self._PrintSummaryLine(num_bytes, blr.url_string.encode(UTF8))
for url_arg in self.args:
top_level_storage_url = StorageUrlFromString(url_arg)
if top_level_storage_url.IsFileUrl():
raise CommandException('Only cloud URLs are supported for %s'
% self.command_name)
bucket_listing_fields = ['size']
ls_helper = LsHelper(
self.WildcardIterator, self.logger,
print_object_func=_PrintObjectLong, print_dir_func=_PrintNothing,
print_dir_header_func=_PrintNothing,
print_dir_summary_func=_PrintDirectory,
print_newline_func=_PrintNothing, all_versions=self.all_versions,
should_recurse=True, exclude_patterns=self.exclude_patterns,
fields=bucket_listing_fields)
# ls_helper expands to objects and prefixes, so perform a top-level
# expansion first.
if top_level_storage_url.IsProvider():
# Provider URL: use bucket wildcard to iterate over all buckets.
top_level_iter = self.WildcardIterator(
'%s://*' % top_level_storage_url.scheme).IterBuckets(
bucket_fields=['id'])
elif top_level_storage_url.IsBucket():
top_level_iter = self.WildcardIterator(
'%s://%s' % (top_level_storage_url.scheme,
top_level_storage_url.bucket_name)).IterBuckets(
bucket_fields=['id'])
else:
top_level_iter = [BucketListingObject(top_level_storage_url)]
for blr in top_level_iter:
storage_url = blr.storage_url
if storage_url.IsBucket() and self.summary_only:
storage_url = StorageUrlFromString(
storage_url.CreatePrefixUrl(wildcard_suffix='**'))
_, exp_objs, exp_bytes = ls_helper.ExpandUrlAndPrint(storage_url)
if (storage_url.IsObject() and exp_objs == 0 and
ContainsWildcard(url_arg) and not self.exclude_patterns):
got_nomatch_errors = True
total_bytes += exp_bytes
if self.summary_only:
self._PrintSummaryLine(exp_bytes,
blr.url_string.rstrip('/').encode(UTF8))
if self.produce_total:
self._PrintSummaryLine(total_bytes, 'total')
if got_nomatch_errors:
raise CommandException('One or more URLs matched no objects.')
return 0
|
|
# -*- coding: utf-8 -*-
import base64
import datetime
import os
from unittest import TestSuite, TestLoader
from flask import url_for, current_app
from spkrepo.ext import db
from spkrepo.models import Build, Role, Architecture, Firmware
from spkrepo.tests.common import (BaseTestCase, BuildFactory, create_spk, PackageFactory, UserFactory, IconFactory,
create_info)
def authorization_header(user):
return {'Authorization': b'Basic ' + base64.b64encode(user.api_key.encode('utf-8') + b':')}
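# For illustration: authorization_header(user) yields a header of the form
# {'Authorization': b'Basic <base64 of "<api_key>:">'}, i.e. HTTP Basic auth with the
# user's API key as the username and an empty password.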
class PackagesTestCase(BaseTestCase):
def assertBuildInserted(self, inserted_build, build, publisher):
# build
self.assertEqual(inserted_build.architectures, build.architectures)
self.assertIs(inserted_build.firmware, build.firmware)
self.assertIs(inserted_build.publisher, publisher)
self.assertEqual(inserted_build.extract_size, build.extract_size)
self.assertAlmostEqual(inserted_build.insert_date, datetime.datetime.utcnow().replace(microsecond=0),
delta=datetime.timedelta(seconds=10))
self.assertFalse(inserted_build.active)
# version
self.assertEqual(inserted_build.version.version, build.version.version)
self.assertEqual(inserted_build.version.upstream_version, build.version.upstream_version)
self.assertEqual(inserted_build.version.changelog, build.version.changelog)
self.assertEqual(inserted_build.version.report_url, build.version.report_url)
self.assertEqual(inserted_build.version.distributor, build.version.distributor)
self.assertEqual(inserted_build.version.distributor_url, build.version.distributor_url)
self.assertEqual(inserted_build.version.maintainer, build.version.maintainer)
self.assertEqual(inserted_build.version.maintainer_url, build.version.maintainer_url)
self.assertEqual(inserted_build.version.dependencies, build.version.dependencies)
self.assertEqual(inserted_build.version.conflicts, build.version.conflicts)
self.assertEqual(inserted_build.version.service_dependencies, build.version.service_dependencies)
self.assertDictEqual({l: d.displayname for l, d in inserted_build.version.displaynames.items()},
{l: d.displayname for l, d in build.version.displaynames.items()})
self.assertDictEqual({l: d.description for l, d in inserted_build.version.descriptions.items()},
{l: d.description for l, d in build.version.descriptions.items()})
self.assertEqual(set(inserted_build.version.icons.keys()), set(build.version.icons.keys()))
self.assertEqual(inserted_build.version.install_wizard, build.version.install_wizard)
self.assertEqual(inserted_build.version.upgrade_wizard, build.version.upgrade_wizard)
self.assertEqual(inserted_build.version.startable, build.version.startable)
self.assertEqual(inserted_build.version.license, build.version.license)
# package
self.assertEqual(inserted_build.version.package.name, build.version.package.name)
# filesystem
for icon in inserted_build.version.icons.values():
self.assertTrue(os.path.exists(os.path.join(current_app.config['DATA_PATH'], icon.path)))
self.assertTrue(os.path.exists(os.path.join(current_app.config['DATA_PATH'], inserted_build.path)))
def test_post_anonymous_user(self):
self.assert401(self.client.post(url_for('api.packages')))
def test_post_simple_user(self):
user = UserFactory()
db.session.commit()
self.assert401(self.client.post(url_for('api.packages'), headers=authorization_header(user)))
def test_post_no_data(self):
user = UserFactory(roles=[Role.find('developer')])
db.session.commit()
response = self.client.post(url_for('api.packages'), headers=authorization_header(user))
self.assert400(response)
self.assertIn('No data to process', response.data.decode(response.charset))
def test_post_minimum(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_conflict(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
architectures = [Architecture.find('88f628x'), Architecture.find('cedarview')]
build = BuildFactory.build(architectures=architectures)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
spk.seek(0)
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert409(response)
self.assertIn('Conflicting architectures: 88f628x, cedarview', response.data.decode(response.charset))
def test_post_new_package_not_author_not_maintainer_user(self):
user = UserFactory(roles=[Role.find('developer')])
db.session.commit()
with create_spk(BuildFactory.build()) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert403(response)
self.assertIn('Insufficient permissions to create new packages', response.data.decode(response.charset))
def test_post_existing_package_not_author_not_maintainer_user(self):
user = UserFactory(roles=[Role.find('developer')])
package = PackageFactory()
db.session.commit()
with create_spk(BuildFactory.build(version__package=package)) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert403(response)
self.assertIn('Insufficient permissions on this package', response.data.decode(response.charset))
def test_post_existing_package_maintainer_user(self):
user = UserFactory(roles=[Role.find('developer')])
package = PackageFactory(maintainers=[user])
db.session.commit()
build = BuildFactory.build(version__package=package)
db.session.expire(package)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
def test_post_unknown_architecture(self):
user = UserFactory(roles=[Role.find('developer')])
db.session.commit()
build = BuildFactory.build(architectures=[Architecture(code='newarch')])
with create_spk(build) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Unknown architecture: newarch', response.data.decode(response.charset))
def test_post_invalid_firmware(self):
user = UserFactory(roles=[Role.find('developer')])
db.session.commit()
build = BuildFactory.build(firmware=Firmware(version='1.0', build=42))
with create_spk(build) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Invalid firmware', response.data.decode(response.charset))
def test_post_unknown_firmware(self):
user = UserFactory(roles=[Role.find('developer')])
db.session.commit()
build = BuildFactory.build(firmware=Firmware(version='1.0', build=421))
with create_spk(build) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Unknown firmware', response.data.decode(response.charset))
def test_post_icons_in_info_only(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
with create_spk(build, with_package_icons=False, with_info_icons=True) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_icons_in_both(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
with create_spk(build, with_package_icons=True, with_info_icons=True) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_no_license(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__license=None)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_install_wizard(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__install_wizard=True)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_upgrade_wizard(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__upgrade_wizard=True)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_120_icon(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__icons={'120': IconFactory.build(size='120')})
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_startable(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__startable=True)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_not_startable(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build(version__startable=False)
with create_spk(build) as spk:
self.assert201(self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read()))
self.assertBuildInserted(Build.query.one(), build, user)
def test_post_wrong_displayname_language(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
info = create_info(build)
info['displayname_zzz'] = 'displayname_zzz'
with create_spk(build, info=info) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Unknown INFO displayname language', response.data.decode(response.charset))
def test_post_wrong_description_language(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
info = create_info(build)
info['description_zzz'] = 'description_zzz'
with create_spk(build, info=info) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Unknown INFO description language', response.data.decode(response.charset))
def test_post_wrong_version(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
info = create_info(build)
info['version'] = '1.2.3~4'
with create_spk(build, info=info) as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Invalid version', response.data.decode(response.charset))
def test_post_signed(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
with create_spk(build, signature='signature') as spk:
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Package contains a signature', response.data.decode(response.charset))
def test_post_invalid_spk(self):
user = UserFactory(roles=[Role.find('developer'), Role.find('package_admin')])
db.session.commit()
build = BuildFactory.build()
with create_spk(build) as spk:
spk.seek(100)
response = self.client.post(url_for('api.packages'), headers=authorization_header(user),
data=spk.read())
self.assert422(response)
self.assertIn('Invalid SPK', response.data.decode(response.charset))
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(PackagesTestCase))
return suite
|
|
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {0} for {addon} deleted.')
editor_format = _(u'{user} deleted {0} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class PAYPAL_FAILED(_LOG):
id = 51
format = _(u'{addon} failed checks with PayPal.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation.
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False)
or l.id in LOG_ADMINS)]
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from access.models import Group
from addons.models import Addon
from amo import get_user, logger_log
from devhub.models import (ActivityLog, AddonLog, CommentLog, GroupLog,
UserLog, VersionLog)
from users.models import UserProfile
from versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Addon:
AddonLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
elif isinstance(arg, Addon):
AddonLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
import re
import time
import xml.etree.ElementTree as ET
import ciscoconfparse
from ncclient import manager
from oslo.config import cfg
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.cisco.cfg_agent import cfg_exceptions as cfg_exc
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
cisco_csr1kv_snippets as snippets)
from neutron.plugins.cisco.cfg_agent.device_drivers import devicedriver_api
LOG = logging.getLogger(__name__)
# N1kv constants
T1_PORT_NAME_PREFIX = 't1_p:' # T1 port/network is for VXLAN
T2_PORT_NAME_PREFIX = 't2_p:' # T2 port/network is for VLAN
class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
"""CSR1kv Routing Driver.
This driver encapsulates the configuration logic via NETCONF protocol to
configure a CSR1kv Virtual Router (IOS-XE based) for implementing
Neutron L3 services. These services include routing, NAT and floating
IPs (as per Neutron terminology).
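Illustrative device_params expected by the constructor (values are placeholders):
    {'management_ip_address': '10.0.100.5',
     'protocol_port': 22,
     'credentials': {'username': 'admin', 'password': 'secret'}}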
"""
DEV_NAME_LEN = 14
def __init__(self, **device_params):
try:
self._csr_host = device_params['management_ip_address']
self._csr_ssh_port = device_params['protocol_port']
credentials = device_params['credentials']
if credentials:
self._csr_user = credentials['username']
self._csr_password = credentials['password']
self._timeout = cfg.CONF.cfg_agent.device_connection_timeout
self._csr_conn = None
self._intfs_enabled = False
except KeyError as e:
LOG.error(_LE("Missing device parameter:%s. Aborting "
"CSR1kvRoutingDriver initialization"), e)
raise cfg_exc.CSR1kvInitializationException()
###### Public Functions ########
def router_added(self, ri):
self._csr_create_vrf(ri)
def router_removed(self, ri):
self._csr_remove_vrf(ri)
def internal_network_added(self, ri, port):
self._csr_create_subinterface(ri, port)
if port.get('ha_info') is not None and ri.ha_info['ha:enabled']:
self._csr_add_ha(ri, port)
def internal_network_removed(self, ri, port):
self._csr_remove_subinterface(port)
def external_gateway_added(self, ri, ex_gw_port):
self._csr_create_subinterface(ri, ex_gw_port)
ex_gw_ip = ex_gw_port['subnet']['gateway_ip']
if ex_gw_ip:
#Set default route via this network's gateway ip
self._csr_add_default_route(ri, ex_gw_ip)
def external_gateway_removed(self, ri, ex_gw_port):
ex_gw_ip = ex_gw_port['subnet']['gateway_ip']
if ex_gw_ip:
#Remove default route via this network's gateway ip
self._csr_remove_default_route(ri, ex_gw_ip)
#Finally, remove external network subinterface
self._csr_remove_subinterface(ex_gw_port)
def enable_internal_network_NAT(self, ri, port, ex_gw_port):
self._csr_add_internalnw_nat_rules(ri, port, ex_gw_port)
def disable_internal_network_NAT(self, ri, port, ex_gw_port):
self._csr_remove_internalnw_nat_rules(ri, [port], ex_gw_port)
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
self._csr_add_floating_ip(ri, floating_ip, fixed_ip)
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
self._csr_remove_floating_ip(ri, ex_gw_port, floating_ip, fixed_ip)
def routes_updated(self, ri, action, route):
self._csr_update_routing_table(ri, action, route)
def clear_connection(self):
self._csr_conn = None
##### Internal Functions ####
def _csr_create_subinterface(self, ri, port):
vrf_name = self._csr_get_vrf_name(ri)
ip_cidr = port['ip_cidr']
netmask = netaddr.IPNetwork(ip_cidr).netmask
gateway_ip = ip_cidr.split('/')[0]
subinterface = self._get_interface_name_from_hosting_port(port)
vlan = self._get_interface_vlan_from_hosting_port(port)
self._create_subinterface(subinterface, vlan, vrf_name,
gateway_ip, netmask)
def _csr_remove_subinterface(self, port):
subinterface = self._get_interface_name_from_hosting_port(port)
self._remove_subinterface(subinterface)
def _csr_add_ha(self, ri, port):
func_dict = {
'HSRP': CSR1kvRoutingDriver._csr_add_ha_HSRP,
'VRRP': CSR1kvRoutingDriver._csr_add_ha_VRRP,
'GBLP': CSR1kvRoutingDriver._csr_add_ha_GBLP
}
        # Invoke the right function for the HA type
func_dict[ri.ha_info['ha:type']](self, ri, port)
def _csr_add_ha_HSRP(self, ri, port):
priority = ri.ha_info['priority']
port_ha_info = port['ha_info']
group = port_ha_info['group']
ip = port_ha_info['virtual_port']['fixed_ips'][0]['ip_address']
if ip and group and priority:
vrf_name = self._csr_get_vrf_name(ri)
subinterface = self._get_interface_name_from_hosting_port(port)
self._set_ha_HSRP(subinterface, vrf_name, priority, group, ip)
def _csr_add_ha_VRRP(self, ri, port):
raise NotImplementedError()
def _csr_add_ha_GBLP(self, ri, port):
raise NotImplementedError()
def _csr_remove_ha(self, ri, port):
pass
def _csr_add_internalnw_nat_rules(self, ri, port, ex_port):
vrf_name = self._csr_get_vrf_name(ri)
in_vlan = self._get_interface_vlan_from_hosting_port(port)
acl_no = 'acl_' + str(in_vlan)
internal_cidr = port['ip_cidr']
internal_net = netaddr.IPNetwork(internal_cidr).network
netmask = netaddr.IPNetwork(internal_cidr).hostmask
inner_intfc = self._get_interface_name_from_hosting_port(port)
outer_intfc = self._get_interface_name_from_hosting_port(ex_port)
self._nat_rules_for_internet_access(acl_no, internal_net,
netmask, inner_intfc,
outer_intfc, vrf_name)
def _csr_remove_internalnw_nat_rules(self, ri, ports, ex_port):
acls = []
        # First disable NAT on all inner ports
for port in ports:
in_intfc_name = self._get_interface_name_from_hosting_port(port)
inner_vlan = self._get_interface_vlan_from_hosting_port(port)
acls.append("acl_" + str(inner_vlan))
self._remove_interface_nat(in_intfc_name, 'inside')
        # Wait for two seconds
LOG.debug("Sleep for 2 seconds before clearing NAT rules")
time.sleep(2)
#Clear the NAT translation table
self._remove_dyn_nat_translations()
# Remove dynamic NAT rules and ACLs
vrf_name = self._csr_get_vrf_name(ri)
ext_intfc_name = self._get_interface_name_from_hosting_port(ex_port)
for acl in acls:
self._remove_dyn_nat_rule(acl, ext_intfc_name, vrf_name)
def _csr_add_default_route(self, ri, gw_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._add_default_static_route(gw_ip, vrf_name)
def _csr_remove_default_route(self, ri, gw_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._remove_default_static_route(gw_ip, vrf_name)
def _csr_add_floating_ip(self, ri, floating_ip, fixed_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._add_floating_ip(floating_ip, fixed_ip, vrf_name)
def _csr_remove_floating_ip(self, ri, ex_gw_port, floating_ip, fixed_ip):
vrf_name = self._csr_get_vrf_name(ri)
out_intfc_name = self._get_interface_name_from_hosting_port(ex_gw_port)
# First remove NAT from outer interface
self._remove_interface_nat(out_intfc_name, 'outside')
        # Clear the NAT translation table
        self._remove_dyn_nat_translations()
        # Remove the floating IP
        self._remove_floating_ip(floating_ip, fixed_ip, vrf_name)
        # Enable NAT on outer interface
self._add_interface_nat(out_intfc_name, 'outside')
def _csr_update_routing_table(self, ri, action, route):
vrf_name = self._csr_get_vrf_name(ri)
destination_net = netaddr.IPNetwork(route['destination'])
dest = destination_net.network
dest_mask = destination_net.netmask
next_hop = route['nexthop']
        if action == 'replace':
            self._add_static_route(dest, dest_mask, next_hop, vrf_name)
        elif action == 'delete':
self._remove_static_route(dest, dest_mask, next_hop, vrf_name)
else:
LOG.error(_LE('Unknown route command %s'), action)
def _csr_create_vrf(self, ri):
vrf_name = self._csr_get_vrf_name(ri)
self._create_vrf(vrf_name)
def _csr_remove_vrf(self, ri):
vrf_name = self._csr_get_vrf_name(ri)
self._remove_vrf(vrf_name)
def _csr_get_vrf_name(self, ri):
return ri.router_name()[:self.DEV_NAME_LEN]
def _get_connection(self):
"""Make SSH connection to the CSR.
The external ncclient library is used for creating this connection.
        This method keeps state of any existing connections and reuses them if
        already connected. Also, a CSR1kv's interfaces (except management) are
        disabled by default when it is booted, so when connecting for the
        first time the driver will enable all other interfaces and record that
        status in the `_intfs_enabled` flag.
"""
try:
if self._csr_conn and self._csr_conn.connected:
return self._csr_conn
else:
self._csr_conn = manager.connect(host=self._csr_host,
port=self._csr_ssh_port,
username=self._csr_user,
password=self._csr_password,
device_params={'name': "csr"},
timeout=self._timeout)
if not self._intfs_enabled:
self._intfs_enabled = self._enable_intfs(self._csr_conn)
return self._csr_conn
except Exception as e:
conn_params = {'host': self._csr_host, 'port': self._csr_ssh_port,
'user': self._csr_user,
                           'timeout': self._timeout, 'reason': str(e)}
raise cfg_exc.CSR1kvConnectionException(**conn_params)
def _get_interface_name_from_hosting_port(self, port):
vlan = self._get_interface_vlan_from_hosting_port(port)
int_no = self._get_interface_no_from_hosting_port(port)
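        # e.g. interface number '2' and VLAN 100 (illustrative values) yield
        # the subinterface name 'GigabitEthernet2.100'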
intfc_name = 'GigabitEthernet%s.%s' % (int_no, vlan)
return intfc_name
@staticmethod
def _get_interface_vlan_from_hosting_port(port):
return port['hosting_info']['segmentation_id']
@staticmethod
def _get_interface_no_from_hosting_port(port):
"""Calculate interface number from the hosting port's name.
        Interfaces in the CSR1kv are created in pairs (T1 and T2), where the
        T1 interface carries VXLAN traffic and the T2 interface carries VLAN
        traffic (matching the T1/T2 port constants above). On the neutron
        side these are named T1 and T2 ports and follow the naming convention
        <Tx_PORT_NAME_PREFIX><PAIR_INDEX>, where the `Tx_PORT_NAME_PREFIX`
        indicates either VXLAN (t1) or VLAN (t2) and `PAIR_INDEX` is the pair
        number. `PAIR_INDEX` starts at 1.
In CSR1kv, GigabitEthernet 0 is not present and GigabitEthernet 1
is used as a management interface (Note: this might change in
future). So the first (T1,T2) pair corresponds to
(GigabitEthernet 2, GigabitEthernet 3) and so forth. This function
extracts the `PAIR_INDEX` and calculates the corresponding interface
number.
:param port: neutron port corresponding to the interface.
:return: number of the interface (eg: 1 in case of GigabitEthernet1)
"""
_name = port['hosting_info']['hosting_port_name']
if_type = _name.split(':')[0] + ':'
if if_type == T1_PORT_NAME_PREFIX:
return str(int(_name.split(':')[1]) * 2)
elif if_type == T2_PORT_NAME_PREFIX:
return str(int(_name.split(':')[1]) * 2 + 1)
else:
params = {'attribute': 'hosting_port_name', 'value': _name}
raise cfg_exc.CSR1kvUnknownValueException(**params)
def _get_interfaces(self):
"""Get a list of interfaces on this hosting device.
:return: List of the interfaces
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
intfs_raw = parse.find_lines("^interface GigabitEthernet")
intfs = [raw_if.strip().split(' ')[1] for raw_if in intfs_raw]
LOG.info(_LI("Interfaces:%s"), intfs)
return intfs
def _get_interface_ip(self, interface_name):
"""Get the ip address for an interface.
:param interface_name: interface_name as a string
:return: ip address of interface as a string
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
children = parse.find_children("^interface %s" % interface_name)
for line in children:
if 'ip address' in line:
ip_address = line.strip().split(' ')[2]
LOG.info(_LI("IP Address:%s"), ip_address)
return ip_address
LOG.warning(_LW("Cannot find interface: %s"), interface_name)
return None
def _interface_exists(self, interface):
"""Check whether interface exists."""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
intfs_raw = parse.find_lines("^interface " + interface)
return len(intfs_raw) > 0
def _enable_intfs(self, conn):
"""Enable the interfaces of a CSR1kv Virtual Router.
When the virtual router first boots up, all interfaces except
management are down. This method will enable all data interfaces.
Note: In CSR1kv, GigabitEthernet 0 is not present. GigabitEthernet 1
is used as management and GigabitEthernet 2 and up are used for data.
This might change in future releases.
        Currently only the second and third Gig interfaces, corresponding to
        a single (T1,T2) pair and configured as trunks for VLAN and VXLAN,
        are enabled.
:param conn: Connection object
:return: True or False
"""
        # TODO(Hareesh): Interfaces are hard-coded for now. Make it dynamic.
interfaces = ['GigabitEthernet 2', 'GigabitEthernet 3']
try:
for i in interfaces:
confstr = snippets.ENABLE_INTF % i
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'ENABLE_INTF'):
LOG.info(_LI("Enabled interface %s "), i)
time.sleep(1)
except Exception:
return False
return True
def _get_vrfs(self):
"""Get the current VRFs configured in the device.
        :return: A list of vrf names as strings
"""
vrfs = []
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
vrfs_raw = parse.find_lines("^ip vrf")
for line in vrfs_raw:
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
LOG.info(_LI("VRFs:%s"), vrfs)
return vrfs
def _get_capabilities(self):
"""Get the servers NETCONF capabilities.
:return: List of server capabilities.
"""
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities
def _get_running_config(self):
"""Get the CSR's current running config.
:return: Current IOS running config as multiline string
"""
conn = self._get_connection()
config = conn.get_config(source="running")
if config:
root = ET.fromstring(config._raw)
running_config = root[0][0]
            rgx = re.compile(r"\r*\n+")
ioscfg = rgx.split(running_config.text)
return ioscfg
def _check_acl(self, acl_no, network, netmask):
"""Check a ACL config exists in the running config.
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return:
"""
exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
' permit ' + str(network) + ' ' + str(netmask)]
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
acls_raw = parse.find_children(exp_cfg_lines[0])
if acls_raw:
if exp_cfg_lines[1] in acls_raw:
return True
LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
return False
LOG.debug("%s is not present in config", acl_no)
return False
def _cfg_exists(self, cfg_str):
"""Check a partial config string exists in the running config.
:param cfg_str: config string to check
        :return: True or False
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
cfg_raw = parse.find_lines("^" + cfg_str)
LOG.debug("_cfg_exists(): Found lines %s", cfg_raw)
return len(cfg_raw) > 0
def _set_interface(self, name, ip_address, mask):
conn = self._get_connection()
confstr = snippets.SET_INTC % (name, ip_address, mask)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_INTC')
def _create_vrf(self, vrf_name):
try:
conn = self._get_connection()
confstr = snippets.CREATE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'CREATE_VRF'):
LOG.info(_LI("VRF %s successfully created"), vrf_name)
except Exception:
LOG.exception(_LE("Failed creating VRF %s"), vrf_name)
def _remove_vrf(self, vrf_name):
if vrf_name in self._get_vrfs():
conn = self._get_connection()
confstr = snippets.REMOVE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'REMOVE_VRF'):
LOG.info(_LI("VRF %s removed"), vrf_name)
else:
LOG.warning(_LW("VRF %s not present"), vrf_name)
def _create_subinterface(self, subinterface, vlan_id, vrf_name, ip, mask):
if vrf_name not in self._get_vrfs():
LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.CREATE_SUBINTERFACE % (subinterface, vlan_id,
vrf_name, ip, mask)
self._edit_running_config(confstr, 'CREATE_SUBINTERFACE')
def _remove_subinterface(self, subinterface):
        # Optional: verify this is the correct subinterface
if self._interface_exists(subinterface):
confstr = snippets.REMOVE_SUBINTERFACE % subinterface
self._edit_running_config(confstr, 'REMOVE_SUBINTERFACE')
def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
if vrf_name not in self._get_vrfs():
LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
priority, group, ip)
action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
self._edit_running_config(confstr, action)
def _remove_ha_HSRP(self, subinterface, group):
confstr = snippets.REMOVE_INTC_HSRP % (subinterface, group)
action = ("REMOVE_INTC_HSRP (subinterface:%s, Group:%s)"
% (subinterface, group))
self._edit_running_config(confstr, action)
def _get_interface_cfg(self, interface):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
return parse.find_children('interface ' + interface)
def _nat_rules_for_internet_access(self, acl_no, network,
netmask,
inner_intfc,
outer_intfc,
vrf_name):
"""Configure the NAT rules for an internal network.
        Configuring NAT rules in the CSR1kv is a three-step process. First
create an ACL for the IP range of the internal network. Then enable
dynamic source NATing on the external interface of the CSR for this
ACL and VRF of the neutron router. Finally enable NAT on the
interfaces of the CSR where the internal and external networks are
connected.
:param acl_no: ACL number of the internal network.
:param network: internal network
:param netmask: netmask of the internal network.
:param inner_intfc: (name of) interface connected to the internal
network
:param outer_intfc: (name of) interface connected to the external
network
:param vrf_name: VRF corresponding to this virtual router
:return: True if configuration succeeded
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.
CSR1kvConfigException
"""
conn = self._get_connection()
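        # Step 1: create an ACL for the IP range of the internal network.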
        # Duplicate ACL creation throws an error, so check for the ACL
        # first. Remove this check in the future, as duplicates are not
        # common in production.
acl_present = self._check_acl(acl_no, network, netmask)
if not acl_present:
confstr = snippets.CREATE_ACL % (acl_no, network, netmask)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CREATE_ACL')
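        # Step 2: enable dynamic source NAT on the external interface for
        # this ACL and the router's VRF.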
confstr = snippets.SET_DYN_SRC_TRL_INTFC % (acl_no, outer_intfc,
vrf_name)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CREATE_SNAT')
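        # Step 3: enable NAT on the interfaces connected to the internal
        # and external networks.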
confstr = snippets.SET_NAT % (inner_intfc, 'inside')
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT')
confstr = snippets.SET_NAT % (outer_intfc, 'outside')
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT')
def _add_interface_nat(self, intfc_name, intfc_type):
conn = self._get_connection()
confstr = snippets.SET_NAT % (intfc_name, intfc_type)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT ' + intfc_type)
def _remove_interface_nat(self, intfc_name, intfc_type):
conn = self._get_connection()
confstr = snippets.REMOVE_NAT % (intfc_name, intfc_type)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_NAT ' + intfc_type)
def _remove_dyn_nat_rule(self, acl_no, outer_intfc_name, vrf_name):
conn = self._get_connection()
confstr = snippets.SNAT_CFG % (acl_no, outer_intfc_name, vrf_name)
if self._cfg_exists(confstr):
confstr = snippets.REMOVE_DYN_SRC_TRL_INTFC % (acl_no,
outer_intfc_name,
vrf_name)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_DYN_SRC_TRL_INTFC')
confstr = snippets.REMOVE_ACL % acl_no
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_ACL')
def _remove_dyn_nat_translations(self):
conn = self._get_connection()
confstr = snippets.CLEAR_DYN_NAT_TRANS
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CLEAR_DYN_NAT_TRANS')
def _add_floating_ip(self, floating_ip, fixed_ip, vrf):
conn = self._get_connection()
confstr = snippets.SET_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_STATIC_SRC_TRL')
def _remove_floating_ip(self, floating_ip, fixed_ip, vrf):
conn = self._get_connection()
confstr = snippets.REMOVE_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_STATIC_SRC_TRL')
def _get_floating_ip_cfg(self):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
res = parse.find_lines('ip nat inside source static')
return res
def _add_static_route(self, dest, dest_mask, next_hop, vrf):
conn = self._get_connection()
confstr = snippets.SET_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_IP_ROUTE')
def _remove_static_route(self, dest, dest_mask, next_hop, vrf):
conn = self._get_connection()
confstr = snippets.REMOVE_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_IP_ROUTE')
def _get_static_route_cfg(self):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
return parse.find_lines('ip route')
def _add_default_static_route(self, gw_ip, vrf):
conn = self._get_connection()
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
if not self._cfg_exists(confstr):
confstr = snippets.SET_DEFAULT_ROUTE % (vrf, gw_ip)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_DEFAULT_ROUTE')
def _remove_default_static_route(self, gw_ip, vrf):
conn = self._get_connection()
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
if self._cfg_exists(confstr):
confstr = snippets.REMOVE_DEFAULT_ROUTE % (vrf, gw_ip)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_DEFAULT_ROUTE')
def _edit_running_config(self, confstr, snippet):
conn = self._get_connection()
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, snippet)
@staticmethod
def _check_response(rpc_obj, snippet_name):
"""This function checks the rpc response object for status.
This function takes as input the response rpc_obj and the snippet name
        that was executed. It parses the reply to see whether the last edit
        operation was a success or not.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<ok />
</rpc-reply>
In case of error, CSR1kv sends a response as follows.
We take the error type and tag.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<rpc-error>
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
</rpc-error>
</rpc-reply>
:return: True if the config operation completed successfully
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.
CSR1kvConfigException
"""
LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
{'snippet_name': snippet_name, 'rpc_obj': rpc_obj.xml})
xml_str = rpc_obj.xml
if "<ok />" in xml_str:
LOG.debug("RPCReply for %s is OK", snippet_name)
LOG.info(_LI("%s successfully executed"), snippet_name)
return True
# Not Ok, we throw a ConfigurationException
e_type = rpc_obj._root[0][0].text
e_tag = rpc_obj._root[0][1].text
params = {'snippet': snippet_name, 'type': e_type, 'tag': e_tag}
raise cfg_exc.CSR1kvConfigException(**params)
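# A minimal usage sketch (values are illustrative; `ri` and `port` stand for
# the RouterInfo and port dicts that the cfg agent passes in):
#
#   driver = CSR1kvRoutingDriver(
#       management_ip_address='10.0.0.5', protocol_port=22,
#       credentials={'username': 'admin', 'password': 'secret'})
#   driver.router_added(ri)                  # creates the VRF
#   driver.internal_network_added(ri, port)  # plumbs the subinterface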
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CredentialList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the CredentialList
:param Version version: Version that contains the resource
:returns: twilio.rest.chat.v2.credential.CredentialList
:rtype: twilio.rest.chat.v2.credential.CredentialList
"""
super(CredentialList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Credentials'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams CredentialInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.credential.CredentialInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists CredentialInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List containing up to limit results
:rtype: list[twilio.rest.chat.v2.credential.CredentialInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CredentialInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CredentialPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CredentialInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CredentialPage(self._version, response, self._solution)
def create(self, type, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Create a new CredentialInstance
:param CredentialInstance.PushService type: The type of push-notification service the credential is for
:param unicode friendly_name: A string to describe the resource
:param unicode certificate: [APN only] The URL encoded representation of the certificate
:param unicode private_key: [APN only] The URL encoded representation of the private key
:param bool sandbox: [APN only] Whether to send the credential to sandbox APNs
:param unicode api_key: [GCM only] The API key for the project that was obtained from the Google Developer console for your GCM Service application credential
:param unicode secret: [FCM only] The Server key of your project from Firebase console
:returns: Newly created CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
data = values.of({
'Type': type,
'FriendlyName': friendly_name,
'Certificate': certificate,
'PrivateKey': private_key,
'Sandbox': sandbox,
'ApiKey': api_key,
'Secret': secret,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CredentialInstance(self._version, payload, )
def get(self, sid):
"""
Constructs a CredentialContext
:param sid: The SID of the Credential resource to fetch
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
return CredentialContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a CredentialContext
:param sid: The SID of the Credential resource to fetch
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
return CredentialContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.CredentialList>'
class CredentialPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the CredentialPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.chat.v2.credential.CredentialPage
:rtype: twilio.rest.chat.v2.credential.CredentialPage
"""
super(CredentialPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CredentialInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.credential.CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return CredentialInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.CredentialPage>'
class CredentialContext(InstanceContext):
""" """
def __init__(self, version, sid):
"""
Initialize the CredentialContext
:param Version version: Version that contains the resource
:param sid: The SID of the Credential resource to fetch
:returns: twilio.rest.chat.v2.credential.CredentialContext
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
super(CredentialContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Credentials/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a CredentialInstance
:returns: Fetched CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CredentialInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Update the CredentialInstance
:param unicode friendly_name: A string to describe the resource
:param unicode certificate: [APN only] The URL encoded representation of the certificate
:param unicode private_key: [APN only] The URL encoded representation of the private key
:param bool sandbox: [APN only] Whether to send the credential to sandbox APNs
:param unicode api_key: [GCM only] The API key for the project that was obtained from the Google Developer console for your GCM Service application credential
:param unicode secret: [FCM only] The Server key of your project from Firebase console
:returns: Updated CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'Certificate': certificate,
'PrivateKey': private_key,
'Sandbox': sandbox,
'ApiKey': api_key,
'Secret': secret,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return CredentialInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the CredentialInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.CredentialContext {}>'.format(context)
class CredentialInstance(InstanceResource):
""" """
class PushService(object):
GCM = "gcm"
APN = "apn"
FCM = "fcm"
def __init__(self, version, payload, sid=None):
"""
Initialize the CredentialInstance
:returns: twilio.rest.chat.v2.credential.CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
super(CredentialInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'friendly_name': payload.get('friendly_name'),
'type': payload.get('type'),
'sandbox': payload.get('sandbox'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CredentialContext for this CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialContext
"""
if self._context is None:
self._context = CredentialContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def type(self):
"""
:returns: The type of push-notification service the credential is for
:rtype: CredentialInstance.PushService
"""
return self._properties['type']
@property
def sandbox(self):
"""
:returns: [APN only] Whether to send the credential to sandbox APNs
:rtype: unicode
"""
return self._properties['sandbox']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The absolute URL of the Credential resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a CredentialInstance
:returns: Fetched CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return self._proxy.fetch()
def update(self, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Update the CredentialInstance
:param unicode friendly_name: A string to describe the resource
:param unicode certificate: [APN only] The URL encoded representation of the certificate
:param unicode private_key: [APN only] The URL encoded representation of the private key
:param bool sandbox: [APN only] Whether to send the credential to sandbox APNs
:param unicode api_key: [GCM only] The API key for the project that was obtained from the Google Developer console for your GCM Service application credential
:param unicode secret: [FCM only] The Server key of your project from Firebase console
:returns: Updated CredentialInstance
:rtype: twilio.rest.chat.v2.credential.CredentialInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
certificate=certificate,
private_key=private_key,
sandbox=sandbox,
api_key=api_key,
secret=secret,
)
def delete(self):
"""
Deletes the CredentialInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.CredentialInstance {}>'.format(context)
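# A minimal usage sketch (assuming an initialized twilio Client; the account
# SID, auth token and credential values below are illustrative):
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')
#   credential = client.chat.v2.credentials.create(
#       type='fcm', friendly_name='push-creds', secret='fcm-server-key')
#   print(credential.sid)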
|
|
import datetime
from sqlalchemy import desc
from SpiderKeeper.app import db, Base
class Project(Base):
__tablename__ = 'sk_project'
project_name = db.Column(db.String(50))
@classmethod
def load_project(cls, project_list):
for project in project_list:
existed_project = cls.query.filter_by(project_name=project.project_name).first()
if not existed_project:
db.session.add(project)
db.session.commit()
@classmethod
def find_project_by_id(cls, project_id):
return Project.query.filter_by(id=project_id).first()
def to_dict(self):
return {
"project_id": self.id,
"project_name": self.project_name
}
class SpiderInstance(Base):
__tablename__ = 'sk_spider'
spider_name = db.Column(db.String(100))
project_id = db.Column(db.INTEGER, nullable=False, index=True)
@classmethod
def update_spider_instances(cls, project_id, spider_instance_list):
for spider_instance in spider_instance_list:
existed_spider_instance = cls.query.filter_by(project_id=project_id,
spider_name=spider_instance.spider_name).first()
if not existed_spider_instance:
db.session.add(spider_instance)
db.session.commit()
for spider in cls.query.filter_by(project_id=project_id).all():
existed_spider = any(
spider.spider_name == s.spider_name
for s in spider_instance_list
)
if not existed_spider:
db.session.delete(spider)
db.session.commit()
@classmethod
def list_spider_by_project_id(cls, project_id):
return cls.query.filter_by(project_id=project_id).all()
def to_dict(self):
return dict(spider_instance_id=self.id,
spider_name=self.spider_name,
project_id=self.project_id)
@classmethod
def list_spiders(cls, project_id):
sql_last_runtime = '''
select * from (select a.spider_name,b.date_created from sk_job_instance as a
left join sk_job_execution as b
on a.id = b.job_instance_id
order by b.date_created desc) as c
group by c.spider_name
'''
sql_avg_runtime = '''
select a.spider_name,avg(end_time-start_time) from sk_job_instance as a
left join sk_job_execution as b
on a.id = b.job_instance_id
where b.end_time is not null
group by a.spider_name
'''
last_runtime_list = dict(
(spider_name, last_run_time) for spider_name, last_run_time in db.engine.execute(sql_last_runtime))
avg_runtime_list = dict(
(spider_name, avg_run_time) for spider_name, avg_run_time in db.engine.execute(sql_avg_runtime))
res = []
for spider in cls.query.filter_by(project_id=project_id).all():
last_runtime = last_runtime_list.get(spider.spider_name)
res.append(dict(spider.to_dict(),
**{'spider_last_runtime': last_runtime if last_runtime else '-',
'spider_avg_runtime': avg_runtime_list.get(spider.spider_name)
}))
return res
class JobPriority():
LOW, NORMAL, HIGH, HIGHEST = range(-1, 3)
class JobRunType():
ONETIME = 'onetime'
PERIODIC = 'periodic'
class JobInstance(Base):
__tablename__ = 'sk_job_instance'
spider_name = db.Column(db.String(100), nullable=False, index=True)
project_id = db.Column(db.INTEGER, nullable=False, index=True)
    tags = db.Column(db.Text)  # job tags (comma-separated)
    spider_arguments = db.Column(db.Text)  # job execution arguments (comma-separated, e.g. arg1=foo,arg2=bar)
priority = db.Column(db.INTEGER)
desc = db.Column(db.Text)
cron_minutes = db.Column(db.String(20), default="0")
cron_hour = db.Column(db.String(20), default="*")
cron_day_of_month = db.Column(db.String(20), default="*")
cron_day_of_week = db.Column(db.String(20), default="*")
cron_month = db.Column(db.String(20), default="*")
    enabled = db.Column(db.INTEGER, default=0)  # 0 = enabled, -1 = disabled
run_type = db.Column(db.String(20)) # periodic/onetime
def to_dict(self):
return dict(
job_instance_id=self.id,
spider_name=self.spider_name,
tags=self.tags.split(',') if self.tags else None,
spider_arguments=self.spider_arguments,
priority=self.priority,
desc=self.desc,
cron_minutes=self.cron_minutes,
cron_hour=self.cron_hour,
cron_day_of_month=self.cron_day_of_month,
cron_day_of_week=self.cron_day_of_week,
cron_month=self.cron_month,
enabled=self.enabled == 0,
run_type=self.run_type
)
@classmethod
def list_job_instance_by_project_id(cls, project_id):
return cls.query.filter_by(project_id=project_id).all()
@classmethod
def find_job_instance_by_id(cls, job_instance_id):
return cls.query.filter_by(id=job_instance_id).first()
class SpiderStatus():
PENDING, RUNNING, FINISHED, CANCELED = range(4)
class JobExecution(Base):
__tablename__ = 'sk_job_execution'
project_id = db.Column(db.INTEGER, nullable=False, index=True)
service_job_execution_id = db.Column(db.String(50), nullable=False, index=True)
job_instance_id = db.Column(db.INTEGER, nullable=False, index=True)
create_time = db.Column(db.DATETIME)
start_time = db.Column(db.DATETIME)
end_time = db.Column(db.DATETIME)
running_status = db.Column(db.INTEGER, default=SpiderStatus.PENDING)
running_on = db.Column(db.Text)
def to_dict(self):
job_instance = JobInstance.query.filter_by(id=self.job_instance_id).first()
return {
'project_id': self.project_id,
'job_execution_id': self.id,
'job_instance_id': self.job_instance_id,
'service_job_execution_id': self.service_job_execution_id,
'create_time': self.create_time.strftime('%Y-%m-%d %H:%M:%S') if self.create_time else None,
'start_time': self.start_time.strftime('%Y-%m-%d %H:%M:%S') if self.start_time else None,
'end_time': self.end_time.strftime('%Y-%m-%d %H:%M:%S') if self.end_time else None,
'running_status': self.running_status,
'running_on': self.running_on,
'job_instance': job_instance.to_dict() if job_instance else {}
}
@classmethod
def find_job_by_service_id(cls, service_job_execution_id):
return cls.query.filter_by(service_job_execution_id=service_job_execution_id).first()
@classmethod
def list_job_by_service_ids(cls, service_job_execution_ids):
return cls.query.filter(cls.service_job_execution_id.in_(service_job_execution_ids)).all()
@classmethod
def list_uncomplete_job(cls):
return cls.query.filter(cls.running_status != SpiderStatus.FINISHED,
cls.running_status != SpiderStatus.CANCELED).all()
@classmethod
def list_jobs(cls, project_id, each_status_limit=100):
result = {}
result['PENDING'] = [job_execution.to_dict() for job_execution in
JobExecution.query.filter_by(project_id=project_id,
running_status=SpiderStatus.PENDING).order_by(
desc(JobExecution.date_modified)).limit(each_status_limit)]
result['RUNNING'] = [job_execution.to_dict() for job_execution in
JobExecution.query.filter_by(project_id=project_id,
running_status=SpiderStatus.RUNNING).order_by(
desc(JobExecution.date_modified)).limit(each_status_limit)]
result['COMPLETED'] = [job_execution.to_dict() for job_execution in
JobExecution.query.filter(JobExecution.project_id == project_id).filter(
(JobExecution.running_status == SpiderStatus.FINISHED) | (
JobExecution.running_status == SpiderStatus.CANCELED)).order_by(
desc(JobExecution.date_modified)).limit(each_status_limit)]
return result
@classmethod
def list_run_stats_by_hours(cls, project_id):
result = {}
hour_keys = []
last_time = datetime.datetime.now() - datetime.timedelta(hours=23)
last_time = datetime.datetime(last_time.year, last_time.month, last_time.day, last_time.hour)
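        # Build one zeroed bucket per hour for the trailing 24 hours,
        # keyed by 'YYYY-mm-dd HH:00:00'.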
for hour in range(23, -1, -1):
time_tmp = datetime.datetime.now() - datetime.timedelta(hours=hour)
hour_key = time_tmp.strftime('%Y-%m-%d %H:00:00')
hour_keys.append(hour_key)
result[hour_key] = 0 # init
for job_execution in JobExecution.query.filter(JobExecution.project_id == project_id,
JobExecution.date_created >= last_time).all():
hour_key = job_execution.create_time.strftime('%Y-%m-%d %H:00:00')
result[hour_key] += 1
return [dict(key=hour_key, value=result[hour_key]) for hour_key in hour_keys]
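# A minimal usage sketch (assuming the SpiderKeeper app and its `db` session
# are initialized; the project name is illustrative):
#
#   project = Project()
#   project.project_name = 'demo'
#   Project.load_project([project])  # inserts only if not already present
#   for spider in SpiderInstance.list_spider_by_project_id(project.id):
#       print(spider.to_dict())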
|
|
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.compose import make_column_transformer
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
pytestmark = pytest.mark.filterwarnings(
"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
"matplotlib.*"
)
def test_confusion_matrix_display_validation(pyplot):
"""Check that we raise the proper error when validating parameters."""
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=5, random_state=0
)
regressor = SVR().fit(X, y)
y_pred_regressor = regressor.predict(X)
y_pred_classifier = SVC().fit(X, y).predict(X)
err_msg = "ConfusionMatrixDisplay.from_estimator only supports classifiers"
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_estimator(regressor, X, y)
err_msg = "Mix type of y not allowed, got types"
with pytest.raises(ValueError, match=err_msg):
# Force `y_true` to be seen as a regression problem
ConfusionMatrixDisplay.from_predictions(y + 0.5, y_pred_classifier)
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_predictions(y, y_pred_regressor)
err_msg = "Found input variables with inconsistent numbers of samples"
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_predictions(y, y_pred_classifier[::2])
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_confusion_matrix_display_invalid_option(pyplot, constructor_name):
"""Check the error raise if an invalid parameter value is passed."""
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=5, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
extra_params = {"normalize": "invalid"}
err_msg = r"normalize must be one of \{'true', 'pred', 'all', None\}"
with pytest.raises(ValueError, match=err_msg):
if constructor_name == "from_estimator":
ConfusionMatrixDisplay.from_estimator(classifier, X, y, **extra_params)
else:
ConfusionMatrixDisplay.from_predictions(y, y_pred, **extra_params)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("with_labels", [True, False])
@pytest.mark.parametrize("with_display_labels", [True, False])
def test_confusion_matrix_display_custom_labels(
pyplot, constructor_name, with_labels, with_display_labels
):
"""Check the resulting plot when labels are given."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
ax = pyplot.gca()
labels = [2, 1, 0, 3, 4] if with_labels else None
display_labels = ["b", "d", "a", "e", "f"] if with_display_labels else None
cm = confusion_matrix(y, y_pred, labels=labels)
common_kwargs = {
"ax": ax,
"display_labels": display_labels,
"labels": labels,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
else:
disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
assert_allclose(disp.confusion_matrix, cm)
if with_display_labels:
expected_display_labels = display_labels
elif with_labels:
expected_display_labels = labels
else:
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [str(name) for name in expected_display_labels]
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("include_values", [True, False])
def test_confusion_matrix_display_plotting(
pyplot,
constructor_name,
normalize,
include_values,
):
"""Check the overall plotting rendering."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
ax = pyplot.gca()
cmap = "plasma"
cm = confusion_matrix(y, y_pred)
common_kwargs = {
"normalize": normalize,
"cmap": cmap,
"ax": ax,
"include_values": include_values,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
else:
disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
assert disp.ax_ == ax
if normalize == "true":
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == "pred":
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == "all":
cm = cm / cm.sum()
assert_allclose(disp.confusion_matrix, cm)
import matplotlib as mpl
assert isinstance(disp.im_, mpl.image.AxesImage)
assert disp.im_.get_cmap().name == cmap
assert isinstance(disp.ax_, pyplot.Axes)
assert isinstance(disp.figure_, pyplot.Figure)
assert disp.ax_.get_ylabel() == "True label"
assert disp.ax_.get_xlabel() == "Predicted label"
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [str(name) for name in expected_display_labels]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
if include_values:
assert disp.text_.shape == (n_classes, n_classes)
fmt = ".2g"
expected_text = np.array([format(v, fmt) for v in cm.ravel(order="C")])
text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")])
assert_array_equal(expected_text, text_text)
else:
assert disp.text_ is None
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_confusion_matrix_display(pyplot, constructor_name):
"""Check the behaviour of the default constructor without using the class
methods."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
cm = confusion_matrix(y, y_pred)
common_kwargs = {
"normalize": None,
"include_values": True,
"cmap": "viridis",
"xticks_rotation": 45.0,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
else:
disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 45.0)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
disp.plot(cmap="plasma")
assert disp.im_.get_cmap().name == "plasma"
disp.plot(include_values=False)
assert disp.text_ is None
disp.plot(xticks_rotation=90.0)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 90.0)
disp.plot(values_format="e")
expected_text = np.array([format(v, "e") for v in cm.ravel(order="C")])
text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")])
assert_array_equal(expected_text, text_text)
def test_confusion_matrix_contrast(pyplot):
"""Check that the text color is appropriate depending on background."""
cm = np.eye(2) / 2
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.gray)
# diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
disp.plot(cmap=pyplot.cm.gray_r)
# diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
# Regression test for #15920
cm = np.array([[19, 34], [32, 58]])
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.Blues)
min_color = pyplot.cm.Blues(0)
max_color = pyplot.cm.Blues(255)
assert_allclose(disp.text_[0, 0].get_color(), max_color)
assert_allclose(disp.text_[0, 1].get_color(), max_color)
assert_allclose(disp.text_[1, 0].get_color(), max_color)
assert_allclose(disp.text_[1, 1].get_color(), min_color)
@pytest.mark.parametrize(
"clf",
[
LogisticRegression(),
make_pipeline(StandardScaler(), LogisticRegression()),
make_pipeline(
make_column_transformer((StandardScaler(), [0, 1])),
LogisticRegression(),
),
],
ids=["clf", "pipeline-clf", "pipeline-column_transformer-clf"],
)
def test_confusion_matrix_pipeline(pyplot, clf):
"""Check the behaviour of the plotting with more complex pipeline."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
with pytest.raises(NotFittedError):
ConfusionMatrixDisplay.from_estimator(clf, X, y)
clf.fit(X, y)
y_pred = clf.predict(X)
disp = ConfusionMatrixDisplay.from_estimator(clf, X, y)
cm = confusion_matrix(y, y_pred)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_confusion_matrix_with_unknown_labels(pyplot, constructor_name):
"""Check that when labels=None, the unique values in `y_pred` and `y_true`
will be used.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/18405
"""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# create unseen labels in `y_true` not seen during fitting and not present
# in 'classifier.classes_'
y = y + 1
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
common_kwargs = {"labels": None}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
else:
disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
display_labels = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
expected_labels = [str(i) for i in range(n_classes + 1)]
assert_array_equal(expected_labels, display_labels)
def test_colormap_max(pyplot):
"""Check that the max color is used for the color of the text."""
from matplotlib import cm
gray = cm.get_cmap("gray", 1024)
confusion_matrix = np.array([[1.0, 0.0], [0.0, 1.0]])
disp = ConfusionMatrixDisplay(confusion_matrix)
disp.plot(cmap=gray)
color = disp.text_[1, 0].get_color()
assert_allclose(color, [1.0, 1.0, 1.0, 1.0])
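# A minimal usage sketch of the API exercised by these tests (assuming
# matplotlib is installed; the labels and predictions are illustrative):
#
#   import matplotlib.pyplot as plt
#   from sklearn.metrics import ConfusionMatrixDisplay
#   disp = ConfusionMatrixDisplay.from_predictions(
#       [0, 1, 1, 0], [0, 1, 0, 0], display_labels=["neg", "pos"])
#   disp.plot(cmap="Blues")
#   plt.show()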
|
|
# Copyright 2010-2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connection Manager for Swift connections that responsible for providing
connection with valid credentials and updated token"""
import logging
from oslo_utils import encodeutils
from glance_store import exceptions
from glance_store.i18n import _, _LI
LOG = logging.getLogger(__name__)
class SwiftConnectionManager(object):
"""Connection Manager class responsible for initializing and managing
swiftclient connections in store. The instance of that class can provide
swift connections with a valid(and refreshed) user token if the token is
going to expire soon.
"""
AUTH_HEADER_NAME = 'X-Auth-Token'
def __init__(self, store, store_location, context=None,
allow_reauth=False):
"""Initialize manager with parameters required to establish connection.
Initialize store and prepare it for interacting with swift. Also
initialize keystone client that need to be used for authentication if
allow_reauth is True.
The method invariant is the following: if method was executed
successfully and self.allow_reauth is True users can safely request
valid(no expiration) swift connections any time. Otherwise, connection
manager initialize a connection once and always returns that connection
to users.
:param store: store that provides connections
:param store_location: image location in store
:param context: user context to access data in Swift
:param allow_reauth: defines if re-authentication need to be executed
when a user request the connection
"""
self._client = None
self.store = store
self.location = store_location
self.context = context
self.allow_reauth = allow_reauth
self.storage_url = self._get_storage_url()
self.connection = self._init_connection()
def get_connection(self):
"""Get swift client connection.
        Returns a swift client connection. If allow_reauth is True and the
        connection token is going to expire soon, the method returns an
        updated connection.
        The method invariant is the following: if self.allow_reauth is False,
        the method returns the same connection for every call, so the
        connection may expire. If self.allow_reauth is True, the returned
        swift connection is always valid and will not expire for at least
        swift_store_expire_soon_interval.
"""
if self.allow_reauth:
            # We refresh the token only if connection manager
            # re-authentication is allowed. Token refreshing is set up by
            # connection manager users. We also disable re-authentication
            # if there is no way to execute it (cannot initialize trusts for
            # multi-tenant, or auth_version is not 3).
            auth_ref = self.client.session.auth.auth_ref
            # Check whether the connection token is going to expire soon
            # (keystone checks if the token is about to expire or has
            # expired already).
if self.store.backend_group:
interval = getattr(
self.store.conf, self.store.backend_group
).swift_store_expire_soon_interval
else:
store_conf = self.store.conf.glance_store
interval = store_conf.swift_store_expire_soon_interval
if auth_ref.will_expire_soon(interval):
LOG.info(_LI("Requesting new token for swift connection."))
# request new token with session and client provided by store
auth_token = self.client.session.get_auth_headers().get(
self.AUTH_HEADER_NAME)
LOG.info(_LI("Token has been successfully requested. "
"Refreshing swift connection."))
                # initialize a new swiftclient connection with the fresh token
self.connection = self.store.get_store_connection(
auth_token, self.storage_url)
return self.connection
@property
def client(self):
"""Return keystone client to request a new token.
Initialize a client lazily from the method provided by glance_store.
        The method invariant is the following: if the client cannot be
        initialized, raise an exception; otherwise return an initialized
        client that can be used for re-authentication at any time.
"""
if self._client is None:
self._client = self._init_client()
return self._client
def _init_connection(self):
"""Initialize and return valid Swift connection."""
auth_token = self.client.session.get_auth_headers().get(
self.AUTH_HEADER_NAME)
return self.store.get_store_connection(
auth_token, self.storage_url)
def _init_client(self):
"""Initialize Keystone client."""
return self.store.init_client(location=self.location,
context=self.context)
def _get_storage_url(self):
"""Request swift storage url."""
raise NotImplementedError()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class SingleTenantConnectionManager(SwiftConnectionManager):
def _get_storage_url(self):
"""Get swift endpoint from keystone
Return endpoint for swift from service catalog if not overridden in
store configuration. The method works only Keystone v3.
If you are using different version (1 or 2)
it returns None.
:return: swift endpoint
"""
if self.store.conf_endpoint:
return self.store.conf_endpoint
if self.store.auth_version == '3':
try:
return self.client.session.get_endpoint(
service_type=self.store.service_type,
interface=self.store.endpoint_type,
region_name=self.store.region
)
except Exception as e:
                # do the same thing the swift driver does
                # when catching ClientException
msg = _("Cannot find swift service endpoint : "
"%s") % encodeutils.exception_to_unicode(e)
raise exceptions.BackendException(msg)
def _init_connection(self):
if self.store.auth_version == '3':
return super(SingleTenantConnectionManager,
self)._init_connection()
else:
# no re-authentication for v1 and v2
self.allow_reauth = False
# use good old connection initialization
return self.store.get_connection(self.location, self.context)
class MultiTenantConnectionManager(SwiftConnectionManager):
def __init__(self, store, store_location, context=None,
allow_reauth=False):
# no context - no party
if context is None:
reason = _("Multi-tenant Swift storage requires a user context.")
raise exceptions.BadStoreConfiguration(store_name="swift",
reason=reason)
super(MultiTenantConnectionManager, self).__init__(
store, store_location, context, allow_reauth)
def __exit__(self, exc_type, exc_val, exc_tb):
if self._client and self.client.trust_id:
# client has been initialized - need to cleanup resources
LOG.info(_LI("Revoking trust %s"), self.client.trust_id)
self.client.trusts.delete(self.client.trust_id)
def _get_storage_url(self):
return self.location.swift_url
def _init_connection(self):
if self.allow_reauth:
try:
return super(MultiTenantConnectionManager,
self)._init_connection()
except Exception as e:
LOG.debug("Cannot initialize swift connection for multi-tenant"
" store with trustee token: %s. Using user token for"
" connection initialization.", e)
# for multi-tenant store we have a token, so we can use it
# for connection initialization but we cannot fetch new token
# with client
self.allow_reauth = False
return self.store.get_store_connection(
self.context.auth_token, self.storage_url)
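# A minimal usage sketch (illustrative, not part of the original module),
# assuming `store`, `location` and `context` objects created elsewhere by
# glance_store:
#
#     with MultiTenantConnectionManager(store, location, context,
#                                       allow_reauth=True) as manager:
#         connection = manager.get_connection()
#         # use `connection` as a regular swiftclient connection; the
#         # manager transparently refreshes the token when it is about
#         # to expire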
|
|
import os
import re
import logging
from datetime import datetime
from taca.utils.filesystem import chdir
from taca.illumina.Runs import Run
from taca.utils import misc
from flowcell_parser.classes import SampleSheetParser
from io import open
logger = logging.getLogger(__name__)
TENX_GENO_PAT = re.compile('SI-GA-[A-H][1-9][0-2]?')
TENX_ATAC_PAT = re.compile('SI-NA-[A-H][1-9][0-2]?')
TENX_ST_PAT = re.compile('SI-(?:TT|NT|NN|TN)-[A-H][1-9][0-2]?')
SMARTSEQ_PAT = re.compile('SMARTSEQ[1-9]?-[1-9][0-9]?[A-P]')
IDT_UMI_PAT = re.compile('([ATCG]{4,}N+$)')
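# Illustrative matches for the patterns above: TENX_GENO_PAT matches e.g.
# 'SI-GA-A3' or 'SI-GA-H12', TENX_ATAC_PAT matches e.g. 'SI-NA-B1',
# TENX_ST_PAT matches e.g. 'SI-TT-A1', SMARTSEQ_PAT matches e.g.
# 'SMARTSEQ2-1A', and IDT_UMI_PAT matches indexes with a trailing UMI part,
# e.g. 'ACGTACGTNNNN'.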
class HiSeqX_Run(Run):
def __init__(self, run_dir, samplesheet_folders):
        super(HiSeqX_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
self._copy_samplesheet()
def _set_sequencer_type(self):
self.sequencer_type = 'HiSeqX'
def _set_run_type(self):
self.run_type = 'NGI-RUN'
def _copy_samplesheet(self):
ssname = self._get_samplesheet()
ssparser = SampleSheetParser(ssname)
indexfile = dict()
# Loading index files
try:
indexfile['tenX'] = self.CONFIG['bcl2fastq']['tenX_index_path']
except KeyError:
logger.error('Path to index file (10X) not found in the config file')
raise RuntimeError
try:
indexfile['smartseq'] = self.CONFIG['bcl2fastq']['smartseq_index_path']
except KeyError:
logger.error('Path to index file (Smart-seq) not found in the config file')
raise RuntimeError
        # The samplesheet needs to be positioned in the FC directory with the name SampleSheet.csv (Illumina default)
        # If this is not the case, create it and take special care of the modifications to be done on the SampleSheet
samplesheet_dest = os.path.join(self.run_dir, 'SampleSheet.csv')
# Function that goes through the original sample sheet and check for sample types
self.sample_table = _classify_samples(indexfile, ssparser)
        # Create the samplesheet only if it is not already present, then go to the next step
if not os.path.exists(samplesheet_dest):
try:
with open(samplesheet_dest, 'w') as fcd:
fcd.write(_generate_clean_samplesheet(ssparser,
indexfile,
rename_samples=True,
rename_qPCR_suffix = True,
fields_qPCR=[ssparser.dfield_snm]))
except Exception as e:
logger.error('Encountered the following exception {}'.format(e))
return False
        logger.info('Created SampleSheet.csv for Flowcell {} in {}'.format(self.id, samplesheet_dest))
        # SampleSheet.csv generated
        # When demultiplexing, SampleSheet.csv is the one to use
        # Need to rewrite so that SampleSheet_0.csv is always used.
self.runParserObj.samplesheet = SampleSheetParser(os.path.join(self.run_dir, 'SampleSheet.csv'))
if not self.runParserObj.obj.get('samplesheet_csv'):
self.runParserObj.obj['samplesheet_csv'] = self.runParserObj.samplesheet.data
def demultiplex_run(self):
"""
Demultiplex a run:
        - Make sub-samplesheets based on sample classes
        - Decide correct bcl2fastq command parameters based on sample classes
        - Run bcl2fastq conversion
"""
# Check sample types
sample_type_list = []
for lane, lane_contents in self.sample_table.items():
for sample in lane_contents:
sample_detail = sample[1]
sample_type = sample_detail['sample_type']
if sample_type not in sample_type_list:
sample_type_list.append(sample_type)
# Go through sample_table for demultiplexing
bcl2fastq_cmd_counter = 0
for sample_type in sorted(sample_type_list):
# Looking for lanes with multiple masks under the same sample type
lane_table = dict()
for lane, lane_contents in self.sample_table.items():
for sample in lane_contents:
sample_detail = sample[1]
sample_type_t = sample_detail['sample_type']
sample_index_length = sample_detail['index_length']
if sample_type_t == sample_type:
if lane_table.get(lane):
if sample_index_length not in lane_table[lane]:
lane_table[lane].append(sample_index_length)
else:
lane_table.update({lane:[sample_index_length]})
# Determine the number of demux needed for the same sample type
            demux_number_with_the_same_sample_type = len(max(lane_table.values(), key=len))
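            # e.g. lane_table == {'1': [[8, 8], [6, 0]], '2': [[8, 8]]}
            # gives demux_number_with_the_same_sample_type == 2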
# Prepare sub-samplesheets, masks and commands
for i in range(0,demux_number_with_the_same_sample_type):
# Prepare sub-samplesheet
# A dictionary with lane and sample IDs to include
samples_to_include = dict()
# A dictionary with lane and index length for generating masks
mask_table = dict()
for lane, lane_contents in self.sample_table.items():
try:
index_length = lane_table[lane][i]
mask_table.update({lane:index_length})
for sample in lane_contents:
sample_name = sample[0]
sample_detail = sample[1]
sample_type_t = sample_detail['sample_type']
sample_index_length = sample_detail['index_length']
if sample_type_t == sample_type and sample_index_length == index_length:
if samples_to_include.get(lane):
samples_to_include[lane].append(sample_name)
else:
samples_to_include.update({lane:[sample_name]})
                    except (KeyError, IndexError):
                        logger.info('No corresponding mask in lane {}. Skip it.'.format(lane))
                        continue
# Make sub-samplesheet
with chdir(self.run_dir):
                samplesheet_dest = 'SampleSheet_{}.csv'.format(bcl2fastq_cmd_counter)
with open(samplesheet_dest, 'w') as fcd:
fcd.write(_generate_samplesheet_subset(self.runParserObj.samplesheet,
samples_to_include))
# Prepare demultiplexing dir
with chdir(self.run_dir):
# Create Demultiplexing dir, this changes the status to IN_PROGRESS
if not os.path.exists('Demultiplexing'):
os.makedirs('Demultiplexing')
# Prepare demultiplexing command
with chdir(self.run_dir):
cmd = self.generate_bcl_command(sample_type,
mask_table,
bcl2fastq_cmd_counter)
                misc.call_external_command_detached(cmd,
                                                    with_log_files=True,
                                                    prefix='demux_{}'.format(bcl2fastq_cmd_counter))
                logger.info('BCL to FASTQ conversion and demultiplexing '
                            'started for run {} on {}'.format(os.path.basename(self.id),
                                                              datetime.now()))
            # Demultiplexing done for one mask type; the script will continue
            # with the next type, so the command counter increases by 1
bcl2fastq_cmd_counter += 1
return True
def _aggregate_demux_results(self):
"""Take the Stats.json files from the different
demultiplexing folders and merges them into one
"""
# Define lanes as simple or complex
# Simple lanes include samples with only one type and one type of index length
simple_lanes = {}
complex_lanes = {}
for lane, lane_contents in self.sample_table.items():
sample_type_list_per_lane = []
for sample in lane_contents:
sample_detail = sample[1]
sample_type = sample_detail['sample_type']
if sample_type not in sample_type_list_per_lane:
sample_type_list_per_lane.append(sample_type)
if len(sample_type_list_per_lane) > 1:
complex_lanes[lane] = 0
else:
sample_index_length_list_per_lane = [] # Note that there is only one sample type in this case
for sample in lane_contents:
sample_detail = sample[1]
sample_index_length = sample_detail['index_length']
if sample_index_length not in sample_index_length_list_per_lane:
sample_index_length_list_per_lane.append(sample_index_length)
if len(sample_index_length_list_per_lane) > 1:
complex_lanes[lane] = 0
else:
simple_lanes[lane] = 0
self._aggregate_demux_results_simple_complex(simple_lanes, complex_lanes)
def generate_bcl_command(self, sample_type, mask_table, bcl2fastq_cmd_counter):
# I have everything to run demultiplexing now.
logger.info('Building a bcl2fastq command')
per_lane_base_masks = self._generate_per_lane_base_mask(sample_type, mask_table)
with chdir(self.run_dir):
cl = [self.CONFIG.get('bcl2fastq')['bin']]
output_dir = 'Demultiplexing_{}'.format(bcl2fastq_cmd_counter)
cl.extend(['--output-dir', output_dir])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
cl_options = []
if 'options' in self.CONFIG.get('bcl2fastq'):
for option in self.CONFIG['bcl2fastq']['options']:
cl_options.extend([option])
# Add the extra 10X command options if we have 10X Genomic or ATAC samples
if sample_type == '10X_GENO' or sample_type == '10X_ATAC':
cl_options.extend(self.CONFIG['bcl2fastq']['options_10X'])
# Add the extra 10X command options if we have 10X ST samples
if sample_type == '10X_ST':
cl_options.extend(self.CONFIG['bcl2fastq']['options_10X_ST'])
# Add the extra command option if we have samples with IDT UMI
if sample_type == 'IDT_UMI':
cl_options.extend(self.CONFIG['bcl2fastq']['options_IDT_UMI'])
# Add the extra Smart-seq command options if we have 10X ST samples
if sample_type == 'SMARTSEQ':
cl_options.extend(self.CONFIG['bcl2fastq']['options_SMARTSEQ'])
# Add the extra command option if we have samples with single short index
if sample_type == 'short_single_index':
cl_options.extend(self.CONFIG['bcl2fastq']['options_short_single_index'])
# Append all options that appear in the configuration file to the main command.
for option in cl_options:
if isinstance(option, dict):
opt, val = list(option.items())[0]
if 'output-dir' not in opt:
cl.extend(['--{}'.format(opt), str(val)])
else:
cl.append('--{}'.format(option))
cl.extend(['--sample-sheet', os.path.join(os.path.join(self.run_dir, 'SampleSheet_{}.csv'.format(bcl2fastq_cmd_counter)))])
# Add the base_mask for each lane
lanes = list(mask_table.keys())
for lane in sorted(lanes):
            # Iterate through each lane and add the correct --use-bases-mask for that lane
base_mask = [per_lane_base_masks[lane][bm]['base_mask'] for bm in per_lane_base_masks[lane]][0] # Get the base_mask
base_mask_expr = '{}:'.format(lane) + ','.join(base_mask)
cl.extend(['--use-bases-mask', base_mask_expr])
return cl
def _generate_per_lane_base_mask(self, sample_type, mask_table):
"""Generate the base mask for each lane included in mask_table.
Hypotesis:
- RunInfo.xml contains the configuration
- this object contains a properly parsed samplesheet
It returns an dict with a key for each lane:
{lane1:
{base_mask_string (e.g., Y150I6N2N8Y150):
[ base_mask , [SampleSheetEntries]]
}
lane2:
}
"""
# Generate new ssparser (from the renamed samplesheet)
runSetup = self.runParserObj.runinfo.get_read_configuration()
base_masks = {}
if not self.runParserObj.samplesheet:
raise RuntimeError('Samplesheet not yet initialised')
for lane, lane_contents in mask_table.items():
if lane not in base_masks:
base_masks[lane] = {}
index1_size = lane_contents[0]
index2_size = lane_contents[1]
            # a sample is dual index whenever index 2 is present
            is_dual_index = index2_size != 0
# Compute the basemask
base_mask = self._compute_base_mask(runSetup, sample_type, index1_size, is_dual_index, index2_size)
base_mask_string = ''.join(base_mask)
base_masks[lane][base_mask_string] = {'base_mask':base_mask}
return base_masks
def _compute_base_mask(self, runSetup, sample_type, index1_size, is_dual_index, index2_size):
"""
Assumptions:
- if runSetup is of size 3, then single index run
- if runSetup is of size 4, then dual index run
"""
bm = []
dual_index_run = False
        if len(runSetup) > 4:
            raise RuntimeError('when generating base_masks it looks like there are'
                               ' more than 4 reads in the RunInfo.xml')
for read in runSetup:
cycles = int(read['NumCycles'])
if read['IsIndexedRead'] == 'N':
bm.append('Y' + str(cycles))
else:
if index1_size > cycles:
                    # The size of the index in the samplesheet is larger than
                    # the one specified by RunInfo.xml, something must be wrong
                    raise RuntimeError('when generating base_masks found an index in'
                                       ' the samplesheet larger than the index specified in RunInfo.xml')
is_first_index_read = int(read['Number']) == 2
# Prepare the base mask for the 1st index read
if is_first_index_read:
i_remainder = cycles - index1_size
if i_remainder > 0:
if sample_type == 'IDT_UMI': # Case of IDT UMI
bm.append('I' + str(index1_size) + 'y*')
elif index1_size == 0:
bm.append('N' + str(cycles)) # Case of NoIndex
else:
bm.append('I' + str(index1_size) + 'N' + str(i_remainder))
else:
bm.append('I' + str(cycles))
else:
# When working on the second read index I need to know if the sample is dual index or not
if is_dual_index:
if sample_type == '10X_ATAC': # Case of 10X scATACseq, demultiplex the whole index 2 cycles as FastQ
bm.append('Y' + str(cycles))
else:
i_remainder = cycles - index2_size
if i_remainder > 0:
if sample_type == 'IDT_UMI': # Case of IDT UMI
bm.append('I' + str(index2_size) + 'y*')
elif index2_size == 0:
bm.append('N' + str(cycles))
else:
bm.append('I' + str(index2_size) + 'N' + str(i_remainder))
else:
bm.append('I' + str(cycles))
else:
# If this sample is not dual index but the run is,
# then I need to ignore the second index completely
bm.append('N' + str(cycles))
return bm
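    # Worked example (illustrative): for a dual-index run with read cycles
    # [151, 8, 8, 151] and an ordinary sample with index lengths 8/8,
    # _compute_base_mask returns ['Y151', 'I8', 'I8', 'Y151']; for a sample
    # with index lengths 6/0 on the same run it returns
    # ['Y151', 'I6N2', 'N8', 'Y151'].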
def _generate_clean_samplesheet(ssparser, indexfile, fields_to_remove=None, rename_samples=True, rename_qPCR_suffix=False, fields_qPCR=None):
    """Generate a 'clean' samplesheet; the given fields will be removed.
    If rename_samples is True, samples are renamed to the sample name prefixed with 'Sample_'.
    Will also replace 10X or Smart-seq indices (e.g. SI-GA-A3 into TGTGCGGG).
    """
output = u''
# Expand the ssparser if there are lanes with 10X or Smart-seq samples
index_dict_tenX = parse_10X_indexes(indexfile['tenX'])
index_dict_smartseq = parse_smartseq_indexes(indexfile['smartseq'])
# Replace 10X or Smart-seq indices
for sample in ssparser.data:
if sample['index'] in index_dict_tenX.keys():
tenX_index = sample['index']
# In the case of 10X ST indexes, replace index and index2
if TENX_ST_PAT.findall(tenX_index):
sample['index'] = index_dict_tenX[tenX_index][0]
sample['index2'] = index_dict_tenX[tenX_index][1]
            # In the case of 10X Genomic and ATAC samples, replace the index name with the 4 actual indices
else:
x = 0
indices_number = len(index_dict_tenX[tenX_index])
while x < indices_number - 1:
new_sample = dict(sample)
new_sample['index'] = index_dict_tenX[tenX_index][x]
ssparser.data.append(new_sample)
x += 1
# Set the original 10X index to the 4th correct index
sample['index'] = index_dict_tenX[tenX_index][x]
elif SMARTSEQ_PAT.findall(sample['index']):
x = 0
smartseq_index = sample['index'].split('-')[1]
indices_number = len(index_dict_smartseq[smartseq_index])
while x < indices_number - 1:
new_sample = dict(sample)
new_sample['index'] = index_dict_smartseq[smartseq_index][x][0]
new_sample['index2'] = index_dict_smartseq[smartseq_index][x][1]
ssparser.data.append(new_sample)
x += 1
sample['index'] = index_dict_smartseq[smartseq_index][x][0]
sample['index2'] = index_dict_smartseq[smartseq_index][x][1]
    # Sort to get the added indices from 10X in the right place
    # Python 3 doesn't support implicit sorting of a list of dicts, so sort by Lane and then Sample_ID
ssparser.data.sort(key=lambda item: (item.get('Lane'), item.get('Sample_ID')))
if not fields_to_remove:
fields_to_remove = []
# Header
output += '[Header]{}'.format(os.linesep)
for field in sorted(ssparser.header):
output += '{},{}'.format(field.rstrip(), ssparser.header[field].rstrip())
output += os.linesep
# Data
output += '[Data]{}'.format(os.linesep)
datafields = []
for field in ssparser.datafields:
if field not in fields_to_remove:
datafields.append(field)
output += ','.join(datafields)
output += os.linesep
for line in ssparser.data:
line_ar = []
for field in datafields:
value = line[field]
if rename_samples and ssparser.dfield_sid in field:
try:
if rename_qPCR_suffix and ssparser.dfield_snm in fields_qPCR:
# Substitute SampleID with SampleName, add Sample_ as prefix and remove __qPCR_ suffix
value = re.sub('__qPCR_$', '', 'Sample_{}'.format(line[ssparser.dfield_snm]))
else:
# Substitute SampleID with SampleName, add Sample_ as prefix
                        value = 'Sample_{}'.format(line[ssparser.dfield_snm])
                except KeyError:
                    # Otherwise add Sample_ as prefix
                    value = 'Sample_{}'.format(line[ssparser.dfield_sid])
elif rename_qPCR_suffix and field in fields_qPCR:
value = re.sub('__qPCR_$', '', line[field])
line_ar.append(value)
output += ','.join(line_ar)
output += os.linesep
return output
def _classify_samples(indexfile, ssparser):
"""Given an ssparser object, go through all samples and decide sample types."""
sample_table = dict()
index_dict_tenX = parse_10X_indexes(indexfile['tenX'])
index_dict_smartseq = parse_smartseq_indexes(indexfile['smartseq'])
for sample in ssparser.data:
lane = sample['Lane']
sample_name = sample.get('Sample_Name') or sample.get('SampleName')
# 10X Genomic DNA & RNA
if TENX_GENO_PAT.findall(sample['index']):
index_length = [len(index_dict_tenX[sample['index']][0]),0]
sample_type = '10X_GENO'
        # 10X scATAC. Note that the number '16' is only a preset value;
        # when preparing masks, the whole index 2 will be demultiplexed as FastQ
elif TENX_ATAC_PAT.findall(sample['index']):
index_length = [len(index_dict_tenX[sample['index']][0]),16]
sample_type = '10X_ATAC'
# 10X ST
elif TENX_ST_PAT.findall(sample['index']):
index_length = [len(index_dict_tenX[sample['index']][0]),len(index_dict_tenX[sample['index']][1])]
sample_type = '10X_ST'
# IDT UMI samples
elif IDT_UMI_PAT.findall(sample['index']) or IDT_UMI_PAT.findall(sample['index2']):
# Index length after removing "N" part
index_length = [len(sample['index'].replace('N', '')),
len(sample['index2'].replace('N', ''))]
sample_type = 'IDT_UMI'
# Smart-seq
elif SMARTSEQ_PAT.findall(sample['index']):
smartseq_index = sample['index'].split('-')[1]
index_length = [len(index_dict_smartseq[smartseq_index][0][0]),len(index_dict_smartseq[smartseq_index][0][1])]
sample_type = 'SMARTSEQ'
        # No Index case. Note that if both index 1 and 2 are empty strings, the sample is effectively of the same type, but it is handled in the next case
elif sample['index'].upper() == 'NOINDEX':
index_length = [0, 0]
sample_type = 'ordinary'
# Ordinary samples
else:
index_length = [len(sample['index']),len(sample['index2'])]
# Short single index (<=6nt)
if (index_length[0] <= 6 and index_length[1] == 0) or (index_length[0] == 0 and index_length[1] <= 6):
sample_type = 'short_single_index'
else:
sample_type = 'ordinary'
# Write in sample table
# {'1': [('101', {'sample_type': 'ordinary', 'index_length': [8, 8]}), ('102', {'sample_type': 'ordinary', 'index_length': [8, 8]})]}
if sample_table.get(lane):
sample_table[lane].append((sample_name,
{'sample_type': sample_type,
'index_length': index_length}))
else:
sample_table.update({lane:[(sample_name,
{'sample_type': sample_type,
'index_length': index_length})]})
return sample_table
def parse_10X_indexes(indexfile):
"""
Takes a file of 10X indexes and returns them as a dict.
Todo: Set it up to take the file from config instead
"""
index_dict = {}
with open(indexfile, 'r') as f:
for line in f:
line_ = line.rstrip().split(',')
index_dict[line_[0]] = line_[1:5]
return index_dict
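# Illustrative row format (hypothetical sequences): a line such as
# 'SI-GA-A1,AAAA,CCCC,GGGG,TTTT' yields
# index_dict['SI-GA-A1'] == ['AAAA', 'CCCC', 'GGGG', 'TTTT'].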
def parse_smartseq_indexes(indexfile):
"""
Takes a file of Smart-seq indexes and returns them as a dict.
Todo: Set it up to take the file from config instead
"""
index_dict = {}
with open(indexfile, 'r') as f:
for line in f:
line_ = line.rstrip().split(',')
if index_dict.get(line_[0]):
index_dict[line_[0]].append((line_[1],line_[2]))
else:
index_dict.update({line_[0]:[(line_[1],line_[2])]})
return index_dict
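# Illustrative row format (hypothetical keys and sequences): lines
# '1A,ACGT,TGCA' and '1A,GGCC,CCGG' accumulate to
# index_dict['1A'] == [('ACGT', 'TGCA'), ('GGCC', 'CCGG')].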
def _generate_samplesheet_subset(ssparser, samples_to_include):
output = u''
# Header
output += '[Header]{}'.format(os.linesep)
for field in sorted(ssparser.header):
output += '{},{}'.format(field.rstrip(), ssparser.header[field].rstrip())
output += os.linesep
# Data
output += '[Data]{}'.format(os.linesep)
datafields = []
for field in ssparser.datafields:
datafields.append(field)
output += ','.join(datafields)
output += os.linesep
for line in ssparser.data:
sample_name = line.get('Sample_Name') or line.get('SampleName')
lane = line['Lane']
if lane in samples_to_include.keys():
if sample_name in samples_to_include.get(lane):
line_ar = []
for field in datafields:
# Case of no index
if field == 'index' and 'NOINDEX' in line[field].upper():
line[field] = ''
# Case of IDT UMI
if (field == 'index' or field == 'index2') and IDT_UMI_PAT.findall(line[field]):
line[field] = line[field].replace('N', '')
line_ar.append(line[field])
output += ','.join(line_ar)
output += os.linesep
return output
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from . import proposal_target
def get_vgg_feature(data):
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
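    # conv5_3 features have an effective stride of 16 (four 2x2 max-pools)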
return relu5_3
def get_vgg_top_feature(data):
# group 6
flatten = mx.symbol.Flatten(data=data, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
return drop7
def get_vgg_train(anchor_scales, anchor_ratios, rpn_feature_stride,
rpn_pre_topk, rpn_post_topk, rpn_nms_thresh, rpn_min_size, rpn_batch_rois,
num_classes, rcnn_feature_stride, rcnn_pooled_size, rcnn_batch_size,
rcnn_batch_rois, rcnn_fg_fraction, rcnn_fg_overlap, rcnn_bbox_stds):
num_anchors = len(anchor_scales) * len(anchor_ratios)
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_vgg_feature(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
# rpn classification
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
rpn_cls_act = mx.symbol.softmax(
data=rpn_cls_score_reshape, axis=1, name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
# rpn bbox regression
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / rpn_batch_rois)
# rpn proposal
rois = mx.symbol.contrib.MultiProposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=rpn_feature_stride, scales=anchor_scales, ratios=anchor_ratios,
rpn_pre_nms_top_n=rpn_pre_topk, rpn_post_nms_top_n=rpn_post_topk,
threshold=rpn_nms_thresh, rpn_min_size=rpn_min_size)
# rcnn roi proposal target
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes, op_type='proposal_target',
num_classes=num_classes, batch_images=rcnn_batch_size,
batch_rois=rcnn_batch_rois, fg_fraction=rcnn_fg_fraction,
fg_overlap=rcnn_fg_overlap, box_stds=rcnn_bbox_stds)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# rcnn roi pool
roi_pool = mx.symbol.ROIPooling(
name='roi_pool', data=conv_feat, rois=rois, pooled_size=rcnn_pooled_size, spatial_scale=1.0 / rcnn_feature_stride)
# rcnn top feature
top_feat = get_vgg_top_feature(roi_pool)
# rcnn classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=top_feat, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# rcnn bbox regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=top_feat, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / rcnn_batch_rois)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(rcnn_batch_size, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(rcnn_batch_size, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(rcnn_batch_size, -1, 4 * num_classes), name='bbox_loss_reshape')
# group output
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_vgg_test(anchor_scales, anchor_ratios, rpn_feature_stride,
rpn_pre_topk, rpn_post_topk, rpn_nms_thresh, rpn_min_size,
num_classes, rcnn_feature_stride, rcnn_pooled_size, rcnn_batch_size):
num_anchors = len(anchor_scales) * len(anchor_ratios)
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_vgg_feature(data)
# rpn feature
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
# rpn classification
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_act = mx.symbol.softmax(
data=rpn_cls_score_reshape, axis=1, name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
# rpn bbox regression
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# rpn proposal
rois = mx.symbol.contrib.MultiProposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=rpn_feature_stride, scales=anchor_scales, ratios=anchor_ratios,
rpn_pre_nms_top_n=rpn_pre_topk, rpn_post_nms_top_n=rpn_post_topk,
threshold=rpn_nms_thresh, rpn_min_size=rpn_min_size)
# rcnn roi pool
roi_pool = mx.symbol.ROIPooling(
name='roi_pool', data=conv_feat, rois=rois, pooled_size=rcnn_pooled_size, spatial_scale=1.0 / rcnn_feature_stride)
# rcnn top feature
top_feat = get_vgg_top_feature(roi_pool)
# rcnn classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=top_feat, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# rcnn bbox regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=top_feat, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(rcnn_batch_size, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(rcnn_batch_size, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
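# A minimal sketch (illustrative, not part of the original file) of building
# the test-time symbol; the hyper-parameter values below are typical Faster
# R-CNN VGG16 defaults and may differ from this project's config:
#
#     sym = get_vgg_test(anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2),
#                        rpn_feature_stride=16, rpn_pre_topk=6000,
#                        rpn_post_topk=300, rpn_nms_thresh=0.7,
#                        rpn_min_size=16, num_classes=21,
#                        rcnn_feature_stride=16, rcnn_pooled_size=(7, 7),
#                        rcnn_batch_size=1)
#     # sym outputs: [rois, cls_prob, bbox_pred]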
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import mock
import netaddr
from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import l3 as lib_l3_exc
from oslo_utils import uuidutils
from six.moves import queue
from neutron.services.portforwarding.common import exceptions as pf_exc
from neutron.services.portforwarding import pf_plugin
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
class PortForwardingTestCaseBase(ml2_test_base.ML2TestFramework):
def setUp(self):
super(PortForwardingTestCaseBase, self).setUp()
self.pf_plugin = pf_plugin.PortForwardingPlugin()
def _create_floatingip(self, network_id, port_id=None,
fixed_ip_address=None):
body = {"floating_network_id": network_id,
"port_id": port_id,
"fixed_ip_address": fixed_ip_address,
"tenant_id": self._tenant_id,
"project_id": self._tenant_id}
return self.l3_plugin.create_floatingip(
self.context,
{"floatingip": body})
def _get_floatingip(self, floatingip_id):
return self.l3_plugin.get_floatingip(self.context, floatingip_id)
def _update_floatingip(self, fip_id, update_info):
return self.l3_plugin.update_floatingip(
self.context, fip_id, {"floatingip": update_info})
def _delete_floatingip(self, fip_id):
return self.l3_plugin.delete_floatingip(self.context, fip_id)
def _update_port(self, port_id, update_info):
return self.core_plugin.update_port(
self.context, port_id, {'port': update_info})
def _delete_port(self, port_id):
return self.core_plugin.delete_port(self.context, port_id)
def _add_router_interface(self, router_id, subnet_id):
interface_info = {"subnet_id": subnet_id}
self.l3_plugin.add_router_interface(
self.context, router_id, interface_info=interface_info)
def _set_router_gw(self, router_id, ext_net_id):
body = {
'router':
{'external_gateway_info': {'network_id': ext_net_id}}}
self.l3_plugin.update_router(self.context, router_id, body)
class PortForwardingTestCase(PortForwardingTestCaseBase):
def setUp(self):
super(PortForwardingTestCase, self).setUp()
self._prepare_env()
def _prepare_env(self):
self.router = self._create_router()
self.ext_net = self._create_network(
self.fmt, 'ext-net', True, arg_list=("router:external",),
**{"router:external": True}).json['network']
self.ext_subnet = self._create_subnet(
self.fmt, self.ext_net['id'], '172.24.2.0/24').json['subnet']
self.net = self._create_network(self.fmt, 'private', True).json[
'network']
self.subnet = self._create_subnet(self.fmt, self.net['id'],
'10.0.0.0/24').json['subnet']
self._set_router_gw(self.router['id'], self.ext_net['id'])
self._add_router_interface(self.router['id'], self.subnet['id'])
self.fip = self._create_floatingip(self.ext_net['id'])
self.port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding = {
apidef.RESOURCE_NAME:
{apidef.EXTERNAL_PORT: 2225,
apidef.INTERNAL_PORT: 25,
apidef.INTERNAL_PORT_ID: self.port['id'],
apidef.PROTOCOL: "tcp",
apidef.INTERNAL_IP_ADDRESS:
self.port['fixed_ips'][0]['ip_address']}}
def test_create_floatingip_port_forwarding(self):
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
expect = {
"external_port": 2225,
"internal_port": 25,
"internal_port_id": self.port['id'],
"protocol": "tcp",
"internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
'id': mock.ANY,
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, res)
def test_negative_create_floatingip_port_forwarding(self):
self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
        # This will fail with the same params
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.create_floatingip_port_forwarding,
self.context, self.fip['id'], self.port_forwarding)
def test_update_floatingip_port_forwarding(self):
# create a test port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
# update the socket port only
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2226,
"internal_port": 26,
"protocol": "udp"
}
}
update_res = self.pf_plugin.update_floatingip_port_forwarding(
self.context, res['id'], self.fip['id'], update_body)
expect = {
"external_port": 2226,
"internal_port": 26,
"internal_port_id": self.port['id'],
"protocol": "udp",
"internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
'id': res['id'],
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, update_res)
# update the neutron port and success
new_port = self._create_port(self.fmt, self.net['id']).json['port']
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2227,
"internal_port": 27,
"protocol": "tcp",
"internal_port_id": new_port['id'],
"internal_ip_address": new_port['fixed_ips'][0]['ip_address']
}
}
update_res = self.pf_plugin.update_floatingip_port_forwarding(
self.context, res['id'], self.fip['id'], update_body)
expect = {
"external_port": 2227,
"internal_port": 27,
"internal_port_id": new_port['id'],
"protocol": "tcp",
"internal_ip_address": new_port['fixed_ips'][0]['ip_address'],
'id': res['id'],
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, update_res)
def test_negative_update_floatingip_port_forwarding(self):
# prepare a port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
        # prepare a new router and subnet, and attach the subnet to the new router
new_router = self._create_router()
new_subnet = self._create_subnet(self.fmt, self.net['id'],
'11.0.0.0/24').json['subnet']
self._set_router_gw(new_router['id'], self.ext_net['id'])
self._add_router_interface(new_router['id'], new_subnet['id'])
# create a port based on the new subnet
new_port = self._create_port(
self.fmt, self.net['id'],
fixed_ips=[{'subnet_id': new_subnet['id']}]).json['port']
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2227,
"internal_port": 27,
"protocol": "tcp",
"internal_port_id": new_port['id'],
"internal_ip_address": new_port['fixed_ips'][0]['ip_address']
}
}
        # This will fail, as the newly found router_id does not match.
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.update_floatingip_port_forwarding,
self.context, res['id'], self.fip['id'], update_body)
        # There is already a port forwarding. We create another port forwarding
        # with the new_port, and update the new one with the same params as
        # the existing one.
new_port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding[apidef.RESOURCE_NAME].update({
'internal_port_id': new_port['id'],
'internal_ip_address': new_port['fixed_ips'][0]['ip_address'],
'external_port': self.port_forwarding[
apidef.RESOURCE_NAME]['external_port'] + 1
})
new_res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
self.port_forwarding[apidef.RESOURCE_NAME].update({
'internal_port_id': self.port['id'],
'internal_ip_address': self.port['fixed_ips'][0]['ip_address'],
'external_port': self.port_forwarding[
apidef.RESOURCE_NAME]['external_port'] - 1
})
        # This will fail, as it would create a duplicate record.
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.update_floatingip_port_forwarding,
self.context, new_res['id'], self.fip['id'],
update_body)
def test_delete_floatingip_port_forwarding(self):
# create two port forwardings for a floatingip
pf_1 = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
new_port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding[apidef.RESOURCE_NAME].update({
'external_port': 2226,
'internal_port_id': new_port['id'],
'internal_ip_address': new_port['fixed_ips'][0]['ip_address']
})
pf_2 = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
floatingip = self._get_floatingip(self.fip['id'])
self.assertEqual(self.router['id'], floatingip['router_id'])
        # delete pf_1, check that the router_id of the floatingip does not change.
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, pf_1['id'], self.fip['id'])
exist_pfs = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'])
self.assertEqual(1, len(exist_pfs))
self.assertEqual(pf_2['id'], exist_pfs[0]['id'])
# delete pf_2, it's the last port forwarding of floatingip.
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, pf_2['id'], self.fip['id'])
exist_pfs = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'])
self.assertEqual(0, len(exist_pfs))
floatingip = self._get_floatingip(self.fip['id'])
self.assertIsNone(floatingip['router_id'])
def test_negative_delete_floatingip_port_forwarding(self):
# prepare a good port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
# pass non-existing port forwarding id
self.assertRaises(pf_exc.PortForwardingNotFound,
self.pf_plugin.delete_floatingip_port_forwarding,
self.context, uuidutils.generate_uuid(),
self.fip['id'])
# pass existing port forwarding but non-existing floatingip_id
self.assertRaises(pf_exc.PortForwardingNotFound,
self.pf_plugin.delete_floatingip_port_forwarding,
self.context, res['id'], uuidutils.generate_uuid())
def _simulate_concurrent_requests_process_and_raise(
self, funcs, args_list):
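        """Run each (func, args) pair from funcs/args_list in worker threads
        via a shared queue, and re-raise the first exception captured by any
        worker in the calling thread.
        """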
class SimpleThread(threading.Thread):
def __init__(self, q):
super(SimpleThread, self).__init__()
self.q = q
self.exception = None
def run(self):
try:
while not self.q.empty():
item = None
try:
item = self.q.get(False)
func, func_args = item[0], item[1]
func(*func_args)
except queue.Empty:
pass
finally:
if item:
self.q.task_done()
except Exception as e:
self.exception = e
def get_exception(self):
return self.exception
q = queue.Queue()
for func, func_args in zip(funcs, args_list):
q.put_nowait((func, func_args))
threads = []
for _ in range(len(funcs)):
t = SimpleThread(q)
threads.append(t)
t.start()
q.join()
for t in threads:
e = t.get_exception()
if e:
raise e
def test_concurrent_create_port_forwarding_delete_fip(self):
func1 = self.pf_plugin.create_floatingip_port_forwarding
func2 = self._delete_floatingip
funcs = [func1, func2]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.fip['id'],)]
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
port_forwardings = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'], fields=['id'])
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, port_forwardings[0][apidef.ID],
floatingip_id=self.fip['id'])
funcs.reverse()
args_list.reverse()
self.assertRaises(lib_l3_exc.FloatingIPNotFound,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
def test_concurrent_create_port_forwarding_update_fip(self):
newport = self._create_port(self.fmt, self.net['id']).json['port']
func1 = self.pf_plugin.create_floatingip_port_forwarding
func2 = self._update_floatingip
funcs = [func1, func2]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.fip['id'], {'port_id': newport['id']})]
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
funcs.reverse()
args_list.reverse()
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
def test_concurrent_create_port_forwarding_update_port(self):
new_ip = str(
netaddr.IPAddress(self.port['fixed_ips'][0]['ip_address']) + 2)
funcs = [self.pf_plugin.create_floatingip_port_forwarding,
self._update_port]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.port['id'], {
'fixed_ips': [{'subnet_id': self.subnet['id'],
'ip_address': new_ip}]})]
self._simulate_concurrent_requests_process_and_raise(funcs, args_list)
self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id']))
def test_concurrent_create_port_forwarding_delete_port(self):
funcs = [self.pf_plugin.create_floatingip_port_forwarding,
self._delete_port]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.port['id'],)]
self._simulate_concurrent_requests_process_and_raise(funcs, args_list)
self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id']))
|
|
"""
Each function tests specific Config class method.
"""
import sys
import pytest
sys.path.append('../..')
from batchflow import Config
def test_dict_init():
"""
Tests Config.__init__() using input of dictionary type.
For inner structure check Config.flatten() is used.
"""
#Slashed-structured dictionary initialization
init_dict = {'a' : 1, 'b/c' : 2, 'b/d' : 3}
exp_flat = {'a': 1, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Nested-structured dictionary initialization
init_dict = {'a' : {}, 'b' : {'c' : 2, 'd' : 3}}
exp_flat = {'a': {}, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Mixed-structured dictionary initialization
init_dict = {'a' : None, 'b/c' : 2, 'b' : {'d' : 3}}
exp_flat = {'a': None, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Config-structured dictionary initialization
init_dict = {'a' : Config({'b' : 2})}
exp_flat = {'a/b': 2}
config = Config(init_dict)
assert config.flatten() == exp_flat
def test_dict_init_bad():
"""
Tests Config.__init__() using BAD input of dictionary type.
"""
#Int-keyed dictionary initialization
init_dict = {0 : 1}
with pytest.raises(TypeError):
Config(init_dict)
#Bool-keyed dictionary initialization
init_dict = {False : True}
with pytest.raises(TypeError):
Config(init_dict)
def test_list_init():
"""
Tests Config.__init__() using input of list type.
For inner structure check Config.flatten() is used.
"""
#Slashed-structured list initialization
init_list = [('a', 1), ('b/c', 2), ('b/d', 3)]
exp_flat = {'a': 1, 'b/c': 2, 'b/d': 3}
config = Config(init_list)
assert config.flatten() == exp_flat
#Nested-structured list initialization
init_list = [('a', {}), ('b', {'c' : 2, 'd' : 3})]
exp_flat = {'a': {}, 'b/c': 2, 'b/d': 3}
config = Config(init_list)
assert config.flatten() == exp_flat
#Mixed-structured list initialization
init_list = [('a', None), ('b/c', 2), ('b', {'d' : 3})]
exp_flat = {'a': None, 'b/c': 2, 'b/d': 3}
config = Config(init_list)
assert config.flatten() == exp_flat
#Config-structured list initialization
init_list = [('a', Config({'b' : 2}))]
exp_flat = {'a/b': 2}
config = Config(init_list)
assert config.flatten() == exp_flat
def test_list_init_bad():
"""
Tests Config.__init__() using BAD input of list type.
"""
#Int-keyed list initialization
init_list = [(0, 1)]
with pytest.raises(TypeError):
Config(init_list)
#Bool-keyed list initialization
init_list = [(False, True)]
with pytest.raises(TypeError):
Config(init_list)
#Bad-shaped list initialization
init_list = [('a', 0, 1)]
with pytest.raises(ValueError):
Config(init_list)
def test_config_init():
"""
Tests Config.__init__() using input of Config type.
For inner structure check Config.flatten() is used.
"""
    #Basically, there is nothing to test here,
    #but since Config can be initialized with its own instance...
init_config = Config({'a': 0})
exp_flat = {'a' : 0}
config = Config(init_config)
assert config.flatten() == exp_flat
def test_pop():
"""
Tests Config.pop(), comparing the return value with expected one.
For inner structure check Config.flatten() is used.
"""
#Pop scalar value by slashed-structured key
config = Config({'a' : 1, 'b/c' : 2, 'b/d' : 3})
pop_key = 'b/c'
exp_ret = 2
exp_flat = {'a' : 1, 'b/d' : 3}
assert config.pop(pop_key) == exp_ret
assert config.flatten() == exp_flat
#Pop dict value by simple key
config = Config({'a' : 1, 'b/c' : 2, 'b/d' : 3})
pop_key = 'b'
exp_ret = {'c' : 2, 'd' : 3}
exp_flat = {'a' : 1}
assert config.pop(pop_key) == exp_ret
assert config.flatten() == exp_flat
def test_get():
"""
Tests Config.get(), comparing the return value with expected one.
For inner structure check Config.flatten() is used.
"""
#Get scalar value by slashed-structured key
config = Config({'a' : {'b' : 1}})
get_key = 'a/b'
exp_ret = 1
exp_flat = {'a/b' : 1}
assert config.get(get_key) == exp_ret
assert config.flatten() == exp_flat
#Get scalar value by slashed-structured key via dotted access
config = Config({'a' : {'b' : 1}})
get_key = 'a/b'
exp_ret = 1
exp_flat = {'a/b' : 1}
for simple_key in get_key.split('/'):
config = getattr(config, simple_key)
assert config == exp_ret
#Get dict value by simple key
config = Config({'a' : {'b' : 1}})
get_key = 'a'
exp_ret = {'b' : 1}
exp_flat = {'a/b' : 1}
assert config.get(get_key) == exp_ret
assert getattr(config, get_key).flatten() == exp_ret # check dotted access
assert config.flatten() == exp_flat
def test_put():
"""
Tests Config.put(), placing value by key in Config instance.
For inner structure check Config.flatten() is used.
"""
#Put scalar value by simple key
config = Config({'a' : 1})
put_key = 'b'
put_val = 2
exp_flat = {'a' : 1, 'b' : 2}
config.put(put_key, put_val)
assert config.flatten() == exp_flat
#Put scalar value by slashed-structured key
config = Config({'a/b' : 1})
put_key = 'a/c'
put_val = 2
exp_flat = {'a/b' : 1, 'a/c' : 2}
config.put(put_key, put_val)
assert config.flatten() == exp_flat
#Put dict value by simple key
config = Config({'a/b' : 1})
put_key = 'a'
put_val = {'c' : 2}
exp_flat = {'a/b' : 1, 'a/c' : 2}
config.put(put_key, put_val)
assert config.flatten() == exp_flat
def test_flatten():
"""
Tests Config.flatten()
"""
#Flatten none config
config = Config(None)
exp_flat = {}
assert config.flatten() == exp_flat
#Flatten empty config
config = Config({})
exp_flat = {}
assert config.flatten() == exp_flat
#Flatten simple config
config = Config({'a' : 1})
exp_flat = {'a' : 1}
assert config.flatten() == exp_flat
#Flatten nested config
config = Config({'a' : {'b' : {}, 'c' : {'d' : None}}})
exp_flat = {'a/b' : {}, 'a/c/d' : None}
assert config.flatten() == exp_flat
def test_add():
"""
Tests Config.add(), adding up two Config instances.
For result inner structure check Config.flatten() is used.
"""
#Simple summands with non-empty intersection
augend = Config({'a' : 1, 'b' : 2})
addend = Config({'b' : 3, 'c' : 4})
exp_flat = {'a' : 1, 'b' : 3, 'c' : 4}
result = augend + addend
assert result.flatten() == exp_flat
#Nested summands with non-empty intersection
augend = Config({'a/b' : 1, 'a/c' : 2})
addend = Config({'a/c/d' : 3, 'e/f' : 4})
exp_flat = {'a/b' : 1, 'a/c/d' : 3, 'e/f' : 4}
result = augend + addend
assert result.flatten() == exp_flat
#Nested summands with non-standard values such as None and empty dict
augend = Config({'a/b' : 1, 'b/d' : {}})
addend = Config({'a' : {}, 'b/d' : None})
exp_flat = {'a/b': 1, 'b/d': None}
result = augend + addend
assert result.flatten() == exp_flat
def test_iadd_items():
"""
    Tests item assignment and Config.__iadd__() on config items.
For inner structure check Config.flatten() is used.
"""
config_old = Config({'a/b': 1, 'a/c': 2})
config_old['a'] = 0
exp_flat = {'a': 0}
assert config_old.flatten() == exp_flat
config_old = Config({'a/b': 1, 'a/c': 2})
config_old['a'] = dict(b=0, d=3)
exp_flat = {'a/b': 0, 'a/d': 3}
assert config_old.flatten() == exp_flat
config_old = Config({'a/b': 1, 'a/c': 2})
config_old['a'] += dict(b=0, d=3)
exp_flat = {'a/b': 0, 'a/c': 2, 'a/d': 3}
assert config_old.flatten() == exp_flat
def test_items():
"""
Tests Config.items()
For dict_items conversion cast to list is used.
"""
#Simple
config = Config({'a' : 1})
exp_full = [('a', 1)]
exp_flat = [('a', 1)]
assert list(config.items(flatten=False)) == exp_full
assert list(config.items(flatten=True)) == exp_flat
#Nested
config = Config({'a' : {'b' : 1, 'c' : 2}})
exp_full = [('a', {'b' : 1, 'c' : 2})]
exp_flat = [('a/b', 1), ('a/c', 2)]
    assert sorted(config.items(flatten=False)) == sorted(exp_full)
    assert sorted(config.items(flatten=True)) == sorted(exp_flat)
#Deeply nested
config = Config({'a' : {'b' : 1, 'c' : {'d' : 2}}})
exp_full = [('a', {'b' : 1, 'c' : {'d' : 2}})]
exp_flat = [('a/b', 1), ('a/c/d', 2)]
    assert sorted(config.items(flatten=False)) == sorted(exp_full)
    assert sorted(config.items(flatten=True)) == sorted(exp_flat)
def test_keys():
"""
Tests Config.keys()
For dict_keys conversion cast to list is used.
"""
#Simple
config = Config({'a' : 1})
exp_full = ['a']
exp_flat = ['a']
    assert sorted(config.keys(flatten=False)) == sorted(exp_full)
    assert sorted(config.keys(flatten=True)) == sorted(exp_flat)
#Nested
config = Config({'a' : {'b' : 1, 'c' : 2}})
exp_full = ['a']
exp_flat = ['a/b', 'a/c']
    assert sorted(config.keys(flatten=False)) == sorted(exp_full)
    assert sorted(config.keys(flatten=True)) == sorted(exp_flat)
#Deeply nested
config = Config({'a' : {'b' : 1, 'c' : {'d' : 2}}})
exp_full = ['a']
exp_flat = ['a/b', 'a/c/d']
    assert sorted(config.keys(flatten=False)) == sorted(exp_full)
    assert sorted(config.keys(flatten=True)) == sorted(exp_flat)
def test_values():
"""
Tests Config.values()
For dict_values conversion cast to list is used.
"""
#Simple
config = Config({'a' : 1})
exp_full = [1]
exp_flat = [1]
    assert sorted(config.values(flatten=False)) == sorted(exp_full)
    assert sorted(config.values(flatten=True)) == sorted(exp_flat)
#Nested
config = Config({'a' : {'b' : 1, 'c' : 2}})
exp_full = [{'b' : 1, 'c' : 2}]
exp_flat = [1, 2]
    assert sorted(config.values(flatten=False)) == sorted(exp_full)
    assert sorted(config.values(flatten=True)) == sorted(exp_flat)
#Deeply nested
config = Config({'a' : {'b' : 1, 'c' : {'d' : 2}}})
exp_full = [{'b' : 1, 'c' : {'d' : 2}}]
exp_flat = [1, 2]
    assert sorted(config.values(flatten=False)) == sorted(exp_full)
    assert sorted(config.values(flatten=True)) == sorted(exp_flat)
def test_update():
"""
Tests Config.update()
For inner structure check Config.flatten() is used.
"""
#Value replacement by slashed-structured key
config_old = Config({'a/b' : 1, 'a/c' : 2})
config_new = Config({'a/c' : 3, 'a/d' : 4})
exp_flat = {'a/b' : 1, 'a/c' : 3, 'a/d' : 4}
config_old.update(config_new)
assert config_old.flatten() == exp_flat
#Value insertion by slashed-structured key
config_old = Config({'a/b' : 1})
config_new = Config({'a/c/d' : 2})
exp_flat = {'a/b' : 1, 'a/c/d' : 2}
config_old.update(config_new)
assert config_old.flatten() == exp_flat
#Update with Config instance including None and empty dict values
config_old = Config({'a' : {}, 'b' : None})
config_new = Config({'a' : None, 'b' : {}})
config_old.update(config_new)
exp_flat = {'a' : None, 'b' : {}}
assert config_old.flatten() == exp_flat
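#An additional minimal sketch (not part of the original suite): flatten()
#should be idempotent for a Config built from an already-flat dict, assuming
#the Config semantics demonstrated above.
def test_flatten_idempotent():
    """
    Tests that flattening a Config built from a flat dict returns that dict.
    """
    config = Config({'a/b' : 1, 'a/c' : 2})
    exp_flat = {'a/b' : 1, 'a/c' : 2}
    assert config.flatten() == exp_flat
    assert Config(config.flatten()).flatten() == exp_flat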
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema and tranform definition for the Movielens dataset."""
import hashlib
import numpy as np
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform.beam import impl
from tensorflow_transform.tf_metadata import dataset_schema
# Columns of the input file movies.csv.
MOVIE_COLUMNS = ['movie_id', 'title', 'genres']
# Columns of the input file ratings.csv.
RATING_COLUMNS = ['user_id', 'movie_id', 'rating', 'timestamp']
# Names of feature columns associated with the `Query`. These are the features
# typically included in a recommendation request. In the case of movielens,
# the query contains just data about the user. In other applications, there
# could be additional dimensions such as context (e.g. device or time of day).
# The user id.
QUERY_USER_ID = 'query_user_id'
# The ids of movies rated by the user.
QUERY_RATED_MOVIE_IDS = 'query_rated_movie_ids'
# The scores on the rated movies given by the user.
QUERY_RATED_MOVIE_SCORES = 'query_rated_movie_scores'
# The set of genres of the rated movies.
QUERY_RATED_GENRE_IDS = 'query_rated_genre_ids'
# The number of times the user rated each genre.
QUERY_RATED_GENRE_FREQS = 'query_rated_genre_freqs'
# The average rating on each genre.
QUERY_RATED_GENRE_AVG_SCORES = 'query_rated_genre_avg_scores'
# Names of feature columns associated with the `Candidate`. These features
# are used to match a candidate against the query.
# The id of the candidate movie.
CANDIDATE_MOVIE_ID = 'cand_movie_id'
# The set of genres of the candidate movie.
CANDIDATE_GENRE_IDS = 'cand_genre_ids'
# Movie ids used to rank against the target movie. These ranking candidate movie
# ids are used in evaluation only.
RANKING_CANDIDATE_MOVIE_IDS = 'ranking_candidate_movie_ids'
# Names of feature columns defining the label(s), which indicates how well
# a candidate matches a query. There could be multiple labels in each instance.
# Eg. We could have one label for the rating score and another label for the
# number of times a user has watched the movie.
LABEL_RATING_SCORE = 'label_rating_score'
# Each training example consists of a query and a candidate with their
# respective features, as well as one or more labels.
EXAMPLE_COLUMNS = [
QUERY_USER_ID,
QUERY_RATED_MOVIE_IDS,
QUERY_RATED_MOVIE_SCORES,
QUERY_RATED_GENRE_IDS,
QUERY_RATED_GENRE_FREQS,
QUERY_RATED_GENRE_AVG_SCORES,
CANDIDATE_MOVIE_ID,
CANDIDATE_GENRE_IDS,
RANKING_CANDIDATE_MOVIE_IDS,
LABEL_RATING_SCORE,
]
def _hash_fingerprint(user_id, partition_random_seed):
"""Convert user_id to an MD5 hashed integer.
The hashed value is based on the input of user_id + partition_random_seed so
that the output is deterministic for a fixed partition_random_seed and people
still have the option to partition in a different way by using a different
seed.
Args:
user_id: an integer user id.
partition_random_seed: partitioning seed so we can preserve consistent
partitions across runs.
Returns:
An MD5 hashed value encoded as integer.
"""
m = hashlib.md5(str(user_id + partition_random_seed))
return int(m.hexdigest(), 16)
def partition_fn(user_id, partition_random_seed, percent_eval):
"""Partition data to train and eval set.
To generate an unskewed partition that is deterministic, we use
hash_fingerprint(user_id, partition_random_seed) % 100.
Args:
user_id: an integer user id.
partition_random_seed: partitioning seed so we can preserve consistent
partitions across runs.
percent_eval: percentage of the data to use as the eval set.
Returns:
Either 0 or 1.
"""
hash_value = _hash_fingerprint(user_id, partition_random_seed) % 100
return 0 if hash_value >= percent_eval else 1
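# Illustrative example (not part of the original module): the partition is
# deterministic for a fixed seed, e.g.
#
#   partition_fn(user_id=42, partition_random_seed=0, percent_eval=20)
#
# returns 1 (eval) when the MD5 hash mod 100 falls below percent_eval and
# 0 (train) otherwise, and always yields the same value for a given
# (user_id, seed) pair.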
def create_random_movie_samples(all_movies, movies_to_exclude,
num_movies_to_sample, random_seed):
"""Create random samples of movies excluding "movies_to_exclude" list.
Args:
all_movies: a list of integer movie ids.
movies_to_exclude: a list of integer movie ids to exclude.
num_movies_to_sample: number of movie ids to sample.
random_seed: random seed for numpy random number generator.
Returns:
A list of integer movie ids.
"""
candidate_movie_ids = set(all_movies).difference(movies_to_exclude)
# Sort the set candidate_movie_ids first to make results reproducible for
# expectation test.
return np.random.RandomState(random_seed).choice(
sorted(candidate_movie_ids), num_movies_to_sample,
replace=False).tolist()
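# For example (illustrative values), sampling two ranking candidates from a
# small catalog while excluding the movies a user already rated:
#
#   create_random_movie_samples(all_movies=[1, 2, 3, 4, 5],
#                               movies_to_exclude=[2, 4],
#                               num_movies_to_sample=2,
#                               random_seed=123)
#
# returns two ids drawn from [1, 3, 5], reproducible for a fixed seed.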
def _make_schema(columns, types, default_values):
"""Input schema definition.
Args:
columns: column names for fields appearing in input.
types: column types for fields appearing in input.
default_values: default values for fields appearing in input.
Returns:
feature_set dictionary of string to *Feature.
"""
result = {}
assert len(columns) == len(types)
assert len(columns) == len(default_values)
for c, t, v in zip(columns, types, default_values):
if isinstance(t, list):
result[c] = tf.VarLenFeature(dtype=t[0])
else:
result[c] = tf.FixedLenFeature(shape=[], dtype=t, default_value=v)
return dataset_schema.from_feature_spec(result)
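# For instance, make_movies_schema() below corresponds to this feature spec
# (a sketch of the intermediate dict built by _make_schema):
#
#   {'movie_id': tf.FixedLenFeature(shape=[], dtype=tf.string,
#                                   default_value=''),
#    'title': tf.FixedLenFeature(shape=[], dtype=tf.string,
#                                default_value=''),
#    'genres': tf.VarLenFeature(dtype=tf.string)}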
def make_ratings_schema():
return _make_schema(RATING_COLUMNS,
[tf.int64, tf.string, tf.float32, tf.int64],
[-1, '', 0.0, -1])
def make_movies_schema():
return _make_schema(MOVIE_COLUMNS,
[tf.string, tf.string, [tf.string]],
['', '', None])
def make_examples_schema():
return _make_schema(EXAMPLE_COLUMNS, [
tf.int64, [tf.string], [tf.float32], [tf.string], [tf.float32],
[tf.float32], [tf.string], [tf.string], [tf.string], tf.float32
], [-1, None, None, None, None, None, -1, None, None, 0.0])
def make_prediction_schema():
prediction_columns = [column for column in EXAMPLE_COLUMNS
if column != LABEL_RATING_SCORE]
return _make_schema(prediction_columns, [
tf.int64, [tf.string], [tf.float32], [tf.string], [tf.float32],
[tf.float32], [tf.string], [tf.string], [tf.string]
], [-1, None, None, None, None, None, -1, None, None])
def make_preprocessing_fn():
"""Creates a preprocessing function for movielens.
Returns:
A preprocessing function.
"""
def preprocessing_fn(inputs):
"""User defined preprocessing function for movielens columns.
Args:
inputs: a `dict` that maps EXAMPLE_COLUMNS to the corresponding
Tensor/SparseTensor.
Returns:
A `dict` that maps EXAMPLE_COLUMNS to the transformed Tensor/SparseTensor.
"""
result = {column_name: inputs[column_name]
for column_name in EXAMPLE_COLUMNS}
rating_max = tft.max(inputs[QUERY_RATED_MOVIE_SCORES].values)
rating_min = tft.min(inputs[QUERY_RATED_MOVIE_SCORES].values)
def scale_sparse_values(x, min_value, max_value):
"""0-1 normalization of the values of a SparseTensor.
Args:
x: a input sparse tensor.
min_value: minimum value for x.values.
max_value: maximum value for x.values.
Returns:
A sparse tensor y such as that y.values is the result of
0-1 normalization of x.values.
"""
scaled_values = (x.values - min_value) / (max_value - min_value)
return tf.SparseTensor(indices=x.indices, values=scaled_values,
dense_shape=x.dense_shape)
result[QUERY_RATED_MOVIE_SCORES] = scale_sparse_values(
inputs[QUERY_RATED_MOVIE_SCORES],
rating_min, rating_max)
genre_vocab = tft.uniques(tf.concat(
[inputs[QUERY_RATED_GENRE_IDS].values,
inputs[CANDIDATE_GENRE_IDS].values], 0))
movie_vocab = tft.uniques(tf.concat(
[inputs[QUERY_RATED_MOVIE_IDS].values,
inputs[CANDIDATE_MOVIE_ID].values,
inputs[RANKING_CANDIDATE_MOVIE_IDS].values], 0))
def map_to_int(x, vocabulary_or_file):
"""Maps string tensor into indexes using vocab.
Args:
x : a Tensor/SparseTensor of string.
vocabulary_or_file: a Tensor/SparseTensor containing unique string
values within x or a single value for the file where the vocabulary
is stored.
Returns:
A Tensor/SparseTensor of indexes (int) of the same shape as x.
"""
# TODO(b/62489180): Remove this workaround once TFT 0.2.0 is released.
if hasattr(impl,
'_asset_files_supported') and impl._asset_files_supported(): # pylint: disable=protected-access
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_or_file, num_oov_buckets=1)
else:
table = tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=vocabulary_or_file, num_oov_buckets=1)
return table.lookup(x)
result[QUERY_RATED_GENRE_IDS] = tft.apply_function(
map_to_int, inputs[QUERY_RATED_GENRE_IDS], genre_vocab)
result[CANDIDATE_GENRE_IDS] = tft.apply_function(
map_to_int, inputs[CANDIDATE_GENRE_IDS], genre_vocab)
result[QUERY_RATED_MOVIE_IDS] = tft.apply_function(
map_to_int, inputs[QUERY_RATED_MOVIE_IDS], movie_vocab)
result[CANDIDATE_MOVIE_ID] = tft.apply_function(
map_to_int, inputs[CANDIDATE_MOVIE_ID], movie_vocab)
result[RANKING_CANDIDATE_MOVIE_IDS] = tft.apply_function(
map_to_int, inputs[RANKING_CANDIDATE_MOVIE_IDS], movie_vocab)
return result
return preprocessing_fn
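# Typical usage (sketch only; assumes the tf.Transform Beam API of this era,
# with dataset_metadata imported from tensorflow_transform.tf_metadata):
#
#   raw_metadata = dataset_metadata.DatasetMetadata(make_examples_schema())
#   (transformed, transform_fn) = (
#       (raw_data, raw_metadata)
#       | impl.AnalyzeAndTransformDataset(make_preprocessing_fn()))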
|
|
"""
Ax_Metrics - Logic for generating TimeFrame steps for a query.
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
from datetime import datetime
from axonchisel.metrics.foundation.ax.obj import AxObj
from . import dtmath
from .timerange import TimeRange
from .framespec import FrameSpec
from .ghost import Ghost
# ----------------------------------------------------------------------------
# What we consider the first day of the week, as offset from Sunday
WEEK_FIRST_DAY = 0
# Internal: map from FrameSpec UNIT to dtmath begin_* function
_ROUND_FUNC_BY_UNIT = {
'SECOND': dtmath.begin_second,
'MINUTE': dtmath.begin_minute,
'MINUTE5': dtmath.begin_minute5,
'MINUTE10': dtmath.begin_minute10,
'MINUTE15': dtmath.begin_minute15,
'MINUTE30': dtmath.begin_minute30,
'HOUR': dtmath.begin_hour,
'DAY': dtmath.begin_day,
'WEEK': dtmath.begin_week,
'MONTH': dtmath.begin_month,
'QUARTER': dtmath.begin_quarter,
'YEAR': dtmath.begin_year,
}
# Internal: map from FrameSpec UNIT to dtmath add_* function
_ADD_FUNC_BY_UNIT = {
'SECOND': dtmath.add_seconds,
'MINUTE': dtmath.add_minutes,
'MINUTE5': dtmath.add_minute5s,
'MINUTE10': dtmath.add_minute10s,
'MINUTE15': dtmath.add_minute15s,
'MINUTE30': dtmath.add_minute30s,
'HOUR': dtmath.add_hours,
'DAY': dtmath.add_days,
'WEEK': dtmath.add_weeks,
'MONTH': dtmath.add_months,
'QUARTER': dtmath.add_quarters,
'YEAR': dtmath.add_years,
}
# ----------------------------------------------------------------------------
class Stepper(AxObj):
"""
    Given a FrameSpec, provides the TimeRange measurement steps it indicates.
Each TimeRange step specifies the time parameters for a data point
in a report.
"""
def __init__(self, tmfrspec, ghost=None):
"""
Initialize around FrameSpec and optional Ghost.
"""
# Set valid default state:
self._tmfrspec = None
self._ghost = None
# Apply initial values from args:
self.tmfrspec = tmfrspec
self.ghost = ghost
#
# Public Methods
#
def analyze(self):
"""
Analyze parameters, bind data to self, and return dict.
Invoked by steps(), but can be called manually too.
Returned dict will have keys:
- tmrange : TimeRange with begin/end time and anchor
"""
self._bind_dt_funcs()
self._bind_dtinc_start()
self._bind_dtexc_end()
tmrange = TimeRange(
inc_begin=self._dtinc_start,
exc_end=self._dtexc_end,
anchor=self._dtwithin,
)
return {
'tmrange': tmrange,
}
def steps(self):
"""
Yield a series of TimeRange objects representing FrameSpec steps.
Each TimeRange is the period over which the measurement point
should be queried, with its anchor representing the time label.
"""
tmfrspec = self.tmfrspec
# Analyze parameters and bind data to self:
self.analyze()
# Iterate to build and yield each step:
dtidx = self._dtinc_start
while dtidx < self._dtexc_end:
# Calc anchor:
dtidx_anchor = dtidx
# Calc step end:
dtidx_exc_end = self._fn_addgran(dtidx, 1)
if not tmfrspec.allow_overflow_end:
if dtidx_exc_end > self._dtexc_end:
dtidx_exc_end = self._dtexc_end
# Calc step begin, applying optional smoothing, accumulation:
dtidx_inc_begin = dtidx
if tmfrspec.accumulate:
dtidx_inc_begin = self._dtinc_start
else:
if tmfrspec.is_smoothed():
dtidx_inc_begin = self._fn_addsmooth(
dtidx_exc_end, -tmfrspec.smooth_val)
if not tmfrspec.allow_overflow_begin:
if dtidx_inc_begin < self._dtinc_start:
dtidx_inc_begin = self._dtinc_start
# Construct and yield TimeRange:
tmrange = TimeRange(
anchor = dtidx_anchor,
inc_begin = dtidx_inc_begin,
exc_end = dtidx_exc_end)
yield tmrange
# Advance to next step:
dtidx = self._fn_addgran(dtidx, 1)
#
# Public Properties
#
@property
def tmfrspec(self):
"""FrameSpec defining frame to step through."""
return self._tmfrspec
@tmfrspec.setter
def tmfrspec(self, val):
self._assert_type("tmfrspec", val, FrameSpec)
self._tmfrspec = val
@property
def ghost(self):
"""Optional Ghost relative alias."""
return self._ghost
@ghost.setter
def ghost(self, val):
if val is not None:
self._assert_type("ghost", val, Ghost)
self._ghost = val
def is_ghost(self, gtype):
"""Return True/False indicating if has Ghost of specific type."""
return self.ghost and (self.ghost.gtype == gtype)
#
# Internal Methods
#
def _bind_dt_funcs(self):
"""Bind to self: dt funcs based on FrameSpec units"""
tmfrspec = self.tmfrspec
# Prep, inspecting FrameSpec and
# binding math functions based on specified units:
fn_round = _ROUND_FUNC_BY_UNIT.get(tmfrspec.range_unit)
fn_add = _ADD_FUNC_BY_UNIT.get(tmfrspec.range_unit)
fn_addgran = _ADD_FUNC_BY_UNIT.get(tmfrspec.gran_unit)
fn_addsmooth = _ADD_FUNC_BY_UNIT.get(tmfrspec.smooth_unit)
# Any of these exceptions are implementation errors in this library,
# as the FrameSpec should not have allowed unsupported data in
# the first place:
def _impl_error(msg):
return ValueError("Implementation Error! "+msg)
if not (fn_round and fn_add):
raise _impl_error("FrameSpec range unit '{0}' missing add/round fn"
.format(tmfrspec.range_unit))
if not (fn_addgran):
raise _impl_error("FrameSpec gran unit '{0}' missing add/round fn"
.format(tmfrspec.gran_unit))
if tmfrspec.is_smoothed() and not (fn_addsmooth):
raise _impl_error("FrameSpec smooth unit '{0}' missing smooth fn"
.format(tmfrspec.smooth_unit))
# Bind:
self._fn_round = fn_round
self._fn_add = fn_add
self._fn_addgran = fn_addgran
self._fn_addsmooth = fn_addsmooth
def _bind_dtinc_start(self):
"""Bind to self: dtinc_start inclusive begin time and dtwithin."""
tmfrspec = self.tmfrspec
# Choose raw unrounded context time, either now or explicit reframed:
dtwithin = tmfrspec.reframe_dt
if dtwithin is None:
dtwithin = datetime.now()
# Round down dtwithin to range_unit:
if tmfrspec.range_unit == 'WEEK':
dtwithin = self._fn_round(dtwithin, day0_sunday_ofs=WEEK_FIRST_DAY)
else:
dtwithin = self._fn_round(dtwithin)
# Rewind range_val range_units to find beginning of range we're in:
dtinc_start = dtwithin
if tmfrspec.range_val > 1:
dtinc_start = self._fn_add(dtinc_start, -(tmfrspec.range_val - 1))
# Handle LASTWHOLE mode to rewind to previous completed range:
if tmfrspec.mode == 'LASTWHOLE':
dtinc_start = self._fn_add(dtinc_start, -1)
# Handle Ghost:
if self.is_ghost('PREV_PERIOD1'):
dtinc_start = self._fn_add(dtinc_start, -1 * tmfrspec.range_val)
elif self.is_ghost('PREV_PERIOD2'):
dtinc_start = self._fn_add(dtinc_start, -2 * tmfrspec.range_val)
elif self.is_ghost('PREV_YEAR1'):
dtinc_start = dtmath.add_years(dtinc_start, -1)
elif self.is_ghost('PREV_YEAR2'):
dtinc_start = dtmath.add_years(dtinc_start, -2)
# Bind:
self._dtinc_start = dtinc_start
self._dtwithin = dtwithin
def _bind_dtexc_end(self):
"""Bind to self: dtexc_end exclusive end time."""
tmfrspec = self.tmfrspec
# Calc dtend (exclusive) based on dtinc_start:
dtexc_end = self._fn_add(self._dtinc_start, tmfrspec.range_val)
# Bind:
self._dtexc_end = dtexc_end
    def __unicode__(self):
        return (u"Stepper({self.tmfrspec} ghost {self.ghost})"
            ).format(self=self)
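# Illustrative usage (sketch; FrameSpec construction is defined in
# framespec.py, and 'query_measurements' is a hypothetical consumer):
#
#   stepper = Stepper(tmfrspec=my_framespec)
#   for tmrange in stepper.steps():
#       # each step carries inc_begin/exc_end bounds plus an anchor
#       # that labels the data point
#       query_measurements(tmrange)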
|
|
"""Device that implements a ball save."""
from typing import Optional
from mpf.core.delays import DelayManager
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.mode import Mode
from mpf.core.mode_device import ModeDevice
from mpf.core.system_wide_device import SystemWideDevice
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
from mpf.devices.playfield import Playfield # pylint: disable-msg=cyclic-import,unused-import
@DeviceMonitor("saves_remaining", "enabled", "timer_started", "state")
class BallSave(SystemWideDevice, ModeDevice):
"""Ball save device which will give back the ball within a certain time."""
config_section = 'ball_saves'
collection = 'ball_saves'
class_label = 'ball_save'
__slots__ = ["ball_locks", "active_time", "unlimited_saves", "source_playfield", "delay", "enabled",
"timer_started", "saves_remaining", "early_saved", "state", "_scheduled_balls"]
def __init__(self, machine: "MachineController", name: str) -> None:
"""Initialise ball save."""
self.ball_locks = None
self.unlimited_saves = None # type: Optional[bool]
self.source_playfield = None # type: Optional[Playfield]
super().__init__(machine, name)
self.delay = DelayManager(machine)
self.enabled = False
self.timer_started = False
self.saves_remaining = 0
self.early_saved = 0
self.state = 'disabled'
self._scheduled_balls = 0
self.active_time = 0
async def _initialize(self) -> None:
await super()._initialize()
self.ball_locks = self.config['ball_locks']
self.unlimited_saves = self.config['balls_to_save'] == -1
self.source_playfield = self.config['source_playfield']
@property
def can_exist_outside_of_game(self) -> bool:
"""Return true if this device can exist outside of a game."""
return True
def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None) -> dict:
"""Make sure timer_start_events are not in enable_events."""
config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
for event in config['timer_start_events']:
if event in config['enable_events']:
raise AssertionError("{}: event {} in timer_start_events will not work because it is also in "
"enable_events. Omit it!".format(event, str(self)))
if config['delayed_eject_events'] and config['eject_delay']:
raise AssertionError("cannot use delayed_eject_events and eject_delay at the same time.")
return config
def enable(self) -> None:
"""Enable ball save."""
super().enable()
if self.enabled:
return
self.saves_remaining = self.config['balls_to_save']
self.early_saved = 0
self.enabled = True
self.state = 'enabled'
self.active_time = self.config['active_time'].evaluate([])
self.debug_log("Enabling. Auto launch: {}, Balls to save: {}, Active time: {}s".format(
self.config['auto_launch'],
self.config['balls_to_save'],
self.active_time))
# Enable shoot again
self.machine.events.add_handler('ball_drain',
self._ball_drain_while_active,
priority=1000)
if (self.active_time > 0 and
not self.config['timer_start_events']):
self.timer_start()
self.machine.events.post('ball_save_{}_enabled'.format(self.name))
'''event: ball_save_(name)_enabled
desc: The ball save called (name) has just been enabled.
'''
@event_handler(1)
def event_disable(self, **kwargs):
"""Event handler for disable event."""
del kwargs
self.disable()
def disable(self) -> None:
"""Disable ball save."""
if not self.enabled:
return
self.enabled = False
self.state = 'disabled'
self.timer_started = False
self.debug_log("Disabling...")
self.machine.events.remove_handler(self._ball_drain_while_active)
self.delay.remove('disable')
self.delay.remove('hurry_up')
self.delay.remove('grace_period')
self.machine.events.post('ball_save_{}_disabled'.format(self.name))
'''event: ball_save_(name)_disabled
desc: The ball save called (name) has just been disabled.
'''
@event_handler(9)
def event_timer_start(self, **kwargs):
"""Event handler for timer start event."""
del kwargs
self.timer_start()
def timer_start(self) -> None:
"""Start the timer.
        This is usually called once the ball has been ejected, while the ball save may have been enabled earlier.
"""
if self.timer_started or not self.enabled:
return
self.timer_started = True
self.machine.events.post('ball_save_{}_timer_start'.format(self.name))
'''event: ball_save_(name)_timer_start
        desc: The ball save called (name) has just started its countdown timer.
'''
if self.active_time > 0:
self.debug_log('Starting ball save timer: %ss',
self.active_time)
active_time_ms = self.active_time * 1000
self.delay.add(name='disable',
ms=(active_time_ms +
self.config['grace_period']),
callback=self.disable)
self.delay.add(name='grace_period',
ms=active_time_ms,
callback=self._grace_period)
self.delay.add(name='hurry_up',
ms=(active_time_ms -
self.config['hurry_up_time']),
callback=self._hurry_up)
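        # Example timeline (illustrative values): with active_time=10s,
        # hurry_up_time=2000ms and grace_period=1000ms, hurry_up fires at
        # 8s, the grace period starts at 10s, and the save disables at 11s.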
def _hurry_up(self) -> None:
self.debug_log("Starting Hurry Up")
self.state = 'hurry_up'
self.machine.events.post('ball_save_{}_hurry_up'.format(self.name))
'''event: ball_save_(name)_hurry_up
desc: The ball save called (name) has just entered its hurry up mode.
'''
def _grace_period(self) -> None:
self.debug_log("Starting Grace Period")
self.state = 'grace_period'
self.machine.events.post('ball_save_{}_grace_period'.format(self.name))
'''event: ball_save_(name)_grace_period
desc: The ball save called (name) has just entered its grace period
time.
'''
def _get_number_of_balls_to_save(self, available_balls: int) -> int:
if self.machine.game and self.machine.game.balls_in_play > 0:
if self.config['only_last_ball'] and self.machine.game.balls_in_play > 1:
self.debug_log("Will only save last ball but %s are in play.", self.machine.game.balls_in_play)
return 0
else:
self.debug_log("Received request to save ball, but no balls are in"
" play. Discarding request.")
return 0
balls_to_save = available_balls
if self.config['only_last_ball'] and balls_to_save > 1:
balls_to_save = 1
if balls_to_save > self.machine.game.balls_in_play:
balls_to_save = self.machine.game.balls_in_play
if balls_to_save > self.saves_remaining and not self.unlimited_saves:
balls_to_save = self.saves_remaining
return balls_to_save
def _reduce_remaining_saves_and_disable_if_zero(self, balls_to_save: int) -> None:
if not self.unlimited_saves:
self.saves_remaining -= balls_to_save
self.debug_log("Saves remaining: %s", self.saves_remaining)
else:
self.debug_log("Unlimited saves remaining")
if self.saves_remaining <= 0 and not self.unlimited_saves:
self.debug_log("Disabling since there are no saves remaining")
self.disable()
def _ball_drain_while_active(self, balls: int, **kwargs) -> Optional[dict]:
del kwargs
if balls <= 0:
return {}
balls_to_save = self._get_number_of_balls_to_save(balls)
self.debug_log("Ball(s) drained while active. Requesting new one(s). "
"Auto launch: %s", self.config['auto_launch'])
self.machine.events.post('ball_save_{}_saving_ball'.format(self.name),
balls=balls_to_save, early_save=False)
'''event: ball_save_(name)_saving_ball
desc: The ball save called (name) has just saved one (or more) balls.
args:
balls: The number of balls this ball saver is saving.
early_save: True if this is an early ball save.
'''
self._schedule_balls(balls_to_save)
self._reduce_remaining_saves_and_disable_if_zero(balls_to_save)
return {'balls': balls - balls_to_save}
@event_handler(8)
def event_early_ball_save(self, **kwargs):
"""Event handler for early_ball_save event."""
del kwargs
self.early_ball_save()
def early_ball_save(self) -> None:
"""Perform early ball save if enabled."""
if not self.enabled:
return
if not self._get_number_of_balls_to_save(1):
return
if self.early_saved > 0:
self.debug_log("Already performed an early ball save. Ball needs to drain first.")
return
self.machine.events.post('ball_save_{}_saving_ball'.format(self.name),
balls=1, early_save=True)
        # same 'ball_save_(name)_saving_ball' event as above; documented in
        # _ball_drain_while_active
self.debug_log("Performing early ball save.")
self.early_saved += 1
self._schedule_balls(1)
self.machine.events.add_handler('ball_drain',
self._early_ball_save_drain_handler,
priority=1001)
self._reduce_remaining_saves_and_disable_if_zero(1)
def _early_ball_save_drain_handler(self, balls: int, **kwargs) -> dict:
del kwargs
if self.early_saved and balls > 0:
balls -= 1
self.early_saved -= 1
self.debug_log("Early saved ball drained.")
self.machine.events.remove_handler(self._early_ball_save_drain_handler)
return {'balls': balls}
return {}
def _schedule_balls(self, balls_to_save: int) -> None:
if self.config['eject_delay']:
            # schedule after a delay to add some drama
self.delay.add(self.config['eject_delay'], self._add_balls, balls_to_save=balls_to_save)
elif self.config['delayed_eject_events']:
# unlimited delay. wait for event
self._scheduled_balls += balls_to_save
else:
# default: no delay. just eject balls right now
self._add_balls(balls_to_save)
@event_handler(4)
def event_delayed_eject(self, **kwargs):
"""Event handler for delayed_eject event."""
del kwargs
self.delayed_eject()
def delayed_eject(self):
"""Trigger eject of all scheduled balls."""
self._add_balls(self._scheduled_balls)
self._scheduled_balls = 0
def _add_balls(self, balls_to_save, **kwargs):
del kwargs
balls_added = 0
        # eject balls from locks (player_controlled is auto_launch inverted:
        # the eject is player controlled only when auto launch is off)
for device in self.ball_locks:
balls_to_release = max(min(device.available_balls, balls_to_save - balls_added), 0)
self.source_playfield.add_ball(balls=balls_to_release, source_device=device,
player_controlled=self.config['auto_launch'] ^ 1)
balls_added += balls_to_release
# request remaining balls
if balls_to_save - balls_added > 0:
self.source_playfield.add_ball(balls=balls_to_save - balls_added,
player_controlled=self.config['auto_launch'] ^ 1)
def device_removed_from_mode(self, mode: Mode) -> None:
"""Disable ball save when mode ends."""
super().device_removed_from_mode(mode)
self.debug_log("Removing...")
self.disable()
if self.config['delayed_eject_events']:
self.debug_log("Triggering delayed eject because mode ended.")
self.delayed_eject()
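# Illustrative machine config (sketch; the option names are the ones this
# device reads above, but the values and event names are hypothetical):
#
#   ball_saves:
#     default:
#       active_time: 10s
#       hurry_up_time: 2s
#       grace_period: 1s
#       balls_to_save: 1          # -1 would mean unlimited saves
#       auto_launch: true
#       only_last_ball: false
#       enable_events: mode_base_started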
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script monitors the website to make sure that if anything breaks
# it is brought back to an operational state.
import os
import glob
import re
import socket
import sys
import time
import urllib2
# START config
webapp = 'gamersmafia'
homeurl = 'gamersmafia.com'
max_mem = 250000 # size in KiB (1024)
base_dir = "/home/httpd/websites/%s/current" % webapp
pids_dir = "%s/tmp/pids/" % base_dir
DEBUG = False
# END config
class NoSaneMongrelFound(Exception):
pass
def get_proc_out():
return os.popen('ps aux | grep mongrel_rails').read()
def clear_invalid_pids():
    # Read the pid files and remove any that reference processes that are no longer running.
global pids_dir
pids_list = glob.glob("%sdispatch.*.pid" % pids_dir)
proc_out = get_proc_out()
for pid_file in pids_list:
#print pid_file
if os.path.exists("%s" % pid_file):
if DEBUG:
print "cleaning pid file"
pid = open("%s" % pid_file).read()
m = re.search('httpd[\W]+%d[\W]+[0-9.]+[\W]+[0-9.]+[\W]+([0-9]+)' % int(pid), proc_out)
if m == None:
os.unlink("%s" % pid_file)
def spin():
clear_invalid_pids()
FNULL = open('/dev/null', 'w')
os.popen('cd /home/httpd/websites/%s/current && ./script/spin' % webapp)
time.sleep(7)
def get_lucky():
    # find the first mongrel that responds to requests
global num_processes
for i in range(num_processes):
if mongrel_is_alive(8000+i):
            # we have the lucky one; now look up its pid
m = re.search('httpd[\W]+([0-9]+)[\W]+[0-9.]+[\W]+[0-9.]+[\W]+([0-9]+).*dispatch.%s.pid' % (8000+i), get_proc_out())
if m == None:
raise NoSaneMongrelFound('Can\'t find lucky one in proc table')
else:
                return m.group(1)  # TODO: test this
raise NoSaneMongrelFound('Can\'t find lucky one in proc table')
def get_running_mongrels():
    # Return a list of (pid, port, mem) tuples for the running mongrels.
d = os.popen('ps aux | grep mongrel_rails').read()
mongrels = []
for l in d.split('\n'):
if l.find('grep') != -1:
continue
m = re.search('httpd[\W]+([0-9]+)[\W]+[0-9.]+[\W]+[0-9.]+[\W]+[0-9]+[\W]+([0-9]+)', l)
if m:
mport = re.search('dispatch.([0-9]+).pid', l)
if mport == None:
if DEBUG:
print "l NO contiene dispatch.pid!"
print l
else:
mongrels.append((m.group(1), mport.group(1), m.group(2)))
#print "mongrel info %s %s %s" % (m.group(1), mport.group(1), m.group(2))
return mongrels
def mongrel_is_alive(port):
req = urllib2.Request('http://127.0.0.1:%i/' % port)
req.add_header('User-Agent', '%s Maintenance Script' % homeurl)
req.add_header('Host', homeurl)
try:
r = urllib2.urlopen(req)
except urllib2.URLError, e:
return False
else:
return True
def check_mongrel(pid, port, mem):
    # Kill the pid unless it meets all of the following conditions:
    # - it responds to requests
    # - it has a pid file
    # - it does not exceed the memory limit
global pids_dir, max_mem
if not mongrel_is_alive(int(port)):
if DEBUG:
print "mongrel is dead"
kill_mongrel(pid)
elif not os.path.exists("%s/dispatch.%s.pid" % (pids_dir, port)):
if DEBUG:
print "no pid file"
kill_mongrel(pid)
elif file("%s/dispatch.%s.pid" % (pids_dir, port)).read() != pid:
if DEBUG:
print "pid file has different pid"
kill_mongrel(pid)
os.unlink("%s/dispatch.%s.pid" % (pids_dir, port))
elif int(mem) > max_mem:
if DEBUG:
print "max mem"
kill_mongrel(pid)
os.unlink("%s/dispatch.%s.pid" % (pids_dir, port))
def kill_mongrel(pid):
global dirty
dirty = True
if DEBUG:
print "killing mongrel %s" % pid
os.popen('kill -9 %s' % pid)
def get_num_processes():
global base_dir
spin_body = open('%s/script/spin' % base_dir).read()
m = re.search('-i ([0-9]+)$', spin_body)
if m == None:
raise Exception('InvalidSpinFile')
return int(m.group(1))
def check_full_stack():
try:
req = urllib2.Request('http://%s/' % homeurl)
r = urllib2.urlopen(req)
body = r.read()
        if not re.search('google-analytics.com', body): # no HTTP error, but a missing urchinTracker also means something went wrong
            print "No urchinTracker found, raising URLError"
            raise urllib2.URLError('urchinTracker not found')
except urllib2.URLError, e:
if DEBUG:
print "Error al comprobar el stack completo Apache + Mongrel: %s" % e
mainloop()
def mainloop():
global num_processes, dirty
dirty = False
socket.setdefaulttimeout(10)
num_processes = get_num_processes()
pid_lucky = None
while pid_lucky == None:
try:
pid_lucky = get_lucky()
except NoSaneMongrelFound:
if DEBUG:
print "No sane mongrel found, retrying.."
spin()
time.sleep(5)
pid_lucky = get_lucky()
# print "lucky pid: %s" % pid_lucky
for pid, port, mem in get_running_mongrels():
if int(pid) == int(pid_lucky):
port_lucky = port
mem_lucky = mem
else:
check_mongrel(pid, port, mem)
if len(get_running_mongrels()) < num_processes:
if DEBUG:
print "%d mongrels are missing in action" % (num_processes - len(get_running_mongrels()))
dirty = True
    if dirty == True: # something is definitely wrong
spin()
dirty = False
check_mongrel(pid_lucky, port_lucky, mem_lucky)
    if dirty == True: # the lucky one was bad after all
spin()
check_full_stack()
if __name__ == '__main__':
mainloop()
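# Typically run periodically from cron so dead or bloated mongrels are
# respawned automatically (illustrative entry; the script path is assumed):
#
#   * * * * * python /home/httpd/websites/gamersmafia/scripts/monitor.py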
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The standard DQN replay memory.
This implementation is an out-of-graph replay memory + in-graph wrapper. It
supports vanilla n-step updates of the form typically found in the literature,
i.e. where rewards are accumulated for n steps and the intermediate trajectory
is not exposed to the agent. This does not allow, for example, performing
off-policy corrections.
"""
import collections
import gzip
import math
import os
import pickle
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import staging as contrib_staging
# Defines a type describing part of the tuple returned by the replay
# memory. Each element of the tuple is a tensor of shape [batch, ...] where
# ... is defined by the 'shape' field of ReplayElement. The tensor type is
# given by the 'type' field. The 'name' field is for convenience and ease of
# debugging.
ReplayElement = (
collections.namedtuple('shape_type', ['name', 'shape', 'type']))
# A prefix that can not collide with variable names for checkpoint files.
STORE_FILENAME_PREFIX = '$store$_'
# This constant determines how many iterations a checkpoint is kept for.
CHECKPOINT_DURATION = 4
def invalid_range(cursor, replay_capacity, stack_size, update_horizon):
"""Returns a array with the indices of cursor-related invalid transitions.
There are update_horizon + stack_size invalid indices:
- The update_horizon indices before the cursor, because we do not have a
valid N-step transition (including the next state).
- The stack_size indices on or immediately after the cursor.
If N = update_horizon, K = stack_size, and the cursor is at c, invalid
indices are:
c - N, c - N + 1, ..., c, c + 1, ..., c + K - 1.
It handles special cases in a circular buffer in the beginning and the end.
Args:
cursor: int, the position of the cursor.
replay_capacity: int, the size of the replay memory.
stack_size: int, the size of the stacks returned by the replay memory.
update_horizon: int, the agent's update horizon.
Returns:
    np.array of size stack_size + update_horizon with the invalid indices.
"""
assert cursor < replay_capacity, 'cursor: %d' % cursor
return np.array([(cursor - update_horizon + i) % replay_capacity
for i in range(stack_size + update_horizon)])
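# Illustrative example (not part of the original module): with
# replay_capacity=10, stack_size=4, update_horizon=3 and the cursor at 5,
#
#   invalid_range(5, 10, 4, 3)  ->  array([2, 3, 4, 5, 6, 7, 8])
#
# i.e. the 3 indices before the cursor plus the 4 on or after it.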
class OutOfGraphReplayBuffer(object):
"""A simple out-of-graph Replay Buffer.
Stores transitions, state, action, reward, next_state, terminal (and any
extra contents specified) in a circular buffer and provides a uniform
transition sampling function.
When the states consist of stacks of observations storing the states is
inefficient. This class writes observations and constructs the stacked states
at sample time.
Attributes:
add_count: int, counter of how many transitions have been added (including
the blank ones at the beginning of an episode).
invalid_range: np.array, an array with the indices of cursor-related invalid
transitions
"""
def __init__(self,
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon=1,
gamma=0.99,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32,
trajectory_value='return',
replay_forgetting='default'):
"""Initializes OutOfGraphReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
      gamma: float, the discount factor.
max_sample_attempts: int, the maximum number of attempts allowed to get a
sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
trajectory_value: str, Metric for evaluating quality of trajectories for
forgetting purposes. One of ['return'].
replay_forgetting: str, What strategy to employ for forgetting old
trajectories. One of ['default', 'elephant'].
Raises:
ValueError: If replay_capacity is too small to hold at least one
transition.
"""
assert isinstance(observation_shape, tuple)
if replay_capacity < update_horizon + stack_size:
raise ValueError('There is not enough capacity to cover '
'update_horizon and stack_size.')
tf.logging.info(
'Creating a %s replay memory with the following parameters:',
self.__class__.__name__)
tf.logging.info('\t observation_shape: %s', str(observation_shape))
tf.logging.info('\t observation_dtype: %s', str(observation_dtype))
tf.logging.info('\t terminal_dtype: %s', str(terminal_dtype))
tf.logging.info('\t stack_size: %d', stack_size)
tf.logging.info('\t replay_capacity: %d', replay_capacity)
tf.logging.info('\t batch_size: %d', batch_size)
tf.logging.info('\t update_horizon: %d', update_horizon)
tf.logging.info('\t gamma: %f', gamma)
tf.logging.info('\t trajectory_value: %s', trajectory_value)
tf.logging.info('\t replay_forgetting: %s', replay_forgetting)
self._action_shape = action_shape
self._action_dtype = action_dtype
self._reward_shape = reward_shape
self._reward_dtype = reward_dtype
self._observation_shape = observation_shape
self._stack_size = stack_size
self._state_shape = self._observation_shape + (self._stack_size,)
self._replay_capacity = replay_capacity
self._batch_size = batch_size
self._update_horizon = update_horizon
self._gamma = gamma
self._observation_dtype = observation_dtype
self._terminal_dtype = terminal_dtype
self._max_sample_attempts = max_sample_attempts
if extra_storage_types:
self._extra_storage_types = extra_storage_types
else:
self._extra_storage_types = []
self._create_storage()
self.add_count = np.array(0)
self.invalid_range = np.zeros((self._stack_size))
# When the horizon is > 1, we compute the sum of discounted rewards as a dot
# product using the precomputed vector <gamma^0, gamma^1, ..., gamma^{n-1}>.
self._cumulative_discount_vector = np.array(
[math.pow(self._gamma, n) for n in range(update_horizon)],
dtype=np.float32)
self._trajectory_value = trajectory_value
self._replay_forgetting = replay_forgetting
assert trajectory_value == 'return'
# The cursor for writing into a sorted replay buffer. When the replay
# buffer is not yet full, it tracks the cursor. However, once the replay
# buffer is full, it will always begin writing at the beginning of the
# buffer - overwriting the least valuable trajectories.
self._sorted_cursor = 0
def _create_storage(self):
"""Creates the numpy arrays used to store transitions."""
self._store = {}
for storage_element in self.get_storage_signature():
array_shape = [self._replay_capacity] + list(storage_element.shape)
self._store[storage_element.name] = np.zeros(
array_shape, dtype=storage_element.type)
self._train_counts = np.empty([self._replay_capacity], dtype=np.int32)
def _get_trajectory_spans(self):
"""Compute the span of non-looped trajectories."""
assert (not self.is_empty()), 'Method should not be called on empty buffer.'
# Record the terminal indices which mark episode boundaries.
terminals = np.nonzero(self._store['terminal'])[0].tolist()
num_terminals = len(terminals)
    spans = [(terminals[i] + 1, terminals[i + 1] + 1)
             for i in range(num_terminals - 1)]
return spans
def _compute_trajectory_value(self, spans):
"""Computes for each trajectory the value."""
values = []
for (start_idx, end_idx) in spans:
if self._trajectory_value == 'return':
# We currently assign value as max return over the span.
max_return = max(self._store['return'][start_idx:end_idx])
values.append(max_return)
# We need to handle the looped-case specially.
final_span_end = spans[-1][1]
first_span_beg = spans[0][0]
# Check whether the spans begin or end at the replay buffer boundaries.
if final_span_end == self._replay_capacity:
final_span_max = 0
else:
final_span_max = max(self._store['return'][final_span_end:])
if first_span_beg == 0:
first_span_max = 0
else:
first_span_max = max(self._store['return'][:first_span_beg])
looped_span_value = max(final_span_max, first_span_max)
values.append(looped_span_value)
return values
def sort_replay_buffer_trajectories(self):
"""Sort the trajectories within the replay buffer."""
# We only need to sort the replay buffer once it's full.
if not self.is_full():
return
spans = self._get_trajectory_spans() # [...,(start_idx, end_id), ...]
trajectory_values = self._compute_trajectory_value(spans)
# Sort trajectories in increasing order.
sorted_trajectory_indices = np.argsort(trajectory_values)
# Create an empty tmp_store for sorting.
tmp_store = {}
for storage_element in self.get_storage_signature():
array_shape = [self._replay_capacity] + list(storage_element.shape)
tmp_store[storage_element.name] = np.zeros(
array_shape, dtype=storage_element.type)
# Add the sorted trajectories from self._store.
tmp_cursor = 0
for index in sorted_trajectory_indices:
# Handle the looped span.
if index == len(spans):
final_span_end = spans[-1][1]
first_span_beg = spans[0][0]
traj_len = self._replay_capacity - final_span_end + first_span_beg
else:
(traj_start, traj_end) = spans[index]
traj_len = traj_end - traj_start
# Add all elements to the self._tmp_store.
element_signatures = self.get_storage_signature()
for element in element_signatures:
element_name = element.name
# Handle the looped span.
if index == len(spans):
element_trajectory_end = self._store[element_name][final_span_end:]
element_trajectory_beg = self._store[element_name][:first_span_beg]
element_trajectory = np.concatenate(
[element_trajectory_end, element_trajectory_beg], 0)
else:
element_trajectory = self._store[element_name][traj_start:traj_end]
tmp_store[element_name][tmp_cursor:tmp_cursor +
traj_len] = element_trajectory
# Move the cursor after each trajectory.
tmp_cursor += traj_len
# After all these writes, the tmp_cursor should match the replay buffer
# length.
assert tmp_cursor == self._replay_capacity, 'Mismatched'
# Move the tmp_store to the self._store.
self._store = tmp_store
# Return to writing to front.
assert self.is_full()
self._sorted_cursor = 0
del tmp_store, tmp_cursor
    # TODO(liamfedus): reindex the sum_tree in prioritized replay
def get_add_args_signature(self):
"""The signature of the add function.
Note - Derived classes may return a different signature.
Returns:
list of ReplayElements defining the type of the argument signature needed
by the add function.
"""
return self.get_storage_signature()
def get_storage_signature(self):
"""Returns a default list of elements to be stored in this replay memory.
Note - Derived classes may return a different signature.
Returns:
list of ReplayElements defining the type of the contents stored.
"""
storage_elements = [
ReplayElement('observation', self._observation_shape,
self._observation_dtype),
ReplayElement('action', self._action_shape, self._action_dtype),
ReplayElement('reward', self._reward_shape, self._reward_dtype),
ReplayElement('terminal', (), self._terminal_dtype),
ReplayElement('step_added', (), np.int32),
]
for extra_replay_element in self._extra_storage_types:
storage_elements.append(extra_replay_element)
return storage_elements
def _add_zero_transition(self):
"""Adds a padding transition filled with zeros (Used in episode beginnings)."""
zero_transition = []
for element_type in self.get_add_args_signature():
zero_transition.append(
np.zeros(element_type.shape, dtype=element_type.type))
self._add(*zero_transition)
def add(self, observation, action, reward, terminal, *args):
"""Adds a transition to the replay memory.
This function checks the types and handles the padding at the beginning of
an episode. Then it calls the _add function.
Since the next_observation in the transition will be the observation added
next there is no need to pass it.
If the replay memory is at capacity the oldest transition will be discarded.
Args:
observation: np.array with shape observation_shape.
action: int, the action in the transition.
reward: float, the reward received in the transition.
terminal: np.dtype, acts as a boolean indicating whether the transition
was terminal (1) or not (0).
*args: extra contents with shapes and dtypes according to
extra_storage_types.
"""
step_added = self.add_count
self._check_add_types(observation, action, reward, terminal, step_added,
*args)
if self.is_empty() or self._store['terminal'][self.cursor() - 1] == 1:
for _ in range(self._stack_size - 1):
# Child classes can rely on the padding transitions being filled with
# zeros. This is useful when there is a priority argument.
self._add_zero_transition()
self._add(observation, action, reward, terminal, step_added, *args)
def _add(self, *args):
"""Internal add method to add to the storage arrays.
Args:
*args: All the elements in a transition.
"""
self._check_args_length(*args)
transition = {
e.name: args[idx] for idx, e in enumerate(self.get_add_args_signature())
}
self._add_transition(transition)
def _add_transition(self, transition):
"""Internal add method to add transition dictionary to storage arrays.
Args:
transition: The dictionary of names and values of the transition to add to
the storage.
"""
cursor = self.cursor()
for arg_name in transition:
self._store[arg_name][cursor] = transition[arg_name]
self._train_counts[cursor] = 0
self.add_count += 1
if self._replay_forgetting == 'elephant':
self._sorted_cursor += 1
self.invalid_range = invalid_range(self.cursor(), self._replay_capacity,
self._stack_size, self._update_horizon)
def _check_args_length(self, *args):
"""Check if args passed to the add method have the same length as storage.
Args:
*args: Args for elements used in storage.
Raises:
ValueError: If args have wrong length.
"""
if len(args) != len(self.get_add_args_signature()):
raise ValueError('Add expects {} elements, received {}'.format(
len(self.get_add_args_signature()), len(args)))
def _check_add_types(self, *args):
"""Checks if args passed to the add method match those of the storage.
Args:
*args: Args whose types need to be validated.
Raises:
ValueError: If args have wrong shape or dtype.
"""
self._check_args_length(*args)
for arg_element, store_element in zip(args, self.get_add_args_signature()):
if isinstance(arg_element, np.ndarray):
arg_shape = arg_element.shape
elif isinstance(arg_element, tuple) or isinstance(arg_element, list):
# TODO(b/80536437). This is not efficient when arg_element is a list.
arg_shape = np.array(arg_element).shape
else:
# Assume it is scalar.
arg_shape = tuple()
store_element_shape = tuple(store_element.shape)
if arg_shape != store_element_shape:
raise ValueError('arg has shape {}, expected {}'.format(
arg_shape, store_element_shape))
def is_empty(self):
"""Is the Replay Buffer empty?"""
return self.add_count == 0
def is_full(self):
"""Is the Replay Buffer full?"""
return self.add_count >= self._replay_capacity
def cursor(self):
"""Index to the location where the next transition will be written."""
if self._replay_forgetting == 'default':
return self.add_count % self._replay_capacity
elif self._replay_forgetting == 'elephant':
# TODO(liamfedus): Check on sorted_cursor
return self._sorted_cursor % self._replay_capacity
def get_range(self, array, start_index, end_index):
"""Returns the range of array at the index handling wraparound if necessary.
Args:
array: np.array, the array to get the stack from.
start_index: int, index to the start of the range to be returned. Range
will wraparound if start_index is smaller than 0.
end_index: int, exclusive end index. Range will wraparound if end_index
exceeds replay_capacity.
Returns:
np.array, with shape [end_index - start_index, array.shape[1:]].
"""
assert end_index > start_index, 'end_index must be larger than start_index'
assert end_index >= 0
assert start_index < self._replay_capacity
if not self.is_full():
assert end_index <= self.cursor(), (
'Index {} has not been added.'.format(start_index))
# Fast slice read when there is no wraparound.
if start_index % self._replay_capacity < end_index % self._replay_capacity:
return_array = array[start_index:end_index, Ellipsis]
# Slow list read.
else:
indices = [(start_index + i) % self._replay_capacity
for i in range(end_index - start_index)]
return_array = array[indices, Ellipsis]
return return_array
def get_observation_stack(self, index):
return self._get_element_stack(index, 'observation')
def _get_element_stack(self, index, element_name):
state = self.get_range(self._store[element_name],
index - self._stack_size + 1, index + 1)
# The stacking axis is 0 but the agent expects as the last axis.
return np.moveaxis(state, 0, -1)
def get_terminal_stack(self, index):
return self.get_range(self._store['terminal'], index - self._stack_size + 1,
index + 1)
def is_valid_transition(self, index):
"""Checks if the index contains a valid transition.
Checks for collisions with the end of episodes and the current position
of the cursor.
Args:
index: int, the index to the state in the transition.
Returns:
Is the index valid: Boolean.
"""
# Check the index is in the valid range
if index < 0 or index >= self._replay_capacity:
return False
if not self.is_full():
# The indices and next_indices must be smaller than the cursor.
if index >= self.cursor() - self._update_horizon:
return False
# The first few indices contain the padding states of the first episode.
if index < self._stack_size - 1:
return False
# Skip transitions that straddle the cursor.
if index in set(self.invalid_range):
return False
# If there are terminal flags in any other frame other than the last one
# the stack is not valid, so don't sample it.
if self.get_terminal_stack(index)[:-1].any():
return False
return True
def _create_batch_arrays(self, batch_size):
"""Create a tuple of arrays with the type of get_transition_elements.
When using the WrappedReplayBuffer with staging enabled it is important to
    create new arrays every sample because StagingArea keeps a pointer to the
returned arrays.
Args:
batch_size: (int) number of transitions returned. If None the default
batch_size will be used.
Returns:
Tuple of np.arrays with the shape and type of get_transition_elements.
"""
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = []
for element in transition_elements:
batch_arrays.append(np.empty(element.shape, dtype=element.type))
return tuple(batch_arrays)
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices sampled uniformly.
Args:
batch_size: int, number of indices returned.
Returns:
list of ints, a batch of valid indices sampled uniformly.
Raises:
RuntimeError: If the batch was not constructed after maximum number of
tries.
"""
if self.is_full():
# add_count >= self._replay_capacity > self._stack_size
min_id = self.cursor() - self._replay_capacity + self._stack_size - 1
max_id = self.cursor() - self._update_horizon
else:
# add_count < self._replay_capacity
min_id = self._stack_size - 1
max_id = self.cursor() - self._update_horizon
if max_id <= min_id:
raise RuntimeError('Cannot sample a batch with fewer than stack size '
'({}) + update_horizon ({}) transitions.'.format(
self._stack_size, self._update_horizon))
indices = []
attempt_count = 0
while (len(indices) < batch_size and
attempt_count < self._max_sample_attempts):
index = np.random.randint(min_id, max_id) % self._replay_capacity
if self.is_valid_transition(index):
indices.append(index)
else:
attempt_count += 1
if len(indices) != batch_size:
raise RuntimeError(
'Max sample attempts: Tried {} times but only sampled {}'
' valid indices. Batch size is {}'.format(self._max_sample_attempts,
len(indices), batch_size))
return indices
def sample_transition_batch(self, batch_size=None, indices=None):
"""Returns a batch of transitions (including any extra contents).
If get_transition_elements has been overridden and defines elements not
stored in self._store, an empty array will be returned and it will be
left to the child class to fill it. For example, for the child class
OutOfGraphPrioritizedReplayBuffer, the contents of the
sampling_probabilities are stored separately in a sum tree.
When the transition is terminal next_state_batch has undefined contents.
NOTE: This transition contains the indices of the sampled elements. These
are only valid during the call to sample_transition_batch, i.e. they may
be used by subclasses of this replay buffer but may point to different data
as soon as sampling is done.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
indices: None or list of ints, the indices of every transition in the
batch. If None, sample the indices uniformly.
Returns:
transition_batch: tuple of np.arrays with the shape and type as in
get_transition_elements().
Raises:
ValueError: If an element to be sampled is missing from the replay buffer.
"""
if batch_size is None:
batch_size = self._batch_size
if indices is None:
indices = self.sample_index_batch(batch_size)
assert len(indices) == batch_size
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = self._create_batch_arrays(batch_size)
for batch_element, state_index in enumerate(indices):
trajectory_indices = [(state_index + j) % self._replay_capacity
for j in range(self._update_horizon)]
trajectory_terminals = self._store['terminal'][trajectory_indices]
is_terminal_transition = trajectory_terminals.any()
if not is_terminal_transition:
trajectory_length = self._update_horizon
else:
# np.argmax of a bool array returns the index of the first True.
trajectory_length = np.argmax(trajectory_terminals.astype(np.bool),
0) + 1
next_state_index = state_index + trajectory_length
trajectory_discount_vector = (
self._cumulative_discount_vector[:trajectory_length])
trajectory_rewards = self.get_range(self._store['reward'], state_index,
next_state_index)
# Fill the contents of each array in the sampled batch.
assert len(transition_elements) == len(batch_arrays)
for element_array, element in zip(batch_arrays, transition_elements):
if element.name == 'state':
element_array[batch_element] = self.get_observation_stack(state_index)
elif element.name == 'reward':
# compute the discounted sum of rewards in the trajectory.
element_array[batch_element] = np.sum(
trajectory_discount_vector * trajectory_rewards, axis=0)
elif element.name == 'next_state':
element_array[batch_element] = self.get_observation_stack(
(next_state_index) % self._replay_capacity)
        elif element.name in ('next_action', 'next_reward'):
          # Use an explicit prefix slice; str.lstrip('next_') strips a
          # character set rather than the prefix and only works by accident.
          element_array[batch_element] = (
              self._store[element.name[len('next_'):]][(next_state_index) %
                                                       self._replay_capacity])
elif element.name == 'terminal':
element_array[batch_element] = is_terminal_transition
elif element.name == 'indices':
element_array[batch_element] = state_index
elif element.name == 'train_counts':
element_array[batch_element] = self._train_counts[state_index]
elif element.name == 'steps_until_first_train':
step_added = self._store['step_added'][state_index]
steps_since_add = self.add_count - step_added
already_trained = self._train_counts[state_index] > 0
steps_until_first_train = -1 if already_trained else steps_since_add
element_array[batch_element] = steps_until_first_train
elif element.name == 'age':
step_added = self._store['step_added'][state_index]
steps_since_add = self.add_count - step_added
element_array[batch_element] = steps_since_add
elif element.name in self._store.keys():
element_array[batch_element] = (
self._store[element.name][state_index])
# We assume the other elements are filled in by the subclass.
return batch_arrays
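  # Illustrative usage (sketch; 'memory' is a hypothetical instance): the
  # arrays returned by sample_transition_batch unpack in the order given by
  # get_transition_elements() below, e.g.:
  #
  #   (state, action, reward, next_state, next_action, next_reward,
  #    terminal, indices, train_counts, steps_until_first_train,
  #    age) = memory.sample_transition_batch(batch_size=32)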
def get_transition_elements(self, batch_size=None):
"""Returns a 'type signature' for sample_transition_batch.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
Returns:
signature: A namedtuple describing the method's return type signature.
"""
batch_size = self._batch_size if batch_size is None else batch_size
transition_elements = [
ReplayElement('state', (batch_size,) + self._state_shape,
self._observation_dtype),
ReplayElement('action', (batch_size,) + self._action_shape,
self._action_dtype),
ReplayElement('reward', (batch_size,) + self._reward_shape,
self._reward_dtype),
ReplayElement('next_state', (batch_size,) + self._state_shape,
self._observation_dtype),
ReplayElement('next_action', (batch_size,) + self._action_shape,
self._action_dtype),
ReplayElement('next_reward', (batch_size,) + self._reward_shape,
self._reward_dtype),
ReplayElement('terminal', (batch_size,), self._terminal_dtype),
ReplayElement('indices', (batch_size,), np.int32),
ReplayElement('train_counts', (batch_size,), np.int32),
ReplayElement('steps_until_first_train', (batch_size,), np.int32),
ReplayElement('age', (batch_size,), np.int32),
]
for element in self._extra_storage_types:
transition_elements.append(
ReplayElement(element.name, (batch_size,) + tuple(element.shape),
element.type))
return transition_elements
def update_train_counts(self, indices):
"""Increments the train count for all transitions that were sampled."""
for memory_index in indices:
self._train_counts[memory_index] += 1
def _generate_filename(self, checkpoint_dir, name, suffix):
return os.path.join(checkpoint_dir, '{}_ckpt.{}.gz'.format(name, suffix))
def _return_checkpointable_elements(self):
"""Return the dict of elements of the class for checkpointing.
Returns:
checkpointable_elements: dict containing all non private (starting with
_) members + all the arrays inside self._store.
"""
checkpointable_elements = {}
for member_name, member in self.__dict__.items():
if member_name == '_store':
for array_name, array in self._store.items():
checkpointable_elements[STORE_FILENAME_PREFIX + array_name] = array
elif not member_name.startswith('_'):
checkpointable_elements[member_name] = member
elif member_name in ['_train_counts']: # Exceptions to the above rule.
checkpointable_elements[member_name] = member
return checkpointable_elements
def save(self, checkpoint_dir, iteration_number):
"""Save the OutOfGraphReplayBuffer attributes into a file.
This method will save all the replay buffer's state in a single file.
Args:
checkpoint_dir: str, the directory where numpy checkpoint files should be
saved.
iteration_number: int, iteration_number to use as a suffix in naming numpy
checkpoint files.
"""
if not tf.gfile.Exists(checkpoint_dir):
return
checkpointable_elements = self._return_checkpointable_elements()
for attr in checkpointable_elements:
filename = self._generate_filename(checkpoint_dir, attr, iteration_number)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
# Checkpointing the np arrays in self._store with np.save instead of
# pickling the dictionary is critical for file size and performance.
# STORE_FILENAME_PREFIX indicates that the variable is contained in
# self._store.
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
np.save(outfile, self._store[array_name], allow_pickle=False)
# Some numpy arrays might not be part of storage
elif isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
# After writing a checkpoint file, garbage collect the checkpoint file
# that is CHECKPOINT_DURATION versions old.
stale_iteration_number = iteration_number - CHECKPOINT_DURATION
if stale_iteration_number >= 0:
stale_filename = self._generate_filename(checkpoint_dir, attr,
stale_iteration_number)
try:
tf.gfile.Remove(stale_filename)
except tf.errors.NotFoundError:
pass
def load(self, checkpoint_dir, suffix):
"""Restores the object from bundle_dictionary and numpy checkpoints.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
suffix: str, the suffix to use in numpy checkpoint files.
Raises:
NotFoundError: If not all expected files are found in directory.
"""
save_elements = self._return_checkpointable_elements()
# We will first make sure we have all the necessary files available to avoid
# loading a partially-specified (i.e. corrupted) replay buffer.
for attr in save_elements:
filename = self._generate_filename(checkpoint_dir, attr, suffix)
if not tf.gfile.Exists(filename):
raise tf.errors.NotFoundError(None, None,
'Missing file: {}'.format(filename))
# If we've reached this point then we have verified that all expected files
# are available.
for attr in save_elements:
filename = self._generate_filename(checkpoint_dir, attr, suffix)
with tf.gfile.Open(filename, 'rb') as f:
with gzip.GzipFile(fileobj=f) as infile:
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
self._store[array_name] = np.load(infile, allow_pickle=False)
elif isinstance(self.__dict__[attr], np.ndarray):
self.__dict__[attr] = np.load(infile, allow_pickle=False)
else:
self.__dict__[attr] = pickle.load(infile)
@gin.configurable(
denylist=['observation_shape', 'stack_size', 'update_horizon', 'gamma'])
class WrappedReplayBuffer(object):
"""Wrapper of OutOfGraphReplayBuffer with an in graph sampling mechanism.
Usage:
To add a transition: call the add function.
To sample a batch: Construct operations that depend on any of the
tensors in the transition dictionary. Every sess.run
that requires any of these tensors will sample a new
transition.
"""
def __init__(self,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32,
replay_forgetting='default'):
"""Initializes WrappedReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
use_staging: bool, when True, a staging area is used to prefetch the
next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
gamma: float, the discount factor.
wrapped_memory: The 'inner' memory data structure. If None, it creates the
standard DQN replay memory.
max_sample_attempts: int, the maximum number of attempts allowed to get a
sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
replay_forgetting: str, What strategy to employ for forgetting old
trajectories. One of ['default', 'elephant'].
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
if replay_capacity < update_horizon + 1:
raise ValueError('Update horizon ({}) should be significantly smaller '
'than replay capacity ({}).'.format(
update_horizon, replay_capacity))
if not update_horizon >= 1:
raise ValueError('Update horizon must be positive.')
if not 0.0 <= gamma <= 1.0:
raise ValueError('Discount factor (gamma) must be in [0, 1].')
self.batch_size = batch_size
# Mainly used to allow subclasses to pass self.memory.
if wrapped_memory is not None:
self.memory = wrapped_memory
else:
self.memory = OutOfGraphReplayBuffer(
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon,
gamma,
max_sample_attempts,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
extra_storage_types=extra_storage_types,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype,
replay_forgetting=replay_forgetting)
self.create_sampling_ops(use_staging)
tf.logging.info('\t replay_forgetting: %s', replay_forgetting)
def add(self, observation, action, reward, terminal, *args):
"""Adds a transition to the replay memory.
Since the next_observation in the transition will be the observation added
next, there is no need to pass it.
If the replay memory is at capacity, the oldest transition is discarded.
Args:
observation: np.array with shape observation_shape.
action: int, the action in the transition.
reward: float, the reward received in the transition.
terminal: np.dtype, acts as a boolean indicating whether the transition
was terminal (1) or not (0).
*args: extra contents with shapes and dtypes according to
extra_storage_types.
"""
self.memory.add(observation, action, reward, terminal, *args)
def create_sampling_ops(self, use_staging):
"""Creates the ops necessary to sample from the replay buffer.
Creates the transition dictionary containing the sampling tensors.
Args:
use_staging: bool, when True, a staging area is used to prefetch the
next sampling batch.
"""
with tf.name_scope('sample_replay'):
with tf.device('/cpu:*'):
transition_type = self.memory.get_transition_elements()
transition_tensors = tf.py_func(
self.memory.sample_transition_batch, [],
[return_entry.type for return_entry in transition_type],
name='replay_sample_py_func')
self._set_transition_shape(transition_tensors, transition_type)
if use_staging:
transition_tensors = self._set_up_staging(transition_tensors)
self._set_transition_shape(transition_tensors, transition_type)
# Unpack sample transition into member variables.
self.unpack_transition(transition_tensors, transition_type)
def _set_transition_shape(self, transition, transition_type):
"""Set shape for each element in the transition.
Args:
transition: tuple of tf.Tensors.
transition_type: tuple of ReplayElements describing the shapes of the
respective tensors.
"""
for element, element_type in zip(transition, transition_type):
element.set_shape(element_type.shape)
def _set_up_staging(self, transition):
"""Sets up staging ops for prefetching the next transition.
This allows us to hide the py_func latency. To do so we use a staging area
to pre-fetch the next batch of transitions.
Args:
transition: tuple of tf.Tensors with shape
memory.get_transition_elements().
Returns:
prefetched_transition: tuple of tf.Tensors with shape
memory.get_transition_elements() that have been previously prefetched.
"""
transition_type = self.memory.get_transition_elements()
# Create the staging area in CPU.
prefetch_area = contrib_staging.StagingArea(
[shape_with_type.type for shape_with_type in transition_type])
# Store prefetch op for tests, but keep it private -- users should not be
# calling _prefetch_batch.
self._prefetch_batch = prefetch_area.put(transition)
initial_prefetch = tf.cond(
tf.equal(prefetch_area.size(), 0),
lambda: prefetch_area.put(transition), tf.no_op)
# Every time a transition is sampled, self._prefetch_batch will be
# called. If the staging area is empty, two put ops will be called.
with tf.control_dependencies([self._prefetch_batch, initial_prefetch]):
prefetched_transition = prefetch_area.get()
return prefetched_transition
def unpack_transition(self, transition_tensors, transition_type):
"""Unpacks the given transition into member variables.
Args:
transition_tensors: tuple of tf.Tensors.
transition_type: tuple of ReplayElements matching transition_tensors.
"""
self.transition = collections.OrderedDict()
for element, element_type in zip(transition_tensors, transition_type):
self.transition[element_type.name] = element
# TODO(bellemare): These are legacy and should probably be removed in
# future versions.
self.states = self.transition['state']
self.actions = self.transition['action']
self.rewards = self.transition['reward']
self.next_states = self.transition['next_state']
self.next_actions = self.transition['next_action']
self.next_rewards = self.transition['next_reward']
self.terminals = self.transition['terminal']
self.indices = self.transition['indices']
def save(self, checkpoint_dir, iteration_number):
"""Save the underlying replay buffer's contents in a file.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
iteration_number: int, the iteration_number to use as a suffix in naming
numpy checkpoint files.
"""
self.memory.save(checkpoint_dir, iteration_number)
def load(self, checkpoint_dir, suffix):
"""Loads the replay buffer's state from a saved file.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
suffix: str, the suffix to use in numpy checkpoint files.
"""
self.memory.load(checkpoint_dir, suffix)
def tf_update_train_counts(self, indices):
"""Updates the train counts for the given indices.
Args:
indices: tf.Tensor with dtype int32 and shape [n].
Returns:
A tf op updating the train count.
"""
return tf.py_func(
self.memory.update_train_counts, [indices], [],
name='replay_update_train_counts_py_func')
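# A minimal usage sketch (illustrative only; constructor defaults shown above,
# and it is assumed that add() fills auxiliary store entries such as
# 'step_added' internally):
#
# replay = WrappedReplayBuffer(observation_shape=(84, 84), stack_size=4,
# replay_capacity=10000, batch_size=32)
# replay.add(observation, action, reward, terminal) # one step at a time
# # Any op consuming replay.states / replay.actions / ... samples a fresh
# # batch on every sess.run().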
|
|
from random import *
from math import *
deg_to_rad = 0.01745329252
rad_to_deg = 57.2957795131
def EquipLoadout(UI, loadout):
UI.EquipLoadout(loadout)
def AutoConfigurePlatform(UI, setupName):
UI.AutoConfigurePlatform(setupName)
def MovePlatform(UI, lon, lat):
UI.MovePlatform(lon, lat)
def split_multi(s, separators):
rr = [s]
for sep in separators:
s, rr = rr, []
for seq in s:
rr += seq.split(sep)
return rr
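# Example: split_multi('12 34,56', [' ', ',']) -> ['12', '34', '56']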
def MovePlatformString(UI, s):
s = s.replace('W','-')
s = s.replace('S','-')
s = s.replace('E',' ')
s = s.replace('N',' ')
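# Hemisphere letters must precede the digits (e.g. 'N30 30.0 W45 15.0') so
# the substituted '-' lands in front of the number; 2, 4 or 6 numeric fields
# are interpreted as DD.DDD, DD MM.MMM or DD MM SS.SSS pairs.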
svect = split_multi(s, [' ',',','\'',':'])
sfilt = []
for k in range(0, len(svect)):
try:
x = float(svect[k])
sfilt.append(x)
except ValueError:
pass # skip tokens that are not numbers
if (len(sfilt) == 2): # lat DD.DDD lon DD.DDD
lat = sfilt[0]
lon = sfilt[1]
elif (len(sfilt) == 4): # lat DD MM.MMM lon DD MM.MMM
if (sfilt[0] < 0):
lat_sign = -1
else:
lat_sign = 1
lat = sfilt[0] + 0.0166666667*lat_sign*sfilt[1]
if (sfilt[2] < 0):
lon_sign = -1
else:
lon_sign = 1
lon = sfilt[2] + 0.0166666667*lon_sign*sfilt[3]
elif (len(sfilt) == 6): # lat DD MM SS.SSS lon DD MM SS.SSS
if (sfilt[0] < 0):
lat_sign = -1
else:
lat_sign = 1
lat = sfilt[0] + 0.0166666667*lat_sign*sfilt[1] + 0.000277777778*lat_sign*sfilt[2]
if (sfilt[3] < 0):
lon_sign = -1
else:
lon_sign = 1
lon = sfilt[3] + 0.0166666667*lon_sign*sfilt[4] + 0.000277777778*lon_sign*sfilt[5]
else:
UI.DisplayMessage('Bad coordinate string (%s)' % s)
return # lat/lon were never set; bail out before the range check below
if ((lat > 90) or (lat < -90) or (lon < -180.0) or (lon > 180.0)):
UI.DisplayMessage('Bad coordinate (%.5f, %.5f)' % (lat, lon))
else:
UI.DisplayMessage('Moving to %.5f %.5f' % (lat, lon))
UI.MovePlatform(deg_to_rad*lon, deg_to_rad*lat)
def MoveGroup(GI, lon, lat):
unit_count = GI.GetUnitCount()
if (unit_count < 1):
return
UI = GI.GetPlatformInterface(0)
leader_track = UI.GetTrackById(UI.GetPlatformId())
dlon = lon-leader_track.Lon
dlat = lat-leader_track.Lat
for n in range(0, unit_count):
UI = GI.GetPlatformInterface(n)
track = UI.GetTrackById(UI.GetPlatformId())
UI.MovePlatform(track.Lon+dlon, track.Lat+dlat)
def RotateGroup(GI, angle_rad):
unit_count = GI.GetUnitCount()
if (unit_count < 2):
return
# get centroid
lat_cen = 0
lon_cen = 0
unit_count = GI.GetUnitCount()
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
lat_cen = lat_cen + UI.GetLatitude()
lon_cen = lon_cen + UI.GetLongitude() # won't work near 180E
scale = 1.0 / float(unit_count)
lat_cen = lat_cen * scale
lon_cen = lon_cen * scale
cos_latc = cos(lat_cen)
inv_cos_latc = 1.0 / cos_latc
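# Rotate each unit about the centroid using a local flat-earth approximation:
# with x = dlon*cos(lat_cen) and y = dlat, the rotation is
# x' = x*cos(a) + y*sin(a), y' = y*cos(a) - x*sin(a),
# and x' is divided by cos(lat_cen) again to get back to longitude.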
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
lon_n = UI.GetLongitude()
lat_n = UI.GetLatitude()
dlon = lon_n - lon_cen
dlat = lat_n - lat_cen
rot_lon = lon_cen + dlon*cos(angle_rad) + inv_cos_latc*dlat*sin(angle_rad)
rot_lat = lat_cen + dlat*cos(angle_rad) - cos_latc*dlon*sin(angle_rad)
UI.MovePlatform(rot_lon, rot_lat)
def DeletePlatform(UI):
UI.DeletePlatform()
def DeleteGroup(GI):
names_to_delete = []
UI_to_delete = [] # to work with both platform and weapon interface types
unit_count = GI.GetUnitCount()
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
names_to_delete.append(UI.GetPlatformName())
UI_to_delete.append(UI)
for n in range(0, len(names_to_delete)):
UI = UI_to_delete[n]
UI.DeletePlatform()
def RenamePlatform(UI, name):
UI.RenamePlatform(name)
def SetAirGroupName(SM, name):
SM.SetAirGroupName(name)
def SetAirGroupSize(SM, n):
SM.SetAirGroupCount(int(n))
def SetAirGroupNameUnit(UI, name):
UI.SetAirGroupName(name)
def SetAirGroupSizeUnit(UI, n):
UI.SetAirGroupCount(int(n))
def SetMagazineAddCount(UI, n):
UI.SetMagazineAddCount(int(n))
def AddItemToMagazine(UI, item):
magazineAddCount = UI.GetMagazineAddCount()
UI.AddItemToMagazine(item, magazineAddCount)
def SetSeaState(SM, sea_state):
SM.SetSeaState(sea_state)
def SetSonarTemplate(SM, id):
SM.SetSonarTemplate(id)
def SetScenarioName(SM, name):
SM.SetScenarioName(name)
def SetScenarioDescription(SM, description):
SM.SetScenarioDescription(description)
def SetAllianceCountry(SM, name):
user_alliance = SM.GetUserAlliance()
SM.SetAllianceDefaultCountry(user_alliance, name)
# Imports briefing from file
def ImportBriefing(SM, filename):
infile = open(filename, 'r')
text = infile.read()
infile.close()
SM.SetSimpleBriefing(SM.GetUserAlliance(), text)
def SetBriefing(SM, text):
SM.SetSimpleBriefing(SM.GetUserAlliance(), text)
def SetAllianceROE(SM, roeMode):
# set all types to same level for backward compatibility
SM.SetAllianceROE(SM.GetUserAlliance(), roeMode, roeMode, roeMode, roeMode)
def SetAllianceROEByType(SM, airMode, surfMode, subMode, landMode):
SM.SetAllianceROE(SM.GetUserAlliance(), airMode, surfMode, subMode, landMode)
# Saves game to "Saved/<datetime string>.py"
def SaveGame(SM):
SM.SaveGame('Saved')
# Switch user alliance to next alliance
# Assumes alliances range from 1 to 16
def ToggleAlliance(SM):
user_alliance = SM.GetUserAlliance()
for n in range(user_alliance+1, 17): # include alliance 16
if (SM.AllianceExists(n)):
SM.SetUserAlliance(n)
return
SM.SetUserAlliance(1)
# Adds new platform at specified coordinates
def AddNewPlatform(SM, lon, lat, className):
user_alliance = SM.GetUserAlliance()
unit = SM.GetDefaultUnit()
unit.className = className
unit.unitName = SM.GetRandomPlatformName(className, 'Temp') # 'Temp %d' % int(1000*random()) # old code
unit.SetPosition(rad_to_deg*lon, rad_to_deg*lat, 0) # lon, lat, alt
unit.heading = 90
unit.speed = 3
SM.AddUnitToAlliance(unit, user_alliance)
# Adds copy of platform at specified coordinates
def CopyPlatform(UI_ref, lon, lat):
refName = UI_ref.GetPlatformName()
SM = UI_ref.GetScenarioInterface()
unit = SM.GetDefaultUnit()
unit.className = UI_ref.GetPlatformClass()
unit.unitName = SM.GetRandomPlatformName(unit.className, refName) # 'Temp %d' % int(1000*random()) # old code
unit.SetPosition(rad_to_deg*lon, rad_to_deg*lat, UI_ref.GetAltitude()) # lon, lat, alt
unit.heading = UI_ref.GetHeading() # deg
unit.speed = UI_ref.GetSpeed() # kts
try:
unit.throttle = UI_ref.GetThrottle()
except:
pass
alliance = UI_ref.GetPlatformAlliance()
SM.AddUnitToAlliance(unit, alliance)
# duplicate loadout
try:
nLaunchers = UI_ref.GetLauncherCount()
for n in range(0, nLaunchers):
SM.SetUnitLauncherItem(unit.unitName, n, UI_ref.GetLauncherWeaponName(n), UI_ref.GetLauncherQuantity(n))
# duplicate magazine items
magItems = UI_ref.GetMagazineItems()
nItems = magItems.Size()
for n in range(0, nItems):
itemName = magItems.GetString(n)
qty = UI_ref.GetMagazineQuantity(itemName)
SM.AddToUnitMagazine(unit.unitName, itemName, qty)
SM.DuplicateUnitTasking(refName, unit.unitName)
except:
pass
# returns platform or weapon interface based on type of unit n in group
def GetPlatformOrWeaponInterface(GI, n):
if (GI.IsPlatform(n)):
return GI.GetPlatformInterface(n)
else: # assume weapon type
return GI.GetWeaponInterface(n)
# Adds copy of group at specified coordinates
def CopyGroup(GI, lon, lat):
# get centroid
lat_cen = 0
lon_cen = 0
unit_count = GI.GetUnitCount()
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
lat_cen = lat_cen + UI.GetLatitude()
lon_cen = lon_cen + UI.GetLongitude() # won't work near 180E
scale = 1.0 / float(unit_count)
lat_cen = lat_cen * scale
lon_cen = lon_cen * scale
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
CopyPlatform(UI, lon+UI.GetLongitude()-lon_cen, lat+UI.GetLatitude()-lat_cen)
# changes root name of group and renumbers starting with 1
def RenameGroup(GI, new_root):
SM = GI.GetScenarioInterface()
SM.ConsoleText('Rename Group Called')
parsed = SM.GetParsedUnitName(new_root)
if (parsed.isValid):
start_id = parsed.id
root = parsed.root
separator = parsed.separator
else:
start_id = 1
root = new_root
separator = '-'
# increment start id until non-existing unit is found
searching = 1
tries = 0
while (searching and (tries < 100)):
unitName = '%s%s%d' % (root, separator, start_id)
if (SM.GetUnitIdByName(unitName) != -1):
start_id = start_id + 1
tries = tries + 1
else:
searching = 0
SM.ConsoleText('Renaming group with root %s' % root)
unit_count = GI.GetUnitCount()
for n in range(0, unit_count):
UI = GetPlatformOrWeaponInterface(GI, n)
UI.RenamePlatform('%s%s%d' % (root, separator, start_id+n))
def AddNewPlatformToFlightDeck(SM, host_id, className):
group_name = SM.GetAirGroupName()
group_count = SM.GetAirGroupCount()
start_id = SM.GetAirUnitId()
for n in range(0, group_count):
hostName = SM.GetUnitNameById(host_id)
unitName = '%s-%d' % (group_name, start_id+n)
UI = SM.GetUnitInterface()
BB = UI.GetBlackboardInterface()
if BB.KeyExists('MagTonnage'):
BB.Erase('MagTonnage')
SM.AddUnitToFlightDeck(hostName, className, unitName, 3)
# version for edit mode with platform hooked
# adds group according to current group count, automatically names based on current group name
def AddToMyFlightDeck(UI, className):
UI.AddUnitToFlightDeck(className)
def AddMapLabel(SM, lon, lat, labelText):
SM.OverlayText(labelText, rad_to_deg*lon, rad_to_deg*lat)
def DeleteGoal(SM, goal_id):
SM.DeleteGoalById(goal_id)
def ChangeGoalTarget(SM, target_id, goal_id):
goal = SM.GetGoalById(goal_id)
unit_name = SM.GetUnitNameById(target_id)
if (len(unit_name) < 1):
return
if (goal.GetTypeString() == 'Destroy'):
destroy_goal = goal.AsDestroyGoal()
destroy_goal.SetTargetString(unit_name)
elif (goal.GetTypeString() == 'Protect'):
protect_goal = goal.AsProtectGoal()
protect_goal.SetTargetString(unit_name)
def AddCompoundGoal(SM, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
new_goal = SM.CompoundGoal(0)
compound_goal.AddGoal(new_goal)
def AddTimeGoal(SM, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
new_goal = SM.TimeGoal()
new_goal.SetFailTimeout(3600.0)
new_goal.SetPassTimeout(59940.0)
compound_goal.AddGoal(new_goal)
def AddDestroyGoal(SM, target_id, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
unit_name = SM.GetUnitNameById(target_id)
new_goal = SM.DestroyGoal(unit_name)
compound_goal.AddGoal(new_goal)
def AddProtectGoal(SM, target_id, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
unit_name = SM.GetUnitNameById(target_id)
new_goal = SM.ProtectGoal(unit_name)
compound_goal.AddGoal(new_goal)
def AddAreaGoal(SM, lon, lat, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
new_goal = SM.AreaGoal()
new_goal.AddPoint(lon + 0.001, lat + 0.001)
new_goal.AddPoint(lon + 0.001, lat - 0.001)
new_goal.AddPoint(lon - 0.001, lat - 0.001)
new_goal.AddPoint(lon - 0.001, lat + 0.001)
compound_goal.AddGoal(new_goal)
# versions that add as top level alliance goal
def AddCompoundGoalAlliance(SM, alliance):
new_goal = SM.CompoundGoal(0)
SM.SetAllianceGoal(alliance, new_goal)
def AddTimeGoalAlliance(SM, alliance):
new_goal = SM.TimeGoal()
new_goal.SetFailTimeout(3600.0)
new_goal.SetPassTimeout(59940.0)
SM.SetAllianceGoal(alliance, new_goal)
def AddDestroyGoalAlliance(SM, alliance):
new_goal = SM.DestroyGoal('')
SM.SetAllianceGoal(alliance, new_goal)
def AddProtectGoalAlliance(SM, alliance):
new_goal = SM.ProtectGoal('')
SM.SetAllianceGoal(alliance, new_goal)
def AddAreaGoalAlliance(SM, lon, lat, alliance):
new_goal = SM.AreaGoal()
new_goal.AddPoint(lon + 0.001, lat + 0.001)
new_goal.AddPoint(lon + 0.001, lat - 0.001)
new_goal.AddPoint(lon - 0.001, lat - 0.001)
new_goal.AddPoint(lon - 0.001, lat + 0.001)
SM.SetAllianceGoal(alliance, new_goal)
def ChangePassTime(SM, time_minutes, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Time'):
return
time_goal = goal.AsTimeGoal()
time_goal.SetPassTimeout(60.0*float(time_minutes))
time_goal.SetFailTimeout(31556926.0)
def ChangeFailTime(SM, time_minutes, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Time'):
return
time_goal = goal.AsTimeGoal()
time_goal.SetFailTimeout(60.0*float(time_minutes))
time_goal.SetPassTimeout(31556926.0)
# toggle compound goal between OR and AND type
def ToggleCompoundType(SM, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Compound'):
return
compound_goal = goal.AsCompoundGoal()
if (compound_goal.GetLogicType() == 0):
compound_goal.SetLogicType(1)
else:
compound_goal.SetLogicType(0)
# param_str, first char is 0 or 1 for type, remaining is goal_id
# set state of area goal, 1 is enter type, 0 is avoid type
def SetAreaEnter(SM, param_str):
state = int(param_str[0])
goal_id = int(param_str[1:])
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Area'):
return
area_goal = goal.AsAreaGoal()
area_goal.SetEnterGoal(state)
# param_str, first char is 0 or 1 for logic type, remaining is goal_id
# set state of area goal, 1 is ANY logic, 0 is ALL logic
def SetAreaLogic(SM, param_str):
state = int(param_str[0])
goal_id = int(param_str[1:])
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Area'):
return
area_goal = goal.AsAreaGoal()
area_goal.SetLogicAny(state)
# adds an additional named platform as 'target' for area goal
def AddAreaTarget(SM, target_id, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Area'):
return
area_goal = goal.AsAreaGoal()
unit_name = SM.GetUnitNameById(target_id)
if (len(unit_name) > 1):
area_goal.AddToTargetList(unit_name)
# first 4 characters of string are goal id, remaining characters are target type string
def SetAreaTargets(SM, param_str):
goal_id = int(param_str[0:4])
target_string = param_str[4:]
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Area'):
return
area_goal = goal.AsAreaGoal()
area_goal.SetTargetList(target_string)
def SetAreaTimeDelay(SM, timeDelayMinutesString, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() != 'Area'):
return
area_goal = goal.AsAreaGoal()
timeObjective_s = 60.0 * float(timeDelayMinutesString)
area_goal.SetTimeObjective(timeObjective_s)
def AddGoalTarget(SM, target_id, goal_id):
targetName = SM.GetUnitNameById(target_id)
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() == 'Destroy'):
goal = goal.AsDestroyGoal()
elif (goal.GetTypeString() == 'Protect'):
goal = goal.AsProtectGoal()
else:
return
if (len(targetName) == 0):
return
goal.AddTarget(targetName)
def AddGoalTargetArea(SM, lon1, lat1, lon2, lat2, goal_id):
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() == 'Destroy'):
goal = goal.AsDestroyGoal()
elif (goal.GetTypeString() == 'Protect'):
goal = goal.AsProtectGoal()
else:
return
unitList = SM.GetUnitList(lon1, lat1, lon2, lat2, -1)
nUnits = unitList.Size()
for n in range(0, nUnits):
targetName = unitList.GetString(n)
goal.AddTarget(targetName)
def RemoveGoalTarget(SM, param_str):
goal_id = int(param_str[0:8])
targetName = param_str[8:]
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() == 'Destroy'):
goal = goal.AsDestroyGoal()
elif (goal.GetTypeString() == 'Protect'):
goal = goal.AsProtectGoal()
else:
return
goal.RemoveTarget(targetName)
def SetGoalQuantity(SM, param_str):
goal_id = int(param_str[0:8])
quantity = int(param_str[8:])
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() == 'Destroy'):
goal = goal.AsDestroyGoal()
elif (goal.GetTypeString() == 'Protect'):
goal = goal.AsProtectGoal()
elif (goal.GetTypeString() == 'Area'):
goal = goal.AsAreaGoal()
else:
return
goal.SetQuantity(quantity)
# version that takes input from user text box
def SetGoalQuantity2(SM, param_str, goal_id):
quantity = int(param_str)
goal = SM.GetGoalById(goal_id)
if (goal.GetTypeString() == 'Destroy'):
goal = goal.AsDestroyGoal()
elif (goal.GetTypeString() == 'Protect'):
goal = goal.AsProtectGoal()
elif (goal.GetTypeString() == 'Area'):
goal = goal.AsAreaGoal()
else:
return
goal.SetQuantity(quantity)
def SetIncludeProbability(UI, prob):
UI.SetIncludeProbability(float(prob))
def AddRandomBox(UI):
my_track = UI.GetTrackById(UI.GetPlatformId())
lat_deg = 57.296*my_track.Lat
lon_deg = 57.296*my_track.Lon
UI.AddRandomBox(lon_deg+0.01, lon_deg+0.05, lat_deg-0.02, lat_deg+0.02)
UI.UpdateMissionEditGraphics() # avoid rehook to show box graphic
def DeleteAllRandomBoxes(UI):
UI.DeleteAllRandomBoxes()
def SetDateTimeString(SM, str):
SM.SetDateTimeByString(str)
def SetAlwaysVisible(UI, state):
UI.SetAlwaysVisible(state)
def SetAlliancePlayable(SM, state):
current_side = SM.GetUserAlliance()
SM.SetAlliancePlayable(current_side, state)
# set custom cost for this unit for scoring, string cost in millions
def SetCustomCost(UI, cost_million):
try:
x = 1e6 * float(cost_million)
UI.SetCost(x)
UI.DisplayMessage('Changed cost to %.1f M' % (1e-6 * x))
except:
UI.DisplayMessage('Error with cost string (%s)' % cost_million)
def SetFilterByYear(SM, state):
SM.SetFilterByYear(state)
def SetFilterByCountry(SM, state):
SM.SetFilterByCountry(state)
|
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of all env promote config views
"""
import json
from django.http import HttpResponse
from django.middleware.csrf import get_token
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.generic import View
import common
from helpers import environs_helper, clusters_helper
from helpers import baseimages_helper
from deploy_board.settings import IS_PINTEREST
class EnvCapacityConfigView(View):
def get(self, request, name, stage):
# cluster manager
provider_list = None
basic_cluster_info = None
create_new = False
adv = False
env = environs_helper.get_env_by_stage(request, name, stage)
if IS_PINTEREST:
provider_list = baseimages_helper.get_all_providers(request)
basic_cluster_info = clusters_helper.get_cluster(request, env.get('clusterName'))
if basic_cluster_info:
base_image_id = basic_cluster_info.get('baseImageId')
base_image = baseimages_helper.get_by_id(request, base_image_id)
if base_image.get('abstract_name') != 'CMP-DOCKER':
adv = True
params = request.GET
if params.get('adv'):
adv = params.get('adv')
if params.get('create_new'):
create_new = params.get('create_new')
if request.is_ajax():
# return data for ajax calls
hosts = environs_helper.get_env_capacity(request, name, stage, capacity_type="HOST")
groups = common.get_non_cmp_group(request, name, stage)
html = render_to_string("configs/capacity.tmpl", {
"env": env,
"hosts": ','.join(hosts),
"groups": ','.join(groups),
"csrf_token": get_token(request),
'is_pinterest': IS_PINTEREST,
'provider_list': provider_list,
'basic_cluster_info': basic_cluster_info,
'adv': adv,
'create_new': create_new,
})
return HttpResponse(json.dumps({'html': html}), content_type="application/json")
# otherwise, return a page
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
hosts = environs_helper.get_env_capacity(request, name, stage, capacity_type="HOST")
groups = common.get_non_cmp_group(request, name, stage)
return render(request, 'configs/capacity.html', {
"envs": envs,
"env": env,
"stages": stages,
"hosts": ','.join(hosts),
"groups": ','.join(groups),
'is_pinterest': IS_PINTEREST,
'provider_list': provider_list,
'basic_cluster_info': basic_cluster_info,
'adv': adv,
'create_new': create_new,
})
def post(self, request, name, stage):
query_dict = request.POST
hosts_str = query_dict["hosts"]
hosts = []
if hosts_str:
hosts = [x.strip() for x in hosts_str.split(',')]
environs_helper.update_env_capacity(request, name, stage, capacity_type="HOST", data=hosts)
groups_str = query_dict["groups"]
groups = []
if groups_str:
groups = [x.strip() for x in groups_str.split(',')]
if IS_PINTEREST:
cluster_name = common.get_cluster_name(request, name, stage)
basic_cluster_info = clusters_helper.get_cluster(request, cluster_name)
if basic_cluster_info:
cluster_name = common.get_cluster_name(request, name, stage)
groups.append(cluster_name)
environs_helper.update_env_capacity(request, name, stage, capacity_type="GROUP",
data=groups)
return self.get(request, name, stage)
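# Example POST payload handled above (hypothetical values):
# hosts="host-001, host-002" -> ["host-001", "host-002"] saved as HOST capacity
# groups="my-group" -> ["my-group"] (plus the CMP cluster name, if any,
# on Pinterest) saved as GROUP capacity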
''' TODO figure out how to update detach and attach later
def update_capacity(request, name, stage):
query_dict = request.POST
groups_str = query_dict["updatedGroups"]
hosts_str = query_dict["updatedHosts"]
canary_hosts_str = None
attached_hosts_str = None
if "canaryHosts" in query_dict:
canary_hosts_str = query_dict["canaryHosts"]
if "attachedHosts" in query_dict:
attached_hosts_str = query_dict["attachedHosts"]
groups = []
if groups_str:
groups = [x.strip() for x in groups_str.split(',')]
hosts = []
if hosts_str:
hosts = [x.strip() for x in hosts_str.split(',')]
canary_hosts = []
if canary_hosts_str:
canary_hosts = [x.strip() for x in canary_hosts_str.split(',')]
attached_hosts = []
if attached_hosts_str:
attached_hosts = [x.strip() for x in attached_hosts_str.split(',')]
# compare groups with original groups
orig_groups = client.getEnvGroups(name, stage)
removed_groups = []
for group in orig_groups:
if group not in groups:
# need to remove this host
removed_groups.append(group)
else:
groups.remove(group)
if removed_groups:
client.removeGroupsFromEnv(name, stage, removed_groups, request.teletraan_user_id)
# these are the new groups
client.addGroupsToEnv(name, stage, groups, request.teletraan_user_id)
# compare hosts with original hosts
orig_hosts = client.getEnvHosts(name, stage)
removed_hosts = []
for host in orig_hosts:
if host not in hosts:
removed_hosts.append(host)
else:
hosts.remove(host)
if removed_hosts:
client.removeHostsFromEnv(name, stage, removed_hosts, request.teletraan_user_id)
# there are new hosts
client.addHostsToEnv(name, stage, hosts, request.teletraan_user_id)
# detach hosts from ASG
for host_name in canary_hosts:
hostInfo = client.getHostInfos(host_name)
if len(hostInfo) > 0:
group_names = client.getASGNamesByHostId(hostInfo[0].hostId)
for group_name in group_names:
hostId = []
hostId.append(hostInfo[0].hostId)
try:
client.detachInstancesFromAutoScalingGroup(hostId, group_name)
except:
log.error(traceback.format_exc())
raise
# attach host to ASG
for host_name in attached_hosts:
hostInfo = client.getHostInfos(host_name)
if len(hostInfo) > 0:
group_names = client.getASGNamesByHostId(hostInfo[0].hostId)
for group_name in group_names:
hostId = []
hostId.append(hostInfo[0].hostId)
try:
client.attachInstancesToAutoScalingGroup(hostId, group_name)
except:
log.error(traceback.format_exc())
raise
# TODO set a confirmation first, and ask for confirmation if
# delete or shutdown services are needed
return redirect('/env/{}/{}/config/'.format(name, stage))
'''
|
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import subprocess
import fbchisellldbbase as fb
import fbchisellldbobjcruntimehelpers as runtimeHelpers
import fbchisellldbviewcontrollerhelpers as vcHelpers
import fbchisellldbviewhelpers as viewHelpers
import lldb
def lldbcommands():
return [
FBPrintViewHierarchyCommand(),
FBPrintViewControllerHierarchyCommand(),
FBPrintIsExecutingInAnimationBlockCommand(),
FBPrintInheritanceHierarchy(),
FBPrintUpwardResponderChain(),
FBPrintOnscreenTableView(),
FBPrintOnscreenTableViewCells(),
FBPrintInternals(),
FBPrintInstanceVariable(),
FBPrintKeyPath(),
FBPrintApplicationDocumentsPath(),
FBPrintApplicationBundlePath(),
FBPrintData(),
FBPrintTargetActions(),
FBPrintJSON(),
FBPrintSwiftJSON(),
FBPrintAsCurl(),
FBPrintToClipboard(),
FBPrintObjectInObjc(),
]
class FBPrintViewHierarchyCommand(fb.FBCommand):
def name(self):
return "pviews"
def description(self):
return "Print the recursion description of <aView>."
def options(self):
return [
fb.FBCommandArgument(
short="-u",
long="--up",
arg="upwards",
boolean=True,
default=False,
help="Print only the hierarchy directly above the view, up to its window.",
),
fb.FBCommandArgument(
short="-d",
long="--depth",
arg="depth",
type="int",
default="0",
help="Print only to a given depth. 0 indicates infinite depth.",
),
fb.FBCommandArgument(
short="-w",
long="--window",
arg="window",
type="int",
default="0",
help='Specify the window to print a description of. Check which windows exist with "po (id)[[UIApplication sharedApplication] windows]".',
),
fb.FBCommandArgument(
short="-s",
long="--short",
arg="short",
boolean=True,
default=False,
help="Print a short description of the view",
),
fb.FBCommandArgument(
short="-m",
long="--medium",
arg="medium",
boolean=True,
default=False,
help="Print a medium description of the view",
),
]
def args(self):
return [
fb.FBCommandArgument(
arg="aView",
type="UIView*/NSView*",
help="The view to print the description of.",
default="__keyWindow_dynamic__",
)
]
def run(self, arguments, options):
maxDepth = int(options.depth)
window = int(options.window)
isMac = runtimeHelpers.isMacintoshArch()
if window > 0:
if isMac:
arguments[0] = (
"(id)[[[[NSApplication sharedApplication] windows] objectAtIndex:"
+ str(window)
+ "] contentView]"
)
else:
arguments[0] = (
"(id)[[[UIApplication sharedApplication] windows] objectAtIndex:"
+ str(window)
+ "]"
)
elif arguments[0] == "__keyWindow_dynamic__":
if isMac:
arguments[
0
] = "(id)[[[[NSApplication sharedApplication] windows] objectAtIndex:0] contentView]"
else:
arguments[0] = "(id)[[UIApplication sharedApplication] keyWindow]"
if options.upwards:
view = arguments[0]
description = viewHelpers.upwardsRecursiveDescription(view, maxDepth)
if description:
print(description)
else:
print(
"Failed to walk view hierarchy. Make sure you pass a view, not any other kind of object or expression."
)
else:
printingMethod = "recursiveDescription"
if isMac:
printingMethod = "_subtreeDescription"
description = fb.evaluateExpressionValue(
"(id)[" + arguments[0] + " " + printingMethod + "]"
).GetObjectDescription()
if maxDepth > 0:
separator = re.escape(" | ")
prefixToRemove = separator * maxDepth + " "
description += "\n"
description = re.sub(r"%s.*\n" % (prefixToRemove), r"", description)
if options.short:
toRemove = ":.*(?:\n|$)"
description = re.sub(toRemove, r">\n", description)
elif options.medium:
toRemove = ";.*(?:\n|$)"
description = re.sub(toRemove, r">\n", description)
print(description)
class FBPrintViewControllerHierarchyCommand(fb.FBCommand):
def name(self):
return "pvc"
def description(self):
return "Print the recursion description of <aViewController>."
def args(self):
return [
fb.FBCommandArgument(
arg="aViewController",
type="UIViewController*",
help="The view controller to print the description of.",
default="__keyWindow_rootVC_dynamic__",
)
]
def run(self, arguments, options):
isMac = runtimeHelpers.isMacintoshArch()
if arguments[0] == "__keyWindow_rootVC_dynamic__":
if fb.evaluateBooleanExpression(
"[UIViewController respondsToSelector:@selector(_printHierarchy)]"
):
print(fb.describeObject("[UIViewController _printHierarchy]"))
return
arguments[
0
] = "(id)[(id)[[UIApplication sharedApplication] keyWindow] rootViewController]"
if isMac:
arguments[
0
] = "(id)[[[[NSApplication sharedApplication] windows] objectAtIndex:0] contentViewController]"
print(vcHelpers.viewControllerRecursiveDescription(arguments[0]))
class FBPrintIsExecutingInAnimationBlockCommand(fb.FBCommand):
def name(self):
return "panim"
def description(self):
return (
"Prints if the code is currently execution with a UIView animation block."
)
def run(self, arguments, options):
lldb.debugger.HandleCommand("p (BOOL)[UIView _isInAnimationBlock]")
def _printIterative(initialValue, generator):
indent = 0
for currentValue in generator(initialValue):
print(" | " * indent + currentValue)
indent += 1
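# For example, a generator yielding "UIButton", "UIControl", "UIView" prints:
#   UIButton
#    | UIControl
#    |  | UIView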
class FBPrintInheritanceHierarchy(fb.FBCommand):
def name(self):
return "pclass"
def description(self):
return "Print the inheritance starting from an instance of any class."
def args(self):
return [
fb.FBCommandArgument(
arg="object", type="id", help="The instance to examine."
)
]
def run(self, arguments, options):
_printIterative(arguments[0], _inheritanceHierarchy)
def _inheritanceHierarchy(instanceOfAClass):
instanceAddress = fb.evaluateExpression(instanceOfAClass)
instanceClass = fb.evaluateExpression("(id)[(id)" + instanceAddress + " class]")
while int(instanceClass, 16):
yield fb.evaluateExpressionValue(instanceClass).GetObjectDescription()
instanceClass = fb.evaluateExpression(
"(id)[(id)" + instanceClass + " superclass]"
)
class FBPrintUpwardResponderChain(fb.FBCommand):
def name(self):
return "presponder"
def description(self):
return "Print the responder chain starting from a specific responder."
def args(self):
return [
fb.FBCommandArgument(
arg="startResponder",
type="UIResponder *",
help="The responder to use to start walking the chain.",
)
]
def run(self, arguments, options):
startResponder = fb.evaluateInputExpression(arguments[0])
isMac = runtimeHelpers.isMacintoshArch()
responderClass = "UIResponder"
if isMac:
responderClass = "NSResponder"
if not fb.evaluateBooleanExpression(
"(BOOL)[(id)"
+ startResponder
+ " isKindOfClass:["
+ responderClass
+ " class]]"
):
print("Whoa, " + startResponder + " is not a " + responderClass + ". =(")
return
_printIterative(startResponder, _responderChain)
def _responderChain(startResponder):
responderAddress = fb.evaluateExpression(startResponder)
while int(responderAddress, 16):
yield fb.evaluateExpressionValue(responderAddress).GetObjectDescription()
responderAddress = fb.evaluateExpression(
"(id)[(id)" + responderAddress + " nextResponder]"
)
def tableViewInHierarchy():
viewDescription = fb.evaluateExpressionValue(
"(id)[(id)[[UIApplication sharedApplication] keyWindow] recursiveDescription]"
).GetObjectDescription()
searchView = None
# Try to find an instance of UITableView first.
classPattern = re.compile(r"UITableView: (0x[0-9a-fA-F]+);")
for match in re.finditer(classPattern, viewDescription):
searchView = match.group(1)
break
# Try to find a direct subclass
if not searchView:
subclassPattern = re.compile(r"(0x[0-9a-fA-F]+); baseClass = UITableView;")
for match in re.finditer(subclassPattern, viewDescription):
searchView = match.group(1)
break
# SLOW: check every pointer in town
if not searchView:
pattern = re.compile(r"(0x[0-9a-fA-F]+)[;>]")
for view in re.findall(pattern, viewDescription):
if fb.evaluateBooleanExpression(
"[" + view + " isKindOfClass:(id)[UITableView class]]"
):
searchView = view
break
return searchView
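# The patterns above match recursiveDescription lines such as (hypothetical
# addresses):
#   <UITableView: 0x7fb425d2c000; frame = ...>               -> classPattern
#   <MyTableView: 0x7fb425d2c000; baseClass = UITableView;>  -> subclassPattern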
class FBPrintOnscreenTableView(fb.FBCommand):
def name(self):
return "ptv"
def description(self):
return "Print the highest table view in the hierarchy."
def run(self, arguments, options):
tableView = tableViewInHierarchy()
if tableView:
viewValue = fb.evaluateExpressionValue(tableView)
print(viewValue.GetObjectDescription())
cmd = 'echo %s | tr -d "\n" | pbcopy' % tableView
os.system(cmd)
else:
print("Sorry, chump. I couldn't find a table-view. :'(")
class FBPrintOnscreenTableViewCells(fb.FBCommand):
def name(self):
return "pcells"
def description(self):
return "Print the visible cells of the highest table view in the hierarchy."
def run(self, arguments, options):
tableView = tableViewInHierarchy()
print(
fb.evaluateExpressionValue(
"(id)[(id)" + tableView + " visibleCells]"
).GetObjectDescription()
)
class FBPrintInternals(fb.FBCommand):
def name(self):
return "pinternals"
def description(self):
return "Show the internals of an object by dereferencing it as a pointer."
def args(self):
return [
fb.FBCommandArgument(
arg="object", type="id", help="Object expression to be evaluated."
)
]
def options(self):
return [
fb.FBCommandArgument(
arg="appleWay",
short="-a",
long="--apple",
boolean=True,
default=False,
help="Print ivars the apple way",
)
]
def run(self, arguments, options):
object = fb.evaluateObjectExpression(arguments[0])
if options.appleWay:
if fb.evaluateBooleanExpression(
"[{} respondsToSelector:@selector(_ivarDescription)]".format(object)
):
command = "po [{} _ivarDescription]".format(object)
else:
print("Sorry, but it seems Apple dumped the _ivarDescription method")
return
else:
objectClass = fb.evaluateExpressionValue(
"(id)[(id)(" + object + ") class]"
).GetObjectDescription()
command = "p *(({} *)((id){}))".format(objectClass, object)
lldb.debugger.HandleCommand(command)
class FBPrintInstanceVariable(fb.FBCommand):
def name(self):
return "pivar"
def description(self):
return "Print the value of an object's named instance variable."
def args(self):
return [
fb.FBCommandArgument(
arg="object", type="id", help="Object expression to be evaluated."
),
fb.FBCommandArgument(
arg="ivarName", help="Name of instance variable to print."
),
]
def run(self, arguments, options):
object = fb.evaluateInputExpression(arguments[0])
ivarName = arguments[1]
objectClass = fb.evaluateExpressionValue(
"(id)[(" + object + ") class]"
).GetObjectDescription()
ivarTypeCommand = '((char *)ivar_getTypeEncoding((void*)object_getInstanceVariable((id){}, "{}", 0)))[0]'.format(
object, ivarName
)
ivarTypeEncodingFirstChar = fb.evaluateExpression(ivarTypeCommand)
result = fb.evaluateExpressionValue(
"(({} *)({}))->{}".format(objectClass, object, ivarName)
)
print(
result.GetObjectDescription()
if "@" in ivarTypeEncodingFirstChar
else result
)
class FBPrintKeyPath(fb.FBCommand):
def name(self):
return "pkp"
def description(self):
return "Print out the value of the key path expression using -valueForKeyPath:"
def args(self):
return [
fb.FBCommandArgument(
arg="keypath", type="NSString *", help="The keypath to print"
)
]
def run(self, arguments, options):
command = arguments[0]
if len(command.split(".")) == 1:
lldb.debugger.HandleCommand("po " + command)
else:
objectToMessage, keypath = command.split(".", 1)
object = fb.evaluateObjectExpression(objectToMessage)
print(
fb.describeObject('[{} valueForKeyPath:@"{}"]'.format(object, keypath))
)
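# Example (hypothetical): `pkp self.view.backgroundColor` evaluates `self`
# and then messages it with -valueForKeyPath:@"view.backgroundColor".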
class FBPrintApplicationDocumentsPath(fb.FBCommand):
def name(self):
return "pdocspath"
def description(self):
return "Print application's 'Documents' directory path."
def options(self):
return [
fb.FBCommandArgument(
short="-o",
long="--open",
arg="open",
boolean=True,
default=False,
help="open in Finder",
)
]
def run(self, arguments, options):
# in iOS SDK NSDocumentDirectory == 9 NSUserDomainMask == 1
NSDocumentDirectory = "9"
NSUserDomainMask = "1"
path = fb.evaluateExpressionValue(
"(NSString*)[NSSearchPathForDirectoriesInDomains("
+ NSDocumentDirectory
+ ", "
+ NSUserDomainMask
+ ", YES) lastObject]"
)
pathString = "{}".format(path).split('"')[1]
cmd = 'echo {} | tr -d "\n" | pbcopy'.format(pathString)
os.system(cmd)
print(pathString)
if options.open:
os.system("open " + pathString)
class FBPrintApplicationBundlePath(fb.FBCommand):
def name(self):
return "pbundlepath"
def description(self):
return "Print application's bundle directory path."
def options(self):
return [
fb.FBCommandArgument(
short="-o",
long="--open",
arg="open",
boolean=True,
default=False,
help="open in Finder",
)
]
def run(self, arguments, options):
path = fb.evaluateExpressionValue(
"(NSString*)[[NSBundle mainBundle] bundlePath]"
)
pathString = "{}".format(path).split('"')[1]
cmd = 'echo {} | tr -d "\n" | pbcopy'.format(pathString)
os.system(cmd)
print(pathString)
if options.open:
os.system("open " + pathString)
class FBPrintData(fb.FBCommand):
def name(self):
return "pdata"
def description(self):
return (
"Print the contents of NSData object as string.\n"
"Supported encodings:\n"
"- ascii,\n"
"- utf8,\n"
"- utf16, unicode,\n"
"- utf16l (Little endian),\n"
"- utf16b (Big endian),\n"
"- utf32,\n"
"- utf32l (Little endian),\n"
"- utf32b (Big endian),\n"
"- latin1, iso88591 (88591),\n"
"- latin2, iso88592 (88592),\n"
"- cp1251 (1251),\n"
"- cp1252 (1252),\n"
"- cp1253 (1253),\n"
"- cp1254 (1254),\n"
"- cp1250 (1250),"
)
def options(self):
return [
fb.FBCommandArgument(
arg="encoding",
short="-e",
long="--encoding",
type="string",
help="Used encoding (default utf-8).",
default="utf-8",
)
]
def args(self):
return [
fb.FBCommandArgument(arg="data", type="NSData *", help="NSData object.")
]
def run(self, arguments, option): # noqa C901
# Normalize encoding.
encoding_text = option.encoding.lower().replace(" ", "").replace("-", "")
enc = 4 # Default encoding UTF-8.
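# The numeric values below are Foundation NSStringEncoding constants
# (NSASCIIStringEncoding == 1, NSUTF8StringEncoding == 4, ...,
# NSUTF16BigEndianStringEncoding == 0x90000100, etc.).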
if encoding_text == "ascii":
enc = 1
elif encoding_text == "utf8":
enc = 4
elif (
encoding_text == "latin1"
or encoding_text == "88591"
or encoding_text == "iso88591"
):
enc = 5
elif (
encoding_text == "latin2"
or encoding_text == "88592"
or encoding_text == "iso88592"
):
enc = 9
elif encoding_text == "unicode" or encoding_text == "utf16":
enc = 10
elif encoding_text == "1251" or encoding_text == "cp1251":
enc = 11
elif encoding_text == "1252" or encoding_text == "cp1252":
enc = 12
elif encoding_text == "1253" or encoding_text == "cp1253":
enc = 13
elif encoding_text == "1254" or encoding_text == "cp1254":
enc = 14
elif encoding_text == "1250" or encoding_text == "cp1250":
enc = 15
elif encoding_text == "utf16b":
enc = 0x90000100
elif encoding_text == "utf16l":
enc = 0x94000100
elif encoding_text == "utf32":
enc = 0x8C000100
elif encoding_text == "utf32b":
enc = 0x98000100
elif encoding_text == "utf32l":
enc = 0x9C000100
print(
fb.describeObject(
"[[NSString alloc] initWithData:{} encoding:{}]".format(
arguments[0], enc
)
)
)
class FBPrintTargetActions(fb.FBCommand):
def name(self):
return "pactions"
def description(self):
return "Print the actions and targets of a control."
def args(self):
return [
fb.FBCommandArgument(
arg="control",
type="UIControl *",
help="The control to inspect the actions of.",
)
]
def run(self, arguments, options):
control = fb.evaluateInputExpression(arguments[0])
targets = fb.evaluateObjectExpression(
"[[{control} allTargets] allObjects]".format(control=control)
)
targetCount = fb.evaluateIntegerExpression(
"[{targets} count]".format(targets=targets)
)
for index in range(0, targetCount):
target = fb.evaluateObjectExpression(
"[{targets} objectAtIndex:{index}]".format(targets=targets, index=index)
)
actions = fb.evaluateObjectExpression(
"[{control} actionsForTarget:{target} forControlEvent:0]".format(
control=control, target=target
)
)
targetDescription = fb.evaluateExpressionValue(
"(id){target}".format(target=target)
).GetObjectDescription()
actionsDescription = fb.evaluateExpressionValue(
'(id)[{actions} componentsJoinedByString:@", "]'.format(actions=actions)
).GetObjectDescription()
print(
"{target}: {actions}".format(
target=targetDescription, actions=actionsDescription
)
)
class FBPrintJSON(fb.FBCommand):
def name(self):
return "pjson"
def description(self):
return "Print JSON representation of NSDictionary or NSArray object"
def options(self):
return [
fb.FBCommandArgument(
arg="plain",
short="-p",
long="--plain",
boolean=True,
default=False,
help="Plain JSON",
)
]
def args(self):
return [
fb.FBCommandArgument(
arg="object",
type="id",
help="The NSDictionary or NSArray object to print",
)
]
def run(self, arguments, options):
objectToPrint = fb.evaluateInputExpression(arguments[0])
pretty = 0 if options.plain else 1
jsonData = fb.evaluateObjectExpression(
"[NSJSONSerialization dataWithJSONObject:(id){} options:{} error:nil]".format(
objectToPrint, pretty
)
)
jsonString = fb.evaluateExpressionValue(
"(NSString*)[[NSString alloc] initWithData:(id){} encoding:4]".format(
jsonData
)
).GetObjectDescription()
print(jsonString)
class FBPrintSwiftJSON(fb.FBCommand):
def name(self):
return "psjson"
def description(self):
return "Print JSON representation of Swift Dictionary or Swift Array object"
def options(self):
return [
fb.FBCommandArgument(
arg="plain",
short="-p",
long="--plain",
boolean=True,
default=False,
help="Plain JSON",
)
]
def args(self):
return [
fb.FBCommandArgument(
arg="object",
type="NSObject *",
help="The Swift Dictionary or Swift Array to print",
)
]
def run(self, arguments, options):
# Convert to NSObject first to allow for objc runtime to process it
objectToPrint = fb.evaluateInputExpression(
"{obj} as NSObject".format(obj=arguments[0])
)
pretty = 0 if options.plain else 1
jsonData = fb.evaluateObjectExpression(
"[NSJSONSerialization dataWithJSONObject:(NSObject*){} options:{} error:nil]".format(
objectToPrint, pretty
)
)
jsonString = fb.evaluateExpressionValue(
"(NSString*)[[NSString alloc] initWithData:(NSObject*){} encoding:4]".format(
jsonData
)
).GetObjectDescription()
print(jsonString)
class FBPrintAsCurl(fb.FBCommand):
def name(self):
return "pcurl"
def description(self):
return "Print the NSURLRequest (HTTP) as curl command."
def options(self):
return [
fb.FBCommandArgument(
short="-e",
long="--embed-data",
arg="embed",
boolean=True,
default=False,
help="Embed request data as base64.",
)
]
def args(self):
return [
fb.FBCommandArgument(
arg="request",
type="NSURLRequest*/NSMutableURLRequest*",
help="The request to convert to the curl command.",
)
]
def generateTmpFilePath(self):
return "/tmp/curl_data_{}".format(
fb.evaluateExpression(
"(NSTimeInterval)[NSDate timeIntervalSinceReferenceDate]"
)
)
def run(self, arguments, options):
request = fb.evaluateInputExpression(arguments[0])
HTTPHeaderString = ""
HTTPMethod = fb.evaluateExpressionValue(
"(id)[{} HTTPMethod]".format(request)
).GetObjectDescription()
URL = fb.evaluateExpressionValue(
"(id)[{} URL]".format(request)
).GetObjectDescription()
timeout = fb.evaluateExpression(
"(NSTimeInterval)[{} timeoutInterval]".format(request)
)
HTTPHeaders = fb.evaluateObjectExpression(
"(id)[{} allHTTPHeaderFields]".format(request)
)
HTTPHeadersCount = fb.evaluateIntegerExpression(
"[{} count]".format(HTTPHeaders)
)
allHTTPKeys = fb.evaluateObjectExpression("[{} allKeys]".format(HTTPHeaders))
for index in range(0, HTTPHeadersCount):
key = fb.evaluateObjectExpression(
"[{} objectAtIndex:{}]".format(allHTTPKeys, index)
)
keyDescription = fb.evaluateExpressionValue(
"(id){}".format(key)
).GetObjectDescription()
value = fb.evaluateExpressionValue(
"(id)[(id){} objectForKey:{}]".format(HTTPHeaders, key)
).GetObjectDescription()
if len(HTTPHeaderString) > 0:
HTTPHeaderString += " "
HTTPHeaderString += '-H "{}: {}"'.format(keyDescription, value)
HTTPData = fb.evaluateObjectExpression("[{} HTTPBody]".format(request))
dataFile = None
dataAsString = None
if fb.evaluateIntegerExpression("[{} length]".format(HTTPData)) > 0:
if options.embed:
if fb.evaluateIntegerExpression(
"[{} respondsToSelector:@selector(base64EncodedStringWithOptions:)]".format(
HTTPData
)
):
dataAsString = fb.evaluateExpressionValue(
"(id)[(id){} base64EncodedStringWithOptions:0]".format(HTTPData)
).GetObjectDescription()
else:
print("This version of OS doesn't supports base64 data encoding")
return False
elif not runtimeHelpers.isIOSDevice():
dataFile = self.generateTmpFilePath()
if not fb.evaluateBooleanExpression(
'(BOOL)[{} writeToFile:@"{}" atomically:NO]'.format(
HTTPData, dataFile
)
):
print("Can't write data to file {}".format(dataFile))
return False
else:
print(
'HTTPBody data for iOS Device is supported only with "--embed-data" flag'
)
return False
commandString = ""
if dataAsString is not None and len(dataAsString) > 0:
dataFile = self.generateTmpFilePath()
commandString += 'echo "{}" | base64 -D -o "{}" && '.format(
dataAsString, dataFile
)
commandString += "curl -X {} --connect-timeout {}".format(HTTPMethod, timeout)
if len(HTTPHeaderString) > 0:
commandString += " " + HTTPHeaderString
if dataFile is not None:
commandString += ' --data-binary @"{}"'.format(dataFile)
commandString += ' "{}"'.format(URL)
print(commandString)
class FBPrintToClipboard(fb.FBCommand):
def name(self):
return "pbcopy"
def description(self):
return "Print object and copy output to clipboard"
def args(self):
return [
fb.FBCommandArgument(arg="object", type="id", help="The object to print")
]
def run(self, arguments, options):
lldbOutput = fb.evaluateExpressionValue(
"[{changeset} description]".format(changeset=arguments[0])
).GetObjectDescription()
process = subprocess.Popen(
"pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE
)
process.communicate(lldbOutput.encode("utf-8"))
print("Object copied to clipboard")
class FBPrintObjectInObjc(fb.FBCommand):
def name(self):
return "poobjc"
def description(self):
return 'Print the expression result, with the expression run in an ObjC++ context. (Shortcut for "expression -O -l ObjC++ -- " )'
def args(self):
return [
fb.FBCommandArgument(
arg="expression", help="ObjC expression to evaluate and print."
)
]
def run(self, arguments, options):
expression = arguments[0]
lldb.debugger.HandleCommand("expression -O -l ObjC++ -- " + expression)
|
|
# Brain Tumor Classification
# Load and Split dataset into training set,
# validation set and testing set.
# Author: Qixun QU
# Copyleft: MIT License
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import nibabel as nib
from random import seed, shuffle
from keras.utils import to_categorical
class BTCDataset(object):
def __init__(self,
hgg_dir, lgg_dir,
volume_type="t1ce",
train_prop=0.6,
valid_prop=0.2,
random_state=0,
is_augment=True,
pre_trainset_path=None,
pre_validset_path=None,
pre_testset_path=None,
data_format=".nii.gz"):
'''__INIT__
Initialize configurations for loading
and partitioning the dataset.
Important variables:
- train_x, train_y
- valid_x, valid_y
- test_x, test_y
(x: brain images, y: labels)
Inputs:
-------
- hgg_dir: string, path of the directory that contains HGG subjects.
- lgg_dir: string, path of the directory that contains LGG subjects.
- volume_type: string, type of brain tissue, "t1ce", "flair",
"t1" or "t2". Default is "t1ce".
- train_prop: float between 0 and 1, proportion of training
data to the whole dataset. Default is 0.6.
- valid_prop: float between 0 and 1, proportion of validation
data to the whole dataset. Default is 0.2.
- random_state: int, seed for reproducibly partitioning the dataset.
- is_augment: boolean, if True, do augmentation by flipping
each LGG image from left to right. Default is True.
- pre_trainset_path, pre_validset_path, pre_testset_path:
string, paths of csv files that give information of subjects (IDs
and labels) in the training set, validation set and testing set.
- data_format: string, format of brain images, default is ".nii.gz".
'''
self.hgg_dir = hgg_dir
self.lgg_dir = lgg_dir
self.volume_type = volume_type
self.train_prop = train_prop
self.valid_prop = valid_prop
self.random_state = int(random_state)
self.is_augment = is_augment
self.pre_trainset = pre_trainset_path
self.pre_validset = pre_validset_path
self.pre_testset = pre_testset_path
self.data_format = data_format
self.train_x, self.train_y = None, None
self.valid_x, self.valid_y = None, None
self.test_x, self.test_y = None, None
return
def run(self, pre_split=True,
save_split=False,
save_split_dir=None):
'''RUN
Partition dataset.
Inputs:
-------
- pre_split: boolean, if True, read csv files to get information
of partitions that have been split. Default is True.
- save_split: boolean, if True, save partition to csv files.
Default is False.
- save_split_dir: string, path of directory to save partition
information. It is useful if save_split is True.
Default is None.
'''
print("\nSplitting dataset to train, valide and test.\n")
# Load partition's information from csv file
# or generate new partitions
trainset, validset, testset = \
self._get_pre_datasplit() if pre_split else \
self._get_new_datasplit()
# Load images according to partition information
self._load_dataset(trainset, validset, testset)
if save_split and (not pre_split):
# Save new partitions into csv files
self.save_split_dir = save_split_dir
self._save_dataset(trainset, validset, testset)
return
def _get_pre_datasplit(self):
'''_GET_PRE_DATASPLIT
Load partition information from csv files for
training set, validation set and testing set.
In each csv file, information includes:
- ID: subject's ID.
- label: subject's label, 1 for HGG and 0 for LGG.
Outputs:
--------
- trainset, validset, testset: list of information,
each element is [subject_path, label].
'''
# Parameters for function to load csv
paras = {"hgg_dir": self.hgg_dir,
"lgg_dir": self.lgg_dir,
"data_format": self.data_format,
"csv_path": None}
# Load partition of training set
paras["csv_path"] = self.pre_trainset
trainset = self.load_datasplit(**paras)
# Load partition of validation set
paras["csv_path"] = self.pre_validset
validset = self.load_datasplit(**paras)
# Load partition of testing set
paras["csv_path"] = self.pre_testset
testset = self.load_datasplit(**paras)
return trainset, validset, testset
def _get_new_datasplit(self):
'''_GET_NEW_DATASPLIT
Obtain new partition of dataset.
-1- Generate paths of all subjects.
-2- Randomly rearrange the path list.
-3- Partition dataset according to proportions.
-4- Merge HGG and LGG subjects.
Outputs:
--------
- trainset, validset, testset: list of information,
each element is [subject_path, label].
'''
# Parameters for function to load subject's paths
paras = {"label": None,
"dir_path": None,
"volume_type": self.volume_type,
"random_state": self.random_state}
# Load HGG subjects' paths
paras["label"], paras["dir_path"] = 1, self.hgg_dir
hgg_subjects = self.get_subjects_path(**paras)
# Load LGG subjects' paths
paras["label"], paras["dir_path"] = 0, self.lgg_dir
lgg_subjects = self.get_subjects_path(**paras)
# Parameters for function to partition dataset
paras = {"subjects": None,
"train_prop": self.train_prop,
"valid_prop": self.valid_prop}
# Partition HGG subjects into three sets
paras["subjects"] = hgg_subjects
hgg_train, hgg_valid, hgg_test = self.split_dataset(**paras)
# Partition LGG subjects into three sets
paras["subjects"] = lgg_subjects
lgg_train, lgg_valid, lgg_test = self.split_dataset(**paras)
# Merge HGG and LGG subjects
trainset = hgg_train + lgg_train
validset = hgg_valid + lgg_valid
testset = hgg_test + lgg_test
return trainset, validset, testset
def _load_dataset(self, trainset, validset, testset):
'''_LOAD_DATASET
Load images and labels for three partitions:
training set, validation set and testing set.
'''
# Load images and labels of subjects in testing set
self.test_x, test_y = self.load_data(testset, "test set")
self.test_y = to_categorical(test_y, num_classes=2)
# Load images and labels of subjects in validation set
self.valid_x, valid_y = self.load_data(validset, "valid set")
self.valid_y = to_categorical(valid_y, num_classes=2)
# Load images and labels of subjects in training set
train_x, train_y = self.load_data(trainset, "train set")
if self.is_augment:
# Augmentation on LGG subjects
train_x, train_y = self.augment(train_x, train_y)
self.train_x = train_x
self.train_y = to_categorical(train_y, num_classes=2)
return
def _save_dataset(self, trainset, validset, testset):
'''_SAVE_DATASET
Save partition information into csv files.
Outputs:
--------
- trainset_[random_state].csv
- validset_[random_state].csv
- testset_[random_state].csv
'''
# Generate paths for output csv files
ap = str(self.random_state) + ".csv"
trainset_path = os.path.join(self.save_split_dir, "trainset_" + ap)
validset_path = os.path.join(self.save_split_dir, "validset_" + ap)
testset_path = os.path.join(self.save_split_dir, "testset_" + ap)
# Save information into csv files
self.save_datasplit(trainset, trainset_path)
self.save_datasplit(validset, validset_path)
self.save_datasplit(testset, testset_path)
return
@staticmethod
def load_datasplit(hgg_dir, lgg_dir, csv_path,
data_format=".nii.gz"):
'''LOAD_DATASPLIT
Load partition information from given csv file.
Inputs:
-------
- hgg_dir: string, directory path of HGG subjects.
- lgg_dir: string, directory path of LGG subjects.
- csv_path: string, path of csv file which contains
partition information.
- data_format: string, format of input images,
default is ".nii.gz".
Output:
-------
- info: list of partition information, each element is
[subject_path, label].
'''
# Load IDs and labels from csv file
df = pd.read_csv(csv_path)
IDs = df["ID"].values.tolist()
labels = df["label"].values.tolist()
info = []
for ID, label in zip(IDs, labels):
# Generate directory path of each subject
target_dir = hgg_dir if label else lgg_dir
path = os.path.join(target_dir, ID[:-5],
ID + data_format)
info.append([path, label])
return info
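# A sketch of the csv layout load_datasplit expects; the subject ID below is
# hypothetical. Each row yields [path, label], where ID[:-5] drops the
# trailing "_t1ce" to recover the subject's directory name:
#
#   ID,label
#   Brats17_TCIA_101_1_t1ce,1
#   -> [<hgg_dir>/Brats17_TCIA_101_1/Brats17_TCIA_101_1_t1ce.nii.gz, 1]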
@staticmethod
def save_datasplit(dataset, to_path):
'''SAVE_DATASPLIT
Save partition information into csv file.
Inputs:
-------
- dataset: list, information of partition, each element
is [subject_path, label].
- to_path: string, the path of csv file to be saved.
Output:
-------
- A csv table with two columns, "ID" and "label".
'''
IDs, labels = [], []
for i in dataset:
# Extract ID from subject's path
IDs.append(i[0].split("/")[-1].split(".")[0])
# Extract label
labels.append(i[1])
# Create pandas DataFrame and save it into csv file
df = pd.DataFrame(data={"ID": IDs, "label": labels})
df.to_csv(to_path, index=False)
return
@staticmethod
def get_subjects_path(dir_path, volume_type, label,
random_state=0):
'''GET_SUBJECTS_PATH
Obtain subjects' paths of HGG or LGG.
Inputs:
-------
- dir_path: string, directory path of HGG or LGG subjects.
- volume_type: string, type of brain tissue, "t1ce", "flair",
"t1" or "t2".
- label: int, 1 for HGG and 0 for LGG.
- random_state: int, seed for shuffle paths list.
Output:
-------
- subjects_paths: list with two columns, each element is
[subject_path, label].
'''
# Obtain all subjects' names
subjects = os.listdir(dir_path)
# Set seed and shuffle list
# Different seed leads to different shuffled list
# to change subjects in partitions
seed(random_state)
shuffle(subjects)
subjects_paths = []
for subject in subjects:
subject_dir = os.path.join(dir_path, subject)
for scan_name in os.listdir(subject_dir):
if volume_type not in scan_name:
# Not target volume
continue
# Element [subject_dir, label]
scan_path = os.path.join(subject_dir, scan_name)
subjects_paths.append([scan_path, label])
return subjects_paths
@staticmethod
def split_dataset(subjects, train_prop=0.6, valid_prop=0.2):
'''SPLIT_DATASET
Partition dataset into three parts according
to proportions.
Inputs:
-------
- subjects: list with two columns, information of all
subjects, each element is [subject_path, label].
- train_prop: float between 0 and 1, proportion of training
data to whole dataset. Default is 0.6.
- valid_prop: float between 0 and 1, proportion of validation
data to whole dataset. Default is 0.2.
Outputs:
--------
- trainset, validset, testset: partition information, including
subjects' paths and labels.
'''
subj_num = len(subjects)
# Extract subjects for testing set
train_valid_num = subj_num * (train_prop + valid_prop)
train_valid_idx = int(round(train_valid_num))
testset = subjects[train_valid_idx:]
# Extract subjects for validation set
valid_idx = int(round(subj_num * valid_prop))
validset = subjects[:valid_idx]
# Extract subjects for training set
trainset = subjects[valid_idx:train_valid_idx]
return trainset, validset, testset
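# A worked example of the split arithmetic above: with 10 subjects,
# train_prop=0.6 and valid_prop=0.2,
#   valid_idx       = round(10 * 0.2) = 2
#   train_valid_idx = round(10 * 0.8) = 8
# so validset = subjects[:2], trainset = subjects[2:8] and
# testset = subjects[8:], i.e. a 6/2/2 split.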
@staticmethod
def load_data(dataset, mode):
'''LOAD_DATA
Load images from partition information.
Inputs:
-------
- dataset: list with two columns, [subject_path, label].
- mode: string, indicates which partition, "train set",
"valid set" or "test set".
Outputs:
--------
- x: numpy ndarray in shape [n, 112, 96, 96, 1], n is the
number of scans in one partition. Input images.
- y: numpy ndarray in shape [n, 1]. Labels of subjects.
'''
x, y = [], []
print("Loading {} data ...".format(mode))
for subject in dataset:
volume_path, label = subject[0], subject[1]
# Load image and rotate it to standard space
volume = nib.load(volume_path).get_data()
volume = np.transpose(volume, axes=[1, 0, 2])
volume = np.flipud(volume)
# Extract mean and std from brain object
volume_obj = volume[volume > 0]
obj_mean = np.mean(volume_obj)
obj_std = np.std(volume_obj)
# Normalize whole image
volume = (volume - obj_mean) / obj_std
volume = np.expand_dims(volume, axis=3)
x.append(volume.astype(np.float32))
y.append(label)
x = np.array(x)
y = np.array(y).reshape((-1, 1))
return x, y
@staticmethod
def augment(train_x, train_y):
'''AUGMENT
Do augmentation of LGG subjects in training set
by flipping each image from left to right.
Inputs:
-------
- train_x: numpy ndarray, images array of training set.
- train_y: numpy ndarray, labels of training set.
Outputs:
--------
- train_x: augmented training images, with each LGG sample duplicated as a flipped copy.
- train_y: augmented labels of training set.
'''
print("Do Augmentation on LGG Samples ...")
train_x_aug, train_y_aug = [], []
for i in range(len(train_y)):
train_x_aug.append(train_x[i])
train_y_aug.append(train_y[i])
if train_y[i] == 0:
# Flip image if it is LGG
train_x_aug.append(np.fliplr(train_x[i]))
train_y_aug.append(np.array([0]))
train_x = np.array(train_x_aug)
train_y = np.array(train_y_aug).reshape((-1, 1))
return train_x, train_y
if __name__ == "__main__":
import gc
parent_dir = os.path.dirname(os.getcwd())
# Set directory for input images (separated subjects)
data_dir = os.path.join(parent_dir, "data")
hgg_dir = os.path.join(data_dir, "HGGSegTrimmed")
lgg_dir = os.path.join(data_dir, "LGGSegTrimmed")
# Test 1
# Load and split dataset
data = BTCDataset(hgg_dir, lgg_dir,
volume_type="t1ce",
train_prop=0.6,
valid_prop=0.2,
random_state=0)
data.run(pre_split=False,
save_split=True,
save_split_dir="DataSplit")
print(data.train_x.shape, data.train_y.shape)
del data
gc.collect()
# Test 2
# Load dataset which has been split
data = BTCDataset(hgg_dir, lgg_dir,
volume_type="t1ce",
pre_trainset_path="DataSplit/trainset.csv",
pre_validset_path="DataSplit/validset.csv",
pre_testset_path="DataSplit/testset.csv")
data.run(pre_split=True)
print(data.train_x.shape, data.train_y.shape)
del data
gc.collect()
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from collections import deque, namedtuple
from DeepRTS.contrib.agents import Agent
BUFFER_SIZE = int(1e5) # Replay memory size
BATCH_SIZE = 64 # Number of experiences to sample from memory
GAMMA = 0.99 # Discount factor
TAU = 1e-3 # Soft update parameter for updating fixed q network
LR = 1e-4 # Q Network learning rate
UPDATE_EVERY = 4 # How often to update Q network
class QNetwork(nn.Module):
def __init__(self, input_dims, output_dims, seed):
"""
Build a convolutional neural network
input_dims (tuple): shape of the input observation, e.g. (C, H, W)
output_dims (int): number of discrete actions
seed (int): random seed
"""
super(QNetwork, self).__init__()
self.conv1 = nn.Conv2d(input_dims[0], 32, 7, 3)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
fc_input_dims = self.calculate_conv_output_dims(input_dims)
self.fc1 = nn.Linear(fc_input_dims, 512)
self.fc2 = nn.Linear(512, output_dims)
def calculate_conv_output_dims(self, input_dims):
state = torch.zeros(1, *input_dims)
dims = self.conv1(state)
dims = self.conv2(dims)
return int(np.prod(dims.size()))
def forward(self, state):
conv1 = F.relu(self.conv1(state))
conv2 = F.relu(self.conv2(conv1))
# conv2 shape is BS x n_filters x H x W
conv_state = conv2.view(conv2.size()[0], -1)
# conv_state shape is BS x (n_filters * H * W)
flat1 = F.relu(self.fc1(conv_state))
actions = self.fc2(flat1)
return actions
class ReplayBuffer:
def __init__(self, buffer_size, batch_size, seed):
"""
Replay memory allow agent to record experiences and learn from them
buffer_size (int): maximum size of internal memory
batch_size (int): sample size from experience
seed (int): random seed
"""
self.batch_size = batch_size
self.seed = random.seed(seed)
self.memory = deque(maxlen=buffer_size)
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
def add(self, state, action, reward, next_state, done):
"""Add experience"""
experience = self.experience(state, action, reward, next_state, done)
self.memory.append(experience)
def sample(self):
"""
Sample randomly and return (state, action, reward, next_state, done) tuple as torch tensors
"""
experiences = random.sample(self.memory, k=self.batch_size)
# Convert to torch tensors
states = torch.from_numpy(
np.stack([experience.state for experience in experiences if experience is not None])).float()
actions = torch.from_numpy(
np.vstack([experience.action for experience in experiences if experience is not None])).long()
rewards = torch.from_numpy(
np.vstack([experience.reward for experience in experiences if experience is not None])).float()
next_states = torch.from_numpy(
np.stack([experience.next_state for experience in experiences if experience is not None])).float()
# Convert done from boolean to int
dones = torch.from_numpy(
np.vstack([experience.done for experience in experiences if experience is not None]).astype(
np.uint8)).float()
return (states, actions, rewards, next_states, dones)
def __len__(self):
return len(self.memory)
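# A minimal usage sketch for ReplayBuffer; the transition values below are
# hypothetical placeholders:
#
#   buffer = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, seed=0)
#   buffer.add(state, action, reward, next_state, done)
#   if len(buffer) > BATCH_SIZE:
#       states, actions, rewards, next_states, dones = buffer.sample()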
class DiegoConvAgent(Agent):
def __init__(self, state_size, action_size, seed=0):
"""
DQN Agent interacts with the environment,
stores the experience and learns from it
state_size (int): Dimension of state
action_size (int): Dimension of action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Initialize Q and Fixed Q networks
self.q_network = QNetwork(state_size, action_size, seed)
self.fixed_network = QNetwork(state_size, action_size, seed)
self.optimizer = optim.Adam(self.q_network.parameters())
# Initialise memory
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, seed)
self.timestep = 0
def update(self, state, action, reward, next_state, done):
"""
Update Agent's knowledge
Parameters
state (array_like): Current state of environment
action (int): Action taken in current state
reward (float): Reward received after taking action
next_state (array_like): Next state returned by the environment after taking action
done (bool): whether the episode ended after taking action
"""
self.memory.add(state, action, reward, next_state, done)
self.timestep += 1
if self.timestep % UPDATE_EVERY == 0:
if len(self.memory) > BATCH_SIZE:
sampled_experiences = self.memory.sample()
self.learn(sampled_experiences)
def learn(self, experiences):
"""
Learn from experience by training the q_network
Parameters
experiences (array_like): List of experiences sampled from agent's memory
"""
states, actions, rewards, next_states, dones = experiences
# Get the action with max Q value
action_values = self.fixed_network(next_states).detach()
max_action_values = action_values.max(1)[0].unsqueeze(1)
# If done just use reward, else update Q_target with discounted action values
Q_target = rewards + (GAMMA * max_action_values * (1 - dones))
Q_expected = self.q_network(states).gather(1, actions)
# Calculate loss
loss = F.mse_loss(Q_expected, Q_target)
self.optimizer.zero_grad()
# backward pass
loss.backward()
# update weights
self.optimizer.step()
# Update fixed weights
self.update_fixed_network(self.q_network, self.fixed_network)
def update_fixed_network(self, q_network, fixed_network):
"""
Update fixed network by copying weights from Q network using TAU param
q_network (PyTorch model): Q network
fixed_network (PyTorch model): Fixed target network
"""
for source_parameters, target_parameters in zip(q_network.parameters(), fixed_network.parameters()):
target_parameters.data.copy_(TAU * source_parameters.data + (1.0 - TAU) * target_parameters.data)
def get_action(self, state, eps=0):
"""
Choose the action
state (array_like): current state of environment
eps (float): epsilon for epsilon-greedy action selection
"""
rnd = random.random()
if rnd < eps:
return np.random.randint(self.action_size)
else:
state = torch.from_numpy(state).float().unsqueeze(0)
# set the network into evaluation mode
self.q_network.eval()
with torch.no_grad():
action_values = self.q_network(state)
# Back to training mode
self.q_network.train()
action = np.argmax(action_values.cpu().data.numpy())
return action
def load(self, file):
self.q_network.load_state_dict(torch.load(file))
def save(self, filename):
torch.save(self.q_network.state_dict(), filename)
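# A minimal training-loop sketch for DiegoConvAgent. The env object and its
# reset()/step() API are hypothetical stand-ins for a DeepRTS environment;
# the epsilon schedule is illustrative:
#
#   agent = DiegoConvAgent(state_size=(3, 84, 84), action_size=16)
#   eps = 1.0
#   for episode in range(1000):
#       state, done = env.reset(), False
#       while not done:
#           action = agent.get_action(state, eps)
#           next_state, reward, done = env.step(action)
#           agent.update(state, action, reward, next_state, done)
#           state = next_state
#       eps = max(0.01, eps * 0.995)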
|
|
import collections.abc
import copy
import pickle
import sys
import unittest
class DictSetTest(unittest.TestCase):
def test_constructors_not_callable(self):
kt = type({}.keys())
self.assertRaises(TypeError, kt, {})
self.assertRaises(TypeError, kt)
it = type({}.items())
self.assertRaises(TypeError, it, {})
self.assertRaises(TypeError, it)
vt = type({}.values())
self.assertRaises(TypeError, vt, {})
self.assertRaises(TypeError, vt)
def test_dict_keys(self):
d = {1: 10, "a": "ABC"}
keys = d.keys()
self.assertEqual(len(keys), 2)
self.assertEqual(set(keys), {1, "a"})
self.assertEqual(keys, {1, "a"})
self.assertNotEqual(keys, {1, "a", "b"})
self.assertNotEqual(keys, {1, "b"})
self.assertNotEqual(keys, {1})
self.assertNotEqual(keys, 42)
self.assertIn(1, keys)
self.assertIn("a", keys)
self.assertNotIn(10, keys)
self.assertNotIn("Z", keys)
self.assertEqual(d.keys(), d.keys())
e = {1: 11, "a": "def"}
self.assertEqual(d.keys(), e.keys())
del e["a"]
self.assertNotEqual(d.keys(), e.keys())
def test_dict_items(self):
d = {1: 10, "a": "ABC"}
items = d.items()
self.assertEqual(len(items), 2)
self.assertEqual(set(items), {(1, 10), ("a", "ABC")})
self.assertEqual(items, {(1, 10), ("a", "ABC")})
self.assertNotEqual(items, {(1, 10), ("a", "ABC"), "junk"})
self.assertNotEqual(items, {(1, 10), ("a", "def")})
self.assertNotEqual(items, {(1, 10)})
self.assertNotEqual(items, 42)
self.assertIn((1, 10), items)
self.assertIn(("a", "ABC"), items)
self.assertNotIn((1, 11), items)
self.assertNotIn(1, items)
self.assertNotIn((), items)
self.assertNotIn((1,), items)
self.assertNotIn((1, 2, 3), items)
self.assertEqual(d.items(), d.items())
e = d.copy()
self.assertEqual(d.items(), e.items())
e["a"] = "def"
self.assertNotEqual(d.items(), e.items())
def test_dict_mixed_keys_items(self):
d = {(1, 1): 11, (2, 2): 22}
e = {1: 1, 2: 2}
self.assertEqual(d.keys(), e.items())
self.assertNotEqual(d.items(), e.keys())
def test_dict_values(self):
d = {1: 10, "a": "ABC"}
values = d.values()
self.assertEqual(set(values), {10, "ABC"})
self.assertEqual(len(values), 2)
def test_dict_repr(self):
d = {1: 10, "a": "ABC"}
self.assertIsInstance(repr(d), str)
r = repr(d.items())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
r == "dict_items([(1, 10), ('a', 'ABC')])")
r = repr(d.keys())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_keys(['a', 1])" or
r == "dict_keys([1, 'a'])")
r = repr(d.values())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_values(['ABC', 10])" or
r == "dict_values([10, 'ABC'])")
def test_keys_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'b': 3, 'c': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(d1.keys() & d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() & d2.keys(), {'b'})
self.assertEqual(d1.keys() & d3.keys(), set())
self.assertEqual(d1.keys() & set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() & set(d2.keys()), {'b'})
self.assertEqual(d1.keys() & set(d3.keys()), set())
self.assertEqual(d1.keys() & tuple(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() | d2.keys(), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | set(d2.keys()), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | (1, 2), {'a', 'b', 1, 2})
self.assertEqual(d1.keys() ^ d1.keys(), set())
self.assertEqual(d1.keys() ^ d2.keys(), {'a', 'c'})
self.assertEqual(d1.keys() ^ d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ set(d1.keys()), set())
self.assertEqual(d1.keys() ^ set(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() ^ set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ tuple(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() - d1.keys(), set())
self.assertEqual(d1.keys() - d2.keys(), {'a'})
self.assertEqual(d1.keys() - d3.keys(), {'a', 'b'})
self.assertEqual(d1.keys() - set(d1.keys()), set())
self.assertEqual(d1.keys() - set(d2.keys()), {'a'})
self.assertEqual(d1.keys() - set(d3.keys()), {'a', 'b'})
self.assertEqual(d1.keys() - (0, 1), {'a', 'b'})
self.assertFalse(d1.keys().isdisjoint(d1.keys()))
self.assertFalse(d1.keys().isdisjoint(d2.keys()))
self.assertFalse(d1.keys().isdisjoint(list(d2.keys())))
self.assertFalse(d1.keys().isdisjoint(set(d2.keys())))
self.assertTrue(d1.keys().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.keys().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.keys().isdisjoint(['x', 'y']))
self.assertTrue(d1.keys().isdisjoint({}))
self.assertTrue(d1.keys().isdisjoint(d3.keys()))
de = {}
self.assertTrue(de.keys().isdisjoint(set()))
self.assertTrue(de.keys().isdisjoint([]))
self.assertTrue(de.keys().isdisjoint(de.keys()))
self.assertTrue(de.keys().isdisjoint([1]))
def test_items_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'a': 2, 'b': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(
d1.items() & d1.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() & d2.items(), {('b', 2)})
self.assertEqual(d1.items() & d3.items(), set())
self.assertEqual(d1.items() & set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() & set(d2.items()), {('b', 2)})
self.assertEqual(d1.items() & set(d3.items()), set())
self.assertEqual(d1.items() | d1.items(),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | d2.items(),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() | set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | set(d2.items()),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | set(d3.items()),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() ^ d1.items(), set())
self.assertEqual(d1.items() ^ d2.items(),
{('a', 1), ('a', 2)})
self.assertEqual(d1.items() ^ d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() - d1.items(), set())
self.assertEqual(d1.items() - d2.items(), {('a', 1)})
self.assertEqual(d1.items() - d3.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() - set(d1.items()), set())
self.assertEqual(d1.items() - set(d2.items()), {('a', 1)})
self.assertEqual(d1.items() - set(d3.items()), {('a', 1), ('b', 2)})
self.assertFalse(d1.items().isdisjoint(d1.items()))
self.assertFalse(d1.items().isdisjoint(d2.items()))
self.assertFalse(d1.items().isdisjoint(list(d2.items())))
self.assertFalse(d1.items().isdisjoint(set(d2.items())))
self.assertTrue(d1.items().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.items().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.items().isdisjoint({}))
self.assertTrue(d1.items().isdisjoint(d3.items()))
de = {}
self.assertTrue(de.items().isdisjoint(set()))
self.assertTrue(de.items().isdisjoint([]))
self.assertTrue(de.items().isdisjoint(de.items()))
self.assertTrue(de.items().isdisjoint([1]))
def test_recursive_repr(self):
d = {}
d[42] = d.values()
r = repr(d)
# Cannot perform a stronger test, as the contents of the repr
# are implementation-dependent. All we can say is that we
# want a str result, not an exception of any sort.
self.assertIsInstance(r, str)
d[42] = d.items()
r = repr(d)
# Again.
self.assertIsInstance(r, str)
def test_deeply_nested_repr(self):
d = {}
for i in range(sys.getrecursionlimit() + 100):
d = {42: d.values()}
self.assertRaises(RecursionError, repr, d)
def test_copy(self):
d = {1: 10, "a": "ABC"}
self.assertRaises(TypeError, copy.copy, d.keys())
self.assertRaises(TypeError, copy.copy, d.values())
self.assertRaises(TypeError, copy.copy, d.items())
def test_compare_error(self):
class Exc(Exception):
pass
class BadEq:
def __hash__(self):
return 7
def __eq__(self, other):
raise Exc
k1, k2 = BadEq(), BadEq()
v1, v2 = BadEq(), BadEq()
d = {k1: v1}
self.assertIn(k1, d)
self.assertIn(k1, d.keys())
self.assertIn(v1, d.values())
self.assertIn((k1, v1), d.items())
self.assertRaises(Exc, d.__contains__, k2)
self.assertRaises(Exc, d.keys().__contains__, k2)
self.assertRaises(Exc, d.items().__contains__, (k2, v1))
self.assertRaises(Exc, d.items().__contains__, (k1, v2))
with self.assertRaises(Exc):
v2 in d.values()
def test_pickle(self):
d = {1: 10, "a": "ABC"}
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.keys(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.values(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.items(), proto)
def test_abc_registry(self):
d = dict(a=1)
self.assertIsInstance(d.keys(), collections.abc.KeysView)
self.assertIsInstance(d.keys(), collections.abc.MappingView)
self.assertIsInstance(d.keys(), collections.abc.Set)
self.assertIsInstance(d.keys(), collections.abc.Sized)
self.assertIsInstance(d.keys(), collections.abc.Iterable)
self.assertIsInstance(d.keys(), collections.abc.Container)
self.assertIsInstance(d.values(), collections.abc.ValuesView)
self.assertIsInstance(d.values(), collections.abc.MappingView)
self.assertIsInstance(d.values(), collections.abc.Sized)
self.assertIsInstance(d.items(), collections.abc.ItemsView)
self.assertIsInstance(d.items(), collections.abc.MappingView)
self.assertIsInstance(d.items(), collections.abc.Set)
self.assertIsInstance(d.items(), collections.abc.Sized)
self.assertIsInstance(d.items(), collections.abc.Iterable)
self.assertIsInstance(d.items(), collections.abc.Container)
if __name__ == "__main__":
unittest.main()
|
|
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import scan
import requests
import pandas as pd
import numpy as np
import re
from ipaddress import IPv4Address as ipv4, AddressValueError
import time
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import FuncTickFormatter, FixedTicker, NumeralTickFormatter, Div, Title, LinearAxis, Range1d
from bokeh.charts import Bar, Donut
from bokeh.layouts import gridplot, column, row
odin = 'http://odin.cadc.dao.nrc.ca:9200'
class Init():
def __init__(self, url = None, timeout = 120):
self.timeout = timeout
if not url:
self.url = odin
else:
self.url = url
try:
requests.get(self.url).raise_for_status()
except requests.RequestException:
print("Cannot connect to {0}".format(self.url))
exit(1)
def connect(self):
return Elasticsearch(self.url, timeout = self.timeout)
def ip2dom(ip):
try:
if ipv4(ip) >= ipv4("132.246.0.0") and ipv4(ip) <= ipv4("132.246.255.255"):
if (ipv4(ip) >= ipv4("132.246.195.0") and ipv4(ip) <= ipv4("132.246.195.24")) or (ipv4(ip) >= ipv4("132.246.217.0") and ipv4(ip) <= ipv4("132.246.217.24")) or (ipv4(ip) >= ipv4("132.246.194.0") and ipv4(ip) <= ipv4("132.246.194.24")):
return "CADC"
else:
return "NRC"
elif ipv4(ip) >= ipv4("206.12.0.0") and ipv4(ip) <= ipv4("206.12.255.255"):
return "CC"
elif ipv4(ip) >= ipv4("192.168.0.0") and ipv4(ip) <= ipv4("192.168.255.255"):
return "CADC"
else:
return "Others"
except AddressValueError:
print("ip address cannot be handled {0}".format(ip))
return "Error"
def timing(func):
def wrapper(*args):
t_i = time.time()
r = func(*args)
t_f = time.time() - t_i
print("{0} took {1:.3f}s".format(func.__name__, t_f))
return r
return wrapper
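# Usage sketch for the timing decorator (the commented-out @timing above
# fig4 below applies it the same way):
#
#   @timing
#   def fig1(conn, idx):
#       ...
#   # each call then prints e.g. "fig1 took 1.234s"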
def fig1(conn, idx):
method = ["PUT","GET"]
service = ["transfer_ws", "data_ws", "vospace_ws"]
plots = []
for j, s in enumerate(service):
for i, m in enumerate(method):
query = {
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : s } },
{ "term" : { "phase" : "END" } },
{ "term" : { "method" : m } }
]
}
},
"aggs": {
"req_by_dom": {
"terms": {"field": "clientdomain", "size": 6}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
df = pd.DataFrame.from_dict(res["aggregations"]["req_by_dom"]["buckets"])
_ = pd.DataFrame([res["aggregations"]["req_by_dom"]["sum_other_doc_count"], "Others"]).T
_.columns = df.columns
df = df.append(_, ignore_index = True)
df.columns = ["Events", "Domains"]
plots.append(Donut(df, label = "Domains", values = "Events", title = "service: {0}, method: {1}".format(s, m)))
# gridplot does not render a title; the Div heading below serves that role
grid = gridplot(plots, ncols = 2, plot_width = 600, plot_height = 600)
output_file("fig1.html")
show(column(Div(text = "<h1>Number of Data Transfers by Domain</h1>", width = 1200), grid))
def fig2(conn, idx):
service = ["transfer_ws", "data_ws", "vospace_ws"]
method = ["GET", "PUT"]
plots = []
for j, s in enumerate(service):
for i, m in enumerate(method):
query = {
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : s } },
{ "term" : { "phase" : "END" } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"avgdur_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "1W",
"format" : "yyyy-MM-dd"
},
"aggs": {
"avgdur" : {
"avg" : {
"field" : "duration"
}
}
}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
wk = [_["key_as_string"] for _ in res["aggregations"]["avgdur_perwk"]["buckets"]]
avg_dur = [_["avgdur"]["value"] for _ in res["aggregations"]["avgdur_perwk"]["buckets"]]
df = pd.DataFrame(list(zip(wk, avg_dur)), columns = ["time", "avg_dur"])
df["avg_dur"] = df["avg_dur"] / 1000
plots.append(Bar(df, "time", "avg_dur", legend = False, xlabel = None, yscale = "log", ylabel = "Average Duration", title = "Average Duration per Week (Sec): service: {0}, method: {1}".format(service[j], method[i])))
grid = gridplot(plots, ncols = 1, plot_width = 1200, plot_height = 300)
output_file("fig2.html")
show(column(Div(text = "<h1>Time Evolution of Data Transfers</h1>", width = 1200), grid))
def fig3(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "transfer_ws" } },
{ "term" : { "phase" : "END" } },
{ "term" : { "method" : "GET"} },
{ "term" : { "clientip" : "206.12.48.85" } }
]
}
},
"aggs" : {
"avgrate_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "week",
"format" : "yyyy-MM-dd"
},
"aggs": {
"avgrate" : {
"avg" : {
"field" : "rate"
}
}
}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
wk = [_["key_as_string"] for _ in res["aggregations"]["avgrate_perwk"]["buckets"]]
avg_rate = [_["avgrate"]["value"] for _ in res["aggregations"]["avgrate_perwk"]["buckets"]]
df = pd.DataFrame(list(zip(wk, avg_rate)), columns = ["time", "avg_rate"]).set_index("time")
query2 = {
"query" : {
"match_all": {}
},
"aggs" : {
"numjobs_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "week",
"format" : "yyyy-MM-dd"
}
}
}
}
conn2 = Elasticsearch("http://elastic:cadcstats@206.12.59.36:9200")
try:
res = conn2.search(index = "logs-condor", body = query2)
except TransportError as e:
print(e.info)
raise
wk2 = [_["key_as_string"] for _ in res["aggregations"]["numjobs_perwk"]["buckets"]]
numjobs = [_["doc_count"] for _ in res["aggregations"]["numjobs_perwk"]["buckets"]]
df2 = pd.DataFrame(list(zip(wk2, numjobs)), columns = ["time", "numjobs"]).set_index("time")
df = df.join(df2)
df = df[pd.notnull(df["numjobs"])].fillna(0)
x = [_ for _ in range(len(df))]
p = figure(plot_width = 1200, toolbar_location = "above")
p.vbar(x = x, top = df["avg_rate"], bottom = 0, width = 0.5, legend = "Avg Rate")
p.y_range = Range1d(0, df["avg_rate"].max() * 1.3)
p.yaxis.axis_label = "Average Transfer Rate"
p.extra_y_ranges = {"right_yaxis": Range1d(0, df["numjobs"].max() * 1.1)}
p.add_layout(LinearAxis(y_range_name = "right_yaxis", axis_label = "Number of Batch Jobs"), "right")
p.line(x = x, y = df["numjobs"], line_width = 2, y_range_name = "right_yaxis", color = "red", legend = "Batch Jobs")
p.legend.location = "top_left"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi/4
output_file("fig3.html")
show(column(Div(text = "<h1>Average Transfer Rate of <i>batch.canfar.net</i> VS Number of Batch Jobs</h1>", width = 1000), p))
#@timing
def fig4(conn, idx):
iprange = {("132.246.194.0", "132.246.194.24"):"CADC", ("132.246.195.0", "132.246.195.24"):"CADC", ("132.246.217.0", "132.246.217.24"):"CADC", ("132.246.0.0", "132.246.255.255"):"NRC+CADC", ("192.168.0.0", "192.168.255.255"):"CADC-Private", ("206.12.0.0", "206.12.255.255"):"CC"}
method = ["GET", "PUT"]
i = 0
plots = []
for m in method:
events, gbs = [], []
for _ in iprange:
query = {
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "transfer_ws" } },
{ "term" : { "phase" : "END" } },
{ "term" : { "method" : m } },
{ "term" : { "success" : True } }
]
}
},
"aggs" : {
"start_date" : {
"min" : { "field" : "@timestamp" }
},
"end_date" : {
"max" : { "field" : "@timestamp" }
},
"ip_ranges" : {
"ip_range" : {
"field" : "clientip",
"ranges" : [
{ "from" : _[0], "to" : _[1] }
]
},
"aggs" : {
"gigabytes" : {
"sum" : { "field" : "gbytes" }
}
}
},
"tot_giga" : {
"sum" : { "field" : "gbytes" }
}
}
}
try:
res = conn.search(index = idx, body = query)
#res = scan(conn, index = idx, query = query, scroll = "30m", size = 500)
except TransportError as e:
print(e.info)
raise
gbs.append({iprange[_]:res["aggregations"]["ip_ranges"]["buckets"][0]["gigabytes"]["value"]})
events.append({iprange[_]:res["aggregations"]["ip_ranges"]["buckets"][0]["doc_count"]})
tot_gbs = res["aggregations"]["tot_giga"]["value"]
tot_events = res["hits"]["total"]
start = res["aggregations"]["start_date"]['value_as_string']
end = res["aggregations"]["end_date"]['value_as_string']
df_gbs = pd.DataFrame.from_dict(gbs).sum().to_frame().T
df_events = pd.DataFrame.from_dict(events).sum().to_frame().T
df = pd.concat([df_gbs, df_events], ignore_index = True)
df["NRC"] = df["NRC+CADC"] - df["CADC"]
df["CADC"] = df["CADC"] + df["CADC-Private"]
df["Others"] = pd.DataFrame([tot_gbs, tot_events])[0] - df["CADC"] - df["NRC"] - df["CC"]
df = df[["CADC","NRC","CC", "Others"]].T.reset_index()
df.columns = ["Domains", "Size", "Events"]
for j in ["Size", "Events"]:
p = Donut(df, label = "Domains", values = j, title = "Total: {0:.0f} {1:s}".format(tot_gbs / 1024 if j == "Size" else tot_events / 1e6, "TB" if j == "Size" else "million files") )
if i >= 2:
p.add_layout( Title(text = "In {0:s}".format("Size" if j == "Size" else "Number of Files"), align = "center" ), "below" )
if j == "Size":
p.add_layout( Title(text = "Downloads" if i == 0 else "Uploads", align = "center"), "left" )
i += 1
plots.append(p)
grid = gridplot(plots, ncols = 2, plot_width = 600, plot_height = 600)
output_file("fig4.html")
show(column(Div(text = "<h1><center>Data Transfer From {0:s} To {1:s}</center></h1>".format(re.match("(\d{4}-\d{2}-\d{2})T", start).group(1), re.match("(\d{4}-\d{2}-\d{2})T", end).group(1)), width = 600), grid))
def fig5(conn, idx):
iprange = {("132.246.194.0", "132.246.194.24"):"CADC", ("132.246.195.0", "132.246.195.24"):"CADC", ("132.246.217.0", "132.246.217.24"):"CADC", ("132.246.0.0", "132.246.255.255"):"NRC+CADC", ("192.168.0.0", "192.168.255.255"):"CADC-Private", ("206.12.0.0", "206.12.255.255"):"CC"}
service = ["data_ws", "vospace_ws"]
method = ["GET", "PUT"]
i = 0
plots = []
for m in method:
for j, s in enumerate(service):
events = []
for _ in iprange:
query = {
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : s } },
{ "term" : { "phase" : "END" } },
{ "term" : { "method" : m } },
{ "term" : { "success" : True } }
]
}
},
"aggs" : {
"start_date" : {
"min" : { "field" : "@timestamp" }
},
"end_date" : {
"max" : { "field" : "@timestamp" }
},
"ip_ranges" : {
"ip_range" : {
"field" : "clientip",
"ranges" : [
{ "from" : _[0], "to" : _[1] }
]
}
}
}
}
try:
res = conn.search(index = idx, body = query)
#res = scan(conn, index = idx, query = query, scroll = "30m", size = 500)
except TransportError as e:
print(e.info)
raise
events.append({iprange[_]:res["aggregations"]["ip_ranges"]["buckets"][0]["doc_count"]})
tot_events = res["hits"]["total"]
start = res["aggregations"]["start_date"]['value_as_string']
end = res["aggregations"]["end_date"]['value_as_string']
df_events = pd.DataFrame.from_dict(events).sum().to_frame().T
df = pd.concat([df_events], ignore_index = True)
df["NRC"] = df["NRC+CADC"] - df["CADC"]
df["CADC"] = df["CADC"] + df["CADC-Private"]
df["Others"] = pd.DataFrame([tot_events])[0] - df["CADC"] - df["NRC"] - df["CC"]
df = df[["CADC","NRC","CC", "Others"]].T.reset_index()
df.columns = ["Domains", "Events"]
p = Donut(df, label = "Domains", values = "Events", title = "Total Events: {}".format(tot_events) )
if i >= 2:
p.add_layout( Title(text = "data_ws" if j == 0 else "vospace_ws", align = "center" ), "below" )
if j == 0:
p.add_layout( Title(text = "Downloads" if i == 0 else "Uploads", align = "center"), "left" )
i += 1
plots.append(p)
grid = gridplot(plots, ncols = 2, plot_width = 600, plot_height = 600)
output_file("fig5.html")
show(column(Div(text = "<h1>Number of data_ws and vospace_ws From {0:s} To {1:s}</h1>".format(re.match("(\d{4}-\d{2}-\d{2})T", start).group(1), re.match("(\d{4}-\d{2}-\d{2})T", end).group(1)), width = 1200), grid))
if __name__ == "__main__":
conn = Init(timeout = 300).connect()
#fig1(conn, "delivery_history-*")
#fig2(conn, "delivery_history-*")
fig3(conn, "delivery_history-*")
#fig4(conn, "delivery_history-*")
#fig5(conn, "delivery_history-*")
|
|
from app import app, db
from app.models import User, Role
from flask import jsonify, request, g
import jwt
from app.exceptions import UserNotFound, UserCannotRegister, ErrorNoToken, InvalidUsage, NotAuthorized, InvalidToken
from app.api.v1.resources.utils import get_users_json
from flask_restful import Resource
from marshmallow import pprint
from oauth2client import client, crypt
""" Decorators """
def require_login(func):
from functools import wraps
@wraps(func)
def check_token(*args, **kwargs):
# Check to see if it's in their session
auth_token = get_token_from_header()
try:
user = get_user_from_token(auth_token)
#print(type(user))
g.user = user
except Exception:
raise InvalidToken
#print(auth_token)
# Otherwise just send them where they wanted to go
return func(*args, **kwargs)
return check_token
def require_admin(func):
from functools import wraps
@wraps(func)
def check_admin(*args, **kwargs):
auth_token = get_token_from_header()
user = get_user_from_token(auth_token)
g.user = user
if user is None:
raise UserNotFound
if user.is_admin():
return func(*args, **kwargs)
else:
raise NotAuthorized
return check_admin
def require_roles(*roles):
def real_require_roles(func):
from functools import wraps
@wraps(func)
def check_roles(*args, **kwargs):
auth_token = get_token_from_header()
user = get_user_from_token(auth_token)
print("email: ", user.email)
g.user = user
if user is None:
raise UserNotFound
#if set(roles) == set([rol.name for rol in user.roles]):
if (set([rol.name for rol in user.roles]) & set(roles)) == set(roles):
return func(*args, **kwargs)
else:
raise NotAuthorized
return check_roles
return real_require_roles
def require_me_or_admin(func):
from functools import wraps
@wraps(func)
def check_admin(*args, **kwargs):
auth_token = get_token_from_header()
user = get_user_from_token(auth_token)
g.user = user
if user is None:
raise NotAuthorized
if user.is_admin() or kwargs.get('id', None) == user.id:
return func(*args, **kwargs)
else:
raise NotAuthorized
return check_admin
""" Auth functions """
def check_if_admin(user):
if "admin" in user.roles:
return True
else:
raise NotAuthorized
def require_self_or_admin(current_user, requested_user):
"""
Check if the requesting user is the requested user or an admin
:param current_user: the user making the request
:param requested_user: the user requested
:return: True if admin or the same user, else raises NotAuthorized
"""
if "admin" in current_user.roles:
is_admin = True
else:
is_admin = False
if current_user is requested_user or is_admin:
return True
else:
raise NotAuthorized
def get_token_from_header():
"""
Gets the token from the header
:return: token
"""
auth_token = request.headers.get('Authorization')
if auth_token is None:
raise ErrorNoToken()
return auth_token
def get_user_from_header():
"""
Gets user from the header
:return: user
"""
token = get_token_from_header()
return get_user_from_token(token)
def get_user_from_token(token):
"""
Get user object from the id_token
:param token:
:return: user object, or raises UserNotFound
"""
try:
user_string = jwt.decode(token, key=app.config.get('SECRET_KEY'))
except jwt.InvalidTokenError:
raise InvalidUsage("Invalid token")
google_sub = user_string['google_sub']
user = User.query.filter(User.google_sub == google_sub).first()
# print("get_user_from_token: email: ", user.email)
if user is not None:
g.user = user
return user
else:
raise UserNotFound
def check_valid_user_from_sub(sub):
user = g.get('user', None)
if user is None or (sub != user.google_sub):
raise UserNotFound()
else:
return True
def decode_token(token):
"""
This method decodes the auth token
:param token:
:return:
"""
try:
user_dict = jwt.decode(token, key=app.config.get('SECRET_KEY'))
return user_dict
except Exception:
return None
# @app.route('/secret')
# @require_api_token
# def secret(token):
# user = User.get_user_from_token(token)
# return user.firstName
def get_info_from_google_id_token(id_token):
""" Parses google id_token and returns user info
:param id_token: token received from google
:return: dict of user info
"""
CLIENT_ID = app.config.get('CLIENT_ID')
try:
idinfo = client.verify_id_token(id_token, CLIENT_ID)
if idinfo['aud'] not in [CLIENT_ID]:
#raise crypt.AppIdentityError("Unrecognized client.")
return False
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise crypt.AppIdentityError("Wrong issuer.")
except crypt.AppIdentityError:
# Invalid token
return "Invalid token", 401
except Exception:
return False
return idinfo
def check_user_exists(google_sub):
"""
Check if the user exists
:param google_sub: subject field from google data
:return: user if exists else false
"""
user = User.query.filter_by(google_sub=google_sub).first()
if user is None:
raise UserNotFound()
else:
return user
def generate_token(data):
try:
token = jwt.encode(data, key=app.config.get('SECRET_KEY'))
return token
except Exception:
raise InvalidUsage("Couldn't generate token. Retry")
def register_user(google_info):
"""
Register user from the info from google
:param google_info: Info got from google
:return: registered user or None
"""
email = google_info['email']
firstName = google_info['given_name']
lastName = google_info['family_name']
google_sub = google_info['sub']
try:
user = User(email, firstName, lastName, google_sub)
db.session.add(user)
db.session.commit()
return user
except Exception:
raise UserCannotRegister()
def get_user_from_id(id):
user = User.query.filter(User.id == id).first()
if user is None:
raise UserNotFound()
else:
return user
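# A minimal sketch of how these decorators compose with a flask_restful
# Resource; the UserList resource itself is hypothetical, only the
# decorator, g.user and get_users_json come from this module's imports:
#
#   class UserList(Resource):
#       @require_roles("admin")
#       def get(self):
#           # g.user was populated from the Authorization header
#           return get_users_json(User.query.all())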
|
|
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD
import numpy as np
from ..utils import check_random_state
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
"""Sparse Principal Components Analysis (SparsePCA)
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components: int,
Number of sparse atoms to extract.
alpha: float,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha: float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run.
U_init: array of shape (n_samples, n_atoms),
Initial values for the loadings for warm restart scenarios.
V_init: array of shape (n_atoms, n_features),
Initial values for the components for warm restart scenarios.
verbose:
Degree of verbosity of the printed output.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_: array, [n_components, n_features]
Sparse components extracted from the data.
error_: array
Vector of errors at each iteration.
See also
--------
PCA
"""
def __init__(self, n_components, alpha=1, ridge_alpha=0.01, max_iter=1000,
tol=1e-8, method='lars', n_jobs=1, U_init=None,
V_init=None, verbose=False, random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.max_iter = max_iter
self.tol = tol
self.method = method
self.n_jobs = n_jobs
self.U_init = U_init
self.V_init = V_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = np.asanyarray(X)
code_init = self.V_init.T if self.V_init is not None else None
dict_init = self.U_init.T if self.U_init is not None else None
Vt, _, E = dict_learning(X.T, self.n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.method, n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
code_init=code_init,
dict_init=dict_init)
self.components_ = Vt.T
self.error_ = E
return self
def transform(self, X, ridge_alpha=None):
"""Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple linear projection.
Parameters
----------
X: array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
ridge_alpha: float, default: 0.01
Amount of ridge shrinkage to apply in order to improve
conditioning.
Returns
-------
X_new: array, shape (n_samples, n_components)
Transformed data.
"""
ridge_alpha = self.ridge_alpha if ridge_alpha is None else ridge_alpha
U = ridge_regression(self.components_.T, X.T, ridge_alpha,
solver='dense_cholesky')
U /= np.sqrt((U ** 2).sum(axis=0))
return U
class MiniBatchSparsePCA(SparsePCA):
"""Mini-batch Sparse Principal Components Analysis
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components: int,
number of sparse atoms to extract
alpha: int,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha: float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
n_iter: int,
number of iterations to perform for each mini batch
callback: callable,
callable that gets invoked every five iterations
chunk_size: int,
the number of features to take in each mini batch
verbose:
degree of output the procedure will print
shuffle: boolean,
whether to shuffle the data before splitting it in batches
n_jobs: int,
number of parallel jobs to run, or -1 to autodetect.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, n_components, alpha=1, ridge_alpha=0.01, n_iter=100,
callback=None, chunk_size=3, verbose=False, shuffle=True,
n_jobs=1, method='lars', random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.n_iter = n_iter
self.callback = callback
self.chunk_size = chunk_size
self.verbose = verbose
self.shuffle = shuffle
self.n_jobs = n_jobs
self.method = method
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = np.asanyarray(X)
Vt, _ = dict_learning_online(X.T, self.n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=True,
dict_init=None, verbose=self.verbose,
callback=self.callback,
chunk_size=self.chunk_size,
shuffle=self.shuffle,
n_jobs=self.n_jobs, method=self.method,
random_state=self.random_state)
self.components_ = Vt.T
return self
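# A minimal usage sketch under the estimator API defined above; the random
# data is illustrative only:
#
#   X = np.random.randn(100, 30)
#   spca = SparsePCA(n_components=5, alpha=1)
#   spca.fit(X)
#   U = spca.transform(X)   # (100, 5) ridge-regularized projections
#   V = spca.components_    # (5, 30) sparse components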
|
|
#!/usr/bin/env python
import logging
from operator import itemgetter
import os
import re
import sys
import tempfile
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from httplib2 import Http
from oauth2client.client import AccessTokenCredentials
import requests
from robobrowser import RoboBrowser
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
class DriveFolder(object):
def __init__(self, folder_name):
self.folder_name = folder_name
self.logger = logger.getChild('DriveFolder')
self.service = self._get_drive_service(self._get_access_token())
self.folder = self.ensure_folder()
self.log(
logging.INFO, '__init__',
'Saving to folder ID {0}'.format(self.folder['id']))
def log(self, level, method_name, message, *args, **kwargs):
child_logger = self.logger.getChild(method_name)
child_logger.log(level, message, *args, **kwargs)
def upload_files(self, local_filenames):
for file_name in local_filenames:
self.log(
logging.INFO, 'upload_files', 'Saving {0}'.format(file_name))
self.remove_file_if_exists(file_name, 'application/pdf')
remote_file = self.upload_file(file_name, 'application/pdf')
self.move_to_parent(remote_file)
self.log(
logging.INFO, 'upload_files',
'Done with {0}'.format(file_name))
def list_items(self):
# Use the cached version of this list if available.
if hasattr(self, '_items'):
self.log(logging.INFO, 'list_items', 'Using cached list of items')
return self._items
# Get a list of all items this application has stored, traversing
# paginated results as necessary.
items = []
resource = self.service.files()
request = resource.list()
while request is not None:
page = request.execute()
items.extend(page['items'])
request = resource.list_next(request, page)
# Cache and return the list.
self.log(logging.INFO, 'list_items', 'Caching list of items')
self._items = items
return items
def ensure_folder(self):
# Look for a folder with the given name. If we find it, return it.
folder_type = 'application/vnd.google-apps.folder'
for item in self.list_items():
if (
item['mimeType'] == folder_type and
item['title'] == self.folder_name):
return item
# If we're here, we haven't found one. Create one and return it.
folder = self.service.files().insert(body={
'title': self.folder_name, 'mimeType': folder_type}).execute()
return folder
def remove_file_if_exists(self, file_name, mime_type):
base_name = os.path.basename(file_name)
for item in self.list_items():
if item['title'] == base_name and item['mimeType'] == mime_type:
self.service.files().delete(fileId=item['id']).execute()
self.log(
logging.INFO, 'remove_file_if_exists',
'Removed existing {0}'.format(base_name))
return True
return False
def upload_file(self, file_name, mime_type):
media_body = MediaFileUpload(
file_name, mimetype=mime_type, resumable=True)
base_name = os.path.basename(file_name)
body = {
'description': base_name,
'title': base_name,
'mimeType': mime_type
}
response = self.service.files().insert(
body=body, media_body=media_body).execute()
self.log(logging.INFO, 'upload_file', 'Uploaded {0}'.format(file_name))
return response
def move_to_parent(self, file_to_move):
file_to_move['parents'] = [self.folder]
response = self.service.files().update(
fileId=file_to_move['id'], body=file_to_move).execute()
self.log(
logging.INFO, 'move_to_parent',
'Moved {0}'.format(file_to_move['title']))
return response
def _get_access_token(self):
r = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'grant_type': 'refresh_token',
'refresh_token': os.environ['GOOGLE_REFRESH_TOKEN'],
})
return r.json()['access_token']
def _get_drive_service(self, access_token):
http = AccessTokenCredentials(
access_token, 'stitchbot/1.0').authorize(Http())
service = build('drive', 'v2', http=http)
return service
class StitchBot(object):
def __init__(self, output_path=None, username=None, password=None):
self.browser = RoboBrowser(history=True)
self.output_path = output_path or tempfile.TemporaryDirectory().name
self.username = username or os.environ['STITCHBOT_USERNAME']
self.password = password or os.environ['STITCHBOT_PASSWORD']
self.logger = logger.getChild('StitchBot')
def log(self, level, method_name, message, *args, **kwargs):
child_logger = self.logger.getChild(method_name)
child_logger.log(level, message, *args, **kwargs)
def scrape(self):
self.log(logging.INFO, 'scrape', 'Starting scrape')
self.log_in()
self.navigate_to_free_pattern()
scraped_filenames = self.download_pattern()
self.log(logging.INFO, 'scrape', 'Scrape complete')
return scraped_filenames
def log_in(self):
self.log(logging.INFO, 'log_in', 'Logging in')
self.browser.open('http://dailycrossstitch.com/my-account/')
form = self.browser.get_form(class_='login')
form['username'] = self.username
form['password'] = self.password
self.browser.submit_form(form)
self.log(logging.INFO, 'log_in', 'Logged in')
def navigate_to_free_pattern(self):
self.log(
logging.INFO, 'navigate_to_free_pattern', 'Finding free pattern')
self.browser.open('http://dailycrossstitch.com/')
free_button = self.browser.find('a', class_='button', string='FREE')
self.browser.follow_link(free_button)
self.log(
logging.INFO, 'navigate_to_free_pattern', 'Found free pattern')
def download_pattern(self):
self.log(logging.INFO, 'download_pattern', 'Downloading pattern')
download_buttons = self.browser.find_all(
'a', class_='single_add_to_cart_button')
download_urls = list(map(itemgetter('href'), download_buttons))
local_filenames = [
self.download_pattern_file(url) for url in download_urls]
self.log(logging.INFO, 'download_pattern', 'Downloaded pattern')
return local_filenames
def download_pattern_file(self, url):
self.log(
logging.INFO, 'download_pattern_file',
'Downloading pattern file at {0}'.format(url))
self.browser.open(url)
download_script = self.browser.find(
'script', string=re.compile(r'^\s*function startDownload'))
if not download_script:
return
pdf_url_match = re.search(r'(http.+\.pdf)', download_script.string)
if not pdf_url_match:
return
pdf_url = pdf_url_match.group(1)
self.browser.open(pdf_url)
output_filename = self.save_pattern(self.browser.response)
self.log(
logging.INFO, 'download_pattern_file',
'Downloaded pattern file at {0}'.format(url))
return output_filename
def save_pattern(self, response):
self.log(logging.INFO, 'save_pattern', 'Saving pattern')
try:
os.makedirs(self.output_path)
except OSError:
pass
filename = self.get_filename(response.headers)
output_filename = os.path.join(self.output_path, filename)
with open(output_filename, 'wb') as output_file:
output_file.write(response.content)
self.log(
logging.INFO, 'save_pattern',
'Saved pattern to {0}'.format(output_filename))
return output_filename
def get_filename(self, headers, default_filename='pattern.pdf'):
filename_match = re.search(
r'filename="?([^"]+)"?', headers.get('Content-Disposition', ''))
if not filename_match:
return default_filename
return filename_match.group(1)
def main(output_path=None, *args):
local_filenames = StitchBot(output_path).scrape()
DriveFolder('Stitchbot patterns').upload_files(local_filenames)
if __name__ == '__main__':
main(*sys.argv[1:])
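# Editorial usage note (not part of the original script): running main() as-is
# assumes these environment variables are set, all of which are read above:
#   GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, GOOGLE_REFRESH_TOKEN - Drive OAuth
#   STITCHBOT_USERNAME, STITCHBOT_PASSWORD                       - site login
# The first command-line argument, if given, is used as the local output
# directory, e.g. (the script file name here is assumed):
#   python stitchbot.py ./patterns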
|
|
'''
MFEM example 20
See c++ version in the MFEM library for more detail
'''
import os
import mfem.ser as mfem
from mfem.ser import intArray
from os.path import expanduser, join, dirname
import numpy as np
from numpy import sin, cos, exp, sqrt
m_ = 1.0
k_ = 1.0
def run(order=1,
prob=0,
nsteps=100,
dt=0.1,
sc=1.0,
visualization=False):
class GradT(mfem.Operator):
def __init__(self):
mfem.Operator.__init__(self, 1)
def Mult(self, x, y):
y.Set(1.0/m_, x)
class NegGradV(mfem.TimeDependentOperator):
def __init__(self):
mfem.TimeDependentOperator.__init__(self, 1)
def Mult(self, x, y):
if prob == 1:
y[0] = - k_ * sin(x[0])
elif prob == 2:
y[0] = - k_ * x[0] * exp(-0.5 * x[0] * x[0])
elif prob == 3:
y[0] = - k_ * (1.0 + 2.0 * x[0] * x[0]) * x[0]
elif prob == 4:
y[0] = - k_ * (1.0 - 0.25 * x[0] * x[0]) * x[0]
else:
y[0] = - k_ * x[0]
def hamiltonian(q, p, t):
h = 1.0 - 0.5 / m_ + 0.5 * p * p / m_
if prob == 1:
h += k_ * (1.0 - cos(q))
elif prob == 2:
h += k_ * (1.0 - exp(-0.5 * q * q))
elif prob == 3:
h += 0.5 * k_ * (1.0 + q * q) * q * q
elif prob == 4:
h += 0.5 * k_ * (1.0 - 0.125 * q * q) * q * q
else:
h += 0.5 * k_ * q * q
return h
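    # Editorial sanity check (not in the original example): with the defaults
    # m_ = k_ = 1 and the initial conditions q = 0, p = 1 set below, the
    # initial energy is H = 1 - 0.5 + 0.5*1 = 1. For prob == 0 the exact
    # solution is q(t) = sin(t), p(t) = cos(t), which conserves H, so the mean
    # energy printed at the end should stay close to 1 with a small standard
    # deviation when the symplectic integrator is behaving well.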
# 2. Create and Initialize the Symplectic Integration Solver
siaSolver = mfem.SIAVSolver(order)
P = GradT()
F = NegGradV()
siaSolver.Init(P, F)
# 3. Set the initial conditions
t = 0.0
q = mfem.Vector(1)
p = mfem.Vector(1)
e = mfem.Vector(nsteps+1)
q[0] = 0.0
p[0] = 1.0
# 5. Create a Mesh for visualization in phase space
nverts = 2*(nsteps+1) if visualization else 0
nelems = nsteps if visualization else 0
mesh = mfem.Mesh(2, nverts, nelems, 0, 3)
x0 = mfem.Vector(3)
x0.Assign(0.0)
x1 = mfem.Vector(3)
x1.Assign(0.0)
v = mfem.intArray(4)
# 6. Perform time-stepping
e_mean = 0.0
for i in range(nsteps):
if i == 0:
e[0] = hamiltonian(q[0], p[0], t)
e_mean += e[0]
if visualization:
x1[0] = q[0]
x1[1] = p[0]
x1[2] = 0.0
mesh.AddVertex(x0)
                # These are all the same:
                # mesh.AddVertex(x0.GetDataArray())
                # mesh.AddVertex(x0.GetData())
mesh.AddVertex(x1)
# 6b. Advance the state of the system
t, dt = siaSolver.Step(q, p, t, dt)
e[i+1] = hamiltonian(q[0], p[0], t)
e_mean += e[i+1]
# 6d. Add results to GLVis visualization
if visualization:
x0[2] = t
x1[0] = q[0]
x1[1] = p[0]
x1[2] = t
mesh.AddVertex(x0)
mesh.AddVertex(x1)
v[0] = 2*i
v[1] = 2*(i+1)
v[2] = 2*(i+1)+1
v[3] = 2*i+1
mesh.AddQuad(v)
# this also works ;D
# mesh.AddQuad(v.ToList())
#mesh.AddQuad(np.array(v.ToList(), dtype=np.int32))
# 7. Compute and display mean and standard deviation of the energy
e_mean /= (nsteps + 1)
e_var = 0.0
for i in range(nsteps+1):
e_var += (e[i] - e_mean)**2
e_var /= (nsteps + 1)
print("\n".join(["",
"Mean and standard deviation of the energy",
"{:g}".format(e_mean) + "\t" + "{:g}".format(sqrt(e_var))]))
# 9. Finalize the GLVis output
if visualization:
mesh.FinalizeQuadMesh(1)
fec = mfem.H1_FECollection(1, 2)
fespace = mfem.FiniteElementSpace(mesh, fec)
energy = mfem.GridFunction(fespace)
energy.Assign(0.0)
for i in range(nsteps+1):
energy[2*i+0] = e[i]
energy[2*i+1] = e[i]
sock = mfem.socketstream("localhost", 19916)
sock.precision(8)
sock << "solution\n" << mesh << energy
sock << "window_title 'Energy in Phase Space'\n"
sock << "keys\n maac\n" << "axis_labels 'q' 'p' 't'\n"
sock.flush()
if __name__ == "__main__":
from mfem.common.arg_parser import ArgParser
    parser = ArgParser(description='Ex20 (Symplectic ODE)')
parser.add_argument('-m', '--mesh',
default='star.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument("-p",
"--problem-type",
action='store', type=int, default=0,
help=''.join(["Problem Type:\n",
"\t 0 - Simple Harmonic Oscillator\n",
"\t 1 - Pendulum\n",
"\t 2 - Gaussian Potential Well\n",
"\t 3 - Quartic Potential\n",
"\t 4 - Negative Quartic Potential", ]))
parser.add_argument('-o', '--order',
action='store', default=1, type=int,
help="Time integration order")
parser.add_argument('-n', '--number-of-steps',
action='store', default=100, type=int,
help="Number of time steps")
parser.add_argument('-dt', '--time-step',
action='store', default=0.1, type=float,
help="Time step size")
parser.add_argument('-k', '--spring-constant',
action='store', default=1, type=float,
help="Sprint constant")
parser.add_argument('-vis', '--visualization',
action='store_true',
default=True,
help='Enable GLVis visualization')
parser.add_argument('-no-gp', '--no-gnuplot',
action='store_true',
default=True,
help='Disable GnuPlot visualization')
args = parser.parse_args()
parser.print_options(args)
prob = args.problem_type
visualization = args.visualization
order = args.order
nsteps = args.number_of_steps
dt = args.time_step
sc = args.spring_constant
np_gp = args.no_gnuplot
run(order=order,
prob=prob,
nsteps=nsteps,
dt=dt,
sc=sc,
visualization=visualization)
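    # Editorial usage note: with the argument parser above, a pendulum run
    # using a 4th-order integrator could be launched as (script name assumed):
    #   python ex20.py -p 1 -o 4 -n 200 -dt 0.05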
|
|
import experience_replay as er
import match_processing as mp
import champion_info as cinfo
import draft_db_ops as dbo
from draftstate import DraftState
from models.inference_model import QNetInferenceModel, SoftmaxInferenceModel
import json
import pandas as pd
import numpy as np
import tensorflow as tf
import sqlite3
import math
#path_to_model = "model_predictions/spring_2018/week_3/run_2/model_E10"
#path_to_model = "tmp/models/model_E10"
#path_to_model = "tmp/model_E{}".format(45)
path_to_model = "tmp/ddqn_model_E{}".format(45)
model = QNetInferenceModel(name="ddqn", path=path_to_model)
#path_to_model = "tmp/softmax_model_E{}".format(45)
#model = SoftmaxInferenceModel(name="softmax", path=path_to_model)
print("***")
print("Loading Model From: {}".format(path_to_model))
print("***")
out_dir = "model_predictions/dump"
print("***")
print("Outputting To: {}".format(out_dir))
print("***")
specific_team = None#"tsm"
print("***")
if(specific_team):
print("Looking at drafts by team:{}".format(specific_team))
else:
print("Looking at drafts submitted by winning team")
print("***")
#with open('worlds_matchids_by_stage.txt','r') as infile:
# data = json.load(infile)
#match_ids = data["groups"]
#match_ids.extend(data["knockouts"])
#match_ids.extend(data["finals"])
#match_ids.extend(data["play_ins_rd1"])
#match_ids.extend(data["play_ins_rd2"])
with open('match_pool.txt','r') as infile:
data = json.load(infile)
match_ids = data['validation_ids']
#match_ids = data['training_ids']
#match_ids.extend(data['training_ids'])
dbName = "competitiveGameData.db"
conn = sqlite3.connect("tmp/"+dbName)
cur = conn.cursor()
#match_ids = dbo.get_game_ids_by_tournament(cur,"2017/INTL/WRLDS")
matches = [dbo.get_match_data(cur,match_id) for match_id in match_ids]
conn.close()
if(specific_team):
matches = [match for match in matches if (match["blue_team"]==specific_team or match["red_team"]==specific_team)]
count = 0
print("************************")
print("Match Schedule:")
print("************************")
with open("{}/_match_schedule.txt".format(out_dir),'w') as outfile:
outfile.write("************************\n")
for match in matches:
output_string = "Match {:2}: id: {:5} tourn: {:20} game_no: {:3} {:6} vs {:6} winner: {:2}".format(count, match["id"], match["tournament"], match["tourn_game_id"], match["blue_team"], match["red_team"], match["winner"])
print(output_string)
outfile.write(output_string+'\n')
count += 1
outfile.write("************************\n")
with open("{}/match_data.json".format(out_dir),'w') as outfile:
json.dump(matches,outfile)
count = 0
k = 5 # Rank to look for in topk range
full_diag = {"top1":0, "topk":0, "target":0, "l2":[],"k":k}
no_rd1_ban_diag = {"top1":0, "topk":0, "target":0, "l2":[],"k":k}
no_ban_diag = {"top1":0, "topk":0, "target":0, "l2":[],"k":k}
second_phase_only = {"top1":0, "topk":0, "target":0, "l2":[],"k":k}
bans_only = {"top1":0, "topk":0, "target":0, "l2":[],"k":k}
model_diagnostics = {"full":full_diag, "no_rd1_ban":no_rd1_ban_diag, "no_bans":no_ban_diag, "phase_2_only":second_phase_only, "bans":bans_only}
position_distributions = {"phase_1":[0,0,0,0,0], "phase_2":[0,0,0,0,0]}
actual_pos_distributions = {"phase_1":[0,0,0,0,0], "phase_2":[0,0,0,0,0]}
augmentable_picks = {DraftState.BLUE_TEAM:[0,1,4,6,8], DraftState.RED_TEAM:[0,1,3,6]}
targets = [10,10,10,9,8,7,6,6,6,5]
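# Editorial note on the diagnostics gathered below (inferred from the code,
# not from any original documentation):
#   top1   - the submitted pick/ban was the model's single highest-Q action
#   topk   - the submitted action ranked inside the model's top k (k = 5 here)
#   target - the submitted action ranked inside the pick-specific target rank
#            taken from the targets list above
#   l2     - relative error |Q_best - Q_submitted| / |Q_best|, summarised as
#            an RMS value in the "Norm Information" section at the end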
for match in matches:
# if(specific_team):
# team = DraftState.RED_TEAM if match["red_team"]==specific_team else DraftState.BLUE_TEAM
# else:
# team = DraftState.RED_TEAM if match["winner"]==1 else DraftState.BLUE_TEAM
# teams = [DraftState.BLUE_TEAM, DraftState.RED_TEAM]
teams = [DraftState.RED_TEAM if match["winner"]==1 else DraftState.BLUE_TEAM]
for team in teams:
experiences = mp.process_match(match, team, augment_data=False)
print("")
print("Match: {:2} {:6} vs {:6} winner: {:2}".format(count, match["blue_team"], match["red_team"], match["winner"]))
for pick_count, exp in enumerate(experiences):
print(" === ")
print(" Match {}, Pick {}".format(count, pick_count))
print(" === ")
state,act,rew,next_state = exp
cid,pos = act
            if cid is None:
continue
predicted_q_values = model.predict([state])
predicted_q_values = predicted_q_values[0,:]
submitted_action_id = state.get_action(*act)
data = [(a,*state.format_action(a),predicted_q_values[a]) for a in range(len(predicted_q_values))]
data = [(a,cinfo.champion_name_from_id(cid),pos,Q) for (a,cid,pos,Q) in data]
df = pd.DataFrame(data, columns=['act_id','cname','pos','Q(s,a)'])
df.sort_values('Q(s,a)',ascending=False,inplace=True)
df.reset_index(drop=True,inplace=True)
df['rank'] = df.index
df['error'] = abs(df['Q(s,a)'][0] - df['Q(s,a)'])/abs(df['Q(s,a)'][0])
submitted_row = df[df['act_id']==submitted_action_id]
print(" Submitted action:")
print(submitted_row)
rank = submitted_row['rank'].iloc[0]
err = submitted_row['error'].iloc[0]
# For picks submitted back-to-back look ahead to next action to see if it was possibly recommended
if (rank >= k and pick_count in augmentable_picks[team]):#if False:
_,next_action,_,_ = experiences[pick_count+1]
cid,_ = next_action
if(cid):
next_action_id = state.get_action(*next_action)
next_row = df[df['act_id']==next_action_id]
next_rank = next_row['rank'].iloc[0]
if(next_rank < k):
result = state.update(*next_action)
new_exp = (state, act, rew, None)
experiences[pick_count+1] = new_exp
rank = next_rank
print(" AUGMENTED ACTION:")
print(next_row)
t = targets[pick_count]
# Norms measuring all submissions
if(rank == 0):
model_diagnostics["full"]["top1"] += 1
if(rank < t):
model_diagnostics["full"]["target"] += 1
if(rank < k):
model_diagnostics["full"]["topk"] += 1
model_diagnostics["full"]["l2"].append(err)
# Norms excluding round 1 bans
if(pick_count > 2):
if(rank == 0):
model_diagnostics["no_rd1_ban"]["top1"] += 1
if(rank < t):
model_diagnostics["no_rd1_ban"]["target"] += 1
if(rank < k):
model_diagnostics["no_rd1_ban"]["topk"] += 1
model_diagnostics["no_rd1_ban"]["l2"].append(err)
# Norms excluding round 1 completely
if(pick_count > 5):
if(rank == 0):
model_diagnostics["phase_2_only"]["top1"] += 1
if(rank < t):
model_diagnostics["phase_2_only"]["target"] += 1
if(rank < k):
model_diagnostics["phase_2_only"]["topk"] += 1
model_diagnostics["phase_2_only"]["l2"].append(err)
# Norms excluding all bans
if(pos != -1):
if(rank == 0):
model_diagnostics["no_bans"]["top1"] += 1
if(rank < t):
model_diagnostics["no_bans"]["target"] += 1
if(rank < k):
model_diagnostics["no_bans"]["topk"] += 1
model_diagnostics["no_bans"]["l2"].append(err)
# Norms for bans only
if(pos == -1):
if(rank == 0):
model_diagnostics["bans"]["top1"] += 1
if(rank < t):
model_diagnostics["bans"]["target"] += 1
if(rank < k):
model_diagnostics["bans"]["topk"] += 1
model_diagnostics["bans"]["l2"].append(err)
if(rank >= t):
print(" Top predictions:")
print(df.head()) # Print top 5 choices for network
#df.to_pickle("{}/match{}_pick{}.pkl".format(out_dir,count,pick_count))
# Position distribution for picks
if(pos > 0):
top_pos = df.head()["pos"].values.tolist()
if(pick_count <=5):
actual_pos_distributions["phase_1"][pos-1] += 1
for pos in top_pos:
position_distributions["phase_1"][pos-1] += 1
else:
actual_pos_distributions["phase_2"][pos-1] += 1
for pos in top_pos:
position_distributions["phase_2"][pos-1] += 1
pick_count += 1
count += 1
print("******************")
print("Pick position distributions:")
for phase in ["phase_1", "phase_2"]:
print("{}: Recommendations".format(phase))
count = sum(position_distributions[phase])
for pos in range(len(position_distributions[phase])):
pos_ratio = position_distributions[phase][pos] / count
print(" Position {}: Count {:3}, Ratio {:.3}".format(pos+1, position_distributions[phase][pos], pos_ratio))
print("{}: Actual".format(phase))
count = sum(actual_pos_distributions[phase])
for pos in range(len(actual_pos_distributions[phase])):
pos_ratio = actual_pos_distributions[phase][pos] / count
print(" Position {}: Count {:3}, Ratio {:.3}".format(pos+1, actual_pos_distributions[phase][pos], pos_ratio))
print("******************")
print("Norm Information:")
for key in sorted(model_diagnostics.keys()):
print(" {}".format(key))
err_list = model_diagnostics[key]["l2"]
err = math.sqrt((sum([e**2 for e in err_list])/len(err_list)))
num_predictions = len(err_list)
top1 = model_diagnostics[key]["top1"]
topk = model_diagnostics[key]["topk"]
target = model_diagnostics[key]["target"]
k = model_diagnostics[key]["k"]
print(" Num_predictions = {}".format(num_predictions))
print(" top 1: count {} -> acc: {:.4}".format(top1, top1/num_predictions))
print(" top {}: count {} -> acc: {:.4}".format(k, topk, topk/num_predictions))
print(" target: count {} -> acc: {:.4}".format(target, target/num_predictions))
print(" l2 error: {:.4}".format(err))
print("---")
print("******************")
|
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS VM networking.
The Firewall class provides a way of opening VM ports. The Network class allows
VMs to communicate via internal IPs and isolates PerfKitBenchmarker VMs from
others in the same project. See https://aws.amazon.com/documentation/vpc/
for more information about AWS Virtual Private Clouds.
"""
import json
import logging
import threading
import uuid
from perfkitbenchmarker import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
AWS = 'AWS'
class AwsFirewall(network.BaseFirewall):
"""An object representing the AWS Firewall."""
CLOUD = AWS
def __init__(self):
self.firewall_set = set()
self._lock = threading.Lock()
def AllowPort(self, vm, port):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
port: The local port to open.
"""
if vm.is_static:
return
entry = (port, vm.group_id)
if entry in self.firewall_set:
return
with self._lock:
if entry in self.firewall_set:
return
authorize_cmd = util.AWS_PREFIX + [
'ec2',
'authorize-security-group-ingress',
'--region=%s' % vm.region,
'--group-id=%s' % vm.group_id,
'--port=%s' % port,
'--cidr=0.0.0.0/0']
util.IssueRetryableCommand(
authorize_cmd + ['--protocol=tcp'])
util.IssueRetryableCommand(
authorize_cmd + ['--protocol=udp'])
self.firewall_set.add(entry)
def DisallowAllPorts(self):
"""Closes all ports on the firewall."""
pass
class AwsVpc(resource.BaseResource):
"""An object representing an Aws VPC."""
def __init__(self, region):
super(AwsVpc, self).__init__()
self.region = region
self.id = None
def _Create(self):
"""Creates the VPC."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-vpc',
'--region=%s' % self.region,
'--cidr-block=10.0.0.0/16']
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Vpc']['VpcId']
self._EnableDnsHostnames()
util.AddDefaultTags(self.id, self.region)
def _Exists(self):
"""Returns true if the VPC exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-vpcs',
'--region=%s' % self.region,
'--filter=Name=vpc-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
vpcs = response['Vpcs']
assert len(vpcs) < 2, 'Too many VPCs.'
return len(vpcs) > 0
def _EnableDnsHostnames(self):
"""Sets the enableDnsHostnames attribute of this VPC to True.
By default, instances launched in non-default VPCs are assigned an
    unresolvable hostname. This breaks the Hadoop benchmark. Setting the
enableDnsHostnames attribute to 'true' on the VPC resolves this. See:
http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
"""
enable_hostnames_command = util.AWS_PREFIX + [
'ec2',
'modify-vpc-attribute',
'--region=%s' % self.region,
'--vpc-id', self.id,
'--enable-dns-hostnames',
'{ "Value": true }']
util.IssueRetryableCommand(enable_hostnames_command)
def _Delete(self):
"""Delete's the VPC."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-vpc',
'--region=%s' % self.region,
'--vpc-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
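# For reference (the region and VPC id below are illustrative values, not taken
# from this module), the command assembled in AwsVpc._EnableDnsHostnames above
# is roughly equivalent to running:
#   aws ec2 modify-vpc-attribute --region us-east-1 \
#       --vpc-id vpc-0123456789abcdef0 --enable-dns-hostnames '{ "Value": true }'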
class AwsSubnet(resource.BaseResource):
"""An object representing an Aws subnet."""
def __init__(self, zone, vpc_id, cidr_block='10.0.0.0/24'):
super(AwsSubnet, self).__init__()
self.zone = zone
self.region = util.GetRegionFromZone(zone)
self.vpc_id = vpc_id
self.id = None
self.cidr_block = cidr_block
def _Create(self):
"""Creates the subnet."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-subnet',
'--region=%s' % self.region,
'--vpc-id=%s' % self.vpc_id,
'--cidr-block=%s' % self.cidr_block]
if not util.IsRegion(self.zone):
create_cmd.append('--availability-zone=%s' % self.zone)
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Subnet']['SubnetId']
util.AddDefaultTags(self.id, self.region)
def _Delete(self):
"""Deletes the subnet."""
logging.info('Deleting subnet %s. This may fail if all instances in the '
'subnet have not completed termination, but will be retried.',
self.id)
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-subnet',
'--region=%s' % self.region,
'--subnet-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the subnet exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-subnets',
'--region=%s' % self.region,
'--filter=Name=subnet-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
subnets = response['Subnets']
assert len(subnets) < 2, 'Too many subnets.'
return len(subnets) > 0
class AwsInternetGateway(resource.BaseResource):
"""An object representing an Aws Internet Gateway."""
def __init__(self, region):
super(AwsInternetGateway, self).__init__()
self.region = region
self.vpc_id = None
self.id = None
self.attached = False
def _Create(self):
"""Creates the internet gateway."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-internet-gateway',
'--region=%s' % self.region]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['InternetGateway']['InternetGatewayId']
util.AddDefaultTags(self.id, self.region)
def _Delete(self):
"""Deletes the internet gateway."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the internet gateway exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-internet-gateways',
'--region=%s' % self.region,
'--filter=Name=internet-gateway-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
internet_gateways = response['InternetGateways']
assert len(internet_gateways) < 2, 'Too many internet gateways.'
return len(internet_gateways) > 0
def Attach(self, vpc_id):
"""Attaches the internetgateway to the VPC."""
if not self.attached:
self.vpc_id = vpc_id
attach_cmd = util.AWS_PREFIX + [
'ec2',
'attach-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id,
'--vpc-id=%s' % self.vpc_id]
util.IssueRetryableCommand(attach_cmd)
self.attached = True
def Detach(self):
"""Detaches the internetgateway from the VPC."""
if self.attached:
detach_cmd = util.AWS_PREFIX + [
'ec2',
'detach-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id,
'--vpc-id=%s' % self.vpc_id]
util.IssueRetryableCommand(detach_cmd)
self.attached = False
class AwsRouteTable(resource.BaseResource):
"""An object representing a route table."""
def __init__(self, region, vpc_id):
super(AwsRouteTable, self).__init__()
self.region = region
self.vpc_id = vpc_id
def _Create(self):
"""Creates the route table.
This is a no-op since every VPC has a default route table.
"""
pass
def _Delete(self):
"""Deletes the route table.
This is a no-op since the default route table gets deleted with the VPC.
"""
pass
@vm_util.Retry()
def _PostCreate(self):
"""Gets data about the route table."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-route-tables',
'--region=%s' % self.region,
'--filters=Name=vpc-id,Values=%s' % self.vpc_id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
self.id = response['RouteTables'][0]['RouteTableId']
def CreateRoute(self, internet_gateway_id):
"""Adds a route to the internet gateway."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-route',
'--region=%s' % self.region,
'--route-table-id=%s' % self.id,
'--gateway-id=%s' % internet_gateway_id,
'--destination-cidr-block=0.0.0.0/0']
util.IssueRetryableCommand(create_cmd)
class AwsPlacementGroup(resource.BaseResource):
"""Object representing an AWS Placement Group.
Attributes:
region: The AWS region the Placement Group is in.
name: The name of the Placement Group.
"""
def __init__(self, region):
"""Init method for AwsPlacementGroup.
Args:
region: A string containing the AWS region of the Placement Group.
"""
super(AwsPlacementGroup, self).__init__()
self.name = (
'perfkit-%s-%s' % (FLAGS.run_uri, str(uuid.uuid4())[-12:]))
self.region = region
def _Create(self):
"""Creates the Placement Group."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-placement-group',
'--region=%s' % self.region,
'--group-name=%s' % self.name,
'--strategy=cluster']
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Deletes the Placement Group."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-placement-group',
'--region=%s' % self.region,
'--group-name=%s' % self.name]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the Placement Group exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-placement-groups',
'--region=%s' % self.region,
'--filter=Name=group-name,Values=%s' % self.name]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
placement_groups = response['PlacementGroups']
assert len(placement_groups) < 2, 'Too many placement groups.'
return len(placement_groups) > 0
class AwsNetwork(network.BaseNetwork):
"""Object representing an AWS Network.
Attributes:
region: The AWS region the Network is in.
vpc_id: The id of the Network's Virtual Private Cloud (VPC).
subnet_id: The id of the Subnet of the Network's VPC.
internet_gateway_id: The id of the Network's Internet Gateway.
    route_table_id: The id of the Route Table of the Network's VPC.
"""
CLOUD = AWS
def __init__(self, spec):
"""Initializes AwsNetwork instances.
Args:
spec: A BaseNetworkSpec object.
"""
super(AwsNetwork, self).__init__(spec)
self.region = util.GetRegionFromZone(spec.zone)
self.vpc = AwsVpc(self.region)
self.internet_gateway = AwsInternetGateway(self.region)
self.subnet = None
self.route_table = None
self.placement_group = AwsPlacementGroup(self.region)
def Create(self):
"""Creates the network."""
self.vpc.Create()
self.internet_gateway.Create()
self.internet_gateway.Attach(self.vpc.id)
if self.route_table is None:
self.route_table = AwsRouteTable(self.region, self.vpc.id)
self.route_table.Create()
self.route_table.CreateRoute(self.internet_gateway.id)
if self.subnet is None:
self.subnet = AwsSubnet(self.zone, self.vpc.id)
self.subnet.Create()
self.placement_group.Create()
def Delete(self):
"""Deletes the network."""
self.placement_group.Delete()
if self.subnet:
self.subnet.Delete()
self.internet_gateway.Detach()
self.internet_gateway.Delete()
self.vpc.Delete()
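# Minimal usage sketch (editorial addition; the spec and vm objects are assumed
# here, not constructed in this module). AwsNetwork.Create() provisions the
# VPC, internet gateway, route table, subnet and placement group in order, and
# AwsFirewall.AllowPort() opens a port for a VM exposing .is_static, .group_id
# and .region:
#   net = AwsNetwork(spec)      # spec: a BaseNetworkSpec-like object with .zone
#   net.Create()
#   firewall = AwsFirewall()
#   firewall.AllowPort(vm, 22)  # authorizes TCP and UDP ingress on port 22
#   net.Delete()                # tears the resources down again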
|
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from collections import deque, namedtuple, OrderedDict
import copy
import datetime
import itertools
import logging
import math
import weakref
import inspect
import gc
from .guild import Guild
from .activity import _ActivityTag
from .user import User, ClientUser
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .message import Message
from .relationship import Relationship
from .channel import *
from .raw_models import *
from .member import Member
from .role import Role
from .enums import ChannelType, try_enum, Status, Enum
from . import utils
from .embeds import Embed
from .object import Object
class ListenerType(Enum):
chunk = 0
query_members = 1
Listener = namedtuple('Listener', ('type', 'future', 'predicate'))
log = logging.getLogger(__name__)
ReadyState = namedtuple('ReadyState', ('launch', 'guilds'))
class ConnectionState:
def __init__(self, *, dispatch, chunker, handlers, syncer, http, loop, **options):
self.loop = loop
self.http = http
self.max_messages = options.get('max_messages', 1000)
if self.max_messages is not None and self.max_messages <= 0:
self.max_messages = 1000
self.dispatch = dispatch
self.chunker = chunker
self.syncer = syncer
self.is_bot = None
self.handlers = handlers
self.shard_count = None
self._ready_task = None
self._fetch_offline = options.get('fetch_offline_members', True)
self.heartbeat_timeout = options.get('heartbeat_timeout', 60.0)
self.guild_subscriptions = options.get('guild_subscriptions', True)
# Only disable cache if both fetch_offline and guild_subscriptions are off.
self._cache_members = (self._fetch_offline or self.guild_subscriptions)
self._listeners = []
activity = options.get('activity', None)
if activity:
if not isinstance(activity, _ActivityTag):
raise TypeError('activity parameter must be one of Game, Streaming, or Activity.')
activity = activity.to_dict()
status = options.get('status', None)
if status:
if status is Status.offline:
status = 'invisible'
else:
status = str(status)
self._activity = activity
self._status = status
self.parsers = parsers = {}
for attr, func in inspect.getmembers(self):
if attr.startswith('parse_'):
parsers[attr[6:].upper()] = func
self.clear()
def clear(self):
self.user = None
self._users = weakref.WeakValueDictionary()
self._emojis = {}
self._calls = {}
self._guilds = {}
self._voice_clients = {}
# LRU of max size 128
self._private_channels = OrderedDict()
# extra dict to look up private channels by user id
self._private_channels_by_user = {}
self._messages = self.max_messages and deque(maxlen=self.max_messages)
        # In cases of large deallocations the GC should be called explicitly,
        # to free the memory more promptly. This is especially true for
        # reconnect loops, which cause mass allocations and deallocations.
gc.collect()
def process_listeners(self, listener_type, argument, result):
removed = []
for i, listener in enumerate(self._listeners):
if listener.type != listener_type:
continue
future = listener.future
if future.cancelled():
removed.append(i)
continue
try:
passed = listener.predicate(argument)
except Exception as exc:
future.set_exception(exc)
removed.append(i)
else:
if passed:
future.set_result(result)
removed.append(i)
if listener.type == ListenerType.chunk:
break
for index in reversed(removed):
del self._listeners[index]
def call_handlers(self, key, *args, **kwargs):
try:
func = self.handlers[key]
except KeyError:
pass
else:
func(*args, **kwargs)
@property
def self_id(self):
u = self.user
return u.id if u else None
@property
def voice_clients(self):
return list(self._voice_clients.values())
def _get_voice_client(self, guild_id):
return self._voice_clients.get(guild_id)
def _add_voice_client(self, guild_id, voice):
self._voice_clients[guild_id] = voice
def _remove_voice_client(self, guild_id):
self._voice_clients.pop(guild_id, None)
def _update_references(self, ws):
for vc in self.voice_clients:
vc.main_ws = ws
def store_user(self, data):
# this way is 300% faster than `dict.setdefault`.
user_id = int(data['id'])
try:
return self._users[user_id]
except KeyError:
user = User(state=self, data=data)
if user.discriminator != '0000':
self._users[user_id] = user
return user
def get_user(self, id):
return self._users.get(id)
def store_emoji(self, guild, data):
emoji_id = int(data['id'])
self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
return emoji
@property
def guilds(self):
return list(self._guilds.values())
def _get_guild(self, guild_id):
return self._guilds.get(guild_id)
def _add_guild(self, guild):
self._guilds[guild.id] = guild
def _remove_guild(self, guild):
self._guilds.pop(guild.id, None)
for emoji in guild.emojis:
self._emojis.pop(emoji.id, None)
del guild
# Much like clear(), if we have a massive deallocation
# then it's better to explicitly call the GC
gc.collect()
@property
def emojis(self):
return list(self._emojis.values())
def get_emoji(self, emoji_id):
return self._emojis.get(emoji_id)
@property
def private_channels(self):
return list(self._private_channels.values())
def _get_private_channel(self, channel_id):
try:
value = self._private_channels[channel_id]
except KeyError:
return None
else:
self._private_channels.move_to_end(channel_id)
return value
def _get_private_channel_by_user(self, user_id):
return self._private_channels_by_user.get(user_id)
def _add_private_channel(self, channel):
channel_id = channel.id
self._private_channels[channel_id] = channel
if self.is_bot and len(self._private_channels) > 128:
_, to_remove = self._private_channels.popitem(last=False)
if isinstance(to_remove, DMChannel):
self._private_channels_by_user.pop(to_remove.recipient.id, None)
if isinstance(channel, DMChannel):
self._private_channels_by_user[channel.recipient.id] = channel
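    # Editorial note: together with _get_private_channel above, this implements
    # a small LRU cache over the OrderedDict created in clear(). Lookups move a
    # hit to the end (most recently used); once a bot account holds more than
    # 128 private channels, the entry at the front (least recently used) is
    # evicted, and the by-user index is pruned when that entry is a DMChannel.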
def add_dm_channel(self, data):
channel = DMChannel(me=self.user, state=self, data=data)
self._add_private_channel(channel)
return channel
def _remove_private_channel(self, channel):
self._private_channels.pop(channel.id, None)
if isinstance(channel, DMChannel):
self._private_channels_by_user.pop(channel.recipient.id, None)
def _get_message(self, msg_id):
return utils.find(lambda m: m.id == msg_id, reversed(self._messages)) if self._messages else None
def _add_guild_from_data(self, guild):
guild = Guild(data=guild, state=self)
self._add_guild(guild)
return guild
def chunks_needed(self, guild):
for _ in range(math.ceil(guild._member_count / 1000)):
yield self.receive_chunk(guild.id)
def _get_guild_channel(self, data):
channel_id = int(data['channel_id'])
try:
guild = self._get_guild(int(data['guild_id']))
except KeyError:
channel = self.get_channel(channel_id)
guild = None
else:
channel = guild and guild.get_channel(channel_id)
return channel or Object(id=channel_id), guild
async def request_offline_members(self, guilds):
# get all the chunks
chunks = []
for guild in guilds:
chunks.extend(self.chunks_needed(guild))
# we only want to request ~75 guilds per chunk request.
splits = [guilds[i:i + 75] for i in range(0, len(guilds), 75)]
for split in splits:
await self.chunker(split)
# wait for the chunks
if chunks:
try:
await utils.sane_wait_for(chunks, timeout=len(chunks) * 30.0)
except asyncio.TimeoutError:
log.info('Somehow timed out waiting for chunks.')
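    # Editorial example of the arithmetic above: a guild reporting 2,500
    # members contributes ceil(2500 / 1000) = 3 chunk futures via
    # chunks_needed(), the guild list is sent to the chunker in slices of at
    # most 75 guilds, and the overall wait allows 30 seconds per expected
    # chunk before logging a timeout.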
async def query_members(self, guild, query, limit, cache):
guild_id = guild.id
ws = self._get_websocket(guild_id)
if ws is None:
raise RuntimeError('Somehow do not have a websocket for this guild_id')
        # Limits over 1000 cannot be supported, since the main use case for
        # this is guild_subscriptions being disabled and then we don't
        # receive GUILD_MEMBER events, which makes computing member_count
        # impossible. The only way to fix it is by restricting the limit
        # parameter to the range 1 to 1000.
future = self.receive_member_query(guild_id, query)
try:
# start the query operation
await ws.request_chunks(guild_id, query, limit)
members = await asyncio.wait_for(future, timeout=5.0)
if cache:
for member in members:
guild._add_member(member)
return members
except asyncio.TimeoutError:
log.info('Timed out waiting for chunks with query %r and limit %d for guild_id %d', query, limit, guild_id)
raise
async def _delay_ready(self):
try:
launch = self._ready_state.launch
# only real bots wait for GUILD_CREATE streaming
if self.is_bot:
while not launch.is_set():
                    # wait until two seconds have elapsed since the last
                    # GUILD_CREATE; parse_guild_create clears the launch event
                    # each time another large guild arrives during READY
launch.set()
await asyncio.sleep(2)
guilds = next(zip(*self._ready_state.guilds), [])
if self._fetch_offline:
await self.request_offline_members(guilds)
for guild, unavailable in self._ready_state.guilds:
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
# remove the state
try:
del self._ready_state
except AttributeError:
pass # already been deleted somehow
# call GUILD_SYNC after we're done chunking
if not self.is_bot:
log.info('Requesting GUILD_SYNC for %s guilds', len(self.guilds))
await self.syncer([s.id for s in self.guilds])
except asyncio.CancelledError:
pass
else:
# dispatch the event
self.call_handlers('ready')
self.dispatch('ready')
finally:
self._ready_task = None
def parse_ready(self, data):
if self._ready_task is not None:
self._ready_task.cancel()
self._ready_state = ReadyState(launch=asyncio.Event(), guilds=[])
self.clear()
self.user = user = ClientUser(state=self, data=data['user'])
self._users[user.id] = user
guilds = self._ready_state.guilds
for guild_data in data['guilds']:
guild = self._add_guild_from_data(guild_data)
if (not self.is_bot and not guild.unavailable) or guild.large:
guilds.append((guild, guild.unavailable))
for relationship in data.get('relationships', []):
try:
r_id = int(relationship['id'])
except KeyError:
continue
else:
user._relationships[r_id] = Relationship(state=self, data=relationship)
for pm in data.get('private_channels', []):
factory, _ = _channel_factory(pm['type'])
self._add_private_channel(factory(me=user, data=pm, state=self))
self.dispatch('connect')
self._ready_task = asyncio.ensure_future(self._delay_ready(), loop=self.loop)
def parse_resumed(self, data):
self.dispatch('resumed')
def parse_message_create(self, data):
channel, _ = self._get_guild_channel(data)
message = Message(channel=channel, data=data, state=self)
self.dispatch('message', message)
if self._messages is not None:
self._messages.append(message)
if channel and channel.__class__ is TextChannel:
channel.last_message_id = message.id
def parse_message_delete(self, data):
raw = RawMessageDeleteEvent(data)
found = self._get_message(raw.message_id)
raw.cached_message = found
self.dispatch('raw_message_delete', raw)
if self._messages is not None and found is not None:
self.dispatch('message_delete', found)
self._messages.remove(found)
def parse_message_delete_bulk(self, data):
raw = RawBulkMessageDeleteEvent(data)
if self._messages:
found_messages = [message for message in self._messages if message.id in raw.message_ids]
else:
found_messages = []
raw.cached_messages = found_messages
self.dispatch('raw_bulk_message_delete', raw)
if found_messages:
self.dispatch('bulk_message_delete', found_messages)
for msg in found_messages:
self._messages.remove(msg)
def parse_message_update(self, data):
raw = RawMessageUpdateEvent(data)
message = self._get_message(raw.message_id)
if message is not None:
older_message = copy.copy(message)
raw.cached_message = older_message
self.dispatch('raw_message_edit', raw)
message._update(data)
self.dispatch('message_edit', older_message, message)
else:
self.dispatch('raw_message_edit', raw)
def parse_message_reaction_add(self, data):
emoji = data['emoji']
emoji_id = utils._get_as_snowflake(emoji, 'id')
emoji = PartialEmoji.with_state(self, animated=emoji.get('animated', False), id=emoji_id, name=emoji['name'])
raw = RawReactionActionEvent(data, emoji, 'REACTION_ADD')
self.dispatch('raw_reaction_add', raw)
# rich interface here
message = self._get_message(raw.message_id)
if message is not None:
emoji = self._upgrade_partial_emoji(emoji)
reaction = message._add_reaction(data, emoji, raw.user_id)
user = self._get_reaction_user(message.channel, raw.user_id)
if user:
self.dispatch('reaction_add', reaction, user)
def parse_message_reaction_remove_all(self, data):
raw = RawReactionClearEvent(data)
self.dispatch('raw_reaction_clear', raw)
message = self._get_message(raw.message_id)
if message is not None:
old_reactions = message.reactions.copy()
message.reactions.clear()
self.dispatch('reaction_clear', message, old_reactions)
def parse_message_reaction_remove(self, data):
emoji = data['emoji']
emoji_id = utils._get_as_snowflake(emoji, 'id')
emoji = PartialEmoji.with_state(self, animated=emoji.get('animated', False), id=emoji_id, name=emoji['name'])
raw = RawReactionActionEvent(data, emoji, 'REACTION_REMOVE')
self.dispatch('raw_reaction_remove', raw)
message = self._get_message(raw.message_id)
if message is not None:
emoji = self._upgrade_partial_emoji(emoji)
try:
reaction = message._remove_reaction(data, emoji, raw.user_id)
except (AttributeError, ValueError): # eventual consistency lol
pass
else:
user = self._get_reaction_user(message.channel, raw.user_id)
if user:
self.dispatch('reaction_remove', reaction, user)
def parse_presence_update(self, data):
guild_id = utils._get_as_snowflake(data, 'guild_id')
guild = self._get_guild(guild_id)
if guild is None:
log.warning('PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
return
user = data['user']
member_id = int(user['id'])
member = guild.get_member(member_id)
if member is None:
if 'username' not in user:
# sometimes we receive 'incomplete' member data post-removal.
# skip these useless cases.
return
member, old_member = Member._from_presence_update(guild=guild, data=data, state=self)
guild._add_member(member)
else:
old_member = Member._copy(member)
user_update = member._presence_update(data=data, user=user)
if user_update:
self.dispatch('user_update', user_update[0], user_update[1])
self.dispatch('member_update', old_member, member)
def parse_user_update(self, data):
self.user._update(data)
def parse_channel_delete(self, data):
guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
channel_id = int(data['id'])
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
guild._remove_channel(channel)
self.dispatch('guild_channel_delete', channel)
else:
# the reason we're doing this is so it's also removed from the
# private channel by user cache as well
channel = self._get_private_channel(channel_id)
if channel is not None:
self._remove_private_channel(channel)
self.dispatch('private_channel_delete', channel)
def parse_channel_update(self, data):
channel_type = try_enum(ChannelType, data.get('type'))
channel_id = int(data['id'])
if channel_type is ChannelType.group:
channel = self._get_private_channel(channel_id)
old_channel = copy.copy(channel)
channel._update_group(data)
self.dispatch('private_channel_update', old_channel, channel)
return
guild_id = utils._get_as_snowflake(data, 'guild_id')
guild = self._get_guild(guild_id)
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
old_channel = copy.copy(channel)
channel._update(guild, data)
self.dispatch('guild_channel_update', old_channel, channel)
else:
log.warning('CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
else:
log.warning('CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_channel_create(self, data):
factory, ch_type = _channel_factory(data['type'])
if factory is None:
log.warning('CHANNEL_CREATE referencing an unknown channel type %s. Discarding.', data['type'])
return
channel = None
if ch_type in (ChannelType.group, ChannelType.private):
channel_id = int(data['id'])
if self._get_private_channel(channel_id) is None:
channel = factory(me=self.user, data=data, state=self)
self._add_private_channel(channel)
self.dispatch('private_channel_create', channel)
else:
guild_id = utils._get_as_snowflake(data, 'guild_id')
guild = self._get_guild(guild_id)
if guild is not None:
channel = factory(guild=guild, state=self, data=data)
guild._add_channel(channel)
self.dispatch('guild_channel_create', channel)
else:
log.warning('CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
return
def parse_channel_pins_update(self, data):
channel_id = int(data['channel_id'])
channel = self.get_channel(channel_id)
if channel is None:
log.warning('CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
return
last_pin = utils.parse_time(data['last_pin_timestamp']) if data['last_pin_timestamp'] else None
try:
# I have not imported discord.abc in this file
# the isinstance check is also 2x slower than just checking this attribute
# so we're just gonna check it since it's easier and faster and lazier
channel.guild
except AttributeError:
self.dispatch('private_channel_pins_update', channel, last_pin)
else:
self.dispatch('guild_channel_pins_update', channel, last_pin)
def parse_channel_recipient_add(self, data):
channel = self._get_private_channel(int(data['channel_id']))
user = self.store_user(data['user'])
channel.recipients.append(user)
self.dispatch('group_join', channel, user)
def parse_channel_recipient_remove(self, data):
channel = self._get_private_channel(int(data['channel_id']))
user = self.store_user(data['user'])
try:
channel.recipients.remove(user)
except ValueError:
pass
else:
self.dispatch('group_remove', channel, user)
def parse_guild_member_add(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is None:
log.warning('GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
member = Member(guild=guild, data=data, state=self)
if self._cache_members:
guild._add_member(member)
guild._member_count += 1
self.dispatch('member_join', member)
def parse_guild_member_remove(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
user_id = int(data['user']['id'])
member = guild.get_member(user_id)
if member is not None:
guild._remove_member(member)
guild._member_count -= 1
self.dispatch('member_remove', member)
else:
log.warning('GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_member_update(self, data):
guild = self._get_guild(int(data['guild_id']))
user = data['user']
user_id = int(user['id'])
if guild is None:
log.warning('GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
member = guild.get_member(user_id)
if member is not None:
old_member = copy.copy(member)
member._update(data)
self.dispatch('member_update', old_member, member)
else:
log.warning('GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.', user_id)
def parse_guild_emojis_update(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is None:
log.warning('GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
before_emojis = guild.emojis
for emoji in before_emojis:
self._emojis.pop(emoji.id, None)
guild.emojis = tuple(map(lambda d: self.store_emoji(guild, d), data['emojis']))
self.dispatch('guild_emojis_update', guild, before_emojis, guild.emojis)
def _get_create_guild(self, data):
if data.get('unavailable') is False:
# GUILD_CREATE with unavailable in the response
# usually means that the guild has become available
# and is therefore in the cache
guild = self._get_guild(int(data['id']))
if guild is not None:
guild.unavailable = False
guild._from_data(data)
return guild
return self._add_guild_from_data(data)
async def _chunk_and_dispatch(self, guild, unavailable):
chunks = list(self.chunks_needed(guild))
await self.chunker(guild)
if chunks:
try:
await utils.sane_wait_for(chunks, timeout=len(chunks))
except asyncio.TimeoutError:
log.info('Somehow timed out waiting for chunks.')
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
def parse_guild_create(self, data):
unavailable = data.get('unavailable')
if unavailable is True:
# joined a guild with unavailable == True so..
return
guild = self._get_create_guild(data)
# check if it requires chunking
if guild.large:
if unavailable is False:
# check if we're waiting for 'useful' READY
# and if we are, we don't want to dispatch any
# event such as guild_join or guild_available
# because we're still in the 'READY' phase. Or
# so we say.
try:
state = self._ready_state
state.launch.clear()
state.guilds.append((guild, unavailable))
except AttributeError:
# the _ready_state attribute is only there during
# processing of useful READY.
pass
else:
return
# since we're not waiting for 'useful' READY we'll just
# do the chunk request here if wanted
if self._fetch_offline:
asyncio.ensure_future(self._chunk_and_dispatch(guild, unavailable), loop=self.loop)
return
# Dispatch available if newly available
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
def parse_guild_sync(self, data):
guild = self._get_guild(int(data['id']))
guild._sync(data)
def parse_guild_update(self, data):
guild = self._get_guild(int(data['id']))
if guild is not None:
old_guild = copy.copy(guild)
guild._from_data(data)
self.dispatch('guild_update', old_guild, guild)
else:
log.warning('GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.', data['id'])
def parse_guild_delete(self, data):
guild = self._get_guild(int(data['id']))
if guild is None:
log.warning('GUILD_DELETE referencing an unknown guild ID: %s. Discarding.', data['id'])
return
if data.get('unavailable', False) and guild is not None:
# GUILD_DELETE with unavailable being True means that the
# guild that was available is now currently unavailable
guild.unavailable = True
self.dispatch('guild_unavailable', guild)
return
# do a cleanup of the messages cache
if self._messages is not None:
self._messages = deque((msg for msg in self._messages if msg.guild != guild), maxlen=self.max_messages)
self._remove_guild(guild)
self.dispatch('guild_remove', guild)
def parse_guild_ban_add(self, data):
# we make the assumption that GUILD_BAN_ADD is done
# before GUILD_MEMBER_REMOVE is called
# hence we don't remove it from cache or do anything
# strange with it, the main purpose of this event
# is mainly to dispatch to another event worth listening to for logging
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
try:
user = User(data=data['user'], state=self)
except KeyError:
pass
else:
member = guild.get_member(user.id) or user
self.dispatch('member_ban', guild, member)
def parse_guild_ban_remove(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
if 'user' in data:
user = self.store_user(data['user'])
self.dispatch('member_unban', guild, user)
def parse_guild_role_create(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is None:
log.warning('GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
role_data = data['role']
role = Role(guild=guild, data=role_data, state=self)
guild._add_role(role)
self.dispatch('guild_role_create', role)
def parse_guild_role_delete(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
role_id = int(data['role_id'])
try:
role = guild._remove_role(role_id)
except KeyError:
return
else:
self.dispatch('guild_role_delete', role)
else:
log.warning('GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_role_update(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
role_data = data['role']
role_id = int(role_data['id'])
role = guild.get_role(role_id)
if role is not None:
old_role = copy.copy(role)
role._update(role_data)
self.dispatch('guild_role_update', old_role, role)
else:
log.warning('GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_members_chunk(self, data):
guild_id = int(data['guild_id'])
guild = self._get_guild(guild_id)
members = [Member(guild=guild, data=member, state=self) for member in data.get('members', [])]
log.info('Processed a chunk for %s members in guild ID %s.', len(members), guild_id)
if self._cache_members:
for member in members:
existing = guild.get_member(member.id)
if existing is None or existing.joined_at is None:
guild._add_member(member)
self.process_listeners(ListenerType.chunk, guild, len(members))
names = [x.name.lower() for x in members]
self.process_listeners(ListenerType.query_members, (guild_id, names), members)
def parse_guild_integrations_update(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
self.dispatch('guild_integrations_update', guild)
else:
log.warning('GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_webhooks_update(self, data):
channel = self.get_channel(int(data['channel_id']))
if channel is not None:
self.dispatch('webhooks_update', channel)
else:
log.warning('WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.', data['channel_id'])
def parse_voice_state_update(self, data):
guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
channel_id = utils._get_as_snowflake(data, 'channel_id')
if guild is not None:
if int(data['user_id']) == self.user.id:
voice = self._get_voice_client(guild.id)
if voice is not None:
ch = guild.get_channel(channel_id)
if ch is not None:
voice.channel = ch
member, before, after = guild._update_voice_state(data, channel_id)
if member is not None:
self.dispatch('voice_state_update', member, before, after)
else:
log.warning('VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.', data['user_id'])
else:
# in here we're either at private or group calls
call = self._calls.get(channel_id)
if call is not None:
call._update_voice_state(data)
def parse_voice_server_update(self, data):
try:
key_id = int(data['guild_id'])
except KeyError:
key_id = int(data['channel_id'])
vc = self._get_voice_client(key_id)
if vc is not None:
asyncio.ensure_future(vc._create_socket(key_id, data))
def parse_typing_start(self, data):
channel, guild = self._get_guild_channel(data)
if channel is not None:
member = None
user_id = utils._get_as_snowflake(data, 'user_id')
if isinstance(channel, DMChannel):
member = channel.recipient
elif isinstance(channel, TextChannel) and guild is not None:
member = guild.get_member(user_id)
elif isinstance(channel, GroupChannel):
member = utils.find(lambda x: x.id == user_id, channel.recipients)
if member is not None:
timestamp = datetime.datetime.utcfromtimestamp(data.get('timestamp'))
self.dispatch('typing', channel, member, timestamp)
def parse_relationship_add(self, data):
key = int(data['id'])
old = self.user.get_relationship(key)
new = Relationship(state=self, data=data)
self.user._relationships[key] = new
if old is not None:
self.dispatch('relationship_update', old, new)
else:
self.dispatch('relationship_add', new)
def parse_relationship_remove(self, data):
key = int(data['id'])
try:
old = self.user._relationships.pop(key)
except KeyError:
pass
else:
self.dispatch('relationship_remove', old)
def _get_reaction_user(self, channel, user_id):
if isinstance(channel, TextChannel):
return channel.guild.get_member(user_id)
return self.get_user(user_id)
def get_reaction_emoji(self, data):
emoji_id = utils._get_as_snowflake(data, 'id')
if not emoji_id:
return data['name']
try:
return self._emojis[emoji_id]
except KeyError:
return PartialEmoji(animated=data.get('animated', False), id=emoji_id, name=data['name'])
def _upgrade_partial_emoji(self, emoji):
emoji_id = emoji.id
if not emoji_id:
return emoji.name
try:
return self._emojis[emoji_id]
except KeyError:
return emoji
def get_channel(self, id):
if id is None:
return None
pm = self._get_private_channel(id)
if pm is not None:
return pm
for guild in self.guilds:
channel = guild.get_channel(id)
if channel is not None:
return channel
def create_message(self, *, channel, data):
return Message(state=self, channel=channel, data=data)
def receive_chunk(self, guild_id):
future = self.loop.create_future()
listener = Listener(ListenerType.chunk, future, lambda s: s.id == guild_id)
self._listeners.append(listener)
return future
def receive_member_query(self, guild_id, query):
def predicate(args, *, guild_id=guild_id, query=query.lower()):
request_guild_id, names = args
return request_guild_id == guild_id and all(n.startswith(query) for n in names)
future = self.loop.create_future()
listener = Listener(ListenerType.query_members, future, predicate)
self._listeners.append(listener)
return future
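# A minimal usage sketch (hypothetical caller; assumes a `chunker` coroutine
# is attached to the state, as used by AutoShardedConnectionState below): the
# futures returned by receive_chunk()/receive_member_query() resolve once
# process_listeners() sees a matching chunk, so a requester awaits them:
#
#     future = state.receive_chunk(guild.id)
#     await state.chunker(guild)   # ask the gateway for the member chunk
#     count = await future         # resolved inside process_listeners()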
class AutoShardedConnectionState(ConnectionState):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._ready_task = None
async def request_offline_members(self, guilds, *, shard_id):
# get all the chunks
chunks = []
for guild in guilds:
chunks.extend(self.chunks_needed(guild))
# we only want to request ~75 guilds per chunk request.
splits = [guilds[i:i + 75] for i in range(0, len(guilds), 75)]
for split in splits:
await self.chunker(split, shard_id=shard_id)
# wait for the chunks
if chunks:
try:
await utils.sane_wait_for(chunks, timeout=len(chunks) * 30.0)
except asyncio.TimeoutError:
log.info('Somehow timed out waiting for chunks.')
async def _delay_ready(self):
launch = self._ready_state.launch
while not launch.is_set():
# this snippet of code is basically waiting 2 seconds per shard
# after the last GUILD_CREATE was sent
launch.set()
await asyncio.sleep(2.0 * self.shard_count)
if self._fetch_offline:
guilds = sorted(self._ready_state.guilds, key=lambda g: g[0].shard_id)
for shard_id, sub_guilds_info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
sub_guilds, sub_available = zip(*sub_guilds_info)
await self.request_offline_members(sub_guilds, shard_id=shard_id)
for guild, unavailable in zip(sub_guilds, sub_available):
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
self.dispatch('shard_ready', shard_id)
else:
for guild, unavailable in self._ready_state.guilds:
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
# remove the state
try:
del self._ready_state
except AttributeError:
pass # already been deleted somehow
# regular users cannot shard so we won't worry about it here.
# clear the current task
self._ready_task = None
# dispatch the event
self.call_handlers('ready')
self.dispatch('ready')
def parse_ready(self, data):
if not hasattr(self, '_ready_state'):
self._ready_state = ReadyState(launch=asyncio.Event(), guilds=[])
self.user = user = ClientUser(state=self, data=data['user'])
self._users[user.id] = user
guilds = self._ready_state.guilds
for guild_data in data['guilds']:
guild = self._add_guild_from_data(guild_data)
if guild.large:
guilds.append((guild, guild.unavailable))
for pm in data.get('private_channels', []):
factory, _ = _channel_factory(pm['type'])
self._add_private_channel(factory(me=user, data=pm, state=self))
self.dispatch('connect')
if self._ready_task is None:
self._ready_task = asyncio.ensure_future(self._delay_ready(), loop=self.loop)
|
|
'Utility functions for the main scripts.'
#
# System includes
#
from baxter_interface import CHECK_VERSION
import Queue
import baxter_dataflow
import baxter_interface
import math
import re
import rospy
import select
import time
#
# File-local variables
#
_button_presses = Queue.Queue(1) # thread-safe place for button-press events
_L_BUTTON = 'left' # Element to add to _button_presses for left button
_R_BUTTON = 'right' # Element to add to _button_presses for right button
_ROS_SHUTDOWN = 'shutdown' # Element to add to _button_presses for quit
_left_arm = None
_left_joints = None
_right_arm = None
_right_joints = None
def connect_to_baxter(nodename):
'''
Connects to Baxter and initializes this process as a ROS node.
This function must be called before any other function that controls Baxter.
Try to think of a unique nodename to give this connection.
'''
global _left_arm
global _right_arm
rospy.init_node(nodename)
baxter_enabler = baxter_interface.RobotEnable(CHECK_VERSION)
baxter_enabler.enable()
rospy.on_shutdown(_cleanup)
left_button = baxter_interface.DigitalIO('left_itb_button0')
left_button.state_changed.connect(_store_left_button_press)
right_button = baxter_interface.DigitalIO('right_itb_button0')
right_button.state_changed.connect(_store_right_button_press)
_left_arm = baxter_interface.Limb('left')
_right_arm = baxter_interface.Limb('right')
def is_baxter_running():
return not rospy.is_shutdown()
def set_left_arm_speed(ratio):
'''
ratio is a number between 0 and 1. 0 means stop, 1 means maximum speed,
0.5 means half speed.
'''
assert _left_arm is not None, 'You need to call connect_to_baxter() first'
_left_arm.set_joint_position_speed(ratio)
def set_right_arm_speed(ratio):
'''
ratio is a number between 0 and 1. 0 means stop, 1 means maximum speed,
0.5 means half speed.
'''
assert _right_arm is not None, 'You need to call connect_to_baxter() first'
_right_arm.set_joint_position_speed(ratio)
def left_arm_joint_angles():
'''
Returns a list of the joint angles in the same order as
left_arm_joint_names() in degrees
'''
assert _left_arm is not None, 'You need to call connect_to_baxter() first'
return _arm_joint_angles(_left_arm)
def right_arm_joint_angles():
'''
Returns a list of the joint angles in the same order as
right_arm_joint_names() in degrees
'''
assert _right_arm is not None, 'You need to call connect_to_baxter() first'
return _arm_joint_angles(_right_arm)
def left_arm_joint_names():
'''
Returns a list of the joint names starting from the shoulder to the wrist
of the left Baxter arm
'''
assert _left_arm is not None, 'You need to call connect_to_baxter() first'
return _left_arm.joint_names()
def right_arm_joint_names():
'''
Returns a list of the joint names starting from the shoulder to the wrist
of the right Baxter arm
'''
assert _right_arm is not None, 'You need to call connect_to_baxter() first'
return _right_arm.joint_names()
def move_left_arm_joint(joint, angle):
'''
Move one joint from Baxter's left arm.
@param joint: the name of the joint to move
@param angle: The angle to move it to in degrees
'''
assert _left_arm is not None, 'You need to call connect_to_baxter() first'
_move_to_positions(_left_arm, {joint: angle})
def move_right_arm_joint(joint, angle):
'''
Move one joint from Baxter's right arm.
@param joint: the name of the joint to move
@param angle: The angle to move it to in degrees
'''
assert _right_arm is not None, 'You need to call connect_to_baxter() first'
_move_to_positions(_right_arm, {joint: angle})
def move_left_arm_to_positions(positions):
'''
Move Baxter's left arm to the given positions.
@param positions: A list of joint angles for the joints starting at the
shoulder and going to the wrist (same order of joint
names)
'''
assert _left_arm is not None, 'You need to call connect_to_baxter() first'
positions_dictionary = dict(zip(_left_arm.joint_names(), positions))
_move_to_positions(_left_arm, positions_dictionary)
def move_right_arm_to_positions(positions):
'''
Move Baxter's right arm to the given positions.
@param positions: A list of joint angles for the joints starting at the
shoulder and going to the wrist (same order of joint
names)
'''
assert _right_arm is not None, 'You need to call connect_to_baxter() first'
positions_dictionary = dict(zip(_right_arm.joint_names(), positions))
_move_to_positions(_right_arm, positions_dictionary)
def try_get_line(in_stream):
'Returns a line if there is one, else an empty string'
line = ''
file_number = in_stream.fileno()
timeout = 0.01 # seconds
ready_to_read, _, _ = select.select([file_number], [], [], timeout)
if file_number in ready_to_read:
line = in_stream.readline()
return line.rstrip('\n')  # Remove the trailing newline, if any
def wait_for_button_press():
'''
Waits for a button press signal or a ROS shutdown signal. It will return
'left' for the left button being pressed and 'right' for the right button
being pressed. If there was a ROS shutdown signal, then this method will
raise a rospy.ROSException.
Note, this is a blocking call until one of those three things happens, so be
sure you have called connect_to_baxter().
'''
# Wait for one to be pushed
one_year = 365 * 24 * 60 * 60
which_button = _button_presses.get(timeout=one_year)
if which_button == _ROS_SHUTDOWN:
raise rospy.ROSException()
return which_button
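# A minimal usage sketch (hypothetical script): record the left arm's pose
# each time the left button is pressed, until ROS shuts down.
#
#     connect_to_baxter('pose_recorder')
#     while is_baxter_running():
#         try:
#             button = wait_for_button_press()
#         except rospy.ROSException:
#             break
#         if button == 'left':
#             save_joint_angles('out.py', left_arm_joint_names(),
#                               left_arm_joint_angles())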
def save_joint_angles(filename, names, angles, digits=1):
'''
Saves the angles dictionary to filename in python notation
Will save it into a list of lists called path.
For example
save_joint_angles('out.py', ['a', 'b'], [1, 2])
will write to 'out.py' something like:
path = []
joint_names = [
'a',
'b',
]
positions = [
1,
2,
]
path.append(positions)
'''
# Check for first-time initialization
is_path_defined = False
is_joint_names_defined = False
try:
with open(filename, 'r') as infile:
for line in infile:
if re.match('path =', line):
is_path_defined = True
if re.match('joint_names =', line):
is_joint_names_defined = True
except IOError:
pass # Ignore the problem that the file doesn't yet exist
with open(filename, 'a') as outfile:
if not is_path_defined:
outfile.write('path = []\n')
if not is_joint_names_defined:
outfile.write('joint_names = [\n "')
outfile.write('",\n "'.join(names))
outfile.write('"\n ]\n')
outfile.write('\n')
outfile.write('positions = [\n ')
format_str = '{0:' + str(digits+5) + '.0' + str(digits) + 'f}'
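# e.g. digits=1 builds '{0:6.01f}': field width 6, one decimal place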
outfile.write(',\n '.join([format_str.format(x) for x in angles]))
outfile.write('\n ]\n')
outfile.write('path.append(positions)\n')
def register_note_right_arm(note, above_position, down_position):
_register_note('right', note, above_position, down_position)
def register_note_left_arm(note, above_position, down_position):
_register_note('left', note, above_position, down_position)
def play_notes_right_arm(notes):
_play_notes('right', notes)
def play_notes_left_arm(notes):
_play_notes('left', notes)
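# A minimal usage sketch (positions are assumed to have been recorded
# beforehand, e.g. with save_joint_angles): register each note once, then
# play a sequence; '-' is treated as a rest by _play_notes below.
#
#     register_note_right_arm('c', above_position=c_above, down_position=c_down)
#     register_note_right_arm('d', above_position=d_above, down_position=d_down)
#     play_notes_right_arm(['c', 'd', '-', 'c'])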
## Private functions
def _rad_to_deg(angle):
'Convert radians to degrees'
return angle * 180 / math.pi
def _deg_to_rad(angle):
'Convert degrees to radians'
return angle * math.pi / 180
def _arm_joint_angles(arm):
'''
Returns a list of the joint angles in the same order as
arm.joint_names() in degrees
'''
positions_dict = arm.joint_angles()
joint_names = arm.joint_names()
angles = [_rad_to_deg(positions_dict[joint]) for joint in joint_names]
return angles
_left_note_above_positions = {}
_left_note_down_positions = {}
_right_note_above_positions = {}
_right_note_down_positions = {}
def _register_note(arm, note, above_position, down_position):
if arm == 'left':
above_dict = _left_note_above_positions
down_dict = _left_note_down_positions
else:
above_dict = _right_note_above_positions
down_dict = _right_note_down_positions
above_dict[note] = above_position
down_dict[note] = down_position
def _play_notes(arm, notes):
if arm == 'left':
above_dict = _left_note_above_positions
down_dict = _left_note_down_positions
mover = move_left_arm_to_positions
else:
above_dict = _right_note_above_positions
down_dict = _right_note_down_positions
mover = move_right_arm_to_positions
for note in notes:
if note == '-':
time.sleep(0.5)
continue
mover(above_dict[note])
time.sleep(0.1)
mover(down_dict[note])
time.sleep(0.1)
mover(above_dict[note])
def _store_left_button_press(state):
'Callback for left button press. Stores _L_BUTTON into _button_presses.'
if state:
try:
_button_presses.put_nowait(_L_BUTTON)
except Queue.Full:
pass # ignore that we haven't processed the last one yet
def _store_right_button_press(state):
'Callback for right button press. Stores _R_BUTTON into _button_presses.'
if state:
try:
_button_presses.put_nowait(_R_BUTTON)
except Queue.Full:
pass # ignore that we haven't processed the last one yet
def _cleanup():
'Callback for ROS shutdown. Stores _ROS_SHUTDOWN into _button_presses'
try:
_button_presses.get(block=False)
except Queue.Empty:
pass # ignore
_button_presses.put(_ROS_SHUTDOWN)
def _move_to_positions(limb, pos_dict):
'''
Moves the limb to the desired positions.
@param limb - The baxter_interface.Limb object to move
@param pos_dict - Dictionary of joint -> angle positions in degrees
Note, to change how fast it goes, you can call
>>> limb.set_joint_position_speed(ratio)
where ratio is a number between 0 and 1 with 1 being pretty fast and 0
being stopped.
'''
# Convert to radians first thing
for key in pos_dict:
pos_dict[key] = _deg_to_rad(pos_dict[key])
cmd = limb.joint_angles()
def filtered_cmd():
# First Order Filter - 0.2 Hz Cutoff
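# (i.e. exponential smoothing: each call moves the command a fixed
# fraction, 0.07, of the remaining distance toward the target, so
# repeated calls converge on the target instead of jumping there)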
for joint, value in pos_dict.iteritems():
cmd[joint] = 0.07 * value + 0.93 * cmd[joint]
return cmd
diff = lambda joint, angle: abs(angle - limb.joint_angle(joint))
#threshold = baxter_interface.settings.JOINT_ANGLE_TOLERANCE
# We want a bigger threshold so that it doesn't wiggle at the end, but
# says "good enough" and goes to the next point
threshold = 0.02
timeout = 15.0 # seconds
# Otherwise, go there without the filter
#limb.set_joint_positions(filtered_cmd())
limb.set_joint_positions(pos_dict)
baxter_dataflow.wait_for(
lambda: (all(diff(j, a) < threshold for j, a in pos_dict.iteritems())),
timeout=timeout,
rate=100,
raise_on_error=False,
body=lambda: limb.set_joint_positions(pos_dict)
)
|
|
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
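# The update rule these tests check, as a plain reference sketch (not
# TensorFlow's own implementation):
#
#     accum = momentum * accum + grad     # the "momentum" slot variable
#     var -= learning_rate * accum        # parameter update
#
# e.g. with learning_rate=2.0, momentum=0.9, grad=0.1 and accum starting
# at 0: step 1 gives accum=0.1 and var -= 0.2; step 2 gives accum=0.19
# and var -= 0.38, matching the hand-computed constants asserted below.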
class MomentumOptimizerTest(tf.test.TestCase):
def testBasic(self):
with self.test_session():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in tf.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in tf.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
def testFloat64(self):
with self.test_session():
opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
# compute_gradients.
values = [1.0, 3.0]
good_vars = [tf.Variable([v]) for v in values]
bad_loss = tf.constant(2.0, tf.float64, name="bad_loss")
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_loss.*expected.*float32",
opt.compute_gradients, bad_loss, good_vars)
bad_vars = [
tf.Variable(np.array([v], np.float64), name="bad_var")
for v in values]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32",
opt.compute_gradients, tf.cast(bad_vars[0] + bad_vars[1], tf.float32),
bad_vars)
opt.compute_gradients(good_vars[0] + good_vars[1], good_vars)
# apply_gradients.
bad_grads = [
tf.constant([0.1], dtype=np.float64, name="bad_grad"),
tf.constant([0.01])]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_grad.*expected.*float32",
opt.apply_gradients, zip(bad_grads, good_vars))
good_grads = [tf.constant([0.01]), tf.constant([0.02])]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32",
opt.apply_gradients, zip(good_grads, bad_vars))
opt.apply_gradients(zip(good_grads, good_vars))
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
The values were generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]
db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]
db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]
db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]
db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]
db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]
db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]
db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]
db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]
db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]
db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]
db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]
db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]
db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]
db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = tf.Variable([0.0] * num_samples)
grads0 = tf.constant([0.0] * num_samples)
mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
tf.initialize_all_variables().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
def testSparse(self):
with self.test_session():
var0 = tf.Variable(tf.zeros([4, 2]))
var1 = tf.Variable(
tf.constant(1.0, tf.float32, [4, 2]))
grads0 = tf.IndexedSlices(tf.constant([[.1, .1]]),
tf.constant([1]),
tf.constant([4, 2]))
grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]]),
tf.constant([2, 3]),
tf.constant([4, 2]))
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllClose(np.array([.1, .1]), slot0.eval()[1])
self.assertAllClose(np.array([.01, .01]), slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllClose(np.array([- (0.1 * 2.0),
- (0.1 * 2.0)]),
var0.eval()[1])
self.assertAllClose(np.array([1.0 - (0.01 * 2.0),
1.0 - (0.01 * 2.0)]),
var1.eval()[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1),
(0.9 * 0.1 + 0.1)]),
slot0.eval()[1])
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01),
(0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllClose(
np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval()[1])
self.assertAllClose(np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval()[2])
def testSharing(self):
with self.test_session():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
if __name__ == "__main__":
tf.test.main()
|
|
import json
import os
from ctypes import addressof, byref, c_double
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import GDALBand
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils import six
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible,
)
from django.utils.functional import cached_property
from django.utils.six.moves import range
class TransformPoint(list):
indices = {
'origin': (0, 3),
'scale': (1, 5),
'skew': (2, 4),
}
def __init__(self, raster, prop):
x = raster.geotransform[self.indices[prop][0]]
y = raster.geotransform[self.indices[prop][1]]
list.__init__(self, [x, y])
self._raster = raster
self._prop = prop
@property
def x(self):
return self[0]
@x.setter
def x(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][0]] = value
self._raster.geotransform = gtf
@property
def y(self):
return self[1]
@y.setter
def y(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][1]] = value
self._raster.geotransform = gtf
@python_2_unicode_compatible
class GDALRaster(GDALBase):
"""
Wraps a raster GDAL Data Source object.
"""
def __init__(self, ds_input, write=False):
self._write = 1 if write else 0
Driver.ensure_registered()
# Preprocess json inputs. This converts json strings to dictionaries,
# which are parsed below the same way as direct dictionary inputs.
if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
ds_input = json.loads(ds_input)
# If input is a valid file path, try setting file as source.
if isinstance(ds_input, six.string_types):
if not os.path.exists(ds_input):
raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
try:
# GDALOpen will auto-detect the data source type.
self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
except GDALException as err:
raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
elif isinstance(ds_input, dict):
# A new raster needs to be created in write mode
self._write = 1
# Create driver (in memory by default)
driver = Driver(ds_input.get('driver', 'MEM'))
# For out of memory drivers, check filename argument
if driver.name != 'MEM' and 'name' not in ds_input:
raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))
# Check if width and height were specified
if 'width' not in ds_input or 'height' not in ds_input:
raise GDALException('Specify width and height attributes for JSON or dict input.')
# Check if srid was specified
if 'srid' not in ds_input:
raise GDALException('Specify srid for JSON or dict input.')
# Create GDAL Raster
self._ptr = capi.create_ds(
driver._ptr,
force_bytes(ds_input.get('name', '')),
ds_input['width'],
ds_input['height'],
ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
ds_input.get('datatype', 6),
None
)
# Set band data if provided
for i, band_input in enumerate(ds_input.get('bands', [])):
self.bands[i].data(band_input['data'])
if 'nodata_value' in band_input:
self.bands[i].nodata_value = band_input['nodata_value']
# Set SRID, default to 0 (this ensures an SRS is always instantiated)
self.srs = ds_input.get('srid')
# Set additional properties if provided
if 'origin' in ds_input:
self.origin.x, self.origin.y = ds_input['origin']
if 'scale' in ds_input:
self.scale.x, self.scale.y = ds_input['scale']
if 'skew' in ds_input:
self.skew.x, self.skew.y = ds_input['skew']
else:
raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))
def __del__(self):
if self._ptr and capi:
capi.close_ds(self._ptr)
def __str__(self):
return self.name
def __repr__(self):
"""
Short-hand representation of the raster.
"""
return '<Raster object at %s>' % hex(addressof(self._ptr))
def _flush(self):
"""
Flush all data from memory into the source file if it exists.
The data that needs flushing are geotransforms, coordinate systems,
nodata_values and pixel values. This function will be called
automatically wherever it is needed.
"""
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException('Raster needs to be opened in write mode to change values.')
capi.flush_ds(self._ptr)
@property
def name(self):
"""
Returns the name of this raster. Corresponds to filename
for file-based rasters.
"""
return force_text(capi.get_ds_description(self._ptr))
@cached_property
def driver(self):
"""
Returns the GDAL Driver used for this raster.
"""
ds_driver = capi.get_ds_driver(self._ptr)
return Driver(ds_driver)
@property
def width(self):
"""
Width (X axis) in pixels.
"""
return capi.get_ds_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels.
"""
return capi.get_ds_ysize(self._ptr)
@property
def srs(self):
"""
Returns the SpatialReference used in this GDALRaster.
"""
try:
wkt = capi.get_ds_projection_ref(self._ptr)
if not wkt:
return None
return SpatialReference(wkt, srs_type='wkt')
except SRSException:
return None
@srs.setter
def srs(self, value):
"""
Sets the spatial reference used in this GDALRaster. The input can be
a SpatialReference or any parameter accepted by the SpatialReference
constructor.
"""
if isinstance(value, SpatialReference):
srs = value
elif isinstance(value, six.integer_types + six.string_types):
srs = SpatialReference(value)
else:
raise ValueError('Could not create a SpatialReference from input.')
capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
self._flush()
@property
def geotransform(self):
"""
Returns the geotransform of the data source.
Returns the default geotransform if it does not exist or has not been
set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
"""
# Create empty ctypes double array for data
gtf = (c_double * 6)()
capi.get_ds_geotransform(self._ptr, byref(gtf))
return list(gtf)
@geotransform.setter
def geotransform(self, values):
"Sets the geotransform for the data source."
if sum([isinstance(x, (int, float)) for x in values]) != 6:
raise ValueError('Geotransform must consist of 6 numeric values.')
# Create ctypes double array with input and write data
values = (c_double * 6)(*values)
capi.set_ds_geotransform(self._ptr, byref(values))
self._flush()
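# For reference, GDAL orders the six geotransform coefficients as
# [origin_x, scale_x, skew_x, origin_y, skew_y, scale_y]; this is the
# mapping TransformPoint.indices above relies on, e.g.:
#
#     rst.geotransform = [0.0, 1.0, 0.0, 0.0, 0.0, -1.0]  # the default
#     rst.origin.x, rst.scale.y  # -> (0.0, -1.0)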
@property
def origin(self):
"""
Coordinates of the raster origin.
"""
return TransformPoint(self, 'origin')
@property
def scale(self):
"""
Pixel scale in units of the raster projection.
"""
return TransformPoint(self, 'scale')
@property
def skew(self):
"""
Skew of pixels (rotation parameters).
"""
return TransformPoint(self, 'skew')
@property
def extent(self):
"""
Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
"""
# Calculate boundary values based on scale and size
xval = self.origin.x + self.scale.x * self.width
yval = self.origin.y + self.scale.y * self.height
# Calculate min and max values
xmin = min(xval, self.origin.x)
xmax = max(xval, self.origin.x)
ymin = min(yval, self.origin.y)
ymax = max(yval, self.origin.y)
return xmin, ymin, xmax, ymax
@cached_property
def bands(self):
"""
Returns the bands of this raster as a list of GDALBand instances.
"""
bands = []
for idx in range(1, capi.get_ds_raster_count(self._ptr) + 1):
bands.append(GDALBand(self, idx))
return bands
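# A minimal usage sketch (values assumed for illustration): building an
# in-memory raster from a dict exercises the width/height/srid branch of
# GDALRaster.__init__ above.
#
#     rst = GDALRaster({
#         'width': 2, 'height': 2, 'srid': 4326,
#         'datatype': 1,  # GDT_Byte
#         'bands': [{'data': [0, 1, 2, 3]}],
#     })
#     rst.bands[0].data()  # -> the 2x2 pixel values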
|
|
import json
import logging
import re
import requests
import six
import socket
import time
import websocket
from .exceptions import SocketIOError, ConnectionError, TimeoutError
TRANSPORTS = 'websocket', 'xhr-polling', 'jsonp-polling'
BOUNDARY = six.u('\ufffd')
TIMEOUT_IN_SECONDS = 3
_log = logging.getLogger(__name__)
class _AbstractTransport(object):
def __init__(self):
self._packet_id = 0
self._callback_by_packet_id = {}
self._wants_to_disconnect = False
self._packets = []
def disconnect(self, path=''):
if not path:
self._wants_to_disconnect = True
if not self.connected:
return
if path:
self.send_packet(0, path)
else:
self.close()
def connect(self, path):
self.send_packet(1, path)
def send_heartbeat(self):
self.send_packet(2)
def message(self, path, data, callback):
if isinstance(data, six.string_types):
code = 3
else:
code = 4
data = json.dumps(data, ensure_ascii=False)
self.send_packet(code, path, data, callback)
def emit(self, path, event, args, callback):
data = json.dumps(dict(name=event, args=args), ensure_ascii=False)
self.send_packet(5, path, data, callback)
def ack(self, path, packet_id, *args):
packet_id = packet_id.rstrip('+')
data = '%s+%s' % (
packet_id,
json.dumps(args, ensure_ascii=False),
) if args else packet_id
self.send_packet(6, path, data)
def noop(self, path=''):
self.send_packet(8, path)
def send_packet(self, code, path='', data='', callback=None):
packet_id = self.set_ack_callback(callback) if callback else ''
packet_parts = str(code), packet_id, path, data
packet_text = ':'.join(packet_parts)
self.send(packet_text)
_log.debug('[packet sent] %s', packet_text)
def recv_packet(self):
try:
while self._packets:
yield self._packets.pop(0)
except IndexError:
pass
for packet_text in self.recv():
_log.debug('[packet received] %s', packet_text)
try:
packet_parts = packet_text.split(':', 3)
except AttributeError:
_log.warn('[packet error] %s', packet_text)
continue
code, packet_id, path, data = None, None, None, None
packet_count = len(packet_parts)
if 4 == packet_count:
code, packet_id, path, data = packet_parts
elif 3 == packet_count:
code, packet_id, path = packet_parts
elif 1 == packet_count:
code = packet_parts[0]
yield code, packet_id, path, data
def _enqueue_packet(self, packet):
self._packets.append(packet)
def set_ack_callback(self, callback):
'Set callback to be called after server sends an acknowledgment'
self._packet_id += 1
self._callback_by_packet_id[str(self._packet_id)] = callback
return '%s+' % self._packet_id
def get_ack_callback(self, packet_id):
'Get callback to be called after server sends an acknowledgment'
callback = self._callback_by_packet_id[packet_id]
del self._callback_by_packet_id[packet_id]
return callback
@property
def has_ack_callback(self):
return True if self._callback_by_packet_id else False
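# For reference, send_packet()/recv_packet() above speak the socket.io 0.x
# wire format of up to four colon-separated parts, `code:packet_id:path:data`
# (examples illustrative, derived from the packet codes used above):
#
#     '3:1+::hello'    # message, packet id 1 expecting an ack, default path
#     '2:::'           # heartbeat
#     '6:::1+["ok"]'   # ack for packet 1 carrying JSON args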
class _WebsocketTransport(_AbstractTransport):
def __init__(self, socketIO_session, is_secure, base_url, **kw):
super(_WebsocketTransport, self).__init__()
url = '%s://%s/websocket/%s' % (
'wss' if is_secure else 'ws',
base_url, socketIO_session.id)
try:
self._connection = websocket.create_connection(url)
except socket.timeout as e:
raise ConnectionError(e)
except socket.error as e:
raise ConnectionError(e)
self._connection.settimeout(TIMEOUT_IN_SECONDS)
@property
def connected(self):
return self._connection.connected
def send(self, packet_text):
try:
self._connection.send(packet_text)
except websocket.WebSocketTimeoutException as e:
message = 'timed out while sending %s (%s)' % (packet_text, e)
_log.warn(message)
raise TimeoutError(e)
except socket.error as e:
message = 'disconnected while sending %s (%s)' % (packet_text, e)
_log.warn(message)
raise ConnectionError(message)
def recv(self):
try:
yield self._connection.recv()
except websocket.WebSocketTimeoutException as e:
raise TimeoutError(e)
except websocket.SSLError as e:
if 'timed out' in str(e):
raise TimeoutError(e)
else:
raise ConnectionError(e)
except websocket.WebSocketConnectionClosedException as e:
raise ConnectionError('connection closed (%s)' % e)
except socket.error as e:
raise ConnectionError(e)
def close(self):
self._connection.close()
class _XHR_PollingTransport(_AbstractTransport):
def __init__(self, socketIO_session, is_secure, base_url, **kw):
super(_XHR_PollingTransport, self).__init__()
self._url = '%s://%s/xhr-polling/%s' % (
'https' if is_secure else 'http',
base_url, socketIO_session.id)
self._connected = True
self._http_session = _prepare_http_session(kw)
# Create connection
for packet in self.recv_packet():
self._enqueue_packet(packet)
@property
def connected(self):
return self._connected
@property
def _params(self):
return dict(t=int(time.time()))
def send(self, packet_text):
_get_response(
self._http_session.post,
self._url,
params=self._params,
data=packet_text,
timeout=TIMEOUT_IN_SECONDS)
def recv(self):
response = _get_response(
self._http_session.get,
self._url,
params=self._params,
timeout=TIMEOUT_IN_SECONDS)
response_text = response.text
if not response_text.startswith(BOUNDARY):
yield response_text
return
for packet_text in _yield_text_from_framed_data(response_text):
yield packet_text
def close(self):
_get_response(
self._http_session.get,
self._url,
params=dict(self._params.items() + [('disconnect', True)]))
self._connected = False
class _JSONP_PollingTransport(_AbstractTransport):
RESPONSE_PATTERN = re.compile(r'io.j\[(\d+)\]\("(.*)"\);')
def __init__(self, socketIO_session, is_secure, base_url, **kw):
super(_JSONP_PollingTransport, self).__init__()
self._url = '%s://%s/jsonp-polling/%s' % (
'https' if is_secure else 'http',
base_url, socketIO_session.id)
self._connected = True
self._http_session = _prepare_http_session(kw)
self._id = 0
# Create connection
for packet in self.recv_packet():
self._enqueue_packet(packet)
@property
def connected(self):
return self._connected
@property
def _params(self):
return dict(t=int(time.time()), i=self._id)
def send(self, packet_text):
_get_response(
self._http_session.post,
self._url,
params=self._params,
data='d=%s' % requests.utils.quote(json.dumps(packet_text)),
headers={'content-type': 'application/x-www-form-urlencoded'},
timeout=TIMEOUT_IN_SECONDS)
def recv(self):
'Decode the JavaScript response so that we can parse it as JSON'
response = _get_response(
self._http_session.get,
self._url,
params=self._params,
headers={'content-type': 'text/javascript; charset=UTF-8'},
timeout=TIMEOUT_IN_SECONDS)
response_text = response.text
try:
self._id, response_text = self.RESPONSE_PATTERN.match(
response_text).groups()
except AttributeError:
_log.warn('[packet error] %s', response_text)
return
if not response_text.startswith(BOUNDARY):
yield response_text.decode('unicode_escape')
return
for packet_text in _yield_text_from_framed_data(
response_text, parse=lambda x: x.decode('unicode_escape')):
yield packet_text
def close(self):
_get_response(
self._http_session.get,
self._url,
params=dict(self._params.items() + [('disconnect', True)]))
self._connected = False
def _negotiate_transport(
client_supported_transports, session,
is_secure, base_url, **kw):
server_supported_transports = session.server_supported_transports
for supported_transport in client_supported_transports:
if supported_transport in server_supported_transports:
_log.debug('[transport selected] %s', supported_transport)
return {
'websocket': _WebsocketTransport,
'xhr-polling': _XHR_PollingTransport,
'jsonp-polling': _JSONP_PollingTransport,
}[supported_transport](session, is_secure, base_url, **kw)
raise SocketIOError(' '.join([
'could not negotiate a transport:',
'client supports %s but' % ', '.join(client_supported_transports),
'server supports %s' % ', '.join(server_supported_transports),
]))
def _yield_text_from_framed_data(framed_data, parse=lambda x: x):
parts = [parse(x) for x in framed_data.split(BOUNDARY)]
for text_length, text in zip(parts[1::2], parts[2::2]):
if text_length != str(len(text)):
warning = 'invalid declared length=%s for packet_text=%s' % (
text_length, text)
_log.warn('[packet error] %s', warning)
continue
yield text
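# For reference, framed payloads interleave lengths and texts between
# BOUNDARY markers, so split(BOUNDARY) yields ['', len1, text1, len2, ...]
# and the zip above pairs each declared length with its text, e.g.:
#
#     framed = u'\ufffd5\ufffdhello\ufffd3\ufffdhey'
#     list(_yield_text_from_framed_data(framed))  # -> [u'hello', u'hey']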
def _get_response(request, *args, **kw):
try:
response = request(*args, **kw)
except requests.exceptions.Timeout as e:
raise TimeoutError(e)
except requests.exceptions.ConnectionError as e:
raise ConnectionError(e)
except requests.exceptions.SSLError as e:
raise ConnectionError('could not negotiate SSL (%s)' % e)
status = response.status_code
if 200 != status:
raise ConnectionError('unexpected status code (%s)' % status)
return response
def _prepare_http_session(kw):
http_session = requests.Session()
http_session.headers.update(kw.get('headers', {}))
http_session.auth = kw.get('auth')
http_session.proxies.update(kw.get('proxies', {}))
http_session.hooks.update(kw.get('hooks', {}))
http_session.params.update(kw.get('params', {}))
http_session.verify = kw.get('verify')
http_session.cert = kw.get('cert')
http_session.cookies.update(kw.get('cookies', {}))
return http_session
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions related to train_and_evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_MAX_DELAY_SECS = 60
_DELAY_SECS_PER_WORKER = 5
_TF_CONFIG_ENV = 'TF_CONFIG'
_ENVIRONMENT_KEY = 'environment'
_ENVIRONMENT_GOOGLE_VALUE = 'google'
_TRAINER_JOBS = (run_config_lib.TaskType.CHIEF, run_config_lib.TaskType.MASTER,
run_config_lib.TaskType.WORKER)
def _validate_input_fn(input_fn):
"""Validates the `input_fn`."""
if not callable(input_fn):
raise TypeError('`input_fn` must be callable, given: {}'.format(input_fn))
def _validate_hooks(hooks):
"""Validates the `hooks`."""
hooks = tuple(hooks or [])
for hook in hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be `SessionRunHook` instances, given: {}'.format(
hook))
return hooks
def _validate_exporters(exporters):
"""Validates `exporters` and returns them as a tuple."""
if not exporters:
return ()
if isinstance(exporters, exporter_lib.Exporter):
exporters = [exporters]
unique_names = [] # `Exporter`s should have unique names.
try:
for exporter in exporters:
if not isinstance(exporter, exporter_lib.Exporter):
# Error message will be printed out by the outer try/except.
raise TypeError
if not exporter.name:
full_list_of_names = [e.name for e in exporters]
raise ValueError('An Exporter cannot have a name that is `None` or'
' empty. All exporter names:'
' {}'.format(full_list_of_names))
if not isinstance(exporter.name, six.string_types):
raise ValueError('An Exporter must have a string name. Given: '
'{}'.format(type(exporter.name)))
if exporter.name in unique_names:
full_list_of_names = [e.name for e in exporters]
raise ValueError(
'`exporters` must have unique names. Such a name cannot be `None`.'
' All exporter names: {}'.format(full_list_of_names))
unique_names.append(exporter.name)
except TypeError:
# Two possibilities:
# - `exporters` is neither `Exporter` nor iterable. Python has
# raised a `TypeError` when iterating over `exporters`.
# - an `exporter` was None or not of type `Exporter`, so we raised a
# `TypeError`.
raise TypeError('`exporters` must be an Exporter,'
' an iterable of Exporter, or `None`,'
' found %s.' % exporters)
return tuple(exporters)
def _is_google_env():
"""Detects whether current environment is google."""
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')
if not tf_config:
logging.warn('TF_CONFIG should not be empty in distributed environment.')
return tf_config.get(_ENVIRONMENT_KEY) == _ENVIRONMENT_GOOGLE_VALUE
@tf_export('estimator.TrainSpec')
class TrainSpec(
collections.namedtuple('TrainSpec', ['input_fn', 'max_steps', 'hooks'])):
"""Configuration for the "train" part for the `train_and_evaluate` call.
`TrainSpec` determines the input data for the training, as well as the
duration. Optional hooks run at various stages of training.
"""
def __new__(cls, input_fn, max_steps=None, hooks=None):
"""Creates a validated `TrainSpec` instance.
Args:
input_fn: Training input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
max_steps: Int. Positive number of total steps for which to train model.
If `None`, train forever. The training `input_fn` is not expected to
generate `OutOfRangeError` or `StopIteration` exceptions. See the
`train_and_evaluate` stop condition section for details.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers (including chief) during training.
Returns:
A validated `TrainSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate max_steps.
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
# Validate hooks.
hooks = _validate_hooks(hooks)
return super(TrainSpec, cls).__new__(
cls, input_fn=input_fn, max_steps=max_steps, hooks=hooks)
@tf_export('estimator.EvalSpec')
class EvalSpec(
collections.namedtuple('EvalSpec', [
'input_fn', 'steps', 'name', 'hooks', 'exporters', 'start_delay_secs',
'throttle_secs'
])):
"""Configuration for the "eval" part for the `train_and_evaluate` call.
`EvalSpec` combines details of evaluation of the trained model as well as its
export. Evaluation consists of computing metrics to judge the performance of
the trained model. Export writes out the trained model on to external
storage.
"""
def __new__(cls,
input_fn,
steps=100,
name=None,
hooks=None,
exporters=None,
start_delay_secs=120,
throttle_secs=600):
"""Creates a validated `EvalSpec` instance.
Args:
input_fn: Evaluation input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Int. Positive number of steps for which to evaluate model. If
`None`, evaluates until `input_fn` raises an end-of-input exception.
See `Estimator.evaluate` for details.
name: String. Name of the evaluation if user needs to run multiple
evaluations on different data sets. Metrics for different evaluations
are saved in separate folders, and appear separately in tensorboard.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
during evaluation.
exporters: Iterable of `Exporter`s, or a single one, or `None`.
`exporters` will be invoked after each evaluation.
start_delay_secs: Int. Start evaluating after waiting for this many
seconds.
throttle_secs: Int. Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. Of course, evaluation does not
occur if no new checkpoints are available, hence, this is the minimum.
Returns:
A validated `EvalSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate steps.
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
# Validate name.
if name is not None and not isinstance(name, six.string_types):
raise TypeError('`name` must be string, given: {}'.format(name))
# Validate hooks.
hooks = _validate_hooks(hooks)
# Validate exporters.
exporters = _validate_exporters(exporters)
# Validate start_delay_secs.
if start_delay_secs < 0:
raise ValueError('Must specify start_delay_secs >= 0, given: {}'.format(
start_delay_secs))
# Validate throttle_secs.
if throttle_secs < 0:
raise ValueError(
'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))
return super(EvalSpec, cls).__new__(
cls,
input_fn=input_fn,
steps=steps,
name=name,
hooks=hooks,
exporters=exporters,
start_delay_secs=start_delay_secs,
throttle_secs=throttle_secs)
@tf_export('estimator.train_and_evaluate')
def train_and_evaluate(estimator, train_spec, eval_spec):
"""Train and evaluate the `estimator`.
This utility function trains, evaluates, and (optionally) exports the model by
using the given `estimator`. All training related specification is held in
`train_spec`, including training `input_fn` and training max steps, etc. All
evaluation and export related specification is held in `eval_spec`, including
evaluation `input_fn`, steps, etc.
This utility function provides consistent behavior for both local
(non-distributed) and distributed configurations. Currently, the only
supported distributed training configuration is between-graph replication.
Overfitting: In order to avoid overfitting, it is recommended to set up the
training `input_fn` to shuffle the training data properly. It is also
recommended to train the model a little longer, say multiple epochs, before
performing evaluation, as the input pipeline starts from scratch for each
training. It is particularly important for local training and evaluation.
Stop condition: In order to support both distributed and non-distributed
configuration reliably, the only supported stop condition for model
training is `train_spec.max_steps`. If `train_spec.max_steps` is `None`, the
model is trained forever. *Use with care* if model stop condition is
different. For example, assume that the model is expected to be trained with
one epoch of training data, and the training `input_fn` is configured to throw
`OutOfRangeError` after going through one epoch, which stops the
`Estimator.train`. For a three-training-worker distributed configuration, each
training worker is likely to go through the whole epoch independently. So, the
model will be trained with three epochs of training data instead of one epoch.
Example of local (non-distributed) training:
```python
# Set up feature columns.
categorical_feature_a = categorical_column_with_hash_bucket(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
... # other feature columns
estimator = DNNClassifier(
feature_columns=[categorical_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
# Or set up the model directory
# estimator = DNNClassifier(
# config=tf.estimator.RunConfig(
# model_dir='/my_model', save_summary_steps=100),
# feature_columns=[categorical_feature_a_emb, ...],
# hidden_units=[1024, 512, 256])
# Input pipeline for train and evaluate.
def train_input_fn():  # returns x, y
# please shuffle the data.
pass
def eval_input_fn():  # returns x, y
pass
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Example of distributed training:
Regarding the example of distributed training, the code above can be used
without a change (Please do make sure that the `RunConfig.model_dir` for all
workers is set to the same directory, i.e., a shared file system all workers
can read and write). The only extra work to do is setting the environment
variable `TF_CONFIG` properly for each worker correspondingly.
Also see: https://www.tensorflow.org/deploy/distributed
Setting environment variable depends on the platform. For example, on Linux,
it can be done as follows (`$` is the shell prompt):
```
$ TF_CONFIG='<replace_with_real_content>' python train_model.py
```
For the content in `TF_CONFIG`, assume that the training cluster spec looks
like:
```
cluster = {"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]}
```
Example of `TF_CONFIG` for chief training worker (must have one and only one):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "chief", "index": 0}
}'
```
Note that the chief worker also does the model training job, similar to other
non-chief training workers (see next paragraph). In addition to the model
training, it manages some extra work, e.g., checkpoint saving and restoring,
writing summaries, etc.
Example of `TF_CONFIG` for non-chief training worker (optional, could be
multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 0}
}'
```
where the `task.index` should be set as 0, 1, 2, in this example, respectively
for non-chief training workers.
Example of `TF_CONFIG` for parameter server, aka ps (could be multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "ps", "index": 0}
}'
```
where the `task.index` should be set as 0 and 1, in this example, respectively
for parameter servers.
Example of `TF_CONFIG` for evaluator task. Evaluator is a special task that is
not part of the training cluster. There could be only one. It is used for
model evaluation.
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "evaluator", "index": 0}
}'
```
Args:
estimator: An `Estimator` instance to train and evaluate.
train_spec: A `TrainSpec` instance to specify the training specification.
eval_spec: A `EvalSpec` instance to specify the evaluation and export
specification.
Raises:
ValueError: if environment variable `TF_CONFIG` is incorrectly set.
"""
executor = _TrainingExecutor(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
config = estimator.config
if (config.task_type == run_config_lib.TaskType.EVALUATOR and
config.task_id > 0):
raise ValueError(
'For distributed training, there can only be one `evaluator` task '
'(with task id 0). Given task id {}'.format(config.task_id))
executor.run()
class _StopAtSecsHook(session_run_hook.SessionRunHook):
"""Stops given secs after begin is called."""
def __init__(self, stop_after_secs):
self._stop_after_secs = stop_after_secs
self._start_time = None
def begin(self):
self._start_time = time.time()
def after_run(self, run_context, run_values):
del run_values
if time.time() - self._start_time >= self._stop_after_secs:
run_context.request_stop()
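# A minimal usage sketch (hypothetical direct use): the hook simply requests
# a stop once `stop_after_secs` have elapsed after `begin`, e.g.:
#
#     estimator.train(input_fn, hooks=[_StopAtSecsHook(stop_after_secs=300)])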
class _TrainingExecutor(object):
"""The executor to run `Estimator` training and evaluation.
This implementation supports both distributed and non-distributed (aka local)
training and evaluation based on the setting in `tf.estimator.RunConfig`.
"""
def __init__(self,
estimator,
train_spec,
eval_spec,
train_hooks=None,
continuous_eval_listener=None):
if not isinstance(estimator, estimator_lib.Estimator):
raise TypeError('`estimator` must have type `tf.estimator.Estimator`.')
self._estimator = estimator
if not isinstance(train_spec, TrainSpec):
raise TypeError('`train_spec` must have type `tf.estimator.TrainSpec`.')
self._train_spec = train_spec
if not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must have type `tf.estimator.EvalSpec`.')
self._eval_spec = eval_spec
self._train_hooks = _validate_hooks(train_hooks)
if (continuous_eval_listener and
not isinstance(continuous_eval_listener, _ContinuousEvalListener)):
raise TypeError('`continuous_eval_listener` must have type '
'`_ContinuousEvalListener`.')
self._continuous_eval_listener = (
continuous_eval_listener or _ContinuousEvalListener())
@property
def estimator(self):
return self._estimator
def run(self):
"""Executes the run_foo for task type `foo`.
`_TrainingExecutor` predefines the procedure for task type 'chief',
'worker', 'ps', and 'evaluator'. For task type `foo`, the corresponding
procedure is `run_foo'. This `run` method invoke the procedure base on the
`RunConfig.task_type`.
Raises:
ValueError: if the estimator.config is mis-configured.
"""
config = self._estimator.config
if (not config.cluster_spec and
config.task_type != run_config_lib.TaskType.EVALUATOR):
logging.info('Running training and evaluation locally (non-distributed).')
self.run_local()
return
# Distributed case.
if not config.task_type:
# TODO(xiejw): Improve the error message about how to set the TF_CONFIG
# correctly.
      raise ValueError(
          '`estimator.config` must have task_type set. This usually means '
          'the TF_CONFIG environment variable is not set correctly.')
if config.task_type == 'local':
raise ValueError(
'`task.type` in TF_CONFIG cannot be `local`. Leaving `cluster` and '
'`task` properties in TF_CONFIG absent triggers train and evaluate '
'`Estimator` locally (non-distributed).')
# For task type foo, call executor.run_foo.
available_tasks = [
x for x in dir(self)
if x.startswith('run_') and x != 'run_local' and
callable(getattr(self, x))
]
task_to_run = 'run_' + config.task_type
if task_to_run not in available_tasks:
raise ValueError(
'Task type {} is not supported. Supported task types are {}'.format(
config.task_type, [x[len('run_'):] for x in available_tasks]))
getattr(self, task_to_run)()
def run_chief(self):
"""Runs task chief."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_worker(self):
"""Runs task (training) worker."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_master(self):
"""Runs task master."""
class NewCheckpointListener(
basic_session_run_hooks.CheckpointSaverListener):
def __init__(self, evaluator, eval_throttle_secs):
self._evaluator = evaluator
self._eval_throttle_secs = eval_throttle_secs
def begin(self):
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=self._eval_throttle_secs)
def after_save(self, session, global_step_value):
del session # unused; required by signature.
if self._timer.should_trigger_for_step(global_step_value):
self._timer.update_last_triggered_step(global_step_value)
self._evaluator.evaluate_and_export()
else:
logging.info('Skip the current checkpoint eval due to throttle secs '
'({} secs).'.format(self._eval_throttle_secs))
    # Final export signal: For any eval result with global_step >= train
    # max_steps, the evaluator will send the final export signal. There is a
    # small chance that the Estimator.train stopping logic sees a different
    # global_step value (due to a global-step race condition and the fact that
    # the saver sees a larger value for checkpoint saving), which does not end
    # the training. When the training ends, a new checkpoint is generated,
    # which triggers the listener again, so the final export could be
    # triggered twice.
    #
    # But here, throttle_secs will skip the next intermediate checkpoint, so
    # the chance of a double final export is very small.
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
# When the underlying `Estimator` object saves a new checkpoint, we would
# like this callback to be called so that evaluation and export can trigger.
saving_listeners = [
NewCheckpointListener(evaluator, self._eval_spec.throttle_secs)
]
self._start_distributed_training(saving_listeners=saving_listeners)
if not evaluator.is_final_export_triggered:
logging.info('Training has already ended. But the last eval is skipped '
'due to eval throttle_secs. Now evaluating the final '
'checkpoint.')
evaluator.evaluate_and_export()
def run_evaluator(self):
"""Runs task evaluator."""
# TODO(xiejw): To allow execution framework to add continuous eval listener.
return self._start_continuous_evaluation()
def run_ps(self):
"""Runs task parameter server (in training cluster spec)."""
config = self._estimator.config
server = self._start_std_server(config)
server.join()
def run_local(self):
"""Runs training and evaluation locally (non-distributed)."""
def _should_stop_local_train(global_step):
if self._train_spec.max_steps is None:
return False
if global_step >= self._train_spec.max_steps:
return True
return False
    if self._eval_spec.throttle_secs <= 0:
      raise ValueError('eval_spec.throttle_secs should be positive, given: {}. '
                       'It is used to determine how long each training '
                       'iteration should run when training and evaluating '
                       'locally.'.format(self._eval_spec.throttle_secs))
stop_hook = _StopAtSecsHook(self._eval_spec.throttle_secs)
train_hooks = (
list(self._train_spec.hooks) + [stop_hook] + list(self._train_hooks))
    logging.info('Starting the train and evaluate loop. Evaluation will '
                 'happen after {} secs (eval_spec.throttle_secs) or when '
                 'training finishes.'.format(self._eval_spec.throttle_secs))
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
while True:
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=train_hooks)
      # Final export signal: For any eval result with global_step >= train
      # max_steps, the evaluator will send the final export signal. The
      # _should_stop_local_train check will then end the while-True loop, as
      # the stopping condition is satisfied (both checks use the same
      # global_step value, i.e., there is no race condition).
eval_result = evaluator.evaluate_and_export()
if eval_result.status != _EvalStatus.EVALUATED:
# This is unexpected; should never happen.
# Training should always end with a new checkpoint.
raise RuntimeError('There was no new checkpoint after the training. '
'Eval status: {}'.format(eval_result.status))
if _should_stop_local_train(
eval_result.metrics[ops.GraphKeys.GLOBAL_STEP]):
break
def _start_std_server(self, config):
"""Creates, starts, and returns a server_lib.Server."""
if (not config.cluster_spec or not config.task_type or
config.task_id is None):
raise RuntimeError('Could not start server; be sure to specify '
'cluster_spec, task_type, and task in '
'RunConfig or set the TF_CONFIG environment variable.')
if not config.master:
jobs = config.cluster_spec.jobs
if (len(jobs) == 1 and
len(config.cluster_spec.job_tasks(jobs[0])) == 1 and
config.task_type in _TRAINER_JOBS):
        # For distributed training, config.master is empty if and only if the
        # cluster spec contains a single node. In this case, we should not
        # start the server.
        logging.info('Skip starting TensorFlow server as there is only one '
                     'node in the cluster.')
return
else:
raise RuntimeError(
'Could not start server; be sure to specify master in '
'RunConfig or set the TF_CONFIG environment variable.')
    logging.info('Starting TensorFlow server.')
if config.session_config is None:
session_config = config_pb2.ConfigProto(log_device_placement=False)
else:
session_config = config_pb2.ConfigProto(
log_device_placement=False,
gpu_options=config.session_config.gpu_options)
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=session_config,
start=False)
server.start()
return server
def _start_distributed_training(self, saving_listeners=None):
"""Calls `Estimator` train in a distributed setting."""
config = self._estimator.config
# Start in-process TensorFlow server if needed. It's important to start the
# server before we (optionally) sleep. Otherwise, the servers will wait to
# connect to each other before starting to train.
if not _is_google_env():
self._start_std_server(config)
    # Delay the worker start. For asynchronous training, this usually helps
    # the model converge faster. The chief starts training immediately, so the
    # worker with task id x (0-based) should wait
    # (x+1) * _DELAY_SECS_PER_WORKER secs.
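    # For example, if _DELAY_SECS_PER_WORKER were 5 and _MAX_DELAY_SECS were
    # 60 (illustrative values only; the constants are defined elsewhere in
    # this module), worker 2 would wait min(60, (2 + 1) * 5) = 15 secs.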
start_delay_secs = 0
if config.task_type == run_config_lib.TaskType.WORKER:
# TODO(xiejw): Replace the hard code logic (task_id + 1) with unique id in
# training cluster.
start_delay_secs = min(_MAX_DELAY_SECS,
(config.task_id + 1) * _DELAY_SECS_PER_WORKER)
if start_delay_secs > 0:
logging.info('Waiting %d secs before starting training.',
start_delay_secs)
time.sleep(start_delay_secs)
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=list(self._train_spec.hooks) + list(self._train_hooks),
saving_listeners=saving_listeners)
def _start_continuous_evaluation(self):
"""Repeatedly calls `Estimator` evaluate and export until training ends."""
start_delay_secs = self._eval_spec.start_delay_secs
if start_delay_secs:
logging.info('Waiting %f secs before starting eval.', start_delay_secs)
time.sleep(start_delay_secs)
latest_eval_result = None
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
should_early_stop = False
while not should_early_stop:
if (latest_eval_result and
latest_eval_result.status == _EvalStatus.EVALUATED):
global_step = latest_eval_result.metrics.get(ops.GraphKeys.GLOBAL_STEP)
if (global_step and self._train_spec.max_steps and
global_step >= self._train_spec.max_steps):
logging.info(
'Exiting evaluation, global_step=%s >= train max_steps=%s',
global_step, self._train_spec.max_steps)
return
latest_eval_result, should_early_stop = self._execute_evaluator_once(
evaluator, self._continuous_eval_listener,
self._eval_spec.throttle_secs)
def _execute_evaluator_once(self, evaluator, continuous_eval_listener,
throttle_secs):
"""Executes the `evaluator`."""
start = time.time()
eval_result = None
should_early_stop = False
if not continuous_eval_listener.before_eval():
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.before_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
    # Final export signal: For any eval result with global_step >= train
    # max_steps, the evaluator will send the final export signal. The next
    # iteration of the while loop will end the continuous evaluation, as the
    # stopping condition is satisfied (both checks use the same global_step
    # value, i.e., there is no race condition).
eval_result = evaluator.evaluate_and_export()
if not self._continuous_eval_listener.after_eval(eval_result):
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.after_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
# Throttle if necessary.
elapsed_time = time.time() - start
difference = throttle_secs - elapsed_time
if difference > 0:
logging.info('Waiting %f secs before starting next eval run.', difference)
time.sleep(difference)
return (eval_result, should_early_stop)
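  # For example (illustrative numbers): with throttle_secs=600 and an eval
  # pass that takes 250 secs, `difference` is 350 and the evaluator sleeps
  # 350 secs before the next run. If an eval pass takes longer than
  # throttle_secs, the next run starts immediately.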
class _Evaluator(object):
"""A helper class to call `Estimator.evaluate` and export model."""
def __init__(self, estimator, eval_spec, max_training_steps):
self._estimator = estimator
self._eval_spec = eval_spec
self._is_final_export_triggered = False
self._previous_ckpt_path = None
self._last_warning_time = 0
self._max_training_steps = max_training_steps
@property
def is_final_export_triggered(self):
return self._is_final_export_triggered
def evaluate_and_export(self):
"""Evaluate and (maybe) export the current model.
Returns:
An `EvalResult` instance.
Raises:
RuntimeError: for any unexpected internal error.
TypeError: if evaluation result has wrong type.
"""
latest_ckpt_path = self._estimator.latest_checkpoint()
if not latest_ckpt_path:
self._log_err_msg('Estimator is not trained yet. Will start an '
'evaluation when a checkpoint is ready.')
return _EvalResult(status=_EvalStatus.MISSING_CHECKPOINT)
if latest_ckpt_path == self._previous_ckpt_path:
        self._log_err_msg(
            'No new checkpoint ready for evaluation. Skipping the current '
            'evaluation pass as evaluation results are expected to be the '
            'same for the same checkpoint.')
return _EvalResult(status=_EvalStatus.NO_NEW_CHECKPOINT)
metrics = self._estimator.evaluate(
input_fn=self._eval_spec.input_fn,
steps=self._eval_spec.steps,
name=self._eval_spec.name,
checkpoint_path=latest_ckpt_path,
hooks=self._eval_spec.hooks)
# _EvalResult validates the metrics.
eval_result = _EvalResult(
status=_EvalStatus.EVALUATED,
metrics=metrics,
checkpoint_path=latest_ckpt_path)
is_the_final_export = (
eval_result.metrics[ops.GraphKeys.GLOBAL_STEP] >=
self._max_training_steps if self._max_training_steps else False)
self._export_eval_result(eval_result, is_the_final_export)
if is_the_final_export:
logging.debug('Calling exporter with the `is_the_final_export=True`.')
self._is_final_export_triggered = True
self._last_warning_time = 0
self._previous_ckpt_path = latest_ckpt_path
return eval_result
def _log_err_msg(self, message):
"""Prints warning `message` every 10 mins."""
current_time = time.time()
if current_time - self._last_warning_time > 600:
logging.warning(message)
self._last_warning_time = current_time
def _export_eval_result(self, eval_result, is_the_final_export):
"""Export `eval_result` according to exporters in `EvalSpec`."""
export_dir_base = os.path.join(
compat.as_str_any(self._estimator.model_dir),
compat.as_str_any('export'))
for exporter in self._eval_spec.exporters:
exporter.export(
estimator=self._estimator,
export_path=os.path.join(
compat.as_str_any(export_dir_base),
compat.as_str_any(exporter.name)),
checkpoint_path=eval_result.checkpoint_path,
eval_result=eval_result.metrics,
is_the_final_export=is_the_final_export)
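# A note on the export layout implied by `_export_eval_result`: each exporter
# writes under `<model_dir>/export/<exporter.name>/`. For example, a
# hypothetical exporter named "saved_model" with model_dir "/tmp/model" would
# export to:
#
#   /tmp/model/export/saved_model/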
class _EvalStatus(object):
"""The status of an evaluation event.
For local training and evaluation, the status can only be `EVALUATED` as
`Estimator.train` always generates a new checkpoint.
  For distributed training and evaluation, a separate evaluator keeps looking
  for new checkpoints, so multiple situations can occur:
- EVALUATED: A new checkpoint is found since last evaluation.
`Estimator.evaluate` will be invoked.
- MISSING_CHECKPOINT: No checkpoint can be found. Typically, this means
the trainer has not yet produced any checkpoint.
- NO_NEW_CHECKPOINT: No new checkpoint can be found since last evaluation.
Typically, this means the trainer has not yet produced any new checkpoint.
"""
EVALUATED = 'evaluated'
MISSING_CHECKPOINT = 'missing checkpoint'
NO_NEW_CHECKPOINT = 'no new checkpoint'
class _EvalResult(
collections.namedtuple('EvalResult',
['status', 'metrics', 'checkpoint_path'])):
"""_EvalResult holds the result of an evaluation event."""
def __new__(cls, status, metrics=None, checkpoint_path=None):
"""Creates a validated `_EvalResult`.
Args:
status: See `_EvalStatus`.
metrics: The evaluation results returned by `Estimator.evaluate`. Only set
if status is `EVALUATED`.
checkpoint_path: The corresponding checkpoint path for the `metrics`. Only
set if status is `EVALUATED`.
Returns:
A validated `_EvalResult` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
if status != _EvalStatus.EVALUATED:
if metrics:
raise ValueError(
'metrics must be `None` if status is not {}; got status {},'
' metrics {}'.format(_EvalStatus.EVALUATED, status, metrics))
if checkpoint_path:
raise ValueError(
'checkpoint must be `None` if status is not {}; got status {}, '
'checkpoint_path {}'.format(_EvalStatus.EVALUATED, status,
checkpoint_path))
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
# Now, evaluated case.
assert status == _EvalStatus.EVALUATED
# Validates metrics.
if not metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` should never return empty '
'metrics.')
if not isinstance(metrics, dict):
raise TypeError(
'`Estimator.evaluate` should return dict. Given {}.'.format(
type(metrics)))
if ops.GraphKeys.GLOBAL_STEP not in metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` result should have '
'`global_step` in result. Given {}'.format(metrics))
# Validates checkpoint_path.
if not checkpoint_path:
raise ValueError(
'Internal error: `checkpoint_path` should never be empty.')
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
class _ContinuousEvalListener(object):
"""Interface for listeners that take action before or after evaluation."""
def before_eval(self):
"""Called before evaluation.
Returns:
`False` if you want to skip the current evaluation and early stop the
continuous evaluation; `True` otherwise.
"""
return True
def after_eval(self, eval_result):
"""Called after the evaluation is executed.
Args:
eval_result: An `_EvalResult` instance.
Returns:
False if you want to early stop continuous evaluation; `True` otherwise.
"""
del eval_result
return True
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import six
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack.compute import migrate_server as \
migrate_server_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack.compute import admin_only_action_common
from nova.tests.unit.api.openstack import fakes
class MigrateServerTestsV21(admin_only_action_common.CommonTests):
migrate_server = migrate_server_v21
controller_name = 'MigrateServerController'
validation_error = exception.ValidationError
_api_version = '2.1'
disk_over_commit = False
force = None
async_ = False
host_name = None
def setUp(self):
super(MigrateServerTestsV21, self).setUp()
self.controller = getattr(self.migrate_server, self.controller_name)()
self.compute_api = self.controller.compute_api
self.stub_out('nova.api.openstack.compute.migrate_server.'
'MigrateServerController',
lambda *a, **kw: self.controller)
def _get_migration_body(self, **kwargs):
return {'os-migrateLive': self._get_params(**kwargs)}
def _get_params(self, **kwargs):
return {'host': kwargs.get('host'),
'block_migration': kwargs.get('block_migration') or False,
'disk_over_commit': self.disk_over_commit}
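    # For reference (illustrative, derived from the defaults above),
    # _get_migration_body(host='hostname') builds a request body like:
    #
    #   {'os-migrateLive': {'host': 'hostname',
    #                       'block_migration': False,
    #                       'disk_over_commit': False}}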
def test_migrate(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname', self.force, self.async_),
{}),
'_migrate': ((), {'host_name': self.host_name})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_none_hostname(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host=None)}
args_map = {'_migrate_live': ((False, self.disk_over_commit, None,
self.force, self.async_),
{}),
'_migrate': ((), {'host_name': None})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_with_non_existed_instance(self):
body_map = {'_migrate_live':
self._get_migration_body(host='hostname')}
self._test_actions_with_non_existed_instance(
['_migrate', '_migrate_live'], body_map=body_map)
def test_migrate_raise_conflict_on_invalid_state(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live':
self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname', self.force, self.async_),
{}),
'_migrate': ((), {'host_name': self.host_name})}
exception_arg = {'_migrate': 'migrate',
'_migrate_live': 'os-migrateLive'}
self._test_actions_raise_conflict_on_invalid_state(
['_migrate', '_migrate_live'], body_map=body_map,
args_map=args_map, method_translations=method_translations,
exception_args=exception_arg)
def test_actions_with_locked_instance(self):
method_translations = {'_migrate': 'resize',
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live':
self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
'hostname', self.force, self.async_),
{}),
'_migrate': ((), {'host_name': self.host_name})}
self._test_actions_with_locked_instance(
['_migrate', '_migrate_live'], body_map=body_map,
args_map=args_map, method_translations=method_translations)
def _test_migrate_exception(self, exc_info, expected_result):
instance = self._stub_instance_get()
with mock.patch.object(self.compute_api, 'resize',
side_effect=exc_info) as mock_resize:
self.assertRaises(expected_result,
self.controller._migrate,
self.req, instance['uuid'],
body={'migrate': None})
mock_resize.assert_called_once_with(
self.context, instance, host_name=self.host_name)
self.mock_get.assert_called_once_with(self.context, instance.uuid,
expected_attrs=None)
def test_migrate_too_many_instances(self):
exc_info = exception.TooManyInstances(overs='', req='', used=0,
allowed=0)
self._test_migrate_exception(exc_info, webob.exc.HTTPForbidden)
def _test_migrate_live_succeeded(self, param):
instance = self._stub_instance_get()
live_migrate_method = self.controller._migrate_live
with mock.patch.object(self.compute_api,
'live_migrate') as mock_live_migrate:
live_migrate_method(self.req, instance.uuid,
body={'os-migrateLive': param})
self.assertEqual(202, live_migrate_method.wsgi_code)
mock_live_migrate.assert_called_once_with(
self.context, instance, False, self.disk_over_commit,
'hostname', self.force, self.async_)
self.mock_get.assert_called_once_with(self.context, instance.uuid,
expected_attrs=None)
def test_migrate_live_enabled(self):
param = self._get_params(host='hostname')
self._test_migrate_live_succeeded(param)
def test_migrate_live_enabled_with_string_param(self):
param = {'host': 'hostname',
'block_migration': "False",
'disk_over_commit': "False"}
self._test_migrate_live_succeeded(param)
def test_migrate_live_without_host(self):
body = self._get_migration_body()
del body['os-migrateLive']['host']
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_without_block_migration(self):
body = self._get_migration_body()
del body['os-migrateLive']['block_migration']
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_without_disk_over_commit(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': False}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_with_invalid_block_migration(self):
body = self._get_migration_body(block_migration='foo')
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_with_invalid_disk_over_commit(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': False,
'disk_over_commit': "foo"}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def test_migrate_live_missing_dict_param(self):
body = self._get_migration_body(host='hostname')
del body['os-migrateLive']['host']
body['os-migrateLive']['dummy'] = 'hostname'
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_live_failed_with_exception(
self, fake_exc,
uuid=None,
expected_exc=webob.exc.HTTPBadRequest,
check_response=True):
instance = self._stub_instance_get(uuid=uuid)
body = self._get_migration_body(host='hostname')
with mock.patch.object(
self.compute_api, 'live_migrate',
side_effect=fake_exc) as mock_live_migrate:
ex = self.assertRaises(expected_exc,
self.controller._migrate_live,
self.req, instance.uuid, body=body)
if check_response:
self.assertIn(six.text_type(fake_exc), ex.explanation)
mock_live_migrate.assert_called_once_with(
self.context, instance, False, self.disk_over_commit,
'hostname', self.force, self.async_)
self.mock_get.assert_called_once_with(self.context, instance.uuid,
expected_attrs=None)
def test_migrate_live_compute_service_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.ComputeServiceUnavailable(host='host'))
def test_migrate_live_compute_service_not_found(self):
self._test_migrate_live_failed_with_exception(
exception.ComputeHostNotFound(host='host'))
def test_migrate_live_invalid_hypervisor_type(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidHypervisorType())
def test_migrate_live_invalid_cpu_info(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidCPUInfo(reason=""))
def test_migrate_live_unable_to_migrate_to_self(self):
uuid = uuidutils.generate_uuid()
self._test_migrate_live_failed_with_exception(
exception.UnableToMigrateToSelf(instance_id=uuid,
host='host'),
uuid=uuid)
def test_migrate_live_destination_hypervisor_too_old(self):
self._test_migrate_live_failed_with_exception(
exception.DestinationHypervisorTooOld())
def test_migrate_live_no_valid_host(self):
self._test_migrate_live_failed_with_exception(
exception.NoValidHost(reason=''))
def test_migrate_live_invalid_local_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidLocalStorage(path='', reason=''))
def test_migrate_live_invalid_shared_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidSharedStorage(path='', reason=''))
def test_migrate_live_hypervisor_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.HypervisorUnavailable(host=""))
def test_migrate_live_instance_not_active(self):
self._test_migrate_live_failed_with_exception(
exception.InstanceInvalidState(
instance_uuid='', state='', attr='', method=''),
expected_exc=webob.exc.HTTPConflict,
check_response=False)
def test_migrate_live_pre_check_error(self):
self._test_migrate_live_failed_with_exception(
exception.MigrationPreCheckError(reason=''))
def test_migrate_live_migration_with_unexpected_error(self):
self._test_migrate_live_failed_with_exception(
exception.MigrationError(reason=''),
expected_exc=webob.exc.HTTPInternalServerError,
check_response=False)
class MigrateServerTestsV225(MigrateServerTestsV21):
# We don't have disk_over_commit in v2.25
disk_over_commit = None
def setUp(self):
super(MigrateServerTestsV225, self).setUp()
self.req.api_version_request = api_version_request.APIVersionRequest(
'2.25')
def _get_params(self, **kwargs):
return {'host': kwargs.get('host'),
'block_migration': kwargs.get('block_migration') or False}
def test_migrate_live_enabled_with_string_param(self):
param = {'host': 'hostname',
'block_migration': "False"}
self._test_migrate_live_succeeded(param)
def test_migrate_live_without_disk_over_commit(self):
pass
def test_migrate_live_with_invalid_disk_over_commit(self):
pass
def test_live_migrate_block_migration_auto(self):
method_translations = {'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': {'os-migrateLive': {'host': 'hostname',
'block_migration': 'auto'}}}
args_map = {'_migrate_live': ((None, None, 'hostname', self.force,
self.async_), {})}
self._test_actions(['_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_live_with_disk_over_commit_raise(self):
body = {'os-migrateLive':
{'host': 'hostname',
'block_migration': 'auto',
'disk_over_commit': False}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
class MigrateServerTestsV230(MigrateServerTestsV225):
force = False
def setUp(self):
super(MigrateServerTestsV230, self).setUp()
self.req.api_version_request = api_version_request.APIVersionRequest(
'2.30')
def _test_live_migrate(self, force=False):
        if force is True:
            literal_force = 'true'
        else:
            literal_force = 'false'
        method_translations = {'_migrate_live': 'live_migrate'}
        body_map = {'_migrate_live': {'os-migrateLive':
                                      {'host': 'hostname',
                                       'block_migration': 'auto',
                                       'force': literal_force}}}
args_map = {'_migrate_live': ((None, None, 'hostname', force,
self.async_), {})}
self._test_actions(['_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_live_migrate(self):
self._test_live_migrate()
def test_live_migrate_with_forced_host(self):
self._test_live_migrate(force=True)
def test_forced_live_migrate_with_no_provided_host(self):
body = {'os-migrateLive':
{'force': 'true'}}
self.assertRaises(self.validation_error,
self.controller._migrate_live,
self.req, fakes.FAKE_UUID, body=body)
class MigrateServerTestsV234(MigrateServerTestsV230):
async_ = True
def setUp(self):
super(MigrateServerTestsV234, self).setUp()
self.req.api_version_request = api_version_request.APIVersionRequest(
'2.34')
    # NOTE(tdurakov): for REST API version 2.34 and above, the tests below are
    # not valid, as live migrations are performed in the background.
def test_migrate_live_compute_service_unavailable(self):
pass
def test_migrate_live_compute_service_not_found(self):
pass
def test_migrate_live_invalid_hypervisor_type(self):
pass
def test_migrate_live_invalid_cpu_info(self):
pass
def test_migrate_live_unable_to_migrate_to_self(self):
pass
def test_migrate_live_destination_hypervisor_too_old(self):
pass
def test_migrate_live_no_valid_host(self):
pass
def test_migrate_live_invalid_local_storage(self):
pass
def test_migrate_live_invalid_shared_storage(self):
pass
def test_migrate_live_hypervisor_unavailable(self):
pass
def test_migrate_live_instance_not_active(self):
pass
def test_migrate_live_pre_check_error(self):
pass
def test_migrate_live_migration_precheck_client_exception(self):
pass
def test_migrate_live_migration_with_unexpected_error(self):
pass
def test_migrate_live_migration_with_old_nova_not_supported(self):
pass
def test_migrate_live_compute_host_not_found(self):
body = {'os-migrateLive':
{'host': 'hostname', 'block_migration': 'auto'}}
exc = exception.ComputeHostNotFound(
reason="Compute host %(host)s could not be found.",
host='hostname')
instance = self._stub_instance_get()
with mock.patch.object(self.compute_api, 'live_migrate',
side_effect=exc) as mock_live_migrate:
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._migrate_live,
self.req, instance.uuid, body=body)
mock_live_migrate.assert_called_once_with(
self.context, instance, None, self.disk_over_commit,
'hostname', self.force, self.async_)
self.mock_get.assert_called_once_with(self.context, instance.uuid,
expected_attrs=None)
def test_migrate_live_unexpected_error(self):
body = {'os-migrateLive':
{'host': 'hostname', 'block_migration': 'auto'}}
exc = exception.InvalidHypervisorType(
reason="The supplied hypervisor type of is invalid.")
instance = self._stub_instance_get()
with mock.patch.object(self.compute_api, 'live_migrate',
side_effect=exc) as mock_live_migrate:
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller._migrate_live,
self.req, instance.uuid, body=body)
mock_live_migrate.assert_called_once_with(
self.context, instance, None, self.disk_over_commit,
'hostname', self.force, self.async_)
self.mock_get.assert_called_once_with(self.context, instance.uuid,
expected_attrs=None)
class MigrateServerTestsV256(MigrateServerTestsV234):
host_name = 'fake-host'
method_translations = {'_migrate': 'resize'}
body_map = {'_migrate': {'migrate': {'host': host_name}}}
args_map = {'_migrate': ((), {'host_name': host_name})}
def setUp(self):
super(MigrateServerTestsV256, self).setUp()
self.req.api_version_request = api_version_request.APIVersionRequest(
'2.56')
def _test_migrate_validation_error(self, body):
self.assertRaises(self.validation_error,
self.controller._migrate,
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
@mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
def _test(mock_resize, mock_get):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
self.req, instance['uuid'],
body={'migrate': {'host': self.host_name}})
_test()
def test_migrate(self):
self._test_actions(['_migrate'], body_map=self.body_map,
method_translations=self.method_translations,
args_map=self.args_map)
def test_migrate_without_host(self):
# The request body is: '{"migrate": null}'
body_map = {'_migrate': {'migrate': None}}
args_map = {'_migrate': ((), {'host_name': None})}
self._test_actions(['_migrate'], body_map=body_map,
method_translations=self.method_translations,
args_map=args_map)
def test_migrate_none_hostname(self):
# The request body is: '{"migrate": {"host": null}}'
body_map = {'_migrate': {'migrate': {'host': None}}}
args_map = {'_migrate': ((), {'host_name': None})}
self._test_actions(['_migrate'], body_map=body_map,
method_translations=self.method_translations,
args_map=args_map)
def test_migrate_with_non_existed_instance(self):
self._test_actions_with_non_existed_instance(
['_migrate'], body_map=self.body_map)
def test_migrate_raise_conflict_on_invalid_state(self):
exception_arg = {'_migrate': 'migrate'}
self._test_actions_raise_conflict_on_invalid_state(
['_migrate'], body_map=self.body_map,
args_map=self.args_map,
method_translations=self.method_translations,
exception_args=exception_arg)
def test_actions_with_locked_instance(self):
self._test_actions_with_locked_instance(
['_migrate'], body_map=self.body_map,
args_map=self.args_map,
method_translations=self.method_translations)
def test_migrate_without_migrate_object(self):
self._test_migrate_validation_error({})
def test_migrate_invalid_migrate_object(self):
self._test_migrate_validation_error({'migrate': 'fake-host'})
def test_migrate_with_additional_property(self):
self._test_migrate_validation_error(
{'migrate': {'host': self.host_name,
'additional': 'foo'}})
def test_migrate_with_host_length_more_than_255(self):
self._test_migrate_validation_error(
{'migrate': {'host': 'a' * 256}})
def test_migrate_nonexistent_host(self):
exc_info = exception.ComputeHostNotFound(host='nonexistent_host')
self._test_migrate_exception(exc_info, webob.exc.HTTPBadRequest)
def test_migrate_no_request_spec(self):
exc_info = exception.CannotMigrateWithTargetHost()
self._test_migrate_exception(exc_info, webob.exc.HTTPConflict)
def test_migrate_to_same_host(self):
exc_info = exception.CannotMigrateToSameHost()
self._test_migrate_exception(exc_info, webob.exc.HTTPBadRequest)
class MigrateServerPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(MigrateServerPolicyEnforcementV21, self).setUp()
self.controller = migrate_server_v21.MigrateServerController()
self.req = fakes.HTTPRequest.blank('')
def test_migrate_policy_failed(self):
rule_name = "os_compute_api:os-migrate-server:migrate"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._migrate, self.req,
fakes.FAKE_UUID,
body={'migrate': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_migrate_live_policy_failed(self):
rule_name = "os_compute_api:os-migrate-server:migrate_live"
self.policy.set_rules({rule_name: "project:non_fake"})
body_args = {'os-migrateLive': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._migrate_live, self.req,
fakes.FAKE_UUID,
body=body_args)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
# Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline for making predictions on BLS detections with an AstroNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as stdlogging
import os.path
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
from beam import utils
from beam.light_curve import light_curve_fns
from box_least_squares import box_least_squares_pb2 as bls_pb2
from experimental.beam.transit_search import bls_fns
from experimental.beam.transit_search import prediction_fns
flags.DEFINE_string("input_dir", None, "Output directory.")
flags.DEFINE_integer("detections_per_target", None,
"Number of detections made per target.")
flags.DEFINE_string("astronet_model", None, "Name of the AstroNet model class.")
flags.DEFINE_string(
"astronet_config_name", None,
"Name of the AstroNet configuration. Exactly one of "
"--astronet_config_name or --astronet_config_json is required.")
flags.DEFINE_string(
"astronet_config_json", None,
"JSON string or JSON file containing the AstroNet configuration. Exactly "
"one of --astronet_config_name or --astronet_config_json is required.")
flags.DEFINE_string("astronet_model_dir", None,
"Directory containing an AstroNet checkpoint.")
flags.DEFINE_string("kepler_data_dir", None,
"Base folder containing kepler data.")
flags.DEFINE_string("output_dir", None, "Output directory.")
flags.DEFINE_string("injected_group", None,
"Optional. One of 'inj1', 'inj2', 'inj3'.")
flags.DEFINE_float("upward_outlier_sigma_cut", None,
"Outlier cut before making predictions.")
flags.DEFINE_float(
"complete_transit_fraction", 0.5,
"Fraction of expected in-transit points to count as complete.")
FLAGS = flags.FLAGS
# pylint: disable=expression-not-assigned
def _pair_with_kepid(inputs):
return (inputs["kepler_id"], inputs)
class PairLightCurveAndDetectionsDoFn(beam.DoFn):
"""Pairs light curves and detections."""
def process(self, inputs):
kepid, (light_curve, detections) = inputs
assert len(light_curve) == 1
light_curve = light_curve[0]
assert kepid == light_curve["kepler_id"]
for detection in detections:
assert kepid == detection["kepler_id"]
detection.update(light_curve)
yield detection
class PrepareInputs(beam.DoFn):
def __init__(self, planet_num):
self.planet_num = planet_num
def process(self, inputs):
kepler_id, top_results = inputs
yield kepler_id, {
"kepler_id": kepler_id,
"top_results": top_results,
"planet_num": self.planet_num,
"tce_id": "%s_%s" % (kepler_id, self.planet_num),
}
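# A hedged illustration of the element shape: for planet_num=0 and a
# hypothetical kepler_id of 11442793, PrepareInputs emits
#
#   (11442793, {"kepler_id": 11442793,
#               "top_results": <TopResults proto>,
#               "planet_num": 0,
#               "tce_id": "11442793_0"})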
def _write_examples(pcollection):
"""Convenience function for writing serialized TensorFlow examples."""
return utils.write_to_tfrecord(
pcollection,
output_dir=FLAGS.output_dir,
output_name="examples",
value_name="serialized_example")
def main(unused_argv):
stdlogging.getLogger().setLevel(stdlogging.INFO)
def pipeline(root):
"""Beam pipeline for generating light curve periodograms."""
# Initialize DoFns.
read_light_curve = light_curve_fns.ReadLightCurveDoFn(
FLAGS.kepler_data_dir, injected_group=FLAGS.injected_group)
get_top_result = bls_fns.GetTopResultDoFn("median_flattened")
count_transits = light_curve_fns.CountTransitsDoFn(
FLAGS.complete_transit_fraction)
process_light_curve = light_curve_fns.ProcessLightCurveDoFn(
gap_width=0.75,
normalize_method="spline",
normalize_args={
"bkspace_min": 0.5,
"bkspace_max": 20,
"bkspace_num": 20,
"penalty_coeff": 1.0,
},
upward_outlier_sigma_cut=FLAGS.upward_outlier_sigma_cut,
output_name="light_curve_for_predictions")
make_predictions = prediction_fns.MakePredictionsDoFn(
FLAGS.astronet_model, FLAGS.astronet_config_name,
FLAGS.astronet_config_json, FLAGS.astronet_model_dir)
to_csv = prediction_fns.ToCsvDoFn()
top_results = []
for planet_num in range(FLAGS.detections_per_target):
read_stage_name = "read_top_results-%d" % planet_num
prepare_inputs_stage_name = "prepare_inputs-%d" % planet_num
top_results.append(
root
| read_stage_name >> beam.io.tfrecordio.ReadFromTFRecord(
os.path.join(FLAGS.input_dir, "top-results-%d*" % planet_num),
coder=beam.coders.ProtoCoder(bls_pb2.TopResults))
| prepare_inputs_stage_name >> beam.ParDo(PrepareInputs(planet_num)))
# Output: PCollection({
# "kepler_id",
# "raw_light_curve",
# "light_curve_for_predictions",
# })
light_curves = (
# TODO(shallue): replace top_results[0] with getting all keys and
# deduping and removing the reshuffle.
top_results[0]
| "reshuffle_top_results" >> beam.Reshuffle()
| "get_kepids" >> beam.Map(lambda kv: {"kepler_id": kv[0]})
| "read_light_curves" >> beam.ParDo(read_light_curve)
| "process_light_curves" >> beam.ParDo(process_light_curve)
| "pair_lc_with_kepid" >> beam.Map(_pair_with_kepid))
all_detections = top_results | "flatten_top_results" >> beam.Flatten()
detections_and_light_curves = (
[light_curves, all_detections]
| "group_by_kepid" >> beam.CoGroupByKey()
| "pair_light_curves_and_detections" >> beam.ParDo(
PairLightCurveAndDetectionsDoFn()))
predictions = (
detections_and_light_curves
| "get_top_result" >> beam.ParDo(get_top_result)
| "count_transits" >> beam.ParDo(count_transits)
| "make_predictions" >> beam.ParDo(make_predictions))
# Write predictions
(predictions | "to_csv" >> beam.ParDo(to_csv)
| "reshuffle_csv_lines" >> beam.Reshuffle()
| "write_csv" >> beam.io.WriteToText(
os.path.join(FLAGS.output_dir, "predictions.csv"),
num_shards=1,
header=to_csv.csv_header(),
shard_name_template=""))
# Write local and global views.
_write_examples(predictions)
  # `pipeline` is a plain function that builds the graph; run it under a Beam
  # pipeline context (runner and options would normally come from flags).
  with beam.Pipeline() as root:
    pipeline(root)
  logging.info("Job completed successfully")
if __name__ == "__main__":
  app.run(main)
|
|
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_path_store_test` --- lib.path_store unit tests
==========================================================================
"""
# Stdlib
import math
from unittest.mock import patch, MagicMock
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.errors import SCIONPathPolicyViolated
from lib.packet.pcb import PathSegment
from lib.path_store import (
PathPolicy,
PathStore,
PathStoreRecord
)
from test.testcommon import create_mock, create_mock_full
class TestPathPolicyCheckFilters(object):
"""
Unit tests for lib.path_store.PathPolicy.check_filters
"""
def _setup(self, unwanted=None, reasons=None, remote_ia=None):
inst = PathPolicy()
inst._check_unwanted_ases = create_mock()
inst._check_unwanted_ases.return_value = unwanted
inst._check_property_ranges = create_mock()
inst._check_property_ranges.return_value = reasons
inst._check_remote_ifid = create_mock()
inst._check_remote_ifid.return_value = remote_ia
pcb = create_mock(["short_desc"], class_=PathSegment)
return inst, pcb
def test_basic(self):
inst, pcb = self._setup()
# Call
inst.check_filters(pcb)
def test_unwanted_ases(self):
inst, pcb = self._setup("unwanted AS")
# Call
ntools.assert_raises(SCIONPathPolicyViolated, inst.check_filters, pcb)
def test_property_ranges(self):
inst, pcb = self._setup(reasons="reasons")
ntools.assert_raises(SCIONPathPolicyViolated, inst.check_filters, pcb)
class TestPathPolicyCheckPropertyRanges(object):
"""
Unit tests for lib.path_store.PathPolicy._check_property_ranges
"""
def _setup(self, max_bw=20):
inst = PathPolicy()
inst.property_ranges = {
'PeerLinks': [0, 1], 'HopsLength': [0, 1], 'DelayTime': [0, 1],
'GuaranteedBandwidth': [0, max_bw],
'AvailableBandwidth': [0, max_bw], 'TotalBandwidth': [0, max_bw]
}
pcb = create_mock(["get_n_peer_links", "get_n_hops", "get_timestamp"])
return inst, pcb
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
def test_success(self, get_time):
inst, pcb = self._setup()
pcb.get_n_peer_links.return_value = 0.5
pcb.get_n_hops.return_value = 0.5
pcb.get_timestamp.return_value = 0.5
# Call
ntools.eq_(inst._check_property_ranges(pcb), [])
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
def test_failure(self, get_time):
inst, pcb = self._setup(max_bw=9)
pcb.get_n_peer_links.return_value = 2
pcb.get_n_hops.return_value = -1
pcb.get_timestamp.return_value = -0.1
# Call
ntools.eq_(len(inst._check_property_ranges(pcb)), 6)
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
def test_no_checks(self, get_time):
inst, pcb = self._setup(max_bw=9)
for key in inst.property_ranges:
inst.property_ranges[key] = []
pcb.get_n_peer_links.return_value = 2
pcb.get_n_hops.return_value = -1
pcb.get_timestamp.return_value = -0.1
# Call
ntools.eq_(inst._check_property_ranges(pcb), [])
class TestPathPolicyParseDict(object):
"""
Unit tests for lib.path_store.PathPolicy.parse_dict
"""
def test_basic(self):
dict_ = {}
dict_['BestSetSize'] = "best_set_size"
dict_['CandidatesSetSize'] = "candidates_set_size"
dict_['HistoryLimit'] = "history_limit"
dict_['UpdateAfterNumber'] = "update_after_number"
dict_['UpdateAfterTime'] = "update_after_time"
dict_['UnwantedASes'] = "1-11,2-12"
dict_['PropertyRanges'] = {'key1': "1-11", 'key2': "2-12"}
dict_['PropertyWeights'] = "property_weights"
pth_pol2 = PathPolicy()
pth_pol2.parse_dict(dict_)
ntools.eq_(pth_pol2.best_set_size, "best_set_size")
ntools.eq_(pth_pol2.candidates_set_size, "candidates_set_size")
ntools.eq_(pth_pol2.history_limit, "history_limit")
ntools.eq_(pth_pol2.update_after_number, "update_after_number")
ntools.eq_(pth_pol2.update_after_time, "update_after_time")
ntools.eq_(pth_pol2.property_ranges, {'key1': (1, 11), 'key2': (2, 12)})
ntools.eq_(pth_pol2.property_weights, "property_weights")
class TestPathStoreRecordInit(object):
"""
Unit tests for lib.path_store.PathStoreRecord.__init__
"""
@patch("lib.path_store.PathStoreRecord.update", autospec=True)
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
def test(self, get_time, update):
pcb = create_mock(['get_hops_hash', 'get_n_hops', 'get_n_peer_links'],
class_=PathSegment)
get_time.return_value = PathStoreRecord.DEFAULT_OFFSET + 1
# Call
inst = PathStoreRecord(pcb)
# Tests
ntools.eq_(inst.id, pcb.get_hops_hash.return_value)
ntools.eq_(inst.peer_links, pcb.get_n_peer_links.return_value)
ntools.eq_(inst.hops_length, pcb.get_n_hops.return_value)
ntools.eq_(inst.fidelity, 0)
ntools.eq_(inst.disjointness, 0)
ntools.eq_(inst.last_sent_time, 1)
ntools.eq_(inst.guaranteed_bandwidth, 0)
ntools.eq_(inst.available_bandwidth, 0)
ntools.eq_(inst.total_bandwidth, 0)
update.assert_called_once_with(inst, pcb)
class TestPathStoreRecordUpdate(object):
"""
Unit tests for lib.path_store.PathStoreRecord.update
"""
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
@patch("lib.path_store.PathStoreRecord.__init__", autospec=True,
return_value=None)
def test(self, init, get_time):
inst = PathStoreRecord("pcb")
get_time.return_value = 100
pcb = create_mock(["copy", "get_hops_hash", "get_timestamp",
"get_expiration_time"])
inst.id = pcb.get_hops_hash.return_value
pcb.get_timestamp.return_value = 95
# Call
inst.update(pcb)
# Tests
pcb.copy.assert_called_once_with()
ntools.eq_(inst.delay_time, 5)
ntools.eq_(inst.last_seen_time, 100)
ntools.eq_(inst.expiration_time, pcb.get_expiration_time.return_value)
class TestPathStoreRecordUpdateFidelity(object):
"""
Unit tests for lib.path_store.PathStoreRecord.update_fidelity
"""
@patch("lib.path_store.SCIONTime.get_time", new_callable=create_mock)
@patch("lib.path_store.PathStoreRecord.__init__", autospec=True,
return_value=None)
def test_basic(self, init, time_):
path_policy = PathPolicy()
path_policy.property_weights['PeerLinks'] = 10
path_policy.property_weights['HopsLength'] = 1
path_policy.property_weights['Disjointness'] = 2
path_policy.property_weights['LastSentTime'] = 3
path_policy.property_weights['LastSeenTime'] = 4
path_policy.property_weights['DelayTime'] = 5
path_policy.property_weights['ExpirationTime'] = 6
path_policy.property_weights['GuaranteedBandwidth'] = 7
path_policy.property_weights['AvailableBandwidth'] = 8
path_policy.property_weights['TotalBandwidth'] = 9
pth_str_rec = PathStoreRecord("pcb")
pth_str_rec.peer_links = 10 ** 5
pth_str_rec.hops_length = (1 / (10 ** 4))
pth_str_rec.disjointness = 10 ** 3
pth_str_rec.last_sent_time = -99
pth_str_rec.last_seen_time = 10
pth_str_rec.delay_time = 1
pth_str_rec.expiration_time = 10 / 9
pth_str_rec.guaranteed_bandwidth = 10 ** -2
pth_str_rec.available_bandwidth = 10 ** -3
pth_str_rec.total_bandwidth = 10 ** -4
time_.return_value = 1
pth_str_rec.update_fidelity(path_policy)
ntools.assert_almost_equal(pth_str_rec.fidelity, 1012345.6789)
class TestPathStoreAddSegment(object):
"""
Unit tests for lib.path_store.PathStore.add_segment
"""
def _setup(self, filter_=True):
inst = PathStore("path_policy")
inst.path_policy = create_mock(["check_filters"])
if not filter_:
inst.path_policy.check_filters.side_effect = SCIONPathPolicyViolated()
pcb = create_mock(["get_hops_hash", "get_timestamp"],
class_=PathSegment)
return inst, pcb
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_filters(self, psi):
"""
Try to add a path that does not meet the filter requirements.
"""
inst, pcb = self._setup(filter_=False)
# Call
inst.add_segment(pcb)
# Tests
inst.path_policy.check_filters.assert_called_once_with(pcb)
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_already_in_store(self, init):
"""
Try to add a path that is already in the path store.
"""
inst, pcb = self._setup()
candidate = create_mock(['id', 'update'])
candidate.id = pcb.get_hops_hash.return_value
inst.candidates = [candidate]
# Call
inst.add_segment(pcb)
# Tests
candidate.update.assert_called_once_with(pcb)
@patch("lib.path_store.PathStoreRecord", autospec=True)
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_adding(self, psi, psr):
"""
Add a single path segment to the set of candidate paths.
"""
inst, pcb = self._setup()
inst.candidates = []
inst._trim_candidates = create_mock()
# Call
inst.add_segment(pcb)
# Tests
ntools.eq_(inst.candidates, [psr.return_value])
inst._trim_candidates.assert_called_once_with()
class TestPathStoreTrimCandidates(object):
"""
Unit tests for lib.path_store.PathStore._trim_candidates
"""
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_expire_paths(self, psi):
"""
Test trimming the size of the candidate set by removing an expired
segment.
"""
pth_str = PathStore("path_policy")
pth_str.path_policy = MagicMock(spec_set=['candidates_set_size'])
pth_str.path_policy.candidates_set_size = 0
pth_str.candidates = [0]
pth_str._remove_expired_segments = (
lambda: pth_str.candidates.pop())
pth_str._trim_candidates()
ntools.eq_(pth_str.candidates, [])
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_remove_low_fidelity_path(self, psi):
"""
Add a path, find that the candidate set size is too large, and remove
the lowest-fidelity path.
"""
pth_str = PathStore("path_policy")
pth_str.path_policy = MagicMock(spec_set=['candidates_set_size'])
pth_str.path_policy.candidates_set_size = 2
pth_str.candidates = [create_mock(['fidelity']) for i in range(3)]
pth_str.candidates[0].fidelity = 2
pth_str.candidates[1].fidelity = 0
pth_str.candidates[2].fidelity = 1
remainder = [pth_str.candidates[0], pth_str.candidates[2]]
pth_str._remove_expired_segments = create_mock()
pth_str._update_all_fidelity = create_mock()
pth_str._trim_candidates()
pth_str._remove_expired_segments.assert_called_once_with()
pth_str._update_all_fidelity.assert_called_once_with()
ntools.eq_(pth_str.candidates, remainder)
class TestPathStoreUpdateDisjointnessDB(object):
"""
Unit tests for lib.path_store._update_disjointness_db
"""
@patch("lib.path_store.SCIONTime.get_time", spec_set=[],
new_callable=MagicMock)
def test_basic(self, time_):
path_policy = MagicMock(spec_set=['history_limit'])
path_policy.history_limit = 3
pth_str = PathStore(path_policy)
pth_str.disjointness = {0: math.e, 1: math.e**2}
pth_str.last_dj_update = 22
time_.return_value = 23
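        # One second has elapsed since last_dj_update (23 - 22), so each
        # disjointness value should decay by a factor of e: e -> 1.0 and
        # e**2 -> e, as asserted below.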
pth_str._update_disjointness_db()
ntools.eq_(pth_str.last_dj_update, time_.return_value)
ntools.assert_almost_equal(pth_str.disjointness[0], 1.0)
ntools.assert_almost_equal(pth_str.disjointness[1], math.e)
class TestPathStoreUpdateAllDisjointness(object):
"""
Unit tests for lib.path_store._update_all_disjointness
"""
def test(self):
inst = PathStore(create_mock_full({'history_limit': 3}))
numCandidates = 5
pathLength = 5
inst.candidates = []
inst.disjointness = {}
for i in range(numCandidates):
id_ = i * (2 * pathLength + 1)
asms = []
for j in range(pathLength):
isdas = 9, id_ + j + 1
hof = create_mock_full({'egress_if': isdas[1] + pathLength})
pcbm = create_mock_full({'hof()': hof})
asms.append(create_mock_full({
"isd_as()": isdas, "pcbm()": pcbm}))
inst.disjointness[isdas[1]] = 1.0
inst.disjointness[hof.egress_if] = 1.0
pcb = create_mock_full({"iter_asms()": asms})
record = create_mock_full(
{'pcb': pcb, 'disjointness': 0, 'id': id_})
inst.disjointness[id_] = 1.0
inst.candidates.append(record)
inst._update_disjointness_db = create_mock()
inst._update_all_disjointness()
for i in range(numCandidates):
ntools.assert_almost_equal(inst.candidates[i].disjointness, 1.0)
class TestPathStoreUpdateAllDelayTime(object):
"""
Unit tests for lib.path_store._update_all_delay_time
"""
def test_basic(self):
path_policy = MagicMock(spec_set=['history_limit'])
path_policy.history_limit = 3
pth_str = PathStore(path_policy)
pth_str.candidates = [MagicMock(spec_set=['pcb', 'delay_time',
'last_seen_time'])
for i in range(5)]
for i in range(5):
pcb = MagicMock(spec_set=['get_timestamp'])
pcb.get_timestamp.return_value = 1
pth_str.candidates[i].pcb = pcb
pth_str.candidates[i].last_seen_time = 2 * i + 2
pth_str._update_all_delay_time()
for i in range(5):
pth_str.candidates[i].pcb.get_timestamp.assert_called_once_with()
ntools.assert_almost_equal(pth_str.candidates[i].delay_time,
((2 * i + 2) / 10))
class TestPathStoreUpdateAllFidelity(object):
"""
Unit tests for lib.path_store._update_all_fidelity
"""
def test_basic(self):
path_policy = MagicMock(spec_set=['history_limit'])
path_policy.history_limit = 3
pth_str = PathStore(path_policy)
pth_str._update_all_disjointness = MagicMock(spec_set=[])
pth_str._update_all_delay_time = MagicMock(spec_set=[])
pth_str.candidates = [MagicMock(spec_set=['update_fidelity'])
for i in range(5)]
pth_str._update_all_fidelity()
pth_str._update_all_disjointness.assert_called_once_with()
pth_str._update_all_delay_time.assert_called_once_with()
for i in range(5):
pth_str.candidates[i].update_fidelity.assert_called_once_with(
path_policy)
class TestPathStoreGetBestSegments(object):
"""
Unit tests for lib.path_store.PathStore.get_best_segments
"""
def _setup(self):
inst = PathStore("path_policy")
inst._remove_expired_segments = create_mock()
inst._update_all_fidelity = create_mock()
inst.candidates = []
for i, fidelity in enumerate([0, 5, 2, 6, 3]):
candidate = create_mock(["pcb", "fidelity", "sending"])
candidate.pcb = "pcb%d" % i
candidate.fidelity = fidelity
inst.candidates.append(candidate)
return inst
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_full(self, init):
inst = self._setup()
# Call
ntools.eq_(inst.get_best_segments(k=3, sending=False),
["pcb3", "pcb1", "pcb4"])
# Tests
inst._remove_expired_segments.assert_called_once_with()
inst._update_all_fidelity.assert_called_once_with()
for i in inst.candidates:
ntools.assert_false(i.sending.called)
@patch("lib.path_store.PathStore.__init__", autospec=True,
return_value=None)
def test_less_arg(self, init):
inst = self._setup()
inst.path_policy = create_mock(["best_set_size"])
inst.path_policy.best_set_size = 1
# Call
ntools.eq_(inst.get_best_segments(), ["pcb3"])
# Tests
for i in inst.candidates:
if i.fidelity == 6:
i.sending.assert_called_once_with()
else:
ntools.assert_false(i.sending.called)
class TestPathStoreGetLatestHistorySnapshot(object):
"""
Unit tests for lib.path_store.get_latest_history_snapshot
"""
def _setup(self, attrs=None):
def_attrs = {'history_limit': 3}
if attrs:
def_attrs.update(attrs)
path_policy = MagicMock(spec_set=list(def_attrs.keys()))
path_policy.history_limit = 3
for k, v in def_attrs.items():
setattr(path_policy, k, v)
return path_policy
def test_basic(self):
pth_str = PathStore(self._setup())
pth_str.best_paths_history = []
pth_str.best_paths_history.append([MagicMock(spec_set=['pcb'])
for i in range(5)])
for i in range(5):
pth_str.best_paths_history[0][i].pcb = i
ntools.eq_(pth_str.get_latest_history_snapshot(3), [0, 1, 2])
def test_less_arg(self):
pth_str = PathStore(self._setup({'best_set_size': 4}))
pth_str.best_paths_history = []
pth_str.best_paths_history.append([MagicMock(spec_set=['pcb'])
for i in range(5)])
for i in range(5):
pth_str.best_paths_history[0][i].pcb = i
ntools.eq_(pth_str.get_latest_history_snapshot(), [0, 1, 2, 3])
def test_false(self):
pth_str = PathStore(self._setup())
ntools.eq_(pth_str.get_latest_history_snapshot(3), [])
class TestPathStoreRemoveExpiredSegments(object):
"""
Unit tests for lib.path_store._remove_expired_segments
"""
@patch("lib.path_store.SCIONTime.get_time", spec_set=[],
new_callable=MagicMock)
def test_basic(self, time_):
path_policy = MagicMock(spec_set=['history_limit'])
path_policy.history_limit = 3
pth_str = PathStore(path_policy)
pth_str.candidates = [MagicMock(spec_set=['expiration_time', 'id'])
for i in range(5)]
for i in range(5):
pth_str.candidates[i].expiration_time = i
pth_str.candidates[i].id = i
time_.return_value = 2
pth_str.remove_segments = MagicMock(spec_set=[])
pth_str._remove_expired_segments()
pth_str.remove_segments.assert_called_once_with([0, 1, 2])
class TestPathStoreRemoveSegments(object):
"""
Unit tests for lib.path_store.remove_segments
"""
def setUp(self):
self.path_policy = MagicMock(spec_set=['history_limit'])
self.path_policy.history_limit = 3
def tearDown(self):
del self.path_policy
def test_basic(self):
pth_str = PathStore(self.path_policy)
pth_str.candidates = [MagicMock(spec_set=['id', 'fidelity'])
for i in range(5)]
for i in range(5):
pth_str.candidates[i].id = i
pth_str.candidates[i].fidelity = i
pth_str._update_all_fidelity = MagicMock(spec_set=[])
pth_str.remove_segments([1, 2, 3])
ntools.eq_(len(pth_str.candidates), 2)
ntools.eq_(pth_str.candidates[0].id, 4)
ntools.eq_(pth_str.candidates[1].id, 0)
pth_str._update_all_fidelity.assert_called_once_with()
def test_none(self):
pth_str = PathStore(self.path_policy)
pth_str.candidates = [MagicMock(spec_set=['id']) for i in range(5)]
for i in range(5):
pth_str.candidates[i].id = i
pth_str.remove_segments([0, 1, 2, 3, 4])
ntools.eq_(pth_str.candidates, [])
class TestPathStoreGetSegment(object):
"""
Unit tests for lib.path_store.get_segment
"""
def setUp(self):
self.path_policy = MagicMock(spec_set=['history_limit'])
self.path_policy.history_limit = 3
def tearDown(self):
del self.path_policy
def test_basic(self):
pth_str = PathStore(self.path_policy)
pth_str.candidates = [MagicMock(spec_set=['id', 'pcb'])
for i in range(5)]
for i in range(5):
pth_str.candidates[i].id = i
pth_str.candidates[i].pcb = i
ntools.eq_(pth_str.get_segment(2), 2)
def test_not_present(self):
pth_str = PathStore(self.path_policy)
pth_str.candidates = [MagicMock(spec_set=['id']) for i in range(5)]
ntools.assert_is_none(pth_str.get_segment(2))
if __name__ == "__main__":
nose.run(defaultTest=__name__)
|
|
from . import base
from . import messages
from grow.pods import locales
from grow.pods import urls
import fnmatch
import mimetypes
import re
import webob
import os
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('image/svg+xml', '.svg')
mimetypes.add_type('text/css', '.css')
SKIP_PATTERNS = [
'**/.**',
]
class Error(Exception):
pass
class BadStaticFileError(Error):
pass
class StaticFile(object):
def __init__(self, pod_path, serving_path, locale=None, localization=None,
controller=None, pod=None):
self.pod = pod
self.default_locale = pod.podspec.default_locale
self.locale = pod.normalize_locale(locale)
self.localization = localization
self.pod_path = pod_path
self.serving_path = serving_path
self.controller = controller
self.basename = os.path.basename(pod_path)
self.base, self.ext = os.path.splitext(self.basename)
def __repr__(self):
if self.locale:
return "<StaticFile({}, locale='{}')>".format(self.pod_path, self.locale)
return "<StaticFile({})>".format(self.pod_path)
def __eq__(self, other):
return (self.pod_path == other.pod_path and self.pod == other.pod
and other.locale == self.locale)
def __ne__(self, other):
return not self.__eq__(other)
@property
def exists(self):
return self.pod.file_exists(self.pod_path)
@property
def modified(self):
return self.pod.file_modified(self.pod_path)
@property
def url(self):
serving_path = self.serving_path
path_format = self.controller.path_format.replace('{filename}', '')
suffix = serving_path.replace(path_format, '')
if self.localization:
localized_pod_path = self.localization['static_dir'] + suffix
localized_pod_path = localized_pod_path.format(locale=self.locale)
if self.pod.file_exists(localized_pod_path):
# Internal paths use Babel locales, serving paths use aliases.
locale = self.locale.alias if self.locale is not None else self.locale
localized_serving_path = self.localization['serve_at'] + suffix
localized_serving_path = localized_serving_path.format(locale=locale)
serving_path = localized_serving_path
return urls.Url(path=serving_path) if serving_path else None
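# Illustrative behaviour of the url property above (a sketch; the config values
# are hypothetical): if a localized counterpart of this file exists under
# localization['static_dir'], the serving path is rebuilt from
# localization['serve_at'] with the locale's alias substituted for {locale};
# otherwise the plain serving_path is returned unchanged.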
class StaticController(base.BaseController):
KIND = messages.Kind.STATIC
def __init__(self, path_format, source_format=None, localized=False,
localization=None, pod=None):
# path_format: "serve_at"
# source_format: "static_dir"
self.path_format = path_format.replace('<grow:', '{').replace('>', '}')
self.source_format = source_format.replace('<grow:', '{').replace('>', '}')
self.pod = pod
self.localized = localized
self.localization = localization
self.route_params = {}
def __repr__(self):
return '<Static(format=\'{}\')>'.format(self.source_format)
def get_localized_pod_path(self):
if (self.localization
and '{locale}' in self.localization['static_dir']
and 'locale' in self.route_params):
source_format = self.localization['serve_at']
source_format += '/{filename}'
source_format = source_format.replace('//', '/')
kwargs = self.route_params
if 'locale' in kwargs:
locale = locales.Locale.from_alias(self.pod, kwargs['locale'])
kwargs['locale'] = str(locale)
pod_path = source_format.format(**kwargs)
if self.pod.file_exists(pod_path):
return pod_path
def get_pod_path(self):
# If a localized file exists, serve it. Otherwise, serve the base file.
return (self.get_localized_pod_path()
or self.source_format.format(**self.route_params))
def validate(self):
if not self.pod.file_exists(self.get_pod_path()):
path = self.pod.abs_path(self.get_pod_path())
message = '{} does not exist.'.format(path)
raise webob.exc.HTTPNotFound(message)
def render(self):
return self.pod.read_file(self.get_pod_path())
@property
def mimetype(self):
return mimetypes.guess_type(self.get_pod_path())[0]
def get_http_headers(self):
path = self.pod.abs_path(self.get_pod_path())
headers = super(StaticController, self).get_http_headers()
self.pod.storage.update_headers(headers, path)
modified = str(self.pod.storage.modified(path))
headers['Last-Modified'] = modified.split('.')[0]
headers['Cache-Control'] = 'max-age'
return headers
def match_pod_path(self, pod_path):
if self.path_format == pod_path:
return self.path_format
tokens = re.findall('.?{([^}]+)}.?', self.path_format)
if 'filename' in tokens:
source_regex = self.source_format.replace(
'{filename}', '(?P<filename>.*)')
source_regex = source_regex.replace('{locale}', '(?P<locale>[^/]*)')
source_regex = source_regex.replace('{root}', '(?P<root>[^/]*)')
match = re.match(source_regex, pod_path)
if match:
kwargs = match.groupdict()
kwargs['root'] = self.pod.podspec.root
if 'locale' in kwargs:
locale = locales.Locale.from_alias(self.pod, kwargs['locale'])
kwargs['locale'] = str(locale)
path = self.path_format.format(**kwargs)
path = path.replace('//', '/')
return path
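# Illustrative result of match_pod_path above (hypothetical formats): with
# source_format '/static/{filename}' and path_format '/public/{filename}',
# match_pod_path('/static/css/main.css') returns '/public/css/main.css'.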
def list_concrete_paths(self):
concrete_paths = set()
tokens = re.findall('.?{([^}]+)}.?', self.path_format)
source_regex = self.source_format.replace('{filename}', '(?P<filename>.*)')
source_regex = source_regex.replace('{locale}', '(?P<locale>[^/]*)')
if '{' not in self.path_format:
concrete_paths.add(self.path_format)
elif 'filename' in tokens:
# NOTE: This should be updated to support globbing directories,
# and not simply strip all sub-paths beneath {locale}.
source = self.source_format.replace('{filename}', '')[1:]
source = re.sub('{locale}.*', '', source)
source = source.rstrip('/')
paths = self.pod.list_dir(source)
paths = [('/' + source + path).replace(self.pod.root, '')
for path in paths]
# Exclude paths matched by skip patterns.
for pattern in SKIP_PATTERNS:
# .gitignore-style treatment of paths without slashes.
if '/' not in pattern:
pattern = '**{}**'.format(pattern)
for skip_paths in fnmatch.filter(paths, pattern):
paths = [path for path in paths
if path.replace(self.pod.root, '') not in skip_paths]
for path in paths:
match = re.match(source_regex, path)
# Skip adding localized paths in subfolders of other rules.
if not self.localized and self.localization:
localized_source_format = self.localization['static_dir']
localized_source_regex = localized_source_format.replace(
'{filename}', '(?P<filename>.*)')
localized_source_regex = localized_source_regex.replace(
'{locale}', '(?P<locale>[^/]*)')
if re.match(localized_source_regex, path):
continue
if match:
kwargs = match.groupdict()
kwargs['root'] = self.pod.podspec.root
if 'locale' in kwargs:
normalized_locale = self.pod.normalize_locale(kwargs['locale'])
kwargs['locale'] = (
normalized_locale.alias if normalized_locale is not None
else normalized_locale)
matched_path = self.path_format.format(**kwargs)
matched_path = matched_path.replace('//', '/')
concrete_paths.add(matched_path)
return list(concrete_paths)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for working with signatures in tf.saved_model.save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training.tracking import base
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
SIGNATURE_ATTRIBUTE_NAME = "signatures"
def _get_signature(function):
if (isinstance(function, (defun.Function, def_function.Function)) and
function.input_signature is not None):
function = function.get_concrete_function()
if not isinstance(function, defun.ConcreteFunction):
return None
return function
def _valid_signature(concrete_function):
"""Returns whether concrete function can be converted to a signature."""
if not concrete_function.outputs:
# Functions without outputs don't make sense as signatures. We just don't
# have any way to run an Operation with no outputs as a SignatureDef in the
# 1.x style.
return False
try:
_normalize_outputs(concrete_function.structured_outputs, "unused", "unused")
except ValueError:
return False
return True
def find_function_to_export(saveable_view):
"""Function to export, None if no suitable function was found."""
# If the user did not specify signatures, check the root object for a function
# that can be made into a signature.
functions = saveable_view.list_functions(saveable_view.root)
signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)
if signature is not None:
return signature
# TODO(andresp): Discuss removing this behaviour. It can lead to WTFs when a
# user decides to annotate more functions with tf.function and suddenly
# serving that model way later in the process stops working.
possible_signatures = []
for function in functions.values():
concrete = _get_signature(function)
if concrete is not None and _valid_signature(concrete):
possible_signatures.append(concrete)
if len(possible_signatures) == 1:
single_function = possible_signatures[0]
signature = _get_signature(single_function)
if signature and _valid_signature(signature):
return signature
return None
def canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if signatures is None:
return {}
if not isinstance(signatures, collections_abc.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
concrete_signatures = {}
for signature_key, function in signatures.items():
signature_function = _get_signature(function)
if signature_function is None:
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Only `tf.functions` with an input signature or "
"concrete functions can be used as a signature.").format(function))
# Re-wrap the function so that it returns a dictionary of Tensors. This
# matches the format of 1.x-style signatures.
# pylint: disable=cell-var-from-loop
@def_function.function
def signature_wrapper(**kwargs):
structured_outputs = signature_function(**kwargs)
return _normalize_outputs(
structured_outputs, signature_function.name, signature_key)
# TODO(b/123902469): Use ConcreteFunction.structured_inputs once their names
# always match keyword arguments.
tensor_spec_signature = {}
for keyword, tensor in zip(
signature_function._arg_keywords, # pylint: disable=protected-access
signature_function.inputs):
keyword = compat.as_str(keyword)
tensor_spec_signature[keyword] = tensor_spec.TensorSpec.from_tensor(
tensor, name=keyword)
final_concrete = signature_wrapper.get_concrete_function(
**tensor_spec_signature)
# pylint: disable=protected-access
if len(final_concrete._arg_keywords) == 1:
# If there is only one input to the signature, a very common case, then
# ordering is unambiguous and we can let people pass a positional
# argument. Since SignatureDefs are unordered (protobuf "map") multiple
# arguments means we need to be keyword-only.
final_concrete._num_positional_args = 1
else:
final_concrete._num_positional_args = 0
# pylint: enable=protected-access
concrete_signatures[signature_key] = final_concrete
# pylint: enable=cell-var-from-loop
return concrete_signatures
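# Illustrative usage of canonicalize_signatures (a sketch; `double` is a
# hypothetical function and the public tf.function/tf.TensorSpec APIs are assumed):
#
#   @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
#   def double(x):
#     return x * 2.
#
#   signatures = canonicalize_signatures(double)
#   # -> {"serving_default": <ConcreteFunction>} whose outputs are normalized by
#   #    _normalize_outputs below into a dict such as {"output_0": <Tensor>}.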
def _is_flat(sequence):
sequence_flat = nest.flatten(sequence)
try:
nest.assert_same_structure(sequence_flat, sequence, check_types=False)
return True
except ValueError:
return False
except TypeError:
return False
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
if isinstance(outputs, collections_abc.Mapping):
for key, value in outputs.items():
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a dictionary containing non-Tensor value {} for key {} "
"in the output of the function {} used to generate a SavedModel "
"signature. Dictionaries outputs for functions used as signatures "
"should have one Tensor output per string key.")
.format(value, key, compat.as_str_any(function_name)))
return outputs
else:
original_outputs = outputs
if not isinstance(outputs, collections_abc.Sequence):
outputs = [outputs]
if not _is_flat(outputs):
raise ValueError(
("Got non-flat outputs '{}' from '{}' for SavedModel "
"signature '{}'. Signatures have one Tensor per output, so "
"to have predictable names Python functions used to generate "
"these signatures should avoid outputting Tensors in nested "
"structures.")
.format(original_outputs, function_name, signature_key))
return {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
# _SignatureMap is immutable to ensure that users do not expect changes to be
# reflected in the SavedModel. Using public APIs, tf.saved_model.load() is the
# only way to create a _SignatureMap and there is no way to modify it. So we can
# safely ignore/overwrite ".signatures" attributes attached to objects being
# saved if they contain a _SignatureMap. A ".signatures" attribute containing
# any other type (e.g. a regular dict) will raise an exception asking the user
# to first "del obj.signatures" if they want it overwritten.
class _SignatureMap(collections_abc.Mapping, base.Trackable):
"""A collection of SavedModel signatures."""
def __init__(self):
self._signatures = {}
def _add_signature(self, name, concrete_function):
"""Adds a signature to the _SignatureMap."""
# Ideally this object would be immutable, but restore is streaming so we do
# need a private API for adding new signatures to an existing object.
self._signatures[name] = concrete_function
def __getitem__(self, key):
return self._signatures[key]
def __iter__(self):
return iter(self._signatures)
def __len__(self):
return len(self._signatures)
def __repr__(self):
return "_SignatureMap({})".format(self._signatures)
def _list_functions_for_serialization(self, unused_serialization_cache):
return {
key: value for key, value in self.items()
if isinstance(value, (def_function.Function, defun.ConcreteFunction))
}
revived_types.register_revived_type(
"signature_map",
lambda obj: isinstance(obj, _SignatureMap),
versions=[revived_types.VersionedTypeRegistration(
# Standard dependencies are enough to reconstruct the trackable
# items in dictionaries, so we don't need to save any extra information.
object_factory=lambda proto: _SignatureMap(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_SignatureMap._add_signature # pylint: disable=protected-access
)])
def create_signature_map(signatures):
"""Creates an object containing `signatures`."""
signature_map = _SignatureMap()
for name, func in signatures.items():
# This is true of any signature that came from canonicalize_signatures. These
# asserts act as a sanity check on saving; crashing on load (e.g. in
# _add_signature) would be more problematic if future export changes violated
# these assertions.
assert isinstance(func, defun.ConcreteFunction)
assert isinstance(func.structured_outputs, collections_abc.Mapping)
# pylint: disable=protected-access
if len(func._arg_keywords) == 1:
assert 1 == func._num_positional_args
else:
assert 0 == func._num_positional_args
signature_map._add_signature(name, func)
# pylint: enable=protected-access
return signature_map
def validate_saveable_view(saveable_view):
"""Performs signature-related sanity checks on `saveable_view`."""
for name, dep in saveable_view.list_dependencies(
saveable_view.root):
if name == SIGNATURE_ATTRIBUTE_NAME:
if not isinstance(dep, _SignatureMap):
raise ValueError(
("Exporting an object {} which has an attribute named "
"'{signatures}'. This is a reserved attribute used to store "
"SavedModel signatures in objects which come from "
"`tf.saved_model.load`. Delete this attribute "
"(e.g. 'del obj.{signatures}') before saving if this shadowing is "
"acceptable.").format(
saveable_view.root,
signatures=SIGNATURE_ATTRIBUTE_NAME))
break
|
|
import numpy as np
import tensorflow as tf
import tflearn
from common_settings import CommonSettings
GAMMA = 0.99
A_DIM = CommonSettings.A_DIM
ENTROPY_WEIGHT = 0.5
ENTROPY_EPS = 1e-6
# S_INFO = 4
class ActorNetwork(object):
"""
Input to the network is the state, output is the distribution
of all actions.
"""
def __init__(self, sess, state_dim, action_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr_rate = learning_rate
# Create the actor network
self.inputs, self.out = self.create_actor_network()
# Get all network parameters
self.network_params = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Selected action, 0-1 vector
self.acts = tf.placeholder(tf.float32, [None, self.a_dim])
# This gradient will be provided by the critic network
self.act_grad_weights = tf.placeholder(tf.float32, [None, 1])
# Compute the objective (weighted log-probability of the chosen action plus an entropy term)
self.obj = tf.reduce_sum(tf.multiply(
tf.log(tf.reduce_sum(tf.multiply(self.out, self.acts),
reduction_indices=1, keep_dims=True)),
-self.act_grad_weights)) \
+ ENTROPY_WEIGHT * tf.reduce_sum(tf.multiply(self.out,
tf.log(self.out + ENTROPY_EPS)))
# Combine the gradients here
self.actor_gradients = tf.gradients(self.obj, self.network_params)
# Optimization Op
self.optimize = tf.train.RMSPropOptimizer(self.lr_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
def create_actor_network(self):
with tf.variable_scope('actor'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
split_2_flat = tflearn.flatten(split_2)
split_3_flat = tflearn.flatten(split_3)
split_4_flat = tflearn.flatten(split_4)
merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax')
return inputs, out
def train(self, inputs, acts, act_grad_weights):
self.sess.run(self.optimize, feed_dict={
self.inputs: inputs,
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def predict(self, inputs):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs
})
def get_gradients(self, inputs, acts, act_grad_weights):
return self.sess.run(self.actor_gradients, feed_dict={
self.inputs: inputs,
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def apply_gradients(self, actor_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.actor_gradients, actor_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
class CriticNetwork(object):
"""
Input to the network is the state; output is V(s).
On policy: the action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, state_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.lr_rate = learning_rate
# Create the critic network
self.inputs, self.out = self.create_critic_network()
# Get all network parameters
self.network_params = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Network target V(s)
self.td_target = tf.placeholder(tf.float32, [None, 1])
# Temporal Difference, will also be weights for actor_gradients
self.td = tf.subtract(self.td_target, self.out)
# Mean square error
self.loss = tflearn.mean_square(self.td_target, self.out)
# Compute critic gradient
self.critic_gradients = tf.gradients(self.loss, self.network_params)
# Optimization Op
self.optimize = tf.train.RMSPropOptimizer(self.lr_rate). \
apply_gradients(zip(self.critic_gradients, self.network_params))
def create_critic_network(self):
with tf.variable_scope('critic'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
split_2_flat = tflearn.flatten(split_2)
split_3_flat = tflearn.flatten(split_3)
split_4_flat = tflearn.flatten(split_4)
merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
out = tflearn.fully_connected(dense_net_0, 1, activation='linear')
return inputs, out
def train(self, inputs, td_target):
return self.sess.run([self.loss, self.optimize], feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def predict(self, inputs):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs
})
def get_td(self, inputs, td_target):
return self.sess.run(self.td, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def get_gradients(self, inputs, td_target):
return self.sess.run(self.critic_gradients, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def apply_gradients(self, critic_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.critic_gradients, critic_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
def compute_gradients(s_batch, a_batch, r_batch, terminal, actor, critic):
"""
The batches of s, a, r come from samples in a sequence;
each has the format np.array([batch_size, s/a/r_dim]).
terminal is True when the sequence ends in a terminal state.
"""
assert s_batch.shape[0] == a_batch.shape[0]
assert s_batch.shape[0] == r_batch.shape[0]
ba_size = s_batch.shape[0]
v_batch = critic.predict(s_batch)
R_batch = np.zeros(r_batch.shape)
if terminal:
R_batch[-1, 0] = 0 # terminal state
else:
R_batch[-1, 0] = v_batch[-1, 0]  # bootstrap from the last state
for t in reversed(range(ba_size - 1)):
R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]
td_batch = R_batch - v_batch
actor_gradients = actor.get_gradients(s_batch, a_batch, td_batch)
critic_gradients = critic.get_gradients(s_batch, R_batch)
return actor_gradients, critic_gradients, td_batch
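# Illustrative call (a sketch; the shapes are assumptions inferred from the code
# above): s_batch is an np.array of shape [T, s_dim[0], s_dim[1]], a_batch is a
# one-hot np.array of shape [T, A_DIM], r_batch has shape [T, 1], e.g.
#   actor_grads, critic_grads, td = compute_gradients(
#       s_batch, a_batch, r_batch, terminal=False, actor=actor, critic=critic)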
def discount(x, gamma):
"""
Given vector x, computes a vector y such that
y[i] = x[i] + gamma * x[i+1] + gamma^2 x[i+2] + ...
"""
assert x.ndim >= 1
out = np.zeros(len(x))
out[-1] = x[-1]
for i in reversed(range(len(x) - 1)):
out[i] = x[i] + gamma * out[i + 1]
# More efficient version:
# scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
return out
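# Quick sanity check (illustrative): discount(np.array([1.0, 1.0, 1.0]), 0.99)
# returns array([2.9701, 1.99, 1.0]).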
def compute_entropy(x):
"""
Given vector x, computes the entropy
H(x) = - sum( p * log(p))
"""
H = 0.0
for i in range(len(x)):
if 0 < x[i] < 1:
H -= x[i] * np.log(x[i])
return H
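# Illustrative values: compute_entropy([0.5, 0.5]) ~= 0.6931 (ln 2), while a
# deterministic distribution such as [1.0, 0.0] yields 0.0.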
def build_summaries():
td_loss = tf.Variable(0.)
tf.summary.scalar("TD_loss", td_loss)
eps_total_reward = tf.Variable(0.)
tf.summary.scalar("Eps_total_reward", eps_total_reward)
avg_entropy = tf.Variable(0.)
tf.summary.scalar("Avg_entropy", avg_entropy)
summary_vars = [td_loss, eps_total_reward, avg_entropy]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
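# Illustrative TF1-style usage (a sketch; sess, writer and the *_val variables
# are hypothetical):
#   summary_ops, summary_vars = build_summaries()
#   writer = tf.summary.FileWriter(log_dir, sess.graph)
#   summary_str = sess.run(summary_ops, feed_dict={
#       summary_vars[0]: td_loss_val,
#       summary_vars[1]: total_reward_val,
#       summary_vars[2]: avg_entropy_val})
#   writer.add_summary(summary_str, epoch)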
|
|
"""geomath.py: transcription of GeographicLib::Math class."""
# geomath.py
#
# This is a rather literal translation of the GeographicLib::Math class to
# python. See the documentation for the C++ class for more information at
#
# https://geographiclib.sourceforge.io/html/annotated.html
#
# Copyright (c) Charles Karney (2011-2017) <charles@karney.com> and
# licensed under the MIT/X11 License. For more information, see
# https://geographiclib.sourceforge.io/
######################################################################
import sys
import math
class Math(object):
"""
Additional math routines for GeographicLib.
This defines constants:
epsilon, difference between 1 and the next bigger number
digits, the number of digits in the fraction of a real number
minval, minimum normalized positive number
maxval, maximum finite number
nan, not a number
inf, infinity
"""
digits = 53
epsilon = math.pow(2.0, 1-digits)
minval = math.pow(2.0, -1022)
maxval = math.pow(2.0, 1023) * (2 - epsilon)
inf = float("inf") if sys.version_info > (2, 6) else 2 * maxval
nan = float("nan") if sys.version_info > (2, 6) else inf - inf
def sq(x):
"""Square a number"""
return x * x
sq = staticmethod(sq)
def cbrt(x):
"""Real cube root of a number"""
y = math.pow(abs(x), 1/3.0)
return y if x >= 0 else -y
cbrt = staticmethod(cbrt)
def log1p(x):
"""log(1 + x) accurate for small x (missing from python 2.5.2)"""
if sys.version_info > (2, 6):
return math.log1p(x)
y = 1 + x
z = y - 1
# Here's the explanation for this magic: y = 1 + z, exactly, and z
# approx x, thus log(y)/z (which is nearly constant near z = 0) returns
# a good approximation to the true log(1 + x)/x. The multiplication x *
# (log(y)/z) introduces little additional error.
return x if z == 0 else x * math.log(y) / z
log1p = staticmethod(log1p)
def atanh(x):
"""atanh(x) (missing from python 2.5.2)"""
if sys.version_info > (2, 6):
return math.atanh(x)
y = abs(x) # Enforce odd parity
y = Math.log1p(2 * y/(1 - y))/2
return -y if x < 0 else y
atanh = staticmethod(atanh)
def copysign(x, y):
"""return x with the sign of y (missing from python 2.5.2)"""
if sys.version_info > (2, 6):
return math.copysign(x, y)
return math.fabs(x) * (-1 if y < 0 or (y == 0 and 1/y < 0) else 1)
copysign = staticmethod(copysign)
def norm(x, y):
"""Private: Normalize a two-vector."""
r = math.hypot(x, y)
return x/r, y/r
norm = staticmethod(norm)
def sum(u, v):
"""Error free transformation of a sum."""
# Error free transformation of a sum: s is the rounded sum round(u + v) and
# t is the rounding error, so that u + v == s + t exactly.
s = u + v
up = s - v
vpp = s - up
up -= u
vpp -= v
t = -(up + vpp)
# u + v = s + t
# = round(u + v) + t
return s, t
sum = staticmethod(sum)
def polyval(N, p, s, x):
"""Evaluate a polynomial."""
y = float(0 if N < 0 else p[s]) # make sure the returned value is a float
while N > 0:
N -= 1; s += 1
y = y * x + p[s]
return y
polyval = staticmethod(polyval)
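# Example: Math.polyval(2, [1.0, -3.0, 2.0], 0, 5.0) evaluates the polynomial
# x**2 - 3*x + 2 at x = 5 via Horner's scheme and returns 12.0.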
def AngRound(x):
"""Private: Round an angle so that small values underflow to zero."""
# This makes the smallest gap in x = 1/16 - nextafter(1/16, 0) = 1/2^57
# for reals = 0.7 pm on the earth if x is an angle in degrees. (This
# is about 1000 times more resolution than we get with angles around 90
# degrees.) We use this to avoid having to deal with near singular
# cases when x is non-zero but tiny (e.g., 1.0e-200).
z = 1/16.0
y = abs(x)
# The compiler mustn't "simplify" z - (z - y) to y
if y < z: y = z - (z - y)
return 0.0 if x == 0 else (-y if x < 0 else y)
AngRound = staticmethod(AngRound)
def AngNormalize(x):
"""reduce angle to (-180,180]"""
y = math.fmod(x, 360)
# On Windows 32-bit with python 2.7, math.fmod(-0.0, 360) = +0.0
# This fixes this bug. See also Math::AngNormalize in the C++ library.
# sincosd has a similar fix.
y = x if x == 0 else y
return (y + 360 if y <= -180 else
(y if y <= 180 else y - 360))
AngNormalize = staticmethod(AngNormalize)
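# Examples: Math.AngNormalize(540) -> 180.0, Math.AngNormalize(-450) -> -90.0.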
def LatFix(x):
"""replace angles outside [-90,90] by NaN"""
return Math.nan if abs(x) > 90 else x
LatFix = staticmethod(LatFix)
def AngDiff(x, y):
"""compute y - x and reduce to [-180,180] accurately"""
d, t = Math.sum(Math.AngNormalize(-x), Math.AngNormalize(y))
d = Math.AngNormalize(d)
return Math.sum(-180 if d == 180 and t > 0 else d, t)
AngDiff = staticmethod(AngDiff)
def sincosd(x):
"""Compute sine and cosine of x in degrees."""
r = math.fmod(x, 360)
q = Math.nan if Math.isnan(r) else int(math.floor(r / 90 + 0.5))
r -= 90 * q; r = math.radians(r)
s = math.sin(r); c = math.cos(r)
q = q % 4
if q == 1:
s, c = c, -s
elif q == 2:
s, c = -s, -c
elif q == 3:
s, c = -c, s
# Remove the minus sign on -0.0 except for sin(-0.0).
# On Windows 32-bit with python 2.7, math.fmod(-0.0, 360) = +0.0
# (x, c) here fixes this bug. See also Math::sincosd in the C++ library.
# AngNormalize has a similar fix.
s, c = (x, c) if x == 0 else (0.0+s, 0.0+c)
return s, c
sincosd = staticmethod(sincosd)
def atan2d(y, x):
"""compute atan2(y, x) with the result in degrees"""
if abs(y) > abs(x):
q = 2; x, y = y, x
else:
q = 0
if x < 0:
q += 1; x = -x
ang = math.degrees(math.atan2(y, x))
if q == 1:
ang = (180 if y >= 0 else -180) - ang
elif q == 2:
ang = 90 - ang
elif q == 3:
ang = -90 + ang
return ang
atan2d = staticmethod(atan2d)
def isfinite(x):
"""Test for finiteness"""
return abs(x) <= Math.maxval
isfinite = staticmethod(isfinite)
def isnan(x):
"""Test if nan"""
return math.isnan(x) if sys.version_info > (2, 6) else x != x
isnan = staticmethod(isnan)
|
|
import re
from cStringIO import StringIO
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.uploadedfile import UploadedFile
from django.forms.models import BaseInlineFormSet, inlineformset_factory
import django_filters
import happyforms
from PIL import Image
from tower import ugettext as _, ugettext_lazy as _lazy
from mozillians.groups.models import Skill
from mozillians.phonebook.models import Invite
from mozillians.phonebook.validators import validate_username
from mozillians.phonebook.widgets import MonthYearWidget
from mozillians.users import get_languages_for_locale
from mozillians.users.models import ExternalAccount, Language, UserProfile
REGEX_NUMERIC = re.compile(r'\d+', re.IGNORECASE)
class ExternalAccountForm(happyforms.ModelForm):
class Meta:
model = ExternalAccount
fields = ['type', 'identifier', 'privacy']
def clean(self):
cleaned_data = super(ExternalAccountForm, self).clean()
identifier = cleaned_data.get('identifier')
account_type = cleaned_data.get('type')
if account_type and identifier:
# If the Account expects an identifier and user provided a
# full URL, try to extract the identifier from the URL.
url = ExternalAccount.ACCOUNT_TYPES[account_type].get('url')
if url and identifier.startswith('http'):
url_pattern_re = url.replace('{identifier}', '(.+)')
identifier = identifier.rstrip('/')
url_pattern_re = url_pattern_re.rstrip('/')
match = re.match(url_pattern_re, identifier)
if match:
identifier = match.groups()[0]
validator = ExternalAccount.ACCOUNT_TYPES[account_type].get('validator')
if validator:
identifier = validator(identifier)
cleaned_data['identifier'] = identifier
return cleaned_data
AccountsFormset = inlineformset_factory(UserProfile, ExternalAccount,
form=ExternalAccountForm, extra=1)
class SearchForm(happyforms.Form):
q = forms.CharField(required=False)
limit = forms.IntegerField(
widget=forms.HiddenInput, required=False, min_value=1,
max_value=settings.ITEMS_PER_PAGE)
include_non_vouched = forms.BooleanField(
label=_lazy(u'Include non-vouched'), required=False)
def clean_limit(self):
limit = self.cleaned_data['limit'] or settings.ITEMS_PER_PAGE
return limit
def filter_vouched(qs, choice):
if choice == SearchFilter.CHOICE_ONLY_VOUCHED:
return qs.filter(is_vouched=True)
elif choice == SearchFilter.CHOICE_ONLY_UNVOUCHED:
return qs.filter(is_vouched=False)
return qs
class SearchFilter(django_filters.FilterSet):
CHOICE_ONLY_VOUCHED = 'yes'
CHOICE_ONLY_UNVOUCHED = 'no'
CHOICE_ALL = 'all'
CHOICES = (
(CHOICE_ONLY_VOUCHED, _lazy('Vouched')),
(CHOICE_ONLY_UNVOUCHED, _lazy('Unvouched')),
(CHOICE_ALL, _lazy('All')),
)
vouched = django_filters.ChoiceFilter(
name='vouched', label=_lazy('Display only'), required=False,
choices=CHOICES, action=filter_vouched)
class Meta:
model = UserProfile
fields = ['vouched', 'skills', 'groups', 'timezone']
def __init__(self, *args, **kwargs):
super(SearchFilter, self).__init__(*args, **kwargs)
self.filters['timezone'].field.choices.insert(0, ('', _lazy(u'All timezones')))
class UserForm(happyforms.ModelForm):
"""Instead of just inhereting form a UserProfile model form, this
base class allows us to also abstract over methods that have to do
with the User object that need to exist in both Registration and
Profile.
"""
username = forms.CharField(label=_lazy(u'Username'))
class Meta:
model = User
fields = ['username']
def clean_username(self):
username = self.cleaned_data['username']
if not username:
return self.instance.username
# Don't allow taking over somebody else's username.
# This causes a potential race condition; however, the worst that can
# happen is bad UI.
if (User.objects.filter(username=username).
exclude(pk=self.instance.id).exists()):
raise forms.ValidationError(_(u'This username is in use. Please try'
u' another.'))
# No funky characters in username.
if not re.match(r'^[\w.@+-]+$', username):
raise forms.ValidationError(_(u'Please use only alphanumeric'
u' characters'))
if not validate_username(username):
raise forms.ValidationError(_(u'This username is not allowed, '
u'please choose another.'))
return username
class ProfileForm(happyforms.ModelForm):
photo = forms.ImageField(label=_lazy(u'Profile Photo'), required=False)
photo_delete = forms.BooleanField(label=_lazy(u'Remove Profile Photo'),
required=False)
date_mozillian = forms.DateField(
required=False,
label=_lazy(u'When did you get involved with Mozilla?'),
widget=MonthYearWidget(years=range(1998, datetime.today().year + 1),
required=False))
skills = forms.CharField(
label='',
help_text=_lazy(u'Start typing to add a skill (example: Python, '
u'javascript, Graphic Design, User Research)'),
required=False)
lat = forms.FloatField(widget=forms.HiddenInput)
lng = forms.FloatField(widget=forms.HiddenInput)
savecountry = forms.BooleanField(
label=_lazy('Required'),
initial=True, required=False,
widget=forms.CheckboxInput(attrs={'disabled': 'disabled'})
)
saveregion = forms.BooleanField(label=_lazy('Save'), required=False, show_hidden_initial=True)
savecity = forms.BooleanField(label=_lazy('Save'), required=False, show_hidden_initial=True)
class Meta:
model = UserProfile
fields = ('full_name', 'ircname', 'bio', 'photo',
'allows_community_sites', 'tshirt',
'title', 'allows_mozilla_sites',
'date_mozillian', 'story_link', 'timezone',
'privacy_photo', 'privacy_full_name', 'privacy_ircname',
'privacy_email', 'privacy_timezone', 'privacy_tshirt',
'privacy_bio', 'privacy_geo_city', 'privacy_geo_region',
'privacy_geo_country', 'privacy_groups',
'privacy_skills', 'privacy_languages',
'privacy_date_mozillian', 'privacy_story_link', 'privacy_title')
widgets = {'bio': forms.Textarea()}
def clean_photo(self):
"""Clean possible bad Image data.
Try to load EXIF data from image. If that fails, remove EXIF
data by re-saving the image. Related bug 919736.
"""
photo = self.cleaned_data['photo']
if photo and isinstance(photo, UploadedFile):
image = Image.open(photo.file)
try:
image._get_exif()
except (AttributeError, IOError, KeyError, IndexError):
cleaned_photo = StringIO()
if image.mode != 'RGB':
image = image.convert('RGB')
image.save(cleaned_photo, format='JPEG', quality=95)
photo.file = cleaned_photo
photo.size = cleaned_photo.tell()
return photo
def clean_skills(self):
if not re.match(r'^[a-zA-Z0-9 +.:,-]*$', self.cleaned_data['skills']):
# Commas cannot be included in skill names because we use them to
# separate names in a list
raise forms.ValidationError(_(u'Skills can only contain '
u'alphanumeric characters '
u'and +.:-.'))
skills = self.cleaned_data['skills']
return filter(lambda x: x,
map(lambda x: x.strip() or False,
skills.lower().split(',')))
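# Illustrative result: an input such as 'Python, , Graphic Design' is cleaned
# to ['python', 'graphic design'] (lower-cased, empty entries dropped).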
def clean(self):
# If lng/lat were provided, make sure they point at a country somewhere...
if self.cleaned_data.get('lat') is not None and self.cleaned_data.get('lng') is not None:
# We only want to call reverse_geocode if some location data changed.
if ('lat' in self.changed_data or 'lng' in self.changed_data or
'saveregion' in self.changed_data or 'savecity' in self.changed_data):
self.instance.lat = self.cleaned_data['lat']
self.instance.lng = self.cleaned_data['lng']
self.instance.reverse_geocode()
if not self.instance.geo_country:
error_msg = _('Location must be inside a country.')
self.errors['savecountry'] = self.error_class([error_msg])
del self.cleaned_data['savecountry']
# If the user doesn't want their region/city saved, respect it.
if not self.cleaned_data.get('saveregion'):
if not self.cleaned_data.get('savecity'):
self.instance.geo_region = None
else:
error_msg = _('Region must also be saved if city is saved.')
self.errors['saveregion'] = self.error_class([error_msg])
if not self.cleaned_data.get('savecity'):
self.instance.geo_city = None
else:
self.errors['location'] = self.error_class([_('Search for your country on the map.')])
self.errors['savecountry'] = self.error_class([_('Country cannot be empty.')])
del self.cleaned_data['savecountry']
return self.cleaned_data
def save(self, *args, **kwargs):
"""Save the data to profile."""
self.instance.set_membership(Skill, self.cleaned_data['skills'])
super(ProfileForm, self).save(*args, **kwargs)
class BaseLanguageFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
self.locale = kwargs.pop('locale', 'en')
super(BaseLanguageFormSet, self).__init__(*args, **kwargs)
def add_fields(self, form, index):
super(BaseLanguageFormSet, self).add_fields(form, index)
choices = [('', '---------')] + get_languages_for_locale(self.locale)
form.fields['code'].choices = choices
class Meta:
models = Language
fields = ['code']
LanguagesFormset = inlineformset_factory(UserProfile, Language,
formset=BaseLanguageFormSet,
extra=1)
class EmailForm(happyforms.Form):
email = forms.EmailField(label=_lazy(u'Email'))
def clean_email(self):
email = self.cleaned_data['email']
if (User.objects
.exclude(pk=self.initial['user_id']).filter(email=email).exists()):
raise forms.ValidationError(_(u'Email is currently associated with another user.'))
return email
def email_changed(self):
return self.cleaned_data['email'] != self.initial['email']
class RegisterForm(ProfileForm):
optin = forms.BooleanField(
widget=forms.CheckboxInput(attrs={'class': 'checkbox'}),
required=True)
class VouchForm(happyforms.Form):
"""Vouching is captured via a user's id and a description of the reason for vouching."""
description = forms.CharField(
label=_lazy(u'Provide a reason for vouching with relevant links'),
widget=forms.Textarea(attrs={'rows': 10, 'cols': 20, 'maxlength': 500}),
max_length=500,
error_messages={'required': _(u'You must enter a reason for vouching for this person.')}
)
class InviteForm(happyforms.ModelForm):
message = forms.CharField(
label=_lazy(u'Personal message to be included in the invite email'),
required=False, widget=forms.Textarea(),
)
recipient = forms.EmailField(label=_lazy("Recipient's email"))
def clean_recipient(self):
recipient = self.cleaned_data['recipient']
if User.objects.filter(email=recipient,
userprofile__is_vouched=True).exists():
raise forms.ValidationError(
_(u'You cannot invite someone who has already been vouched.'))
return recipient
class Meta:
model = Invite
fields = ['recipient']
|
|
#!/usr/bin/python3
''' Elementary black-box testing tool.
This module provides a set of classes and subroutines for automated test launching and stress testing. All that is left to the programmer is test generation per se. A typical testing script could look like either of the following.
# Predefined tests
from blackbox import Test, test
tests = [
Test('input1', 'output1'),
Test('input2', 'output2'),
Test('immense input', tags={Test.TL_TAG}),
# ...
]
test(tests, './program')
# Stress testing
from blackbox import Test, stress
def genTest():
while True:
yield 'Yet another test case'
stress(genTest(), './program', './trivial')
'''
import subprocess, sys, operator, functools
from tempfile import TemporaryFile
from signal import signal, SIGINT, SIG_IGN
class SignalHandler:
def __init__(self):
self.signalled = False
signal(SIGINT, self.signal)
def signal(self, *vargs):
if self.signalled:
# Force shutdown
sys.exit(0)
self.signalled = True
def childBehavior(self):
signal(SIGINT, SIG_IGN)
signalHandler = SignalHandler()
class TemporaryFileStorage:
def __init__(self):
self.temporaryFile = TemporaryFile()
def __call__(self, data, bytesEncoding='utf8'):
if type(data) != bytes:
data = str(data).encode(bytesEncoding)
# Write string data
self.temporaryFile.seek(0)
self.temporaryFile.truncate()
self.temporaryFile.write(data)
return self
def buffer(self):
self.temporaryFile.seek(0)
return self.temporaryFile
def __excerpt(msg, stringLengthLimit=60):
return repr(msg if len(msg) < stringLengthLimit else msg[:stringLengthLimit-3] + '...')
class TimeLimitExpiredException(Exception):
def __init__(self, timeLimit):
self.timeLimit = timeLimit
class OutputMismatchException(Exception):
pass
class StressOutputMismatchException(OutputMismatchException):
def __init__(self, testedOutput, trivialOutput):
self.testedOutput, self.trivialOutput = testedOutput, trivialOutput
class CompareOutputMismatchException(OutputMismatchException):
def __init__(self, output, expectedOutput):
self.output, self.expectedOutput = output, expectedOutput
class Test:
''' Represents a single test. Each test has an ordinal number and a tag assigned to it.
'''
# Temporary buffer
__storage = None
@classmethod
def __store(cls, data):
if cls.__storage is None:
cls.__storage = TemporaryFileStorage()
return cls.__storage(data)
# Total number of tests
count = 0
# Set static tag names depending on output facilities
if sys.stdout.isatty():
TL_TAG = '\033[36mTL\033[0m'
ML_TAG = '\033[35mML\033[0m'
BUG_TAG = '\033[33mBUG\033[0m'
else:
TL_TAG, ML_TAG = 'TL', 'ML'
BUG_TAG = 'BUG'
# Instance methods
def __init__(self, inputData, outputData=None, tags=set(), ignoreMarginalWhitespace=True):
''' inputData -- string describing the input.
outputData -- string describing the output. Can be omitted.
tags -- tag set, predefined tags are "Test.TL_TAG" for time-consuming tests, "Test.ML_TAG" for memory-consuming tests, and "Test.BUG_TAG" for once caught bugs.
ignoreMarginalWhitespace -- whether to ignore leading and trailing whitespace or not.
'''
type(self).count += 1
self.index = type(self).count
if outputData is not None:
self.hasRightAnswer = lambda: True
self.ignoreMarginalWhitespace = ignoreMarginalWhitespace
if ignoreMarginalWhitespace:
outputData = outputData.strip()
else:
self.hasRightAnswer = lambda: False
self.input, self.output = inputData, outputData
self.tag = ' {{{}}}'.format(', '.join(map(str, tags))) if tags else ''
def run(self, binaryFile, timeLimit=1, outputEncoding='utf8', storage=None):
''' Run the program on the test data and return produced output.
binaryFile -- path to the file going to be tested.
timeLimit -- time limit for this test.
outputEncoding -- encoding of the produced output.
storage -- test input is already stored in this buffer (defaults to None).
'''
if not storage:
storage = self.__store(self.input)
try:
return subprocess.check_output([binaryFile], stdin=storage.buffer(),
timeout=timeLimit, preexec_fn=signalHandler.childBehavior
).decode(outputEncoding)
except subprocess.TimeoutExpired:
raise TimeLimitExpiredException(timeLimit)
class OutputChecker:
def __init__(self, testedBinary, compare=None):
self.testedBinary = testedBinary
self.equal = compare if compare else operator.eq
def check(self, test, **kwargs):
output = test.run(self.testedBinary, **kwargs)
if test.hasRightAnswer():
if test.ignoreMarginalWhitespace:
output = output.strip()
if not self.equal(test.output, output):
raise CompareOutputMismatchException(output, test.output)
return output
class OutputComparator:
__storage = None
def __init__(self, testedBinary, trivialBinary, compare=None, ignoreMarginalWhitespace=True):
self.testedBinary, self.trivialBinary = testedBinary, trivialBinary
if compare:
# Custom comparing function
self.equal = compare
elif ignoreMarginalWhitespace:
self.equal = lambda output1, output2: output1.strip() == output2.strip()
else:
self.equal = operator.eq
if self.__storage is None:
type(self).__storage = TemporaryFileStorage()
def check(self, test, **kwargs):
self.__storage(test.input)
kwargs['storage'] = self.__storage
testedOutput = test.run(self.testedBinary, **kwargs)
trivialOutput = test.run(self.trivialBinary, **kwargs)
if not self.equal(testedOutput, trivialOutput):
raise StressOutputMismatchException(testedOutput, trivialOutput)
def test(tests, binaryFile, compare=None, haltOnError=True, **kwargs):
''' Run the tests one by one.
tests -- iterable collection of Tests.
binaryFile -- path to the program being investigated.
haltOnError -- break script execution if a test fails.
**kwargs -- arguments for 'Test.run'.
'''
if sys.stdout.isatty():
successMessage, failMessage = '\033[32mPassed\033[0m', '\033[31mFailed\033[0m'
else:
successMessage, failMessage = 'Passed', 'Failed'
padding = ' ' * 6
checker = OutputChecker(binaryFile, compare)
for test in tests:
# Print the header
print('[Test #{}]{}'.format(test.index, test.tag))
print(padding + 'Input:', __excerpt(test.input))
if test.output:
print(padding + 'Expected output:', __excerpt(test.output))
try:
showResult = lambda output: print(padding + 'Output:', __excerpt(output))
try:
showResult(checker.check(test, **kwargs))
print(padding + successMessage)
except OutputMismatchException as e:
showResult(e.output)
raise
except:
raise
except TimeLimitExpiredException:
print(padding + '{} by timeout'.format(failMessage))
if haltOnError:
sys.exit(1)
continue
except OutputMismatchException:
print(padding + failMessage)
if haltOnError:
sys.exit(1)
if signalHandler.signalled:
sys.exit(0)
def stress(testGenerator, testedBinary, trivialBinary, compare=None, **kwargs):
''' Run stress testing.
testGenerator -- generator yielding tests either as strings or Test instances.
testedBinary, trivialBinary -- executable files to test.
compare -- comparing function, should take 2 string outputs ("tested", then "trivial") and return either True or False. 'None' means the default one.
**kwargs -- arguments for 'Test.run'.
'''
if not sys.stdout.isatty():
print('Stress test should be run in a terminal!')
sys.exit(2)
comparator = OutputComparator(testedBinary, trivialBinary, compare)
count = 0
for test in testGenerator:
count += 1
if not type(test) == Test:
test = Test(str(test))
print('\rTest #{}:{} {}\033[0K'.format(count, test.tag, __excerpt(test.input)), end='', flush=True)
try:
comparator.check(test, **kwargs)
if signalHandler.signalled:
print('\n\nStress testing mode: {} tests passed.'.format(Test.count),
'No difference spotted.', end='')
sys.exit(0)
except OutputMismatchException as mismatch:
print('\n\033[31mFailed!\033[0m\n')
print('Test:', repr(test.input))
print('Tested algo output:', repr(mismatch.testedOutput))
print('Trivial algo output:', repr(mismatch.trivialOutput))
sys.exit(1)
|
|
# Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""Defines :class:`scopedef_t` class"""
import time
import warnings
from . import algorithm
from . import templates
from . import declaration
from . import mdecl_wrapper
from pygccxml import utils
from . import matcher as matcher_module
import collections
class scopedef_t(declaration.declaration_t):
"""
Base class for :class:`namespace_t` and :class:`class_t` classes.
This is the base class for all declaration classes that may have
children nodes. The children can be accessed via the
:attr:`scopedef_t.declarations` property.
Also this class provides "get/select/find" interface. Using this class you
can get instance or instances of internal declaration(s).
You can find declaration(s) using the following criteria:
1. `name` - declaration name, could be full qualified name
2. `header_dir` - directory containing the file that the
declaration was declared in.
`header_dir` should be an absolute path.
3. `header_file` - file that the declaration was declared in.
4. `function` - user (i.e. your own) custom criteria. The interesting thing
is that this function will be combined with the other arguments
(criteria).
5. `recursive` - the declaration search range; if True, internal
declarations will be searched too.
Every ""query"" API, takes name or function as the first argument.
.. code-block:: python
global_namespace.member_function("do_something)
The statement returns a reference to the member function named "do_something".
If the function doesn't exist, or more than one such function exists,
an exception is raised.
If you want to query for many declarations, use other function(s):
.. code-block:: python
do_something = global_namespace.member_functions("do_something")
The statement returns an :class:`mdecl_wrapper_t` instance. That object will
save you from writing `for` loops. For more information see the
:class:`mdecl_wrapper_t` documentation.
"""
RECURSIVE_DEFAULT = True
ALLOW_EMPTY_MDECL_WRAPPER = False
declaration_not_found_t = matcher_module.matcher.declaration_not_found_t
multiple_declarations_found_t = \
matcher_module.matcher.multiple_declarations_found_t
# this class variable is used to prevent recursive imports
_impl_matchers = {}
# this class variable is used to prevent recursive imports
_impl_decl_types = {}
# this class variable is used to prevent recursive imports
_impl_all_decl_types = []
def __init__(self, name=''):
declaration.declaration_t.__init__(self, name)
self._optimized = False
self._type2decls = {}
self._type2name2decls = {}
self._type2decls_nr = {}
self._type2name2decls_nr = {}
self._all_decls = None
self._all_decls_not_recursive = None
@property
def _logger(self):
"""reference to :attr:`pygccxml.utils.loggers.queries_engine` logger"""
return utils.loggers.queries_engine
def _get__cmp__scope_items(self):
"""implementation details"""
raise NotImplementedError()
def _get__cmp__items(self):
"""implementation details"""
items = []
if self._optimized:
# in this case we don't need to build class internal declarations
# list
items.append(self._sorted_list(self._all_decls_not_recursive))
else:
items.append(self._sorted_list(self.declarations))
items.extend(self._get__cmp__scope_items())
return items
def __eq__(self, other):
if not declaration.declaration_t.__eq__(self, other):
return False
return self._sorted_list(self.declarations[:]) \
== other._sorted_list(other.declarations[:])
# self_decls = self._all_decls_not_recursive
# if not self._optimized:
# self_decls = self._sorted_list(self.declarations[:])
# other_decls = other._all_decls_not_recursive[:]
# if not other._optimized:
# other_decls = other._sorted_list(other.declarations[:])
# else:
# return self_decls == other_decls
def __hash__(self):
return super(scopedef_t, self).__hash__()
def _get_declarations_impl(self):
raise NotImplementedError()
@property
def declarations(self):
"""list of children :class:`declarations <declaration_t>`"""
if self._optimized:
return self._all_decls_not_recursive
else:
return self._get_declarations_impl()
def remove_declaration(self, decl):
raise NotImplementedError()
def __decl_types(self, decl):
"""implementation details"""
types = []
bases = list(decl.__class__.__bases__)
visited = set()
if 'pygccxml' in decl.__class__.__module__:
types.append(decl.__class__)
while bases:
base = bases.pop()
if base is declaration.declaration_t:
continue
if base in visited:
continue
if 'pygccxml' not in base.__module__:
continue
types.append(base)
bases.extend(base.__bases__)
return types
def clear_optimizer(self):
"""Cleans query optimizer state"""
self._optimized = False
self._type2decls = {}
self._type2name2decls = {}
self._type2decls_nr = {}
self._type2name2decls_nr = {}
self._all_decls = None
self._all_decls_not_recursive = None
for decl in self.declarations:
if isinstance(decl, scopedef_t):
decl.clear_optimizer()
def init_optimizer(self):
"""
Initializes query optimizer state.
There are 4 internal hash tables:
1. from type to declarations
2. from type to declarations for non-recursive queries
3. from type to name to declarations
4. from type to name to declarations for non-recursive queries
Almost every query includes declaration type information, and a very
common query is to search for declaration(s) by name or full name.
These hash tables allow declarations to be found very quickly.
"""
if self.name == '::':
self._logger.debug(
"preparing data structures for query optimizer - started")
start_time = time.clock()
self.clear_optimizer()
for dtype in scopedef_t._impl_all_decl_types:
self._type2decls[dtype] = []
self._type2decls_nr[dtype] = []
self._type2name2decls[dtype] = {}
self._type2name2decls_nr[dtype] = {}
self._all_decls_not_recursive = self.declarations
self._all_decls = algorithm.make_flatten(
self._all_decls_not_recursive)
for decl in self._all_decls:
types = self.__decl_types(decl)
for type_ in types:
self._type2decls[type_].append(decl)
name2decls = self._type2name2decls[type_]
if decl.name not in name2decls:
name2decls[decl.name] = []
name2decls[decl.name].append(decl)
if self is decl.parent:
self._type2decls_nr[type_].append(decl)
name2decls_nr = self._type2name2decls_nr[type_]
if decl.name not in name2decls_nr:
name2decls_nr[decl.name] = []
name2decls_nr[decl.name].append(decl)
for decl in self._all_decls_not_recursive:
if isinstance(decl, scopedef_t):
decl.init_optimizer()
if self.name == '::':
self._logger.debug((
"preparing data structures for query optimizer - " +
"done( %f seconds ). ") % (time.clock() - start_time))
self._optimized = True
def _build_operator_function(self, name, function):
if isinstance(name, collections.Callable):
return name
else:
return function
def _build_operator_name(self, name, function, symbol):
"""implementation details"""
def add_operator(sym):
if 'new' in sym or 'delete' in sym:
return 'operator ' + sym
else:
return 'operator' + sym
if isinstance(name, collections.Callable) and None is function:
name = None
if name:
if 'operator' not in name:
name = add_operator(name)
return name
elif symbol:
return add_operator(symbol)
return name # both name and symbol are None
def _on_rename(self):
for decl in self.decls(allow_empty=True):
decl.cache.reset_name_based()
# I am not sure whether to introduce this or not.
# It could be a very time-consuming operation, and it changes the query
# optimizer data structures.
# if self.parent:
# if self.parent._optimized:
# self.parent.init_optimizer()
def __normalize_args(self, **keywds):
"""implementation details"""
if isinstance(keywds['name'], collections.Callable) and \
None is keywds['function']:
keywds['function'] = keywds['name']
keywds['name'] = None
return keywds
def __findout_recursive(self, **keywds):
"""implementation details"""
if None is keywds['recursive']:
return self.RECURSIVE_DEFAULT
else:
return keywds['recursive']
def __findout_allow_empty(self, **keywds):
"""implementation details"""
if None is keywds['allow_empty']:
return self.ALLOW_EMPTY_MDECL_WRAPPER
else:
return keywds['allow_empty']
def __findout_decl_type(self, match_class, **keywds):
"""implementation details"""
if 'decl_type' in keywds:
return keywds['decl_type']
matcher_args = keywds.copy()
del matcher_args['function']
del matcher_args['recursive']
if 'allow_empty' in matcher_args:
del matcher_args['allow_empty']
matcher = match_class(**matcher_args)
if matcher.decl_type:
return matcher.decl_type
return None
def __create_matcher(self, match_class, **keywds):
"""implementation details"""
matcher_args = keywds.copy()
del matcher_args['function']
del matcher_args['recursive']
if 'allow_empty' in matcher_args:
del matcher_args['allow_empty']
matcher = match_class(**matcher_args)
if keywds['function']:
self._logger.debug(
'running query: %s and <user defined function>' %
str(matcher))
return lambda decl: matcher(decl) and keywds['function'](decl)
else:
self._logger.debug('running query: %s' % str(matcher))
return matcher
def __findout_range(self, name, decl_type, recursive):
"""implementation details"""
if not self._optimized:
self._logger.debug(
'running non optimized query - optimization has not been done')
decls = self.declarations
if recursive:
decls = algorithm.make_flatten(self.declarations)
if decl_type:
decls = [d for d in decls if isinstance(d, decl_type)]
return decls
if name and templates.is_instantiation(name):
# templates have a tricky comparison mode, so let's check the
# whole range
name = None
if name and decl_type:
matcher = scopedef_t._impl_matchers[scopedef_t.decl](name=name)
if matcher.is_full_name():
name = matcher.decl_name_only
if recursive:
self._logger.debug(
'query has been optimized on type and name')
if name in self._type2name2decls[decl_type]:
return self._type2name2decls[decl_type][name]
else:
return []
else:
self._logger.debug(
'non recursive query has been optimized on type and name')
if name in self._type2name2decls_nr[decl_type]:
return self._type2name2decls_nr[decl_type][name]
else:
return []
elif decl_type:
if recursive:
self._logger.debug('query has been optimized on type')
return self._type2decls[decl_type]
else:
self._logger.debug(
'non recursive query has been optimized on type')
return self._type2decls_nr[decl_type]
else:
if recursive:
self._logger.debug((
'query has not been optimized ( hint: query does not ' +
'contain type and/or name )'))
return self._all_decls
else:
self._logger.debug((
'non recursive query has not been optimized ( hint: ' +
'query does not contain type and/or name )'))
return self._all_decls_not_recursive
def _find_single(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find single query execution - started')
start_time = time.clock()
norm_keywds = self.__normalize_args(**keywds)
matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher_module.matcher.get_single(matcher, decls, False)
self._logger.debug(
'find single query execution - done( %f seconds )' %
(time.clock() - start_time))
return found
def _find_multiple(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find all query execution - started')
start_time = time.clock()
norm_keywds = self.__normalize_args(**keywds)
matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
allow_empty = self.__findout_allow_empty(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher_module.matcher.find(matcher, decls, False)
mfound = mdecl_wrapper.mdecl_wrapper_t(found)
self._logger.debug('%d declaration(s) matched the query' % len(mfound))
self._logger.debug('find all query execution - done( %f seconds )'
% (time.clock() - start_time))
if not mfound and not allow_empty:
raise RuntimeError(
"Multi declaration query returned 0 declarations.")
return mfound
def decl(
self,
name=None,
function=None,
decl_type=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the declaration that matches the defined
criteria"""
return (
self._find_single(
self._impl_matchers[
scopedef_t.decl],
name=name,
function=function,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def decls(
self,
name=None,
function=None,
decl_type=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of declarations that match the defined criteria"""
return (
self._find_multiple(
self._impl_matchers[
scopedef_t.decl],
name=name,
function=function,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def class_(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the class declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.class_],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.class_],
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def classes(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of class declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.class_],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.class_],
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def variable(
self,
name=None,
function=None,
type=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the variable declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[
scopedef_t.variable],
name=name,
function=function,
type=type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def var(self,
name=None,
function=None,
type=None,
header_dir=None,
header_file=None,
recursive=None):
warnings.warn(
"The var() method is deprecated. \n" +
"Please use the variable() method instead.",
DeprecationWarning)
return self.variable(
name, function, type, header_dir, header_file, recursive)
def variables(
self,
name=None,
function=None,
type=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of variable declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[
scopedef_t.variable],
name=name,
function=function,
type=type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def vars(
self,
name=None,
function=None,
type=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
warnings.warn(
"The vars() method is deprecated. \n" +
"Please use the variables() method instead.",
DeprecationWarning)
return self.variables(
name, function, type, header_dir,
header_file, recursive, allow_empty)
def calldef(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the "calldef" declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.calldef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.calldef],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def calldefs(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of :class:`calldef_t` declarations that match the
defined criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.calldef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.calldef],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def operator(
self,
name=None,
function=None,
symbol=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the operator declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.operator],
name=self._build_operator_name(name,
function,
symbol),
symbol=symbol,
function=self._build_operator_function(name,
function),
decl_type=self._impl_decl_types[scopedef_t.operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def operators(
self,
name=None,
function=None,
symbol=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of operator declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.operator],
name=self._build_operator_name(name,
function,
symbol),
symbol=symbol,
function=self._build_operator_function(name,
function),
decl_type=self._impl_decl_types[scopedef_t.operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def member_function(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the member function declaration that matches
the defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.member_function],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.member_function],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
mem_fun = member_function
def member_functions(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of member function declarations that match the
defined criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.member_function],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.member_function],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
mem_funs = member_functions
def constructor(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the constructor declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.constructor],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.constructor],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def constructors(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of constructor declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.constructor],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.constructor],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def member_operator(
self,
name=None,
function=None,
symbol=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the member operator declaration that matches
the defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.member_operator],
name=self._build_operator_name(name,
function,
symbol),
symbol=symbol,
function=self._build_operator_function(name,
function),
decl_type=self._impl_decl_types[scopedef_t.member_operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
mem_oper = member_operator
def member_operators(
self,
name=None,
function=None,
symbol=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of member operator declarations that match the
defined criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.member_operator],
name=self._build_operator_name(name,
function,
symbol),
symbol=symbol,
function=self._build_operator_function(name,
function),
decl_type=self._impl_decl_types[scopedef_t.member_operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
mem_opers = member_operators
def casting_operator(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the casting operator declaration that matches
the defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.casting_operator],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.casting_operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def casting_operators(
self,
name=None,
function=None,
return_type=None,
arg_types=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of casting operator declarations that match the
defined criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.casting_operator],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.casting_operator],
return_type=return_type,
arg_types=arg_types,
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def enumeration(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the enumeration declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.enumeration],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.enumeration],
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
enum = enumeration
"""a short alias for the enumeration method"""
def enumerations(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of enumeration declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.enumeration],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.enumeration],
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
# a short alias
enums = enumerations
def typedef(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns a reference to the typedef declaration that matches the
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.typedef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.typedef],
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
)
def typedefs(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of typedef declarations that match the defined
criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.typedef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.typedef],
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
)
def __getitem__(self, name_or_function):
"""
Allow simple name-based lookup of declarations. Internally this just
calls the `decls` method.
:param name_or_function: Name of `decl` to lookup or finder function.
"""
return self.decls(name_or_function)
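# Usage sketch (an illustration, not part of the original module): once a
# declarations tree has been built and the global namespace retrieved, the
# query API defined above can be used directly. "global_namespace",
# "do_something" and the "impl_" prefix below are placeholders.
#
#   run = global_namespace.member_function("do_something")
#   impl_classes = global_namespace.classes(
#       lambda decl: decl.name.startswith("impl_"),
#       allow_empty=True)
#   for cls in impl_classes:
#       print(cls.name)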
|
|
import struct
import hashlib
import math
import binascii
from functools import reduce
class SvgNode:
fillColor = ''
strokeColor = ''
strokeWidth = ''
class Svg(SvgNode):
width = ''
height = ''
children = []
def __init__(self, width, height):
self.width = width
self.height = height
self.children = []
def addChild(self, child):
self.children.append(child)
def __str__(self):
return '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{0}" height="{1}" viewBox="0 0 {0} {1}">{2}</svg>'.format(self.width, self.height, "\n".join(self.children))
class Rectangle(SvgNode):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def __str__(self):
return '<rect x="{}" y="{}" width="{}" height="{}" fill="{}" stroke="{}" stroke-width="{}" />'.format(
self.x,
self.y,
self.width,
self.height,
self.fillColor,
self.strokeColor,
self.strokeWidth
)
class CircleSvg(SvgNode):
def __init__(self, x, y, radius):
self.x = x
self.y = y
self.radius = radius
def __str__(self):
return '<circle cx="{}" cy="{}" r="{}" fill="{}" stroke="{}" stroke-width="{}" />'.format(self.x, self.y, self.radius, self.fillColor, self.strokeColor, self.strokeWidth)
class Path(SvgNode):
path = []
def __init__(self, x, y):
self.x = x
self.y = y
self.path = ['M {},{}'.format(x, y)]
def lineTo(self, x, y, relative=False):
relative = 'l' if relative else 'L'
self.path.append('{} {},{}'.format(relative, x, y))
def arcTo(self, x, y, xRadius, yRadius, xRotation, largeArc, sweepClockwise, relative = False):
self.path.append(
'{} {},{} {} {} {} {},{}'.format(
'a' if relative else 'A',
xRadius, yRadius, xRotation,
1 if largeArc else 0,
1 if sweepClockwise else 0,
x,y))
def __str__(self):
return '<path fill="{}" stroke="{}" stroke-width="{}" d="{}" />'.format(self.fillColor, self.strokeColor, self.strokeWidth, ''.join(self.path))
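# Example (a sketch, not part of the original module): the helpers above can
# be composed directly; this builds a small document containing one filled
# square drawn as a path.
#
#   svg = Svg(100, 100)
#   p = Path(10, 10)
#   p.lineTo(90, 10)
#   p.lineTo(90, 90)
#   p.lineTo(10, 90)
#   p.fillColor = 'rgb(200, 0, 0)'
#   p.strokeColor = 'rgb(0, 0, 0)'
#   p.strokeWidth = 1
#   svg.addChild(str(p))
#   print(svg)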
class BaseGen:
def getColorFromHex(self, hex_color):
hex_color = hex_color.replace('#', '')
hex_binary = binascii.a2b_hex(str.encode(hex_color))
return 'rgb'+str(struct.unpack('BBB', hex_binary))
def getColor(self, hash_string):
return self.getColorFromHex(hash_string[0:6])
class Pixel(BaseGen):
def __init__(self, hash, color, size=480):
self.svg = Svg(size, size)
self.bgColor = color
self.size = size
self.fgColor = self.getColor(hash)
self.svg.addChild(self.getBackground())
for i in range(0, 5):
for x in range(0, 5):
if self.showPixel(i, x, hash):
self.svg.addChild(self.getPixel(i, x, self.fgColor))
def getPixel(self, x, y, color):
size_offset = self.size / 6
size_padding = size_offset / 2
r = Rectangle((x * size_offset) + size_padding, (y * size_offset) + size_padding, size_offset, size_offset)
r.fillColor = color
r.strokeWidth = 0
return str(r)
def showPixel(self, x, y, hash):
min = 6 + abs(2-x) * 5 + y
return int(hash[min:min+1], 16) % 2 == 0
def render(self):
return str(self.svg)
def getBackground(self):
r = Rectangle(0, 0, self.size, self.size)
r.fillColor = self.bgColor
r.strokeWidth = 0
return str(r)
class Circle(BaseGen):
sideLength = 1000
def __init__(self, hash, color=(255, 255, 255), size = 1000):
self.sideLength = size
self.svg = Svg(self.sideLength, self.sideLength)
self.bgColor = color
self.fgColor = self.getColor(hash)
self.svg.addChild(self.getBackground())
if self.showCenter(hash):
self.svg.addChild(self.getCenter(self.fgColor))
for i in range(0, 4):
self.svg.addChild(self.getArc(
self.fgColor,
self.getRadius(),
self.getRadius(),
self.getRingRadius(i),
self.getRingAngle(i, hash),
self.getRingWidth(),
self.getRingRotation(i, hash)
))
def getRadius(self):
return self.sideLength / 2
def getMultiplier(self):
return self.sideLength / 1000
def getCenterRadius(self):
return 125 * self.getMultiplier()
def getRingWidth(self):
return 125 * self.getMultiplier()
def getRingAngle(self, ring, hash):
return 10 * pow(2, 3 - ring) * reduce(lambda total, step: int(total) + (int(step, 16) % 2), self.split2len(hash, pow(2, 3 - ring)), 0)
def getRingRotation(self, ring, hash):
return 36 * reduce(lambda total, step: int(total) + (int(step[ring-1], 16) % 2), self.split2len(hash[0:30], 3), 0)
def getRingRadius(self, ring):
return ring * 120 * self.getMultiplier()
def render(self):
return str(self.svg)
def getBackground(self):
c = CircleSvg(self.getRadius(), self.getRadius(), self.getRadius())
c.fillColor = self.bgColor
return str(c)
def showCenter(self, hash):
return int(hash[24:24+8], 16) % 2 == 0
def getCenter(self, color):
# draw the centre disc with the requested colour and the centre radius
c = CircleSvg(self.getRadius(), self.getRadius(), self.getCenterRadius())
c.fillColor = color
return str(c)
def split2len(self, s, n):
def _f(s, n):
while s:
yield s[:n]
s = s[n:]
return list(_f(s, n))
def getArc(self, color, x, y, radius, angle, width, start = 0):
p = Path(x + radius * math.cos(math.radians(start)), y + radius * math.sin(math.radians(start)))
p.fillColor = color
p.strokeColor = color
p.strokeWidth = 1
p.arcTo(x + radius * math.cos(math.radians(start + angle)),
y + radius * math.sin(math.radians(start + angle)),
radius,
radius,
0,
angle > 180,
1)
p.lineTo(
x + (radius + width) * math.cos(math.radians(start + angle)),
y + (radius + width) * math.sin(math.radians(start + angle))
)
p.arcTo(
x + (radius + width) * math.cos(math.radians(start)),
y + (radius + width) * math.sin(math.radians(start)),
radius + width,
radius + width,
0,
angle > 180,
0
)
p.lineTo(
x + radius * math.cos(math.radians(start)),
y + radius * math.sin(math.radians(start))
)
return str(p)
class Iden:
backgroundColor = 'rgb(255, 255, 255)'
hash_string = ''
type_iden = ''
def __init__(self, text, type_iden='pixel', size=None):
m = hashlib.md5()
m.update(text.encode('UTF-8'))
self.type_iden = type_iden
self.size = size
self.hash_string = m.hexdigest()
def getColorFromHex(self, hex_color):
hex_color = hex_color.replace('#', '')
hex_binary = binascii.a2b_hex(str.encode(hex_color))
return 'rgb'+str(struct.unpack('BBB', hex_binary))
def setBackgroundColor(self, hex_color):
if hex_color is False:
hex_color = self.hash_string[7:13]
self.backgroundColor = self.getColorFromHex(hex_color)
# generate and return svg code
def getIcon(self):
if self.type_iden == 'pixel':
if not self.size:
self.size = 480
aid = Pixel(self.hash_string, self.backgroundColor, self.size)
if self.type_iden == 'circle':
if not self.size or self.size < 1000:
self.size = 1000
aid = Circle(self.hash_string, self.backgroundColor, self.size)
return aid.render()
def getColor(self, hash_string):
return self.getColorFromHex(hash_string[0:6])
# generate and save svg
def save(self, file_path):
f = open(file_path, 'w')
f.write(self.getIcon())
f.close()
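# Minimal demo (a sketch, not part of the original module): build a pixel
# identicon for an e-mail address and write it to disk; the address and the
# output filename are placeholders.
if __name__ == '__main__':
    icon = Iden('user@example.com', type_iden='pixel')
    # passing False derives the background colour from the hash itself
    icon.setBackgroundColor(False)
    icon.save('identicon.svg')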
|
|
import os, asana, json
from datetime import datetime
from asana.error import ForbiddenError
class IGF_asana:
'''
A python class for accessing Asana
:params asana_config: A json config file with personal token
e.g. { "asana_personal_token" : "zyx" }
:param asana_project_id: A project id
'''
def __init__(self, asana_config, asana_project_id, **asana_label):
asana_label.setdefault('asana_personal_token_label','asana_personal_token')
self.asana_personal_token_label=asana_label['asana_personal_token_label']
self.asana_personal_token = None
self.asana_config = asana_config
self.asana_project_id = asana_project_id # project name can change, project id is stable
self._read_and_set_asana_config() # read config file and set parameters
self.asanaclient = asana.Client.access_token(self.asana_personal_token) # create asana client instance
self.asanaclient.headers = {'asana-enable': 'string_ids'} # fix for string ids
self.asana_personal_token = None # reset asana token value
self._get_user_details() # load user information
self._check_project_id() # check user given project id
def get_asana_task_id(self,task_name,strict_check=False):
'''
A method for fetching task id from asana server
:param task_name: A task name
:param strict_check: Perform strict checking for task id count, default False
:returns: An asana task gid
'''
try:
asana_task_id = None
matched_tasks = list()
try:
all_asana_tasks = \
self.asanaclient.\
tasks.\
find_all({'project':self.asana_project_id})
matched_tasks = [
p for p in all_asana_tasks
if p['name']==task_name ]
asana_task_id = matched_tasks[0]['gid']
except:
pass
if strict_check and \
len(matched_tasks) > 1:
raise ValueError('received more than one entry for task: {0}'.\
format(task_name))
return asana_task_id
except:
raise
def rename_asana_task(self,task_name,new_name):
'''
A method for renaming asana task
:params task_name: A task name
:params new_name: A new task name
'''
try:
asana_task_id = self.get_asana_task_id(task_name=task_name)
self.asanaclient.\
tasks.\
update(str(asana_task_id),{'name':new_name})
except:
raise
def create_asana_task(self,task_name, notes=''):
'''
A method for creating new task in Asana. Tasks will get assigned to the creator.
:params task_name: A task name
:param notes: A task description
'''
try:
task_id = None
results = \
self.asanaclient.tasks.\
create_in_workspace(
self.asana_workspace_id,
{'name':task_name,
'projects':str(self.asana_project_id),
'notes':notes,
'assignee':str(self.asana_user_id),
})
task_id = results['gid'] # fetching gid
return task_id
except:
raise
def fetch_task_id_for_task_name(self, task_name):
'''
A method for fetching task id from asana. It will create a new task
if it's not present yet
:params task_name: A task name
:returns: asana task id
'''
try:
task_id=self.get_asana_task_id(task_name)
if task_id is None:
task_id=self.create_asana_task(task_name)
return task_id
except:
raise
def comment_asana_task(self, task_name, comment,rename_task=True):
'''
A method for adding comments to asana tasks. Task will be created if it doesn't exist.
:params task_name: A task name
:param comment: A comment for the target task
:param rename_task: Rename task if it can't be commented on, default True
:returns: A output story as dictionary
'''
try:
asana_task_id = self.fetch_task_id_for_task_name(task_name)
res = \
self.asanaclient.\
stories.\
create_on_task(
asana_task_id,
{'text':comment})
return res
except ForbiddenError:
if rename_task:
time_stamp = datetime.strftime(datetime.now(),'%Y-%m-%d-%H-%M-%S')
new_task_name = '{0}_{1}'.format(task_name,time_stamp)
self.rename_asana_task(
task_name=task_name,
new_name=new_task_name) # rename task if can't comment on it, likely due to 1k limit
raise
except:
raise
def add_notes_for_task(self,task_name, notes):
'''
A method for adding comment to the existing or new task
:params task_name: A task name
:param notes: A text note
'''
try:
asana_task_id = self.fetch_task_id_for_task_name(task_name)
res = \
self.asanaclient.\
tasks.\
update(
task=str(asana_task_id),
params={'notes':notes})
return res
except:
raise
def attach_file_to_asana_task(self,task_name, filepath, remote_filename=None,
comment=None):
'''
A method for uploading files to asana
:params task_name: A task name
:param filepath: A filepath to upload
:param remote_filename: Name of the uploaded file, default None for original name
:param comment: A text comment, default None
'''
try:
if not os.path.exists(filepath):
raise IOError('file {0} not found'.format(filepath))
if comment:
_ = self.comment_asana_task(task_name,comment) # adding comment to asana
asana_task_id = self.fetch_task_id_for_task_name(task_name) # get task_id from asana
if os.stat(filepath).st_size > 5000000:
comment = \
'skipped uploading file {0}, size {1}'.\
format(
os.path.basename(filepath),
os.stat(filepath).st_size) # skip uploading files more than 5Mb in size
_ = \
self.asanaclient.\
stories.\
create_on_task(
asana_task_id,
{'text':comment})
else:
if remote_filename is not None:
file_name = remote_filename
else:
file_name = os.path.basename(filepath)
_ = \
self.asanaclient.attachments.\
create_on_task(
task_id=str(asana_task_id),
file_content=open(os.path.join(filepath),'rb'),
file_name=file_name) # upload file to task_id
except:
raise
def _check_project_id(self):
'''
An internal method for checking user given project id
'''
try:
all_asana_projects = \
self.asanaclient.\
projects.\
find_all({'workspace':str(self.asana_workspace_id)})
matched_projects = [
p for p in all_asana_projects
if p['gid']==str(self.asana_project_id) ] # checking gid
if len(matched_projects)==0:
raise ValueError(
'project id {0} not found in workspace {1}'.\
format(
self.asana_project_id,
self.asana_workspace_name))
except:
raise
def _get_user_details(self):
'''
An internal method for loading user and workspace information
'''
try:
user_detail = self.asanaclient.users.me()
self.asana_workspace_id = user_detail['workspaces'][0]['gid'] # fetching gid
self.asana_workspace_name = user_detail['workspaces'][0]['name']
self.asana_user_id = user_detail['gid'] # fetching gid
self.asana_user_name = user_detail['name']
except:
raise
def _read_and_set_asana_config(self):
'''
An internal method for reading asana config json file
'''
try:
asana_params = dict()
with open(self.asana_config,'r') as json_data:
asana_params = json.load(json_data)
if self.asana_personal_token_label in asana_params:
self.asana_personal_token = asana_params[self.asana_personal_token_label]
except:
raise
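# Usage sketch (an illustration, not part of the original module): the config
# path, project id and task name below are placeholders.
#
#   asana_obj = IGF_asana(
#       asana_config='/path/to/asana_conf.json',
#       asana_project_id='1234567890')
#   asana_obj.comment_asana_task(
#       task_name='ProjectA_run1',
#       comment='analysis finished')
#   asana_obj.attach_file_to_asana_task(
#       task_name='ProjectA_run1',
#       filepath='/path/to/report.html')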
|
|
from __future__ import generators
import sys
import os.path
from itertools import count
packagedir = os.path.dirname(__file__)
# look for ctypes in the system path, then try looking for a private ctypes
# distribution
try:
import ctypes
except ImportError:
private_ctypes = os.path.join(packagedir, 'pvt_ctypes')
sys.path.insert(0, private_ctypes)
sys.path.insert(0, os.path.join(private_ctypes, 'ctypes.zip'))
import ctypes
from cStringIO import StringIO
import weakref
from tidy.error import *
# search the path for libtidy using the known names; try the package
# directory too
thelib=None
os.environ['PATH'] = "%s%s%s" % (packagedir, os.pathsep, os.environ['PATH'])
for libname in ('cygtidy-0-99-0', 'libtidy', 'libtidy.so',
'libtidy-0.99.so.0', 'tidylib'):
try:
thelib = getattr(ctypes.cdll, libname)
break
except OSError:
pass
if not thelib:
raise OSError("Couldn't find libtidy, please make sure it is installed.")
class Loader:
"""I am a trivial wrapper that eliminates the need for tidy.tidyFoo,
so you can just access tidy.Foo
"""
def __init__(self):
self.lib=thelib
def __getattr__(self, name):
try:
return getattr(self.lib, "tidy%s" % name)
# current ctypes uses ValueError, future will use AttributeError
except (ValueError, AttributeError):
return getattr(self.lib, name)
_tidy=Loader()
# define a callback to pass to Tidylib
def _putByte(handle, c):
"""Lookup sink by handle and call its putByte method"""
sinkfactory[handle].putByte(c)
return 0
PUTBYTEFUNC=ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char)
putByte=PUTBYTEFUNC(_putByte)
class _OutputSink(ctypes.Structure):
_fields_=[("sinkData", ctypes.c_int),
("putByte", PUTBYTEFUNC),
]
class _Sink:
def __init__(self):
self._data = StringIO()
self.struct = _OutputSink()
self.struct.putByte = putByte
def putByte(self, c):
self._data.write(c)
def __str__(self):
return self._data.getvalue()
class ReportItem:
def __init__(self, err):
self.err = err
if err.startswith('line'):
tokens = err.split(' ',6)
self.severity = tokens[5][0] # W or E
self.line = int(tokens[1])
self.col = int(tokens[3])
self.message = tokens[6]
else:
tokens = err.split(' ',1)
self.severity = tokens[0][0]
self.message = tokens[1]
self.line = None
self.col = None
# TODO - parse emacs mode
def __str__(self):
severities = dict(W='Warning', E='Error', C='Config')
try:
if self.line:
return "line %d col %d - %s: %s" % (self.line, self.col,
severities[self.severity],
self.message)
else:
return "%s: %s" % (severities[self.severity], self.message)
except KeyError:
return self.err
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__,
str(self).replace("'", "\\'"))
class FactoryDict(dict):
"""I am a dict with a create method and no __setitem__. This allows
me to control my own keys.
"""
def create(self):
"""Subclasses should implement me to generate a new item"""
def _setitem(self, name, value):
dict.__setitem__(self, name, value)
def __setitem__(self, name, value):
raise TypeError, "Use create() to get a new object"
class SinkFactory(FactoryDict):
"""Mapping for lookup of sinks by handle"""
def __init__(self):
FactoryDict.__init__(self)
self.lastsink = 0
def create(self):
sink = _Sink()
sink.struct.sinkData = self.lastsink
FactoryDict._setitem(self, self.lastsink, sink)
self.lastsink = self.lastsink+1
return sink
sinkfactory=SinkFactory()
class _Document(object):
def __init__(self):
self.cdoc = _tidy.Create()
self.errsink = sinkfactory.create()
_tidy.SetErrorSink(self.cdoc, ctypes.byref(self.errsink.struct))
def write(self, stream):
stream.write(str(self))
def get_errors(self):
ret = []
for line in str(self.errsink).split('\n'):
line = line.strip(' \n\r')
if line: ret.append(ReportItem(line))
return ret
errors=property(get_errors)
def __str__(self):
stlen = ctypes.c_int(8192)
st = ctypes.c_buffer(stlen.value)
rc = _tidy.SaveString(self.cdoc, st, ctypes.byref(stlen))
if rc==-12: # buffer too small
st = ctypes.c_buffer(stlen.value)
_tidy.SaveString(self.cdoc, st, ctypes.byref(stlen))
return st.value
errors = {'missing or malformed argument for option: ': OptionArgError,
'unknown option: ': InvalidOptionError,
}
class DocumentFactory(FactoryDict):
def _setOptions(self, doc, **options):
for k in options.keys():
# this will flush out most argument type errors...
if options[k] is None: options[k] = ''
_tidy.OptParseValue(doc.cdoc,
k.replace('_', '-'),
str(options[k]))
if doc.errors:
match=filter(doc.errors[-1].message.startswith, errors.keys())
if match:
raise errors[match[0]](doc.errors[-1].message)
def load(self, doc, arg, loader):
loader(doc.cdoc, arg)
_tidy.CleanAndRepair(doc.cdoc)
def loadFile(self, doc, filename):
self.load(doc, filename, _tidy.ParseFile)
def loadString(self, doc, st):
self.load(doc, st, _tidy.ParseString)
def _create(self, *args, **kwargs):
doc = _Document()
self._setOptions(doc, **kwargs)
ref = weakref.ref(doc, self.releaseDoc)
FactoryDict._setitem(self, ref, doc.cdoc)
return doc
def parse(self, filename, *args, **kwargs):
"""Open and process filename as an HTML file, returning a
processed document object.
@param kwargs: named options to pass to TidyLib for processing
the input file.
@param filename: the name of a file to process
@return: a document object
"""
doc = self._create(**kwargs)
self.loadFile(doc, filename)
return doc
def parseString(self, st, *args, **kwargs):
"""Use st as an HTML file, and process it, returning a
document object.
@param kwargs: named options to pass to TidyLib for processing
the input file.
@param st: the string to parse
@return: a document object
"""
doc = self._create(**kwargs)
self.loadString(doc, st)
return doc
def releaseDoc(self, ref):
_tidy.Release(self[ref])
docfactory = DocumentFactory()
parse = docfactory.parse
parseString = docfactory.parseString
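# Usage sketch (an illustration, not part of the original module; Python 2,
# matching the code above). "indent" and "output_xhtml" are standard TidyLib
# options passed through as keyword arguments.
#
#   doc = parseString('<html><body><p>hello<br></body>',
#                     indent=1, output_xhtml=1)
#   print str(doc)
#   for item in doc.errors:
#       print item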
|
|
import cvxpy as cvx
import numpy as np
from sklearn.metrics import make_scorer
from sklearn.utils import check_X_y
from .base_cvxproblem import Relevance_CVXProblem
from .base_initmodel import InitModel
from .base_type import ProblemType
class OrdinalRegression(ProblemType):
@classmethod
def parameters(cls):
return ["C"]
@property
def get_initmodel_template(cls):
return OrdinalRegression_SVM
@property
def get_cvxproblem_template(cls):
return OrdinalRegression_Relevance_Bound
def relax_factors(cls):
return ["loss_slack", "w_l1_slack"]
def preprocessing(self, data, **kwargs):
X, y = data
# Check that X and y have correct shape
X, y = check_X_y(X, y)
if np.min(y) > 0:
print("First ordinal class has index > 0. Shifting index...")
y = y - np.min(y)
return X, y
class OrdinalRegression_SVM(InitModel):
HYPERPARAMETER = ["C"]
def __init__(self, C=1):
super().__init__()
self.C = C
def fit(self, X, y, **kwargs):
(n, d) = X.shape
C = self.get_params()["C"]
self.classes_ = np.unique(y)
original_bins = sorted(self.classes_)
n_bins = len(original_bins)
bins = np.arange(n_bins)
get_old_bin = dict(zip(bins, original_bins))
w = cvx.Variable(shape=(d), name="w")
# For ordinal regression we use two slack variables, we observe the slack in both directions
slack_left = cvx.Variable(shape=(n), name="slack_left")
slack_right = cvx.Variable(shape=(n), name="slack_right")
# We have an offset for every bin boundary
b_s = cvx.Variable(shape=(n_bins - 1), name="bias")
objective = cvx.Minimize(cvx.norm(w, 1) + C * cvx.sum(slack_left + slack_right))
constraints = [slack_left >= 0, slack_right >= 0]
# Add constraints for slack into left neighboring bins
for i in range(n_bins - 1):
indices = np.where(y == get_old_bin[i])
constraints.append(X[indices] @ w - slack_left[indices] <= b_s[i] - 1)
# Add constraints for slack into right neighboring bins
for i in range(1, n_bins):
indices = np.where(y == get_old_bin[i])
constraints.append(X[indices] @ w + slack_right[indices] >= b_s[i - 1] + 1)
# Add explicit constraint, that all bins are ascending
for i in range(n_bins - 2):
constraints.append(b_s[i] <= b_s[i + 1])
# Solve problem.
problem = cvx.Problem(objective, constraints)
problem.solve(**self.SOLVER_PARAMS)
w = w.value
b_s = b_s.value
slack_left = np.asarray(slack_left.value).flatten()
slack_right = np.asarray(slack_right.value).flatten()
self.model_state = {"w": w, "b_s": b_s, "slack": (slack_left, slack_right)}
loss = np.sum(slack_left + slack_right)
w_l1 = np.linalg.norm(w, ord=1)
self.constraints = {"loss": loss, "w_l1": w_l1}
return self
def predict(self, X):
w = self.model_state["w"]
b_s = self.model_state["b_s"]
scores = np.dot(X, w.T)[np.newaxis]
bin_thresholds = np.append(b_s, np.inf)
# If a threshold is smaller than the score, the value belongs to a higher
# bin; after subtracting we check for positive elements
indices = np.sum(scores.T - bin_thresholds >= 0, -1)
return self.classes_[indices]
def score(self, X, y, error_type="mmae", return_error=False, **kwargs):
X, y = check_X_y(X, y)
prediction = self.predict(X)
score = ordinal_scores(y, prediction, error_type, return_error=return_error)
return score
def make_scorer(self):
# Use multiple scores for ordinal regression
mze = make_scorer(ordinal_scores, error_type="mze")
mae = make_scorer(ordinal_scores, error_type="mae")
mmae = make_scorer(ordinal_scores, error_type="mmae")
scorer = {"mze": mze, "mae": mae, "mmae": mmae}
return scorer, "mmae"
def ordinal_scores(y, prediction, error_type, return_error=False):
"""Score function for ordinal problems.
Parameters
----------
y : target class vector
Truth vector
prediction : prediction class vector
Predicted classes
error_type : str
Error type "mze","mae","mmae"
return_error : bool, optional
Return error (lower is better) or score (inverted, higher is better)
Returns
-------
float
Error or score depending on 'return_error'
Raises
------
ValueError
When using wrong error_type
"""
n = len(y)
classes = np.unique(y)
n_bins = len(classes)
max_dist = n_bins - 1
# If only one class is available, we don't need to average
if max_dist == 0:
error_type = "mze"
def mze(prediction, y):
return np.sum(prediction != y)
def mae(prediction, y):
return np.sum(np.abs(prediction - y))
# Score based on mean zero-one error
if error_type == "mze":
error = mze(prediction, y) / n
score = 1 - error
# Score based on mean absolute error
elif error_type == "mae":
error = mae(prediction, y) / n
score = (max_dist - error) / max_dist
# Score based on macro-averaged mean absolute error
elif error_type == "mmae":
sum = 0
for i in range(n_bins):
samples = y == i
n_samples = np.sum(samples)
if n_samples > 0:
bin_error = mae(prediction[samples], y[samples]) / n_samples
sum += bin_error
error = sum / n_bins
score = (max_dist - error) / max_dist
else:
raise ValueError("error_type {} not available!".format(error_type))
if return_error:
return error
else:
return score
class OrdinalRegression_Relevance_Bound(Relevance_CVXProblem):
def init_objective_UB(self, sign=None, **kwargs):
self.add_constraint(
self.feature_relevance <= sign * self.w[self.current_feature]
)
self._objective = cvx.Maximize(self.feature_relevance)
def init_objective_LB(self, **kwargs):
self.add_constraint(
cvx.abs(self.w[self.current_feature]) <= self.feature_relevance
)
self._objective = cvx.Minimize(self.feature_relevance)
def _init_constraints(self, parameters, init_model_constraints):
n_bins = len(np.unique(self.y))
# Upper constraints from initial model
l1_w = init_model_constraints["w_l1"]
init_loss = init_model_constraints["loss"]
C = parameters["C"]
# New Variables
self.w = cvx.Variable(shape=(self.d), name="w")
# For ordinal regression we use two slack variables, we observe the slack in both directions
self.slack_left = cvx.Variable(shape=(self.n), name="slack_left", nonneg=True)
self.slack_right = cvx.Variable(shape=(self.n), name="slack_right", nonneg=True)
# We have an offset for every bin boundary
self.b_s = cvx.Variable(shape=(n_bins - 1), name="bias")
# New Constraints
self.loss = cvx.sum(self.slack_left + self.slack_right)
self.weight_norm = cvx.norm(self.w, 1)
for i in range(n_bins - 1):
indices = np.where(self.y == i)
self.add_constraint(
self.X[indices] @ self.w - self.slack_left[indices] <= self.b_s[i] - 1
)
for i in range(1, n_bins):
indices = np.where(self.y == i)
self.add_constraint(
self.X[indices] @ self.w + self.slack_right[indices]
>= self.b_s[i - 1] + 1
)
for i in range(n_bins - 2):
self.add_constraint(self.b_s[i] <= self.b_s[i + 1])
self.add_constraint(self.weight_norm <= l1_w)
self.add_constraint(C * self.loss <= C * init_loss)
self.feature_relevance = cvx.Variable(nonneg=True, name="Feature Relevance")
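# Quick self-contained check of the scoring helper above (a sketch, not part
# of the original module): two of four toy predictions are off by one class,
# so both the mean zero-one error and the macro-averaged MAE come out as 0.5.
if __name__ == "__main__":
    y_true = np.array([0, 1, 2, 2])
    y_pred = np.array([0, 2, 2, 1])
    print(ordinal_scores(y_true, y_pred, "mze", return_error=True))   # 0.5
    print(ordinal_scores(y_true, y_pred, "mmae", return_error=True))  # 0.5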
|
|
# Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Based on a similar script from Pygments.
"""Generate HTML documentation
"""
import os
import sys
import re
import shutil
from datetime import datetime
from cgi import escape
from glob import glob
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from docutils.core import publish_parts
from docutils.writers import html4css1
from jinja2 import Environment, PackageLoader
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from pybtex.__version__ import version
from .mystyle import MyHiglightStyle
e = Environment(loader=PackageLoader('pybtex', 'docgen'))
PYGMENTS_FORMATTER = HtmlFormatter(style=MyHiglightStyle, cssclass='sourcecode')
#CHANGELOG = file(os.path.join(os.path.dirname(__file__), os.pardir, 'CHANGES'))\
# .read().decode('utf-8')
DATE_FORMAT = '%d %B %y (%a)'
def get_bzr_modification_date(filename):
from bzrlib.osutils import format_date
mtime, timezone = get_bzr_timestamp(filename)
return format_date(mtime, timezone, 'utc', date_fmt=DATE_FORMAT, show_offset=False)
def get_bzr_timestamp(filename):
from bzrlib import workingtree
if os.path.basename(filename) == 'history.rst':
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(filename)))
filename = os.path.join(root_dir, 'CHANGES')
tree = workingtree.WorkingTree.open_containing(filename)[0]
tree.lock_read()
rel_path = tree.relpath(os.path.abspath(filename))
file_id = tree.inventory.path2id(rel_path)
last_revision = get_last_bzr_revision(tree.branch, file_id)
tree.unlock()
return last_revision.timestamp, last_revision.timezone
def get_last_bzr_revision(branch, file_id):
history = branch.repository.iter_reverse_revision_history(branch.last_revision())
last_revision_id = branch.last_revision()
current_inventory = branch.repository.get_inventory(last_revision_id)
current_sha1 = current_inventory[file_id].text_sha1
for revision_id in history:
inv = branch.repository.get_inventory(revision_id)
if not file_id in inv or inv[file_id].text_sha1 != current_sha1:
return branch.repository.get_revision(last_revision_id)
last_revision_id = revision_id
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found
lexer = get_lexer_by_name('text')
parsed = highlight(u'\n'.join(content), lexer, PYGMENTS_FORMATTER)
return [nodes.raw('', parsed, format="html")]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
class DownloadLinks(Directive):
has_content = False
def run(self):
tarball_uri = 'http://pypi.python.org/packages/source/p/pybtex/pybtex-%s.tar.bz2' % version
current_version_is = nodes.Text('Current version is ')
pybtex_xx = nodes.reference('', 'Pybtex %s' % version,
name='Pybtex %s' % version,
refuri=tarball_uri)
download = nodes.reference('', 'download', name='download',
refname='download')
see_whats_new = nodes.reference('', "see what's new",
name="see what's new", refuri='history.txt')
content = (
current_version_is,
pybtex_xx,
nodes.Text(' ('),
download, nodes.Text(', '),
see_whats_new,
nodes.Text(')')
)
paragraph = nodes.paragraph('', '', *content)
link_block = nodes.block_quote('', paragraph, classes=["pull-quote"])
return [link_block]
class NoopDirective(Directive):
has_content = False
def run(self):
return []
def register_directives(for_site=False):
directives.register_directive('sourcecode', pygments_directive)
directives.register_directive('download-links', DownloadLinks if for_site else NoopDirective)
def mark_tail(phrase, keyword, pattern = '%s<span class="tail"> %s</span>'):
"""Finds and highlights a 'tail' in the sentence.
A tail consists of several lowercase words and a keyword.
>>> print mark_tail('The Manual of Pybtex', 'Pybtex')
The Manual<span class="tail"> of Pybtex</span>
Look at the generated documentation for further explanation.
"""
words = phrase.split()
if words[-1] == keyword:
pos = -[not word.islower() for word in reversed(words[:-1])].index(True) - 1
return pattern % (' '.join(words[:pos]), ' '.join(words[pos:]))
else:
return phrase
e.filters['mark_tail'] = mark_tail
def create_translator(link_style):
class Translator(html4css1.HTMLTranslator):
def visit_reference(self, node):
refuri = node.get('refuri')
if refuri is not None and '/' not in refuri and refuri.endswith('.txt'):
node['refuri'] = link_style(refuri[:-4])
html4css1.HTMLTranslator.visit_reference(self, node)
return Translator
class DocumentationWriter(html4css1.Writer):
def __init__(self, link_style):
html4css1.Writer.__init__(self)
self.translator_class = create_translator(link_style)
def translate(self):
html4css1.Writer.translate(self)
# generate table of contents
contents = self.build_contents(self.document)
contents_doc = self.document.copy()
contents_doc.children = contents
contents_visitor = self.translator_class(contents_doc)
contents_doc.walkabout(contents_visitor)
self.parts['toc'] = self._generated_toc
def build_contents(self, node, level=0):
sections = []
i = len(node) - 1
while i >= 0 and isinstance(node[i], nodes.section):
sections.append(node[i])
i -= 1
sections.reverse()
toc = []
for section in sections:
try:
reference = nodes.reference('', '', refid=section['ids'][0], *section[0])
except IndexError:
continue
ref_id = reference['refid']
text = escape(reference.astext().encode('utf-8'))
toc.append((ref_id, text))
self._generated_toc = [('#%s' % href, caption) for href, caption in toc]
# no further processing
return []
def generate_documentation(data, link_style):
writer = DocumentationWriter(link_style)
parts = publish_parts(
data,
writer=writer,
settings_overrides={
'initial_header_level': 2,
'field_name_limit': 50,
}
)
return {
'title': parts['title'],
'body': parts['body'],
'toc': parts['toc'],
}
def handle_file(filename, fp, dst, for_site):
title = os.path.splitext(os.path.basename(filename))[0]
content = fp.read()
cwd = os.getcwd()
os.chdir(os.path.dirname(filename))
parts = generate_documentation(content, (lambda x: './%s.html' % x))
os.chdir(cwd)
c = dict(parts)
if for_site:
c['modification_date'] = get_bzr_modification_date(filename)
c['file_id'] = title
c['for_site'] = for_site
tmpl = e.get_template('template.html')
result = file(os.path.join(dst, title + '.html'), 'w')
result.write(tmpl.render(c).encode('utf-8'))
result.close()
def run(src_dir, dst_dir, for_site, sources=(), handle_file=handle_file):
if not sources:
sources = glob(os.path.join(src_dir, '*.rst'))
try:
shutil.rmtree(dst_dir)
except OSError:
pass
os.mkdir(dst_dir)
for filename in glob(os.path.join(src_dir, '*.css')):
shutil.copy(filename, dst_dir)
pygments_css = PYGMENTS_FORMATTER.get_style_defs('.sourcecode')
file(os.path.join(dst_dir, 'pygments.css'), 'w').write(pygments_css)
for fn in sources:
if not os.path.isfile(fn):
continue
print 'Processing %s' % fn
f = open(fn)
try:
handle_file(fn, f, dst_dir, for_site)
finally:
f.close()
def generate_html(doc_dir, for_site=False, *sources):
register_directives(for_site)
src_dir = os.path.realpath(os.path.join(doc_dir, 'rst'))
dst_dir = os.path.realpath(os.path.join(doc_dir, 'site' if for_site else 'html'))
run(src_dir, dst_dir, for_site, sources)
def generate_site(doc_dir):
generate_html(doc_dir, for_site=True)
os.system('rsync -rv --delete --exclude hg/ %s ero-sennin,pybtex@web.sourceforge.net:/home/groups/p/py/pybtex/htdocs'
% os.path.join(doc_dir, 'site/'))
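# Usage sketch (an illustration, not part of the original module): the 'doc'
# directory below is a placeholder and is expected to contain an 'rst'
# subdirectory with the *.rst sources.
#
#   generate_html('doc')        # writes the rendered pages to doc/html
#   generate_html('doc', True)  # writes to doc/site, with download links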
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
The calculation of Kier and Hall's kappa indices based on its topological
structure. You can get 7 molecular kappa descriptors. You can
freely use and distribute it. If you have any problems, please
contact us.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com and oriental-cds@163.com
##############################################################################
"""
# Third party modules
from rdkit import Chem
from rdkit.Chem import rdchem
try:
# TODO: Is this deprecated?
# https://github.com/rdkit/rdkit/issues/2741#issuecomment-546709239
from rdkit.Chem import pyPeriodicTable as PeriodicTable
except ImportError:
from rdkit.Chem import PeriodicTable
periodicTable = rdchem.GetPeriodicTable()
Version = 1.0
################################################################
def CalculateKappa1(mol):
"""
#################################################################
Calculation of molecular shape index for one bonded fragment
---->kappa1
Usage:
result=CalculateKappa1(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P1 = mol.GetNumBonds(onlyHeavy=1)
A = mol.GetNumAtoms(onlyHeavy=1)
denom = P1 + 0.0
if denom:
kappa = (A) * (A - 1) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
def CalculateKappa2(mol):
"""
#################################################################
Calculation of molecular shape index for two bonded fragment
---->kappa2
Usage:
result=CalculateKappa2(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P2 = len(Chem.FindAllPathsOfLengthN(mol, 2))
A = mol.GetNumAtoms(onlyHeavy=1)
denom = P2 + 0.0
if denom:
kappa = (A - 1) * (A - 2) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
def CalculateKappa3(mol):
"""
#################################################################
Calculation of molecular shape index for three bonded fragment
---->kappa3
Usage:
result=CalculateKappa3(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P3 = len(Chem.FindAllPathsOfLengthN(mol, 3))
A = mol.GetNumAtoms(onlyHeavy=1)
denom = P3 + 0.0
if denom:
if A % 2 == 1:
kappa = (A - 1) * (A - 3) ** 2 / denom ** 2
else:
kappa = (A - 3) * (A - 2) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
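# Summary of the shape-index formulas implemented above (A = number of heavy
# atoms, Pi = number of paths of length i):
#   kappa1 = A * (A - 1)**2 / P1**2
#   kappa2 = (A - 1) * (A - 2)**2 / P2**2
#   kappa3 = (A - 1) * (A - 3)**2 / P3**2   (A odd)
#          = (A - 3) * (A - 2)**2 / P3**2   (A even)
# The alpha-modified variants below replace A with A + alpha and Pi with
# Pi + alpha, where alpha is the Hall-Kier alpha value of the molecule.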
def _HallKierAlpha(mol):
"""
#################################################################
*Internal Use Only*
Calculation of the Hall-Kier alpha value for a molecule
#################################################################
"""
alphaSum = 0.0
rC = PeriodicTable.nameTable["C"][5]
for atom in mol.GetAtoms():
atNum = atom.GetAtomicNum()
if not atNum:
continue
symb = atom.GetSymbol()
alphaV = PeriodicTable.hallKierAlphas.get(symb, None)
if alphaV is not None:
hyb = atom.GetHybridization() - 2
if hyb < len(alphaV):
alpha = alphaV[hyb]
if alpha is None:
alpha = alphaV[-1]
else:
alpha = alphaV[-1]
else:
rA = PeriodicTable.nameTable[symb][5]
alpha = rA / rC - 1
alphaSum += alpha
return alphaSum
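# Hedged illustration (assumption: values follow RDKit's hallKierAlphas
# table, where an sp3 carbon contributes 0 by definition and an sp2 or
# aromatic carbon a small negative correction); the helper is illustrative
# and not part of the PyBioMed API.
def _demo_alpha():
    """Compare the alpha correction of cyclohexane (all sp3 C) and benzene."""
    cyclohexane = Chem.MolFromSmiles("C1CCCCC1")  # expected alpha == 0.0
    benzene = Chem.MolFromSmiles("c1ccccc1")      # expected alpha < 0.0
    return _HallKierAlpha(cyclohexane), _HallKierAlpha(benzene)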
def CalculateKappaAlapha1(mol):
"""
#################################################################
    Calculation of the molecular shape index for one-bond fragments
    with the Hall-Kier alpha correction
---->kappam1
Usage:
result=CalculateKappaAlapha1(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P1 = mol.GetNumBonds(onlyHeavy=1)
A = mol.GetNumAtoms(onlyHeavy=1)
alpha = _HallKierAlpha(mol)
denom = P1 + alpha
if denom:
kappa = (A + alpha) * (A + alpha - 1) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
def CalculateKappaAlapha2(mol):
"""
#################################################################
    Calculation of the molecular shape index for two-bond fragments
    with the Hall-Kier alpha correction
---->kappam2
Usage:
result=CalculateKappaAlapha2(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P2 = len(Chem.FindAllPathsOfLengthN(mol, 2))
A = mol.GetNumAtoms(onlyHeavy=1)
alpha = _HallKierAlpha(mol)
denom = P2 + alpha
if denom:
kappa = (A + alpha - 1) * (A + alpha - 2) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
def CalculateKappaAlapha3(mol):
"""
#################################################################
    Calculation of the molecular shape index for three-bond fragments
    with the Hall-Kier alpha correction
---->kappam3
Usage:
result=CalculateKappaAlapha3(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
P3 = len(Chem.FindAllPathsOfLengthN(mol, 3))
A = mol.GetNumAtoms(onlyHeavy=1)
alpha = _HallKierAlpha(mol)
denom = P3 + alpha
if denom:
if A % 2 == 1:
kappa = (A + alpha - 1) * (A + alpha - 3) ** 2 / denom ** 2
else:
kappa = (A + alpha - 3) * (A + alpha - 2) ** 2 / denom ** 2
else:
kappa = 0.0
return round(kappa, 3)
def CalculateFlexibility(mol):
"""
#################################################################
Calculation of Kier molecular flexibility index
---->phi
Usage:
result=CalculateFlexibility(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
kappa1 = CalculateKappaAlapha1(mol)
kappa2 = CalculateKappaAlapha2(mol)
A = mol.GetNumAtoms(onlyHeavy=1)
phi = kappa1 * kappa2 / (A + 0.0)
return phi
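# Hedged worked example (illustrative helper, not part of the PyBioMed API):
# for an unbranched alkane the alpha correction is 0, so kappam1 = A and
# kappam2 = A - 1, giving phi = A * (A - 1) / A = A - 1.
def _demo_flexibility():
    """Illustrative check: n-pentane (A = 5) should give phi close to 4.0."""
    pentane = Chem.MolFromSmiles("CCCCC")
    return CalculateFlexibility(pentane)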
def GetKappa(mol):
"""
#################################################################
Calculation of all kappa values.
Usage:
result=GetKappa(mol)
Input: mol is a molecule object.
    Output: result is a dict containing 7 kappa values.
#################################################################
"""
res = {}
res["kappa1"] = CalculateKappa1(mol)
res["kappa2"] = CalculateKappa2(mol)
res["kappa3"] = CalculateKappa3(mol)
res["kappam1"] = CalculateKappaAlapha1(mol)
res["kappam2"] = CalculateKappaAlapha2(mol)
res["kappam3"] = CalculateKappaAlapha3(mol)
res["phi"] = CalculateFlexibility(mol)
return res
################################################################
if __name__ == "__main__":
smis = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-].[Na+]"]
for index, smi in enumerate(smis):
m = Chem.MolFromSmiles(smi)
print(index + 1)
print(smi)
print("\t", GetKappa(m))
print("\t", len(GetKappa(m)))
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from ctypes import *
from pyglet.gl import *
class GLSLException(Exception): pass
def glsl_log(handle):
if handle == 0:
return ''
log_len = c_int(0)
glGetObjectParameterivARB(handle, GL_OBJECT_INFO_LOG_LENGTH_ARB,
byref(log_len))
if log_len.value == 0:
return ''
log = create_string_buffer(log_len.value) # does log_len include the NUL?
chars_written = c_int(0)
glGetInfoLogARB(handle, log_len.value, byref(chars_written), log)
return log.value
class Shader(object):
s_tag = 0
def __init__(self, name, prog):
self.name = name
self.prog = prog
self.shader = 0
self.compiling = False
self.tag = -1
self.dependencies = []
def __del__(self):
self.destroy()
def _source(self):
if self.tag == Shader.s_tag: return []
self.tag = Shader.s_tag
r = []
for d in self.dependencies:
r.extend(d._source())
r.append(self.prog)
return r
def _compile(self):
if self.shader: return
        if self.compiling: return
self.compiling = True
self.shader = glCreateShaderObjectARB(self.shaderType())
if self.shader == 0:
            raise GLSLException('failed to create shader object')
prog = c_char_p(self.prog)
length = c_int(-1)
glShaderSourceARB(self.shader,
1,
cast(byref(prog), POINTER(POINTER(c_char))),
byref(length))
glCompileShaderARB(self.shader)
self.compiling = False
compile_status = c_int(0)
glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB, byref(compile_status))
if not compile_status.value:
err = glsl_log(self.shader)
glDeleteObjectARB(self.shader)
self.shader = 0
raise GLSLException('failed to compile shader', err)
def _attachTo(self, program):
if self.tag == Shader.s_tag: return
self.tag = Shader.s_tag
for d in self.dependencies:
d._attachTo(program)
if self.isCompiled():
glAttachObjectARB(program, self.shader)
def addDependency(self, shader):
self.dependencies.append(shader)
return self
def destroy(self):
if self.shader != 0: glDeleteObjectARB(self.shader)
def shaderType(self):
raise NotImplementedError()
def isCompiled(self):
return self.shader != 0
def attachTo(self, program):
Shader.s_tag = Shader.s_tag + 1
self._attachTo(program)
# ATI/apple's glsl compiler is broken.
def attachFlat(self, program):
if self.isCompiled():
glAttachObjectARB(program, self.shader)
def compileFlat(self):
if self.isCompiled(): return
self.shader = glCreateShaderObjectARB(self.shaderType())
if self.shader == 0:
            raise GLSLException('failed to create shader object')
all_source = ['\n'.join(self._source())]
prog = (c_char_p * len(all_source))(*all_source)
length = (c_int * len(all_source))(-1)
glShaderSourceARB(self.shader,
len(all_source),
cast(prog, POINTER(POINTER(c_char))),
length)
glCompileShaderARB(self.shader)
compile_status = c_int(0)
glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB, byref(compile_status))
if not compile_status.value:
err = glsl_log(self.shader)
glDeleteObjectARB(self.shader)
self.shader = 0
raise GLSLException('failed to compile shader', err)
def compile(self):
if self.isCompiled(): return
for d in self.dependencies:
d.compile()
self._compile()
class VertexShader(Shader):
def shaderType(self): return GL_VERTEX_SHADER_ARB
class FragmentShader(Shader):
def shaderType(self): return GL_FRAGMENT_SHADER_ARB
class ShaderProgram(object):
def __init__(self, vertex_shader=None, fragment_shader=None):
self.vertex_shader = vertex_shader
self.fragment_shader = fragment_shader
self.program = 0
def __del__(self):
self.destroy()
def destroy(self):
if self.program != 0: glDeleteObjectARB(self.program)
def setShader(self, shader):
if isinstance(shader, FragmentShader):
self.fragment_shader = shader
if isinstance(shader, VertexShader):
self.vertex_shader = shader
if self.program != 0: glDeleteObjectARB(self.program)
def link(self):
if self.vertex_shader is not None: self.vertex_shader.compileFlat()
if self.fragment_shader is not None: self.fragment_shader.compileFlat()
self.program = glCreateProgramObjectARB()
if self.program == 0:
raise GLSLException('failed to create program object')
if self.vertex_shader is not None: self.vertex_shader.attachFlat(self.program)
if self.fragment_shader is not None: self.fragment_shader.attachFlat(self.program)
glLinkProgramARB(self.program)
link_status = c_int(0)
glGetObjectParameterivARB(self.program, GL_OBJECT_LINK_STATUS_ARB, byref(link_status))
if link_status.value == 0:
err = glsl_log(self.program)
glDeleteObjectARB(self.program)
self.program = 0
raise GLSLException('failed to link shader', err)
self.__class__._uloc_ = {}
self.__class__._vloc_ = {}
return self.program
def prog(self):
if self.program: return self.program
return self.link()
def install(self):
p = self.prog()
if p != 0:
glUseProgramObjectARB(p)
def uninstall(self):
glUseProgramObjectARB(0)
def uniformLoc(self, var):
try:
return self.__class__._uloc_[var]
        except (AttributeError, KeyError):
if self.program == 0:
self.link()
self.__class__._uloc_[var] = v = glGetUniformLocationARB(self.program, var)
return v
def uset1F(self, var, x):
glUniform1fARB(self.uniformLoc(var), x)
def uset2F(self, var, x, y):
glUniform2fARB(self.uniformLoc(var), x, y)
def uset3F(self, var, x, y, z):
glUniform3fARB(self.uniformLoc(var), x, y, z)
def uset4F(self, var, x, y, z, w):
glUniform4fARB(self.uniformLoc(var), x, y, z, w)
def uset1I(self, var, x):
glUniform1iARB(self.uniformLoc(var), x)
    def uset3I(self, var, x, y, z):
        glUniform3iARB(self.uniformLoc(var), x, y, z)
    def usetM4F(self, var, m):
        # matrix uniforms are not implemented; a full version would use
        # glUniformMatrix4fvARB here
        pass
def usetTex(self, var, u, v):
glUniform1iARB(self.uniformLoc(var), u)
glActiveTexture(GL_TEXTURE0 + u)
glBindTexture(v.gl_tgt, v.gl_id)
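# Hedged usage sketch: assumes an active OpenGL context (e.g. a
# pyglet.window.Window created elsewhere); the GLSL sources and the uniform
# name 'color' are placeholders, not part of this module's API.
def _example_usage():
    vs = VertexShader('example_vs', b'''
        void main() { gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex; }
    ''')
    fs = FragmentShader('example_fs', b'''
        uniform vec4 color;
        void main() { gl_FragColor = color; }
    ''')
    program = ShaderProgram(vs, fs)
    program.install()                            # compiles, links and binds lazily
    program.uset4F('color', 1.0, 0.0, 0.0, 1.0)
    # ... draw geometry here ...
    program.uninstall()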
__all__ = ['VertexShader', 'FragmentShader', 'ShaderProgram', 'GLSLException']
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit test VTA's instructions """
import tvm
from tvm import te
import numpy as np
from tvm import topi
from tvm.contrib import utils
import vta
import vta.testing
from vta.testing import simulator
np.random.seed(0xDEADB)
def test_save_load_out():
"""Test save/store output command"""
def _run(env, remote):
n = 6
x = te.placeholder((n, n, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
x_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x(*i), "x_buf")
# insert no-op that won't be optimized away
y_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x_buf(*i) >> 0, "y_buf")
y = te.compute(
(n, n, env.BATCH, env.BLOCK_OUT), lambda *i: y_buf(*i).astype(env.inp_dtype), "y"
)
# schedule
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.acc_scope)
s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
s[y_buf].set_scope(env.acc_scope)
s[y_buf].pragma(y_buf.op.axis[0], env.alu)
s[y].pragma(y.op.axis[0], env.dma_copy)
# verification
with vta.build_config():
m = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
m.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
# verify
dev = remote.ext_dev(0)
x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
y_np = x_np.astype(y.dtype)
x_nd = tvm.nd.array(x_np, dev)
y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Save load execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_padded_load():
"""Test padded load."""
def _run(env, remote):
def check_padded_load(pad_before, pad_after, test_name=None):
# declare
n = 3
m = 5
x = te.placeholder((n, m, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
x_buf = topi.nn.pad(x, pad_before, pad_after, name="y")
# insert no-op that won't be optimized away
y_buf = te.compute(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
),
lambda *i: x_buf(*i) >> 0,
"y_buf",
)
y = te.compute(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
),
lambda *i: y_buf(*i).astype(env.inp_dtype),
"y",
)
# schedule
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.acc_scope)
s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
s[y_buf].set_scope(env.acc_scope)
s[y_buf].pragma(y_buf.op.axis[0], env.alu)
s[y].pragma(y.op.axis[0], env.dma_copy)
# build
with vta.build_config():
mod = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("padded_load.o"))
remote.upload(temp.relpath("padded_load.o"))
f = remote.load_module("padded_load.o")
# verify
dev = remote.ext_dev(0)
x_np = np.random.randint(0, 10, size=(n, m, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
y_np = np.zeros(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
)
).astype(y.dtype)
y_np[pad_before[0] : pad_before[0] + n, pad_before[1] : pad_before[1] + m, :] = x_np
x_nd = tvm.nd.array(x_np, dev)
y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Padded {} load execution statistics:".format(test_name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
check_padded_load([2, 0, 0, 0], [0, 0, 0, 0], test_name="Y0")
check_padded_load([0, 2, 0, 0], [0, 0, 0, 0], test_name="Y1")
check_padded_load([0, 0, 0, 0], [2, 0, 0, 0], test_name="X0")
check_padded_load([0, 0, 0, 0], [0, 2, 0, 0], test_name="X1")
check_padded_load([1, 1, 0, 0], [1, 1, 0, 0], test_name="all")
vta.testing.run(_run)
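# Hedged NumPy reference (illustrative only, not used by the tests above):
# the host-side expectation built in check_padded_load amounts to
# zero-padding the input along every axis.
def _numpy_padded_load_reference(x_np, pad_before, pad_after):
    """Hypothetical helper mirroring the y_np construction in check_padded_load."""
    pad_width = list(zip(pad_before, pad_after))
    return np.pad(x_np, pad_width, mode="constant", constant_values=0)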
def test_gemm():
"""Test GEMM."""
def _run(env, remote):
# declare
o = 4
n = 1
m = 4
x = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="x", dtype=env.inp_dtype)
w = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="w", dtype=env.wgt_dtype)
x_buf = te.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: x(*i), "x_buf")
w_buf = te.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: w(*i), "w_buf")
ko = te.reduce_axis((0, n), name="ko")
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
y_gem = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda bo, co, bi, ci: te.sum(
x_buf[bo, ko, bi, ki].astype(env.acc_dtype)
* w_buf[co, ko, ci, ki].astype(env.acc_dtype),
axis=[ko, ki],
),
name="y_gem",
)
y_shf = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_gem(*i) >> 8, name="y_shf"
)
y_max = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(y_shf(*i), 0), "y_max"
) # relu
y_min = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm.te.min(y_max(*i), (1 << (env.INP_WIDTH - 1)) - 1),
"y_min",
) # relu
y = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_min(*i).astype(env.inp_dtype), name="y"
)
if not remote:
return
def verify(s, name=None):
mod = vta.build(s, [x, w, y], tvm.target.Target("ext_dev", host=env.target_host))
temp = utils.tempdir()
mod.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
# verify
dev = remote.ext_dev(0)
x_np = np.random.randint(-128, 128, size=(o, n, env.BATCH, env.BLOCK_IN)).astype(
x.dtype
)
w_np = np.random.randint(-128, 128, size=(m, n, env.BLOCK_OUT, env.BLOCK_IN)).astype(
w.dtype
)
y_np = np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(y.dtype)
x_nd = tvm.nd.array(x_np, dev)
w_nd = tvm.nd.array(w_np, dev)
y_nd = tvm.nd.array(y_np, dev)
y_np = y_np.astype(env.acc_dtype)
for b in range(o):
for i in range(m):
for j in range(n):
y_np[b, i, :] += np.dot(
x_np[b, j, :].astype(env.acc_dtype), w_np[i, j].T.astype(env.acc_dtype)
)
y_np = np.right_shift(y_np, 8)
y_np = np.clip(y_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(y.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, w_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("GEMM schedule:{} execution statistics:".format(name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
def test_schedule1():
# default schedule with no smt
s = te.create_schedule(y.op)
# set the scope of the SRAM buffers
s[x_buf].set_scope(env.inp_scope)
s[w_buf].set_scope(env.wgt_scope)
s[y_gem].set_scope(env.acc_scope)
s[y_shf].set_scope(env.acc_scope)
s[y_max].set_scope(env.acc_scope)
s[y_min].set_scope(env.acc_scope)
# set pragmas for DMA transfer and ALU ops
s[x_buf].compute_at(s[y_gem], ko)
s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
s[w_buf].compute_at(s[y_gem], ko)
s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
s[y_max].pragma(s[y_max].op.axis[0], env.alu)
s[y_min].pragma(s[y_min].op.axis[0], env.alu)
s[y].pragma(s[y].op.axis[0], env.dma_copy)
# tensorization
s[y_gem].reorder(
ko,
s[y_gem].op.axis[0],
s[y_gem].op.axis[1],
s[y_gem].op.axis[2],
s[y_gem].op.axis[3],
ki,
)
s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
verify(s, name="default")
def test_smt():
# test smt schedule
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.inp_scope)
s[w_buf].set_scope(env.wgt_scope)
s[y_gem].set_scope(env.acc_scope)
s[y_shf].set_scope(env.acc_scope)
s[y_max].set_scope(env.acc_scope)
s[y_min].set_scope(env.acc_scope)
abo, aco, abi, aci = s[y].op.axis
abo1, abo2 = s[y].split(abo, nparts=2)
s[y].bind(abo1, te.thread_axis("cthread"))
s[y_gem].compute_at(s[y], abo1)
s[y_shf].compute_at(s[y], abo1)
s[y_max].compute_at(s[y], abo1)
s[y_min].compute_at(s[y], abo1)
s[y_gem].reorder(
ko,
s[y_gem].op.axis[0],
s[y_gem].op.axis[1],
s[y_gem].op.axis[2],
s[y_gem].op.axis[3],
ki,
)
s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
s[y_max].pragma(s[y_max].op.axis[0], env.alu)
s[y_min].pragma(s[y_min].op.axis[0], env.alu)
s[x_buf].compute_at(s[y_gem], ko)
s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
s[w_buf].compute_at(s[y_gem], ko)
s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
s[y].pragma(abo2, env.dma_copy)
verify(s, name="smt")
test_schedule1()
test_smt()
vta.testing.run(_run)
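# Hedged note (illustrative only, not part of the test suite): the reference
# accumulation loop in verify() above is equivalent to one einsum over the
# packed layout, before the shift and clip steps.
def _numpy_gemm_reference(x_np, w_np, acc_dtype="int32"):
    """Hypothetical vectorized version of the loop in verify()."""
    # y[b, c, i, l] = sum over j, k of x[b, j, i, k] * w[c, j, l, k]
    return np.einsum(
        "bjik,cjlk->bcil", x_np.astype(acc_dtype), w_np.astype(acc_dtype)
    )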
def test_alu():
def _run(env, remote):
def check_alu(tvm_op, np_op=None, use_imm=False, test_name=None):
"""Test ALU"""
m = 8
n = 8
imm = np.random.randint(1, 5)
# compute
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
) # DRAM->SRAM
if use_imm:
res_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm_op(a_buf(*i), imm), "res_buf"
) # compute
else:
b = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="b", dtype=env.acc_dtype)
b_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: b(*i), "b_buf"
) # DRAM->SRAM
res_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm_op(a_buf(*i), b_buf(*i)),
"res_buf",
                ) # compute
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: res_buf(*i).astype(env.inp_dtype),
"res",
) # SRAM->DRAM
# schedule
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope) # SRAM
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM
s[res_buf].set_scope(env.acc_scope) # SRAM
s[res_buf].pragma(res_buf.op.axis[0], env.alu) # compute
s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM
if not use_imm:
s[b_buf].set_scope(env.acc_scope) # SRAM
s[b_buf].pragma(b_buf.op.axis[0], env.dma_copy) # DRAM->SRAM
if not remote:
return
# build
with vta.build_config():
if use_imm:
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
else:
mod = vta.build(
s, [a, b, res], tvm.target.Target("ext_dev", host=env.target_host)
)
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
# verify
dev = remote.ext_dev(0)
a_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
if use_imm:
res_np = np_op(a_np, imm) if np_op else tvm_op(a_np, imm)
else:
b_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(
b.dtype
)
res_np = np_op(a_np, b_np) if np_op else tvm_op(a_np, b_np)
res_np = res_np.astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
if use_imm:
f(a_nd, res_nd)
else:
b_nd = tvm.nd.array(b_np, dev)
f(a_nd, b_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("ALU {} execution statistics:".format(test_name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
check_alu(lambda x, y: x << y, np.left_shift, use_imm=True, test_name="SHL")
check_alu(tvm.te.max, np.maximum, use_imm=True, test_name="MAX")
check_alu(tvm.te.max, np.maximum, test_name="MAX")
check_alu(lambda x, y: x + y, use_imm=True, test_name="ADD")
check_alu(lambda x, y: x + y, test_name="ADD")
check_alu(lambda x, y: x >> y, np.right_shift, use_imm=True, test_name="SHR")
vta.testing.run(_run)
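# Hedged extension sketch: one more ALU case that could run under the same
# pattern (assumption: tvm.te.min lowers to a VTA ALU instruction just like
# tvm.te.max; the "MIN" label and this helper are illustrative only).
def _extra_alu_case(check_alu):
    """Call with the nested check_alu from _run to exercise the minimum op."""
    check_alu(tvm.te.min, np.minimum, use_imm=True, test_name="MIN")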
def test_relu():
"""Test RELU on ALU"""
def _run(env, remote):
m = 8
n = 10
# compute
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
) # DRAM->SRAM
max_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(a_buf(*i), 0), "res_buf"
) # relu
min_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm.te.min(max_buf(*i), (1 << (env.INP_WIDTH - 1)) - 1),
"max_buf",
) # relu
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: min_buf(*i).astype(env.inp_dtype),
"min_buf",
) # SRAM->DRAM
# schedule
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope) # SRAM
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM
s[max_buf].set_scope(env.acc_scope) # SRAM
s[min_buf].set_scope(env.acc_scope) # SRAM
s[max_buf].pragma(max_buf.op.axis[0], env.alu) # compute
s[min_buf].pragma(min_buf.op.axis[0], env.alu) # compute
s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM
# build
with vta.build_config():
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
# verify
dev = remote.ext_dev(0)
a_np = np.random.randint(-256, 256, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
res_np = np.clip(a_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(a_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Relu execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_shift_and_scale():
"""Test shift and scale on ALU"""
def _run(env, remote):
m = 2
n = 8
imm_shift = np.random.randint(0, 8)
imm_scale = np.random.randint(1, 5)
# compute
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
) # DRAM->SRAM
res_shift = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a_buf(*i) + imm_shift, "res_shift"
) # compute
res_scale = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_shift(*i) >> imm_scale, "res_scale"
) # compute
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_scale(*i).astype(env.inp_dtype), "res"
) # SRAM->DRAM
# schedule
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope) # SRAM
s[res_shift].set_scope(env.acc_scope) # SRAM
s[res_scale].set_scope(env.acc_scope) # SRAM
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy) # DRAM->SRAM
s[res_shift].pragma(res_shift.op.axis[0], env.alu) # compute
s[res_scale].pragma(res_scale.op.axis[0], env.alu) # compute
s[res].pragma(res.op.axis[0], env.dma_copy) # SRAM->DRAM
# build
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
# verify
dev = remote.ext_dev(0)
a_np = np.random.randint(-10, 10, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
res_np = np.right_shift((a_np + imm_shift), imm_scale)
res_np = res_np.astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(a_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Shift and scale execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_runtime_array():
def _run(env, remote):
n = 100
dev = remote.ext_dev(0)
x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype("int8")
x_nd = tvm.nd.array(x_np, dev)
np.testing.assert_equal(x_np, x_nd.numpy())
vta.testing.run(_run)
if __name__ == "__main__":
test_runtime_array()
test_save_load_out()
test_padded_load()
test_gemm()
test_alu()
test_relu()
test_shift_and_scale()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utilities for git repositories.
"""
from shutil import which
from .command import run
from ..logging import get_logger
log = get_logger(__name__)
class GitError(Exception):
"""
Typed exception raised when a call to a git executable failed.
"""
class GitNotFound(Exception):
"""
Typed exception raised when the git executable wasn't found on the system.
"""
def __init__(self):
super().__init__('Git executable not found in your system.')
def find_git():
"""
Find the git executable.
:return: Absolute path to the git executable.
:rtype: str
"""
git = which('git')
if git is None:
log.debug('Git executable not found in your system.')
raise GitNotFound()
return git
def find_tag(git=None, directory='.'):
"""
Find the tag for the current revision.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The name of the tag pointing to the current revision.
Raises GitError if no tag is found.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'describe', '--tags', '--exact-match', 'HEAD'
])
if call.returncode != 0:
raise GitError('Unable to determine git tag:\n{}'.format(
call.stderr
))
return call.stdout
def find_root(git=None, directory='.'):
"""
Find the root of the git repository.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: Absolute path to root of the git repository.
:rtype: str
"""
if git is None:
git = find_git()
call = run([
git, '-C', directory,
'rev-parse', '--show-toplevel'
])
if call.returncode != 0:
raise GitError('Unable to determine git repository root:\n{}'.format(
call.stderr
))
return call.stdout
def find_branch(git=None, directory='.'):
"""
Find the current branch of the git repository.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The name of the branch the git repository is currently on.
:rtype: str
"""
if git is None:
git = find_git()
# Get current branch
call = run([
git, '-C', directory,
'rev-parse', '--abbrev-ref', 'HEAD'
])
if call.returncode != 0:
raise GitError('Unable to determine git branch:\n{}'.format(
call.stderr
))
return call.stdout
def find_revision(git=None, directory='.'):
"""
Find the current revision (short hash) of the git repository.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The short version of the revision the git repository is currently
on.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'rev-parse', '--short', '--verify', 'HEAD'
])
if call.returncode != 0:
raise GitError('Unable to determine git revision:\n{}'.format(
call.stderr
))
return call.stdout
def find_name(git=None, directory='.'):
"""
Find the name of the author of the current revision.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The name of the author of the current revision.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'log', '-1', '--pretty=format:%an'
])
if call.returncode != 0:
raise GitError('Unable to determine git author name:\n{}'.format(
call.stderr
))
return call.stdout
def find_email(git=None, directory='.'):
"""
Find the email of the author of the current revision.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The email of the author of the current revision.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'log', '-1', '--pretty=format:%ae'
])
if call.returncode != 0:
raise GitError('Unable to determine git author email:\n{}'.format(
call.stderr
))
return call.stdout
def find_subject(git=None, directory='.'):
"""
Find the commit message subject of current revision.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The commit message subject of current revision.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'log', '-1', '--pretty=format:%s'
])
if call.returncode != 0:
raise GitError('Unable to determine git commit subject:\n{}'.format(
call.stderr
))
return call.stdout
def find_body(git=None, directory='.'):
"""
Find the commit message body of current revision.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The commit message body of current revision.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'log', '-1', '--pretty=format:%b'
])
if call.returncode != 0:
raise GitError('Unable to determine git commit body:\n{}'.format(
call.stderr
))
return call.stdout
def find_date(git=None, directory='.'):
"""
Find the commit date in strict ISO 8601 format.
:param str git: Path to git executable.
If None, the default, will try to find it using :func:`find_git`.
:param str directory: Run as if git was started in ``directory`` instead of
the current working directory.
:return: The commit date in strict ISO 8601 format.
:rtype: str
"""
if git is None:
git = find_git()
# Get current revision
call = run([
git, '-C', directory,
'log', '-1', '--pretty=format:%aI'
])
if call.returncode != 0:
raise GitError('Unable to determine git commit date:\n{}'.format(
call.stderr
))
return call.stdout
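# Hedged usage sketch: a convenience wrapper over the finders above. The
# helper name and the dictionary keys are illustrative, not part of this
# module's public API; find_tag is omitted because it raises GitError when
# HEAD is not tagged.
def _collect_metadata(git=None, directory='.'):
    """
    Collect branch, revision and authorship information in a single dict.
    """
    if git is None:
        git = find_git()
    return {
        'root': find_root(git=git, directory=directory),
        'branch': find_branch(git=git, directory=directory),
        'revision': find_revision(git=git, directory=directory),
        'name': find_name(git=git, directory=directory),
        'email': find_email(git=git, directory=directory),
        'subject': find_subject(git=git, directory=directory),
        'date': find_date(git=git, directory=directory),
    }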
__all__ = [
'find_git',
'find_tag',
'find_root',
'find_branch',
'find_revision',
'find_name',
'find_email',
'find_subject',
'find_body',
'find_date',
]
|
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing switch discovery."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from switchtest import VerifySwitchMixin
class TestDiscoverSwitch(TestBrokerCommand, VerifySwitchMixin):
def test_100_add_swsync(self):
ip = self.net.unknown[20].usable[0]
self.dsdb_expect_add("swsync.aqd-unittest.ms.com", ip, "mgmt0")
self.noouttest(["add", "switch", "--type", "misc",
"--switch", "swsync.aqd-unittest.ms.com",
"--interface", "mgmt0", "--ip", ip, "--rack", "ut3",
"--model", "temp_switch"])
self.dsdb_verify()
def test_110_add_swsync_ifaces(self):
self.noouttest(["add", "interface", "--switch", "swsync",
"--interface", "vlan100"])
self.noouttest(["add", "interface", "--switch", "swsync",
"--interface", "vlan200"])
self.noouttest(["add", "interface", "--switch", "swsync",
"--interface", "vlan300"])
self.noouttest(["add", "interface", "--switch", "swsync",
"--interface", "vlan400"])
def test_120_add_swsync_addrs(self):
ip1 = self.net.unknown[20].usable[1]
ip2 = self.net.unknown[20].usable[2]
ip3 = self.net.unknown[20].usable[3]
self.dsdb_expect_add("swsync-vlan100.aqd-unittest.ms.com",
ip1, "vlan100",
primary="swsync.aqd-unittest.ms.com")
self.dsdb_expect_add("swsync-nomatch.aqd-unittest.ms.com",
ip2, "vlan200",
primary="swsync.aqd-unittest.ms.com")
self.dsdb_expect_add("swsync-vlan300.aqd-unittest.ms.com",
ip3, "vlan300",
primary="swsync.aqd-unittest.ms.com")
self.noouttest(["add", "interface", "address", "--switch", "swsync",
"--interface", "vlan100", "--ip", ip1,
"--fqdn", "swsync-vlan100.aqd-unittest.ms.com"])
self.noouttest(["add", "interface", "address", "--switch", "swsync",
"--interface", "vlan200", "--ip", ip2,
"--fqdn", "swsync-nomatch.aqd-unittest.ms.com"])
self.noouttest(["add", "interface", "address", "--switch", "swsync",
"--interface", "vlan300", "--ip", ip3,
"--fqdn", "swsync-vlan300.aqd-unittest.ms.com"])
self.dsdb_verify()
def test_200_show(self):
ip1 = self.net.unknown[20].usable[1]
ip2 = self.net.unknown[20].usable[2]
ip3 = self.net.unknown[20].usable[3]
ip4 = self.net.unknown[20].usable[4]
ip5 = self.net.unknown[20].usable[5]
command = ["show", "switch", "--switch", "swsync", "--discover"]
out = self.commandtest(command)
self.matchoutput(out,
"aq update_switch --switch swsync.aqd-unittest.ms.com "
"--model ws-c2960-48tt-l --vendor cisco "
"--comments 'T1 T2'",
command)
self.matchoutput(out, "aq del_interface_address "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan100 --ip %s" % ip1, command)
self.matchoutput(out, "aq del_interface "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan400", command)
self.matchoutput(out, "aq update_interface "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan200 --rename_to vlan210", command)
self.matchoutput(out, "aq update_interface "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan300 --rename_to vlan310", command)
self.matchoutput(out, "aq add_interface_address "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan100 --ip %s --label hsrp" % ip1,
command)
self.matchoutput(out, "aq add_interface_address "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan100 --ip %s" % ip4, command)
self.matchoutput(out, "aq add_interface "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan500 --type oa", command)
self.matchoutput(out, "aq add_interface_address "
"--switch swsync.aqd-unittest.ms.com "
"--interface vlan500 --ip %s" % ip5, command)
self.matchoutput(out, "qip-set-router %s" % ip1, command)
def test_210_update(self):
ip1 = self.net.unknown[20].usable[1]
ip4 = self.net.unknown[20].usable[4]
ip5 = self.net.unknown[20].usable[5]
self.dsdb_expect_update("swsync.aqd-unittest.ms.com",
"mgmt0", comments="T1 T2")
self.dsdb_expect_update("swsync-vlan100.aqd-unittest.ms.com",
"vlan100", ip4, comments="T1 T2")
self.dsdb_expect_add("swsync-vlan100-hsrp.aqd-unittest.ms.com", ip1,
"vlan100_hsrp", comments="T1 T2",
primary="swsync.aqd-unittest.ms.com")
self.dsdb_expect_update("swsync-nomatch.aqd-unittest.ms.com",
"vlan200", comments="T1 T2")
self.dsdb_expect_update("swsync-vlan300.aqd-unittest.ms.com",
"vlan300", comments="T1 T2")
self.dsdb_expect_rename("swsync-nomatch.aqd-unittest.ms.com",
iface="vlan200", new_iface="vlan210")
self.dsdb_expect_rename("swsync-vlan300.aqd-unittest.ms.com",
"swsync-vlan310.aqd-unittest.ms.com",
"vlan300", "vlan310")
self.dsdb_expect_add("swsync-vlan500.aqd-unittest.ms.com", ip5,
"vlan500", comments="T1 T2",
primary="swsync.aqd-unittest.ms.com")
command = ["update", "switch", "--switch", "swsync", "--discover"]
out, err = self.successtest(command)
self.matchoutput(err,
"Using jump host nyaqd1.ms.com from service instance "
"poll_helper/unittest to run CheckNet for switch "
"swsync.aqd-unittest.ms.com.",
command)
self.matchoutput(err, "You should run 'qip-set-router %s'." % ip1,
command)
self.dsdb_verify()
def test_300_verify(self):
ip = self.net.unknown[20].usable[0]
ip1 = self.net.unknown[20].usable[1]
ip2 = self.net.unknown[20].usable[2]
ip3 = self.net.unknown[20].usable[3]
ip4 = self.net.unknown[20].usable[4]
ip5 = self.net.unknown[20].usable[5]
out, command = self.verifyswitch("swsync.aqd-unittest.ms.com",
"cisco", "ws-c2960-48tt-l", "ut3", "a",
"3", switch_type="misc",
ip=self.net.unknown[20].usable[0],
interface="mgmt0",
comments="T1 T2")
        # TODO: the interface type is not updated; it's unclear whether it should be
self.searchoutput(out,
r"Interface: mgmt0 \(no MAC addr\)\s*"
r"Type: oa\s*"
r"Network Environment: internal\s*"
r"Provides: swsync.aqd-unittest.ms.com \[%s\]"
% ip, command)
self.searchoutput(out,
r"Interface: vlan100 \(no MAC addr\)\s*"
r"Type: oa\s*"
r"Network Environment: internal\s*"
r"Provides: swsync-vlan100.aqd-unittest.ms.com \[%s\]\s*"
r"Provides: swsync-vlan100-hsrp.aqd-unittest.ms.com \[%s\] \(label: hsrp\)"
% (ip4, ip1), command)
self.searchoutput(out,
r"Interface: vlan210 \(no MAC addr\)\s*"
r"Type: oa\s*"
r"Network Environment: internal\s*"
r"Provides: swsync-nomatch.aqd-unittest.ms.com \[%s\]"
% ip2, command)
self.searchoutput(out,
r"Interface: vlan310 \(no MAC addr\)\s*"
r"Type: oa\s*"
r"Network Environment: internal\s*"
r"Provides: swsync-vlan310.aqd-unittest.ms.com \[%s\]"
% ip3, command)
self.searchoutput(out,
r"Interface: vlan500 \(no MAC addr\)\s*"
r"Type: oa\s*"
r"Network Environment: internal\s*"
r"Provides: swsync-vlan500.aqd-unittest.ms.com \[%s\]"
% ip5, command)
def test_400_del_swsync_addrs(self):
ip1 = self.net.unknown[20].usable[1]
ip2 = self.net.unknown[20].usable[2]
ip3 = self.net.unknown[20].usable[3]
ip4 = self.net.unknown[20].usable[4]
ip5 = self.net.unknown[20].usable[5]
self.dsdb_expect_delete(ip1)
self.dsdb_expect_delete(ip2)
self.dsdb_expect_delete(ip3)
self.dsdb_expect_delete(ip4)
self.dsdb_expect_delete(ip5)
self.noouttest(["del", "interface", "address", "--switch", "swsync",
"--interface", "vlan100", "--ip", ip1])
self.noouttest(["del", "interface", "address", "--switch", "swsync",
"--interface", "vlan100", "--ip", ip4])
self.noouttest(["del", "interface", "address", "--switch", "swsync",
"--interface", "vlan210", "--ip", ip2])
self.noouttest(["del", "interface", "address", "--switch", "swsync",
"--interface", "vlan310", "--ip", ip3])
self.noouttest(["del", "interface", "address", "--switch", "swsync",
"--interface", "vlan500", "--ip", ip5])
self.dsdb_verify()
def test_410_del_swsync(self):
self.dsdb_expect_delete(self.net.unknown[20].usable[0])
self.noouttest(["del", "switch", "--switch", "swsync"])
self.dsdb_verify()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDiscoverSwitch)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import network
from neutronclient.openstack.common import jsonutils
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20NetworkJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20NetworkJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_network(self):
"""Create net: myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_with_unicode(self):
"""Create net: u'\u7f51\u7edc'."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = u'\u7f51\u7edc'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_tenant(self):
"""Create net: --tenant_id tenantid myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--tenant_id', 'tenantid', name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_network_tags(self):
"""Create net: myname --tags a b."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, '--tags', 'a', 'b']
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_network_state(self):
"""Create net: --admin_state_down myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--admin_state_down', name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
# Test dashed options
args = ['--admin-state-down', name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
def test_list_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(path, query),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_networks(self, cmd, detail=False, tags=(),
fields_1=(), fields_2=(), page_size=None,
sort_key=(), sort_dir=()):
resources = "networks"
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources(resources, cmd, detail, tags,
fields_1, fields_2, page_size=page_size,
sort_key=sort_key, sort_dir=sort_dir)
def test_list_nets_pagination(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources_with_pagination("networks", cmd)
def test_list_nets_sort(self):
"""list nets: --sort-key name --sort-key id --sort-dir asc
--sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['asc', 'desc'])
def test_list_nets_sort_with_keys_more_than_dirs(self):
"""list nets: --sort-key name --sort-key id --sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['desc'])
def test_list_nets_sort_with_dirs_more_than_keys(self):
"""list nets: --sort-key name --sort-dir desc --sort-dir asc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name'],
sort_dir=['desc', 'asc'])
def test_list_nets_limit(self):
"""list nets: -P."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, page_size=1000)
def test_list_nets_detail(self):
"""list nets: -D."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, True)
def test_list_nets_tags(self):
"""List nets: -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=['a', 'b'])
def test_list_nets_tags_with_unicode(self):
"""List nets: -- --tags u'\u7f51\u7edc'."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])
def test_list_nets_detail_tags(self):
"""List nets: -D -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, detail=True, tags=['a', 'b'])
def _test_list_nets_extend_subnets(self, data, expected):
def setup_list_stub(resources, data, query):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().AndReturn(self.client)
setup_list_stub('networks', data, '')
cmd.get_client().AndReturn(self.client)
filters = ''
for n in data:
for s in n['subnets']:
filters = filters + "&id=%s" % s
setup_list_stub('subnets',
[{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
{'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
{'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
query='fields=id&fields=cidr' + filters)
self.mox.ReplayAll()
args = []
cmd_parser = cmd.get_parser('list_networks')
parsed_args = cmd_parser.parse_args(args)
result = cmd.get_data(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_result = [x for x in result[1]]
self.assertEqual(len(_result), len(expected))
for res, exp in zip(_result, expected):
self.assertEqual(len(res), len(exp))
for a, b in zip(res, exp):
self.assertEqual(a, b)
def test_list_nets_extend_subnets(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
'mysubid3']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2',
'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_extend_subnets_no_subnet(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2', 'mysubid4 ')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_fields(self):
"""List nets: --fields a --fields b -- --fields c d."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_nets_columns(self, cmd, returned_body,
args=('-f', 'json')):
resources = 'networks'
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_columns(cmd, resources, returned_body, args=args)
def test_list_nets_defined_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body,
args=['-f', 'json', '-c', 'id'])
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(1, len(net))
self.assertEqual("id", net.keys()[0])
def test_list_nets_with_default_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body)
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(3, len(net))
self.assertEqual(0, len(set(net) ^ set(cmd.list_columns)))
def test_list_external_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "router%3Aexternal=True&id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_external_nets(self, resources, cmd,
detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
if query:
query += '&router%3Aexternal=True'
else:
query += 'router%3Aexternal=True'
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
def test_list_external_nets_detail(self):
"""list external nets: -D."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd, True)
def test_list_external_nets_tags(self):
"""List external nets: -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources,
cmd, tags=['a', 'b'])
def test_list_external_nets_detail_tags(self):
"""List external nets: -D -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
detail=True, tags=['a', 'b'])
    def test_list_external_nets_fields(self):
"""List external nets: --fields a --fields b -- --fields c d."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_network_exception(self):
"""Update net: myid."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_update_network(self):
"""Update net: myid --name myname --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_network_with_unicode(self):
"""Update net: myid --name u'\u7f51\u7edc' --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', u'\u7f51\u7edc',
'--tags', 'a', 'b'],
{'name': u'\u7f51\u7edc',
'tags': ['a', 'b'], }
)
def test_show_network(self):
"""Show net: --fields id --fields name myid."""
resource = 'network'
cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_delete_network(self):
"""Delete net: myid."""
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def _test_extend_list(self, mox_calls):
data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
'subnets': ['mysubid%d' % i]}
for i in range(10)]
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, 'subnets_path')
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
cmd.get_client().MultipleTimes().AndReturn(self.client)
mox_calls(path, data)
self.mox.ReplayAll()
known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
cmd.extend_list(data, known_args)
self.mox.VerifyAll()
def _build_test_data(self, data):
subnet_ids = []
response = []
filters = ""
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
for subnet_id in n['subnets']:
filters = "%s&id=%s" % (filters, subnet_id)
response.append({'id': subnet_id,
'cidr': '192.168.0.0/16'})
resp_str = self.client.serialize({'subnets': response})
resp = (test_cli20.MyResp(200), resp_str)
return filters, resp
def test_extend_list(self):
def mox_calls(path, data):
filters, response = self._build_test_data(data)
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(
path, 'fields=id&fields=cidr' + filters), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
def test_extend_list_exceed_max_uri_len(self):
def mox_calls(path, data):
sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
filters, response = self._build_test_data(data)
# 1 char of extra URI len will cause a split in 2 requests
self.mox.StubOutWithMock(self.client, "_check_uri_length")
self.client._check_uri_length(mox.IgnoreArg()).AndRaise(
exceptions.RequestURITooLong(excess=1))
for data in sub_data_lists:
filters, response = self._build_test_data(data)
self.client._check_uri_length(mox.IgnoreArg()).AndReturn(None)
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(
path, 'fields=id&fields=cidr%s' % filters),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
class CLITestV20NetworkXML(CLITestV20NetworkJSON):
format = 'xml'
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import msgpack
import redis
import pretend
import pytest
from pyramid import viewderivers
import warehouse.sessions
from warehouse.sessions import (
InvalidSession, Session, SessionFactory, includeme, session_view,
)
from warehouse.utils import crypto
class TestInvalidSession:
@pytest.mark.parametrize(
"method",
[
# IDict methods
"__contains__",
"__delitem__",
"__getitem__",
"__iter__",
"__len__",
"__setitem__",
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
# ISession methods
"invalidate",
"flash",
"changed",
"get_csrf_token",
"peek_flash",
"new_csrf_token",
"pop_flash",
# Our custom methods.
"should_save",
],
)
def test_methods_raise(self, method):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, method)()
@pytest.mark.parametrize("name", ["created", "new", "sid"])
def test_property_raises(self, name):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, name)
class TestSession:
@pytest.mark.parametrize(
("data", "expected"),
[
(None, {}),
({}, {}),
({"foo": "bar"}, {"foo": "bar"}),
]
)
def test_create_new(self, monkeypatch, data, expected):
monkeypatch.setattr(time, "time", lambda: 100)
monkeypatch.setattr(crypto, "random_token", lambda: "123456")
session = Session(data)
assert session == expected
assert session.sid == "123456"
assert session.new
assert session.created == 100
assert not session.invalidated
@pytest.mark.parametrize(
("data", "expected", "new"),
[
(None, {}, True),
({}, {}, True),
({"foo": "bar"}, {"foo": "bar"}, True),
(None, {}, False),
({}, {}, False),
({"foo": "bar"}, {"foo": "bar"}, False),
]
)
def test_create_with_session_id(self, monkeypatch, data, expected, new):
monkeypatch.setattr(time, "time", lambda: 100)
session = Session(data, "wat", new)
assert session == expected
assert session.sid == "wat"
assert session.new is new
assert session.created == 100
assert not session.invalidated
def test_changed_marks_as_changed(self):
session = Session()
assert not session._changed
session.changed()
assert session._changed
def test_invalidate(self, monkeypatch):
session_ids = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(session_ids))
session = Session({"foo": "bar"}, "original id", False)
assert session == {"foo": "bar"}
assert session.sid == "original id"
assert not session.new
assert not session.invalidated
session.invalidate()
assert session == {}
assert session.sid == "123456"
assert session.new
assert session.invalidated == {"original id"}
session.invalidate()
assert session == {}
assert session.sid == "7890"
assert session.new
assert session.invalidated == {"original id", "123456"}
def test_invalidate_empty(self):
session = Session({"foo": "bar"})
session.invalidate()
assert session == {}
assert session.invalidated == set()
def test_should_save(self):
session = Session()
assert not session.should_save()
session.changed()
assert session.should_save()
@pytest.mark.parametrize(
("data", "method", "args"),
[
({"foo": "bar"}, "__delitem__", ["foo"]),
({}, "__setitem__", ["foo", "bar"]),
({}, "clear", []),
({"foo": "bar"}, "pop", ["foo"]),
({"foo": "bar"}, "popitem", []),
({}, "setdefault", ["foo", "bar"]),
({}, "update", [{"foo": "bar"}]),
],
)
def test_methods_call_changed(self, data, method, args):
session = Session(data)
session.changed = pretend.call_recorder(lambda: None)
getattr(session, method)(*args)
assert session.changed.calls == [pretend.call()]
@pytest.mark.parametrize(
("queue", "expected"),
[
(None, "_flash_messages"),
("foobar", "_flash_messages.foobar"),
],
)
def test_generate_flash_key(self, queue, expected):
session = Session()
assert session._get_flash_queue_key(queue) == expected
def test_flash_messages(self):
session = Session()
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == []
session.flash("Another Flash Message", queue="foo")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message", "A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=True)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=False)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
assert session.pop_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.pop_flash(queue="foo") == ["Another Flash Message"]
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
def test_csrf_token(self, monkeypatch):
tokens = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(tokens))
session = Session()
assert session._csrf_token_key not in session
assert session.new_csrf_token() == "123456"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token() == "7890"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "7890"
def test_get_csrf_token_empty(self):
session = Session()
session.new_csrf_token = pretend.call_recorder(lambda: "123456")
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token.calls == [pretend.call()]
class TestSessionFactory:
def test_initialize(self, monkeypatch):
timestamp_signer_obj = pretend.stub()
timestamp_signer_create = pretend.call_recorder(
lambda secret, salt: timestamp_signer_obj
)
monkeypatch.setattr(crypto, "TimestampSigner", timestamp_signer_create)
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url: strict_redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
session_factory = SessionFactory("mysecret", "my url")
assert session_factory.signer is timestamp_signer_obj
assert session_factory.redis is strict_redis_obj
assert timestamp_signer_create.calls == [
pretend.call("mysecret", salt="session"),
]
assert strict_redis_cls.from_url.calls == [pretend.call("my url")]
def test_redis_key(self):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
assert session_factory._redis_key("my_session_id") == \
"warehouse/session/data/my_session_id"
def test_no_current_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_invalid_session_id(self, pyramid_request):
pyramid_request.cookies["session_id"] = "invalid!"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_no_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: None),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_invalid_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"invalid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_valid_data(self, monkeypatch, pyramid_request):
msgpack_unpackb = pretend.call_recorder(
lambda bdata, encoding, use_list: {"foo": "bar"}
)
monkeypatch.setattr(msgpack, "unpackb", msgpack_unpackb)
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"valid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert msgpack_unpackb.calls == [
pretend.call(b"valid data", encoding="utf8", use_list=True),
]
assert isinstance(session, Session)
assert session == {"foo": "bar"}
assert session.sid == "123456"
assert not session.new
def test_no_save_invalid_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session = InvalidSession()
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
def test_noop_unused_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session.invalidated = set()
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
assert pyramid_request.session.should_save.calls == [pretend.call()]
def test_invalidated_deletes_no_save(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None)
)
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub(
delete_cookie=pretend.call_recorder(lambda cookie: None),
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert response.delete_cookie.calls == [pretend.call("session_id")]
def test_invalidated_deletes_save_non_secure(self, monkeypatch,
pyramid_request):
msgpack_packb = pretend.call_recorder(
lambda data, encoding, use_bin_type: b"msgpack data"
)
monkeypatch.setattr(msgpack, "packb", msgpack_packb)
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None),
setex=pretend.call_recorder(lambda key, age, data: None),
)
session_factory.signer.sign = pretend.call_recorder(
lambda data: "cookie data"
)
pyramid_request.scheme = "http"
pyramid_request.session.sid = "123456"
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: True
)
response = pretend.stub(
set_cookie=pretend.call_recorder(
lambda cookie, data, max_age, httponly, secure: None
)
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert msgpack_packb.calls == [
pretend.call(
pyramid_request.session,
encoding="utf8",
use_bin_type=True,
),
]
assert session_factory.redis.setex.calls == [
pretend.call(
"warehouse/session/data/123456",
12 * 60 * 60,
b"msgpack data",
),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert session_factory.signer.sign.calls == [pretend.call(b"123456")]
assert response.set_cookie.calls == [
pretend.call(
"session_id",
"cookie data",
max_age=12 * 60 * 60,
httponly=True,
secure=False,
),
]
class TestSessionView:
def test_has_options(self):
assert set(session_view.options) == {"uses_session"}
@pytest.mark.parametrize("uses_session", [False, None])
def test_invalid_session(self, uses_session):
context = pretend.stub()
request = pretend.stub(session=pretend.stub())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, InvalidSession)
return response
info = pretend.stub(options={}, exception_only=False)
if uses_session is not None:
info.options["uses_session"] = uses_session
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
def test_valid_session(self, monkeypatch):
add_vary_cb = pretend.call_recorder(lambda fn: fn)
add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
monkeypatch.setattr(warehouse.sessions, "add_vary", add_vary)
context = pretend.stub()
request = pretend.stub(session=Session())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, Session)
return response
info = pretend.stub(options={"uses_session": True})
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert add_vary.calls == [pretend.call("Cookie")]
assert add_vary_cb.calls == [pretend.call(view)]
def test_includeme(monkeypatch):
session_factory_obj = pretend.stub()
session_factory_cls = pretend.call_recorder(
lambda secret, url: session_factory_obj
)
monkeypatch.setattr(
warehouse.sessions,
"SessionFactory",
session_factory_cls,
)
config = pretend.stub(
set_session_factory=pretend.call_recorder(lambda factory: None),
registry=pretend.stub(
settings={
"sessions.secret": "my secret",
"sessions.url": "my url",
},
),
add_view_deriver=pretend.call_recorder(lambda *a, **kw: None),
)
includeme(config)
assert config.set_session_factory.calls == [
pretend.call(session_factory_obj),
]
assert session_factory_cls.calls == [pretend.call("my secret", "my url")]
assert config.add_view_deriver.calls == [
pretend.call(
session_view,
over="csrf_view",
under=viewderivers.INGRESS,
),
]
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Assignment service."""
import copy
import uuid
import six
from six.moves import urllib
from keystone.common import controller
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'token_api')
class Tenant(controller.V2Controller):
@controller.v2_deprecated
def get_all_projects(self, context, **kw):
"""Gets a list of all tenants for an admin user."""
if 'name' in context['query_string']:
return self.get_project_by_name(
context, context['query_string'].get('name'))
self.assert_admin(context)
tenant_refs = self.assignment_api.list_projects_in_domain(
CONF.identity.default_domain_id)
for tenant_ref in tenant_refs:
tenant_ref = self.filter_domain_id(tenant_ref)
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_projects_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
Pulls the token from the context, validates it and gets the valid
tenants for the user in the token.
Doesn't care about token scopedness.
"""
try:
token_ref = self.token_api.get_token(context['token_id'])
except exception.NotFound as e:
LOG.warning(_('Authentication failed: %s'), e)
raise exception.Unauthorized(e)
user_ref = token_ref['user']
tenant_refs = (
self.assignment_api.list_projects_for_user(user_ref['id']))
tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
if ref['domain_id'] == CONF.identity.default_domain_id]
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_project(self, context, tenant_id):
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
ref = self.assignment_api.get_project(tenant_id)
return {'tenant': self.filter_domain_id(ref)}
@controller.v2_deprecated
def get_project_by_name(self, context, tenant_name):
self.assert_admin(context)
ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
return {'tenant': self.filter_domain_id(ref)}
# CRUD Extension
@controller.v2_deprecated
def create_project(self, context, tenant):
tenant_ref = self._normalize_dict(tenant)
if 'name' not in tenant_ref or not tenant_ref['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
self.assert_admin(context)
tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
tenant = self.assignment_api.create_project(
tenant_ref['id'],
self._normalize_domain_id(context, tenant_ref))
return {'tenant': self.filter_domain_id(tenant)}
@controller.v2_deprecated
def update_project(self, context, tenant_id, tenant):
self.assert_admin(context)
# Remove domain_id if specified - a v2 api caller should not
# be specifying that
clean_tenant = tenant.copy()
clean_tenant.pop('domain_id', None)
tenant_ref = self.assignment_api.update_project(
tenant_id, clean_tenant)
return {'tenant': tenant_ref}
@controller.v2_deprecated
def delete_project(self, context, tenant_id):
self.assert_admin(context)
self.assignment_api.delete_project(tenant_id)
@controller.v2_deprecated
def get_project_users(self, context, tenant_id, **kw):
self.assert_admin(context)
user_refs = []
user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
for user_id in user_ids:
try:
user_ref = self.identity_api.get_user(user_id)
except exception.UserNotFound:
# Log that user is missing and continue on.
message = _("User %(user_id)s in project %(project_id)s "
"doesn't exist.")
LOG.debug(message,
{'user_id': user_id, 'project_id': tenant_id})
else:
user_refs.append(self.v3_to_v2_user(user_ref))
return {'users': user_refs}
def _format_project_list(self, tenant_refs, **kwargs):
marker = kwargs.get('marker')
first_index = 0
if marker is not None:
for (marker_index, tenant) in enumerate(tenant_refs):
if tenant['id'] == marker:
# we start pagination after the marker
first_index = marker_index + 1
break
else:
msg = _('Marker could not be found')
raise exception.ValidationError(message=msg)
limit = kwargs.get('limit')
last_index = None
if limit is not None:
try:
limit = int(limit)
if limit < 0:
raise AssertionError()
except (ValueError, AssertionError):
msg = _('Invalid limit value')
raise exception.ValidationError(message=msg)
last_index = first_index + limit
tenant_refs = tenant_refs[first_index:last_index]
for x in tenant_refs:
if 'enabled' not in x:
x['enabled'] = True
o = {'tenants': tenant_refs,
'tenants_links': []}
return o
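# Editor's note (illustrative only, not part of the original controller):
# _format_project_list() starts pagination strictly after the marker, so with
# tenants [a, b, c, d], marker=b and limit=2 yield [c, d], while marker=d
# yields []; an unknown marker, or a negative or non-integer limit, raises
# ValidationError.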
@dependency.requires('assignment_api')
class Role(controller.V2Controller):
# COMPAT(essex-3)
@controller.v2_deprecated
def get_user_roles(self, context, user_id, tenant_id=None):
"""Get the roles for a user and tenant pair.
Since we're trying to ignore the idea of user-only roles, we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant ID required')
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
return {'roles': [self.assignment_api.get_role(x)
for x in roles]}
# CRUD extension
@controller.v2_deprecated
def get_role(self, context, role_id):
self.assert_admin(context)
return {'role': self.assignment_api.get_role(role_id)}
@controller.v2_deprecated
def create_role(self, context, role):
role = self._normalize_dict(role)
self.assert_admin(context)
if 'name' not in role or not role['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
role_id = uuid.uuid4().hex
role['id'] = role_id
role_ref = self.assignment_api.create_role(role_id, role)
return {'role': role_ref}
@controller.v2_deprecated
def delete_role(self, context, role_id):
self.assert_admin(context)
self.assignment_api.delete_role(role_id)
@controller.v2_deprecated
def get_roles(self, context):
self.assert_admin(context)
return {'roles': self.assignment_api.list_roles()}
@controller.v2_deprecated
def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
"""Add a role to a user and tenant pair.
Since we're trying to ignore the idea of user-only roles, we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
@controller.v2_deprecated
def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
"""Remove a role from a user and tenant pair.
Since we're trying to ignore the idea of user-only roles, we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def get_role_refs(self, context, user_id):
"""Ultimate hack to get around having to make role_refs first-class.
This will basically iterate over the various roles the user has in
all tenants the user is a member of and create fake role_refs where
the id encodes the user-tenant-role information so we can look
up the appropriate data when we need to delete them.
"""
self.assert_admin(context)
tenants = self.assignment_api.list_projects_for_user(user_id)
o = []
for tenant in tenants:
# As a v2 call, we should limit the response to those projects in
# the default domain.
if tenant['domain_id'] != CONF.identity.default_domain_id:
continue
role_ids = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant['id'])
for role_id in role_ids:
ref = {'roleId': role_id,
'tenantId': tenant['id'],
'userId': user_id}
ref['id'] = urllib.parse.urlencode(ref)
o.append(ref)
return {'roles': o}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def create_role_ref(self, context, user_id, role):
"""This is actually used for adding a user to a tenant.
In the legacy data model adding a user to a tenant required setting
a role.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
tenant_id = role.get('tenantId')
role_id = role.get('roleId')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def delete_role_ref(self, context, user_id, role_ref_id):
"""This is actually used for deleting a user from a tenant.
In the legacy data model removing a user from a tenant required
deleting a role.
To emulate this, we encode the tenant and role in the role_ref_id,
and if this happens to be the last role for the user-tenant pair,
we remove the user from the tenant.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
role_ref_ref = urllib.parse.parse_qs(role_ref_id)
tenant_id = role_ref_ref.get('tenantId')[0]
role_id = role_ref_ref.get('roleId')[0]
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
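# Editor's sketch (illustrative only, not part of the original module): the
# fake role_ref id used by get_role_refs()/delete_role_ref() above is just a
# URL-encoded user/tenant/role triple, so the delete path can recover the
# pieces with urllib.parse.parse_qs, e.g.:
#
#     ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
#     ref_id = urllib.parse.urlencode(ref)
#     parsed = urllib.parse.parse_qs(ref_id)
#     parsed['tenantId'][0], parsed['roleId'][0]  # -> ('t1', 'r1')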
@dependency.requires('assignment_api')
class DomainV3(controller.V3Controller):
collection_name = 'domains'
member_name = 'domain'
def __init__(self):
super(DomainV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_domain
@controller.protected()
def create_domain(self, context, domain):
self._require_attribute(domain, 'name')
ref = self._assign_unique_id(self._normalize_dict(domain))
ref = self.assignment_api.create_domain(ref['id'], ref)
return DomainV3.wrap_member(context, ref)
@controller.filterprotected('enabled', 'name')
def list_domains(self, context, filters):
hints = DomainV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_domains(hints=hints)
return DomainV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_domain(self, context, domain_id):
ref = self.assignment_api.get_domain(domain_id)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def update_domain(self, context, domain_id, domain):
self._require_matching_id(domain_id, domain)
ref = self.assignment_api.update_domain(domain_id, domain)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def delete_domain(self, context, domain_id):
return self.assignment_api.delete_domain(domain_id)
@dependency.requires('assignment_api')
class ProjectV3(controller.V3Controller):
collection_name = 'projects'
member_name = 'project'
def __init__(self):
super(ProjectV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_project
@controller.protected()
def create_project(self, context, project):
self._require_attribute(project, 'name')
ref = self._assign_unique_id(self._normalize_dict(project))
ref = self._normalize_domain_id(context, ref)
ref = self.assignment_api.create_project(ref['id'], ref)
return ProjectV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name')
def list_projects(self, context, filters):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects(hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.filterprotected('enabled', 'name')
def list_user_projects(self, context, filters, user_id):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects_for_user(user_id,
hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_project(self, context, project_id):
ref = self.assignment_api.get_project(project_id)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def update_project(self, context, project_id, project):
self._require_matching_id(project_id, project)
self._require_matching_domain_id(
project_id, project, self.assignment_api.get_project)
ref = self.assignment_api.update_project(project_id, project)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def delete_project(self, context, project_id):
return self.assignment_api.delete_project(project_id)
@dependency.requires('assignment_api', 'identity_api')
class RoleV3(controller.V3Controller):
collection_name = 'roles'
member_name = 'role'
def __init__(self):
super(RoleV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_role
@controller.protected()
def create_role(self, context, role):
self._require_attribute(role, 'name')
ref = self._assign_unique_id(self._normalize_dict(role))
ref = self.assignment_api.create_role(ref['id'], ref)
return RoleV3.wrap_member(context, ref)
@controller.filterprotected('name')
def list_roles(self, context, filters):
hints = RoleV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_roles(
hints=hints)
return RoleV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_role(self, context, role_id):
ref = self.assignment_api.get_role(role_id)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
ref = self.assignment_api.update_role(role_id, role)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def delete_role(self, context, role_id):
self.assignment_api.delete_role(role_id)
def _require_domain_xor_project(self, domain_id, project_id):
if (domain_id and project_id) or (not domain_id and not project_id):
msg = _('Specify a domain or project, not both')
raise exception.ValidationError(msg)
def _require_user_xor_group(self, user_id, group_id):
if (user_id and group_id) or (not user_id and not group_id):
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
def _check_if_inherited(self, context):
return (CONF.os_inherit.enabled and
context['path'].startswith('/OS-INHERIT') and
context['path'].endswith('/inherited_to_projects'))
def _check_grant_protection(self, context, protection, role_id=None,
user_id=None, group_id=None,
domain_id=None, project_id=None):
"""Check protection for role grant APIs.
The policy rule might want to inspect attributes of any of the entities
involved in the grant. So we get these and pass them to the
check_protection() handler in the controller.
"""
ref = {}
if role_id:
ref['role'] = self.assignment_api.get_role(role_id)
if user_id:
ref['user'] = self.identity_api.get_user(user_id)
else:
ref['group'] = self.identity_api.get_group(group_id)
if domain_id:
ref['domain'] = self.assignment_api.get_domain(domain_id)
else:
ref['project'] = self.assignment_api.get_project(project_id)
self.check_protection(context, protection, ref)
@controller.protected(callback=_check_grant_protection)
def create_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Grants a role to a user or group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.create_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def list_grants(self, context, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Lists roles granted to user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
refs = self.assignment_api.list_grants(
user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
return RoleV3.wrap_collection(context, refs)
@controller.protected(callback=_check_grant_protection)
def check_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Checks if a role has been granted on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.get_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def revoke_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Revokes a role from user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.delete_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@dependency.requires('assignment_api', 'identity_api')
class RoleAssignmentV3(controller.V3Controller):
# TODO(henry-nash): The current implementation does not provide a full
# first class entity for role-assignment. There is no role_assignment_id
# and only the list_role_assignment call is supported. Further, since it
# is not a first class entity, the links for the individual entities
# reference the individual role grant APIs.
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def wrap_member(cls, context, ref):
# NOTE(henry-nash): Since we are not yet a true collection, we override
# the wrapper as we have already included the links in the entities
pass
def _format_entity(self, context, entity):
"""Format an assignment entity for API response.
The driver layer returns entities as dicts containing the ids of the
actor (e.g. user or group), target (e.g. domain or project) and role.
If it is an inherited role, then this is also indicated. Examples:
{'user_id': user_id,
'project_id': project_id,
'role_id': role_id}
or, for an inherited role:
{'user_id': user_id,
'domain_id': domain_id,
'role_id': role_id,
'inherited_to_projects': true}
This function maps this into the format to be returned via the API,
e.g. for the second example above:
{
'user': {
{'id': user_id}
},
'scope': {
'domain': {
{'id': domain_id}
},
'OS-INHERIT:inherited_to': 'projects'
},
'role': {
{'id': role_id}
},
'links': {
'assignment': '/domains/domain_id/users/user_id/roles/'
'role_id/inherited_to_projects'
}
}
"""
formatted_entity = {}
suffix = ""
if 'user_id' in entity:
formatted_entity['user'] = {'id': entity['user_id']}
actor_link = 'users/%s' % entity['user_id']
if 'group_id' in entity:
formatted_entity['group'] = {'id': entity['group_id']}
actor_link = 'groups/%s' % entity['group_id']
if 'role_id' in entity:
formatted_entity['role'] = {'id': entity['role_id']}
if 'project_id' in entity:
formatted_entity['scope'] = (
{'project': {'id': entity['project_id']}})
target_link = '/projects/%s' % entity['project_id']
if 'domain_id' in entity:
formatted_entity['scope'] = (
{'domain': {'id': entity['domain_id']}})
if 'inherited_to_projects' in entity:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
'projects')
target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
suffix = '/inherited_to_projects'
else:
target_link = '/domains/%s' % entity['domain_id']
formatted_entity.setdefault('links', {})
path = '%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
'target': target_link,
'actor': actor_link,
'role': entity['role_id'],
'suffix': suffix}
formatted_entity['links']['assignment'] = self.base_url(context, path)
return formatted_entity
def _expand_indirect_assignments(self, context, refs):
"""Processes entity list into all-direct assignments.
For any group role assignments in the list, create a role assignment
entity for each member of that group, and then remove the group
assignment entity itself from the list.
If the OS-INHERIT extension is enabled, then honor any inherited
roles on the domain by creating the equivalent on all projects
owned by the domain.
For any new entity created by virtue of group membership, add in an
additional link to that membership.
"""
def _get_group_members(ref):
"""Get a list of group members.
Get the list of group members. If this fails with
GroupNotFound, then log this as a warning, but allow
overall processing to continue.
"""
try:
members = self.identity_api.list_users_in_group(
ref['group']['id'])
except exception.GroupNotFound:
members = []
# The group is missing, which should not happen since
# group deletion should remove any related assignments, so
# log a warning
if 'domain' in ref:
target = 'Domain: %s' % ref['domain'].get('domain_id')
elif 'project' in ref:
target = 'Project: %s' % ref['project'].get('project_id')
else:
# Should always be a domain or project, but since to get
# here things have gone astray, let's be cautious.
target = 'Unknown'
LOG.warning(
_('Group %(group)s not found for role-assignment - '
'%(target)s with Role: %(role)s'), {
'group': ref['group_id'], 'target': target,
'role': ref.get('role_id')})
return members
def _build_user_assignment_equivalent_of_group(
user, group_id, template):
"""Create a user assignment equivalent to the group one.
The template has had the 'group' entity removed, so
substitute a 'user' one. The 'assignment' link stays as it is,
referring to the group assignment that led to this role.
A 'membership' link is added that refers to this particular
user's membership of this group.
"""
user_entry = copy.deepcopy(template)
user_entry['user'] = {'id': user['id']}
user_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user['id'])))
return user_entry
def _build_project_equivalent_of_user_domain_role(
project_id, domain_id, template):
"""Create a user project assignment equivalent to the domain one.
The template has had the 'domain' entity removed, so
substitute a 'project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(
context,
'/OS-INHERIT/domains/%s/users/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, project_entry['user']['id'],
project_entry['role']['id'])))
return project_entry
def _build_project_equivalent_of_group_domain_role(
user_id, group_id, project_id, domain_id, template):
"""Create a user project equivalent to the domain group one.
The template has had the 'domain' and 'group' entities removed, so
substitute a 'user-project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['user'] = {'id': user_id}
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(context,
'/OS-INHERIT/domains/%s/groups/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, group_id,
project_entry['role']['id'])))
project_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user_id)))
return project_entry
# Scan the list of entities for any assignments that need to be
# expanded.
#
# If the OS-INHERIT extension is enabled, the refs list may
# contain roles to be inherited from domain to project, so expand
# these as well into project equivalents
#
# For any regular group entries, expand these into user entries based
# on membership of that group.
#
# Due to the potentially large expansions, rather than modify the
# list we are enumerating, we build a new one as we go.
#
new_refs = []
for r in refs:
if 'OS-INHERIT:inherited_to' in r['scope']:
# It's an inherited domain role - so get the list of projects
# owned by this domain. A domain scope is guaranteed since we
# checked this when we built the refs list
project_ids = (
[x['id'] for x in
self.assignment_api.list_projects_in_domain(
r['scope']['domain']['id'])])
base_entry = copy.deepcopy(r)
domain_id = base_entry['scope']['domain']['id']
base_entry['scope'].pop('domain')
# For each project, create an equivalent role assignment
for p in project_ids:
# If it's a group assignment, then create equivalent user
# roles based on membership of the group
if 'group' in base_entry:
members = _get_group_members(base_entry)
sub_entry = copy.deepcopy(base_entry)
group_id = sub_entry['group']['id']
sub_entry.pop('group')
for m in members:
new_entry = (
_build_project_equivalent_of_group_domain_role(
m['id'], group_id, p,
domain_id, sub_entry))
new_refs.append(new_entry)
else:
new_entry = (
_build_project_equivalent_of_user_domain_role(
p, domain_id, base_entry))
new_refs.append(new_entry)
elif 'group' in r:
# It's a non-inherited group role assignment, so get the list
# of members.
members = _get_group_members(r)
# Now replace that group role assignment entry with an
# equivalent user role assignment for each of the group members
base_entry = copy.deepcopy(r)
group_id = base_entry['group']['id']
base_entry.pop('group')
for m in members:
user_entry = _build_user_assignment_equivalent_of_group(
m, group_id, base_entry)
new_refs.append(user_entry)
else:
new_refs.append(r)
return new_refs
def _query_filter_is_true(self, filter_value):
"""Determine if bool query param is 'True'.
We treat this the same way as we do for policy
enforcement:
{bool_param}=0 is treated as False
Any other value is considered to be equivalent to
True, including the absence of a value
"""
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
else:
val = True
return val
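# Editor's note (illustrative only): under this rule '?effective=0' is the
# only value that turns expansion off; '?effective', '?effective=1' and
# '?effective=true' (and any other value) all count as True.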
def _filter_inherited(self, entry):
if ('inherited_to_projects' in entry and
not CONF.os_inherit.enabled):
return False
else:
return True
@controller.filterprotected('group.id', 'role.id',
'scope.domain.id', 'scope.project.id',
'scope.OS-INHERIT:inherited_to', 'user.id')
def list_role_assignments(self, context, filters):
# TODO(henry-nash): This implementation uses the standard filtering
# in the V3.wrap_collection. Given the large number of individual
# assignments, this is pretty inefficient. An alternative would be
# to pass the filters into the driver call, so that the list size is
# kept a minimum.
hints = self.build_driver_hints(context, filters)
refs = self.assignment_api.list_role_assignments()
formatted_refs = (
[self._format_entity(context, x) for x in refs
if self._filter_inherited(x)])
if ('effective' in context['query_string'] and
self._query_filter_is_true(
context['query_string']['effective'])):
formatted_refs = self._expand_indirect_assignments(context,
formatted_refs)
return self.wrap_collection(context, formatted_refs, hints=hints)
@controller.protected()
def get_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def update_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def delete_role_assignment(self, context):
raise exception.NotImplemented()
|
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_webfilter_urlfilter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_webfilter_urlfilter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_webfilter_urlfilter_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'webfilter_urlfilter': {
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
expected_data = {
'comment': 'Optional comments.',
'id': '4',
'ip-addr-block': 'enable',
'name': 'default_name_6',
'one-arm-ips-urlfilter': 'enable'
}
set_method_mock.assert_called_with('webfilter', 'urlfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_webfilter_urlfilter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'webfilter_urlfilter': {
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
expected_data = {
'comment': 'Optional comments.',
'id': '4',
'ip-addr-block': 'enable',
'name': 'default_name_6',
'one-arm-ips-urlfilter': 'enable'
}
set_method_mock.assert_called_with('webfilter', 'urlfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_webfilter_urlfilter_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'webfilter_urlfilter': {
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
delete_method_mock.assert_called_with('webfilter', 'urlfilter', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_webfilter_urlfilter_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'webfilter_urlfilter': {
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
delete_method_mock.assert_called_with('webfilter', 'urlfilter', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_webfilter_urlfilter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'webfilter_urlfilter': {
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
expected_data = {
'comment': 'Optional comments.',
'id': '4',
'ip-addr-block': 'enable',
'name': 'default_name_6',
'one-arm-ips-urlfilter': 'enable'
}
set_method_mock.assert_called_with('webfilter', 'urlfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_webfilter_urlfilter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'webfilter_urlfilter': {
'random_attribute_not_valid': 'tag',
'comment': 'Optional comments.',
'id': '4',
'ip_addr_block': 'enable',
'name': 'default_name_6',
'one_arm_ips_urlfilter': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_webfilter_urlfilter.fortios_webfilter(input_data, fos_instance)
expected_data = {
'comment': 'Optional comments.',
'id': '4',
'ip-addr-block': 'enable',
'name': 'default_name_6',
'one-arm-ips-urlfilter': 'enable'
}
set_method_mock.assert_called_with('webfilter', 'urlfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
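# Editor's sketch (assumption for illustration, not part of the original test
# module): the expected_data dicts above assume the module converts the
# Ansible-style underscore keys to FortiOS hyphenated keys before calling
# set()/delete(); a minimal version of that mapping could look like:
def _underscore_to_hyphen_example(data):
    # Recursively replace '_' with '-' in dict keys; values are left untouched.
    if isinstance(data, dict):
        return {key.replace('_', '-'): _underscore_to_hyphen_example(value)
                for key, value in data.items()}
    if isinstance(data, list):
        return [_underscore_to_hyphen_example(elem) for elem in data]
    return data
# _underscore_to_hyphen_example({'ip_addr_block': 'enable'})
# -> {'ip-addr-block': 'enable'}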
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tool to subroutinize a CFF OpenType font. Backed by a C++ binary.
This file is a bootstrap for the C++ edition of the FontTools compreffor.
It prepares the input data for the extension and reads back in the results,
applying them to the input font.
Usage (command line):
>> ./cxxCompressor.py /path/to/font.otf
# font written to /path/to/font.compressed.otf
Usage (python):
>> font = TTFont("/path/to/font.otf")
>> cxxCompressor.compreff(font)
>> font.save("/path/to/output.otf")
"""
import array
from io import BytesIO
import struct
import logging
from compreffor.pyCompressor import (
Compreffor, CandidateSubr, tokenCost)
from compreffor import _compreffor as lib, timer
log = logging.getLogger(__name__)
__all__ = ["compreff"]
class IdKeyMap(object):
"""A map that where every key's value is itself. Used
as a map from simplified key space to actual key space
in pyCompressor"""
def __getitem__(self, tok):
return tok
class SimpleCandidateSubr(CandidateSubr):
"""A reimplimentation of CandidateSubr to be more
compatible with results from C++"""
def __init__(self, length, ref_loc):
self.length = length
self.location = ref_loc
self.freq = 0
self._flatten = False
self._global = False
def usages(self):
return self.freq
frequency = usages
def cost(self):
try:
return self.__cost
except AttributeError:
self.__cost = sum(map(tokenCost, self.value()))
return self.__cost
def encoding(self):
return self._encoding
@timer("produce data for C++ library")
def write_data(td):
"""Writes CharStrings from the TopDict td into a string that is easily
readable."""
out = BytesIO()
td.CharStrings.charStringsIndex.getCompiler(td.strings, None).toFile(out)
return out.getvalue()
def get_encoding(data_buffer, subrs):
"""Read a charstring's encoding stream out of a string buffer response
from cffCompressor.cc"""
pos = 0
num_calls = data_buffer[pos]
pos += 1
enc = []
for j in range(num_calls):
insertion_pos = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
pos += 4
subr_index = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
pos += 4
subrs[subr_index].freq += 1
enc.append((insertion_pos, subrs[subr_index]))
return enc, pos
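# Illustrative sketch (not part of the original module): the stream parsed by
# get_encoding() is one count byte followed by (insertion_pos, subr_index)
# pairs of little-endian uint32s, so a single call could be round-tripped as:
#   buf = array.array("B", struct.pack('<BII', 1, 12, 0))
#   subrs = [SimpleCandidateSubr(4, (0, 0))]
#   enc, nread = get_encoding(buf, subrs)  # enc == [(12, subrs[0])], nread == 9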
def read_data(td, result_string):
"""Read the output of cffCompressor.cc into Python data
structures."""
results = array.array("B", result_string)
num_subrs = struct.unpack_from('<I', results[:4])[0]
# process subrs
subrs = []
pos = 4
for i in range(num_subrs):
glyph_idx = struct.unpack_from('<I', results[pos:pos+4])[0]
pos += 4
tok_idx = struct.unpack_from('<I', results[pos:pos+4])[0]
pos += 4
subr_len = struct.unpack_from('<I', results[pos:pos+4])[0]
pos += 4
subrs.append(SimpleCandidateSubr(subr_len, (glyph_idx, tok_idx)))
for i in range(num_subrs):
enc, num_read = get_encoding(results[pos:], subrs)
pos += num_read
subrs[i]._encoding = enc
# process glyph encodings
glyph_encodings = []
for i in range(len(td.CharStrings)):
enc, num_read = get_encoding(results[pos:], subrs)
pos += num_read
glyph_encodings.append(enc)
assert pos == len(results)
return (subrs, glyph_encodings)
@timer("extract results")
def interpret_data(td, results):
"""Interpret the result array from a lib.compreff call to
produce Python data structures."""
class MutableSpace: pass
MutableSpace.pos = 0
def pop_result():
ans = results[MutableSpace.pos]
MutableSpace.pos += 1
return ans
num_subrs = pop_result()
# process subrs
subrs = []
for i in range(num_subrs):
glyph_idx = pop_result()
tok_idx = pop_result()
subr_len = pop_result()
subrs.append(SimpleCandidateSubr(subr_len, (glyph_idx, tok_idx)))
def pop_encoding():
num_calls = pop_result()
enc = []
for j in range(num_calls):
insertion_pos = pop_result()
subr_index = pop_result()
subrs[subr_index].freq += 1
enc.append((insertion_pos, subrs[subr_index]))
return enc
for i in range(num_subrs):
enc = pop_encoding()
subrs[i]._encoding = enc
# process glyph encodings
glyph_encodings = []
for i in range(len(td.CharStrings)):
enc = pop_encoding()
glyph_encodings.append(enc)
return (subrs, glyph_encodings)
@timer("compress the font")
def compreff(font, nrounds=None, max_subrs=None):
"""Main function that compresses `font`, a TTFont object,
in place.
"""
assert len(font['CFF '].cff.topDictIndex) == 1
td = font['CFF '].cff.topDictIndex[0]
if nrounds is None:
nrounds = Compreffor.NROUNDS
if max_subrs is None:
max_subrs = Compreffor.NSUBRS_LIMIT
input_data = write_data(td)
with timer("run 'lib.compreff()'"):
results = lib.compreff(input_data, nrounds)
subrs, glyph_encodings = interpret_data(td, results)
with timer("decompile charstrings"):
for cs in td.CharStrings.values():
cs.decompile()
# in order of charset
chstrings = [x.program for x in td.CharStrings.values()]
for cs in chstrings:
Compreffor.collapse_hintmask(cs)
for s in subrs:
s.chstrings = chstrings
if hasattr(td, 'FDSelect'):
fdselect = lambda g: td.CharStrings.getItemAndSelector(g)[1]
fdlen = len(td.FDArray)
else:
fdselect = None
fdlen = 1
nest_limit = Compreffor.SUBR_NEST_LIMIT
gsubrs, lsubrs = Compreffor.process_subrs(
td.charset,
glyph_encodings,
fdlen,
fdselect,
subrs,
IdKeyMap(),
max_subrs,
nest_limit)
encoding = dict(zip(td.charset, glyph_encodings))
Compreffor.apply_subrs(td, encoding, gsubrs, lsubrs)
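# Minimal usage sketch (assumes fontTools is installed and the paths exist;
# nrounds/max_subrs are shown only to illustrate the tuning knobs, the defaults
# come from Compreffor.NROUNDS and Compreffor.NSUBRS_LIMIT):
#   from fontTools.ttLib import TTFont
#   font = TTFont("/path/to/font.otf")
#   compreff(font, nrounds=4, max_subrs=65533)
#   font.save("/path/to/font.compressed.otf")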
|
|
#!/usr/bin/env python
from __future__ import print_function
import hdr_parser, sys, re, os
from string import Template
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
ignored_arg_types = ["RNG*"]
gen_template_check_self = Template(""" if(!PyObject_TypeCheck(self, &pyopencv_${name}_Type))
return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
$cname* _self_ = ${amp}((pyopencv_${name}_t*)self)->v${get};
""")
gen_template_check_self_algo = Template(""" if(!PyObject_TypeCheck(self, &pyopencv_${name}_Type))
return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
$cname* _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.get());
""")
gen_template_call_constructor_prelude = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
new (&(self->v)) Ptr<$cname>(); // init Ptr with placement new
if(self) """)
gen_template_call_constructor = Template("""self->v.reset(new ${cname}${args})""")
gen_template_simple_call_constructor_prelude = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
if(self) """)
gen_template_simple_call_constructor = Template("""self->v = ${cname}${args}""")
gen_template_parse_args = Template("""const char* keywords[] = { $kw_list, NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "$fmtspec", (char**)keywords, $parse_arglist)$code_cvt )""")
gen_template_func_body = Template("""$code_decl
$code_parse
{
${code_prelude}ERRWRAP2($code_fcall);
$code_ret;
}
""")
py_major_version = sys.version_info[0]
if py_major_version >= 3:
head_init_str = "PyVarObject_HEAD_INIT(&PyType_Type, 0)"
else:
head_init_str = """PyObject_HEAD_INIT(&PyType_Type)
0,"""
gen_template_simple_type_decl = Template("""
struct pyopencv_${name}_t
{
PyObject_HEAD
${cname} v;
};
static PyTypeObject pyopencv_${name}_Type =
{
%s
MODULESTR".$wname",
sizeof(pyopencv_${name}_t),
};
static void pyopencv_${name}_dealloc(PyObject* self)
{
PyObject_Del(self);
}
template<> PyObject* pyopencv_from(const ${cname}& r)
{
pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
m->v = r;
return (PyObject*)m;
}
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name)
{
if( src == NULL || src == Py_None )
return true;
if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type))
{
failmsg("Expected ${cname} for argument '%%s'", name);
return false;
}
dst = ((pyopencv_${name}_t*)src)->v;
return true;
}
""" % head_init_str)
gen_template_type_decl = Template("""
struct pyopencv_${name}_t
{
PyObject_HEAD
Ptr<${cname1}> v;
};
static PyTypeObject pyopencv_${name}_Type =
{
%s
MODULESTR".$wname",
sizeof(pyopencv_${name}_t),
};
static void pyopencv_${name}_dealloc(PyObject* self)
{
((pyopencv_${name}_t*)self)->v.release();
PyObject_Del(self);
}
template<> PyObject* pyopencv_from(const Ptr<${cname}>& r)
{
pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
new (&(m->v)) Ptr<$cname1>(); // init Ptr with placement new
m->v = r;
return (PyObject*)m;
}
template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name)
{
if( src == NULL || src == Py_None )
return true;
if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type))
{
failmsg("Expected ${cname} for argument '%%s'", name);
return false;
}
dst = ((pyopencv_${name}_t*)src)->v.dynamicCast<${cname}>();
return true;
}
""" % head_init_str)
gen_template_map_type_cvt = Template("""
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name);
""")
gen_template_set_prop_from_map = Template("""
if( PyMapping_HasKeyString(src, (char*)"$propname") )
{
tmp = PyMapping_GetItemString(src, (char*)"$propname");
ok = tmp && pyopencv_to(tmp, dst.$propname);
Py_DECREF(tmp);
if(!ok) return false;
}""")
gen_template_type_impl = Template("""
static PyObject* pyopencv_${name}_repr(PyObject* self)
{
char str[1000];
sprintf(str, "<$wname %p>", self);
return PyString_FromString(str);
}
${getset_code}
static PyGetSetDef pyopencv_${name}_getseters[] =
{${getset_inits}
{NULL} /* Sentinel */
};
${methods_code}
static PyMethodDef pyopencv_${name}_methods[] =
{
${methods_inits}
{NULL, NULL}
};
static void pyopencv_${name}_specials(void)
{
pyopencv_${name}_Type.tp_base = ${baseptr};
pyopencv_${name}_Type.tp_dealloc = pyopencv_${name}_dealloc;
pyopencv_${name}_Type.tp_repr = pyopencv_${name}_repr;
pyopencv_${name}_Type.tp_getset = pyopencv_${name}_getseters;
pyopencv_${name}_Type.tp_methods = pyopencv_${name}_methods;${extra_specials}
}
""")
gen_template_get_prop = Template("""
static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)
{
return pyopencv_from(p->v${access}${member});
}
""")
gen_template_get_prop_algo = Template("""
static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)
{
return pyopencv_from(dynamic_cast<$cname*>(p->v.get())${access}${member});
}
""")
gen_template_set_prop = Template("""
static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)
{
if (value == NULL)
{
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
return pyopencv_to(value, p->v${access}${member}) ? 0 : -1;
}
""")
gen_template_set_prop_algo = Template("""
static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)
{
if (value == NULL)
{
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
return pyopencv_to(value, dynamic_cast<$cname*>(p->v.get())${access}${member}) ? 0 : -1;
}
""")
gen_template_prop_init = Template("""
{(char*)"${member}", (getter)pyopencv_${name}_get_${member}, NULL, (char*)"${member}", NULL},""")
gen_template_rw_prop_init = Template("""
{(char*)"${member}", (getter)pyopencv_${name}_get_${member}, (setter)pyopencv_${name}_set_${member}, (char*)"${member}", NULL},""")
simple_argtype_mapping = {
"bool": ("bool", "b", "0"),
"int": ("int", "i", "0"),
"float": ("float", "f", "0.f"),
"double": ("double", "d", "0"),
"c_string": ("char*", "s", '(char*)""')
}
def normalize_class_name(name):
return re.sub(r"^cv\.", "", name).replace(".", "_")
class ClassProp(object):
def __init__(self, decl):
self.tp = decl[0].replace("*", "_ptr")
self.name = decl[1]
self.readonly = True
if "/RW" in decl[3]:
self.readonly = False
class ClassInfo(object):
def __init__(self, name, decl=None):
self.cname = name.replace(".", "::")
self.name = self.wname = normalize_class_name(name)
self.ismap = False
self.issimple = False
self.isalgorithm = False
self.methods = {}
self.props = []
self.consts = {}
customname = False
if decl:
self.bases = decl[1].split()[1:]
if len(self.bases) > 1:
print("Note: Class %s has more than 1 base class (not supported by Python C extensions)" % (self.name,))
print(" Bases: ", " ".join(self.bases))
print(" Only the first base class will be used")
self.bases = [self.bases[0].strip(",")]
#return sys.exit(-1)
if self.bases and self.bases[0].startswith("cv::"):
self.bases[0] = self.bases[0][4:]
if self.bases and self.bases[0] == "Algorithm":
self.isalgorithm = True
for m in decl[2]:
if m.startswith("="):
self.wname = m[1:]
customname = True
elif m == "/Map":
self.ismap = True
elif m == "/Simple":
self.issimple = True
self.props = [ClassProp(p) for p in decl[3]]
if not customname and self.wname.startswith("Cv"):
self.wname = self.wname[2:]
def gen_map_code(self, all_classes):
code = "static bool pyopencv_to(PyObject* src, %s& dst, const char* name)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname)
code += "".join([gen_template_set_prop_from_map.substitute(propname=p.name,proptype=p.tp) for p in self.props])
if self.bases:
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.bases[0]].cname
else:
code += "\n return true;\n}\n"
return code
def gen_code(self, all_classes):
if self.ismap:
return self.gen_map_code(all_classes)
getset_code = StringIO()
getset_inits = StringIO()
sorted_props = [(p.name, p) for p in self.props]
sorted_props.sort()
access_op = "->"
if self.issimple:
access_op = "."
for pname, p in sorted_props:
if self.isalgorithm:
getset_code.write(gen_template_get_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op))
else:
getset_code.write(gen_template_get_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op))
if p.readonly:
getset_inits.write(gen_template_prop_init.substitute(name=self.name, member=pname))
else:
if self.isalgorithm:
getset_code.write(gen_template_set_prop_algo.substitute(name=self.name, cname=self.cname, member=pname, membertype=p.tp, access=access_op))
else:
getset_code.write(gen_template_set_prop.substitute(name=self.name, member=pname, membertype=p.tp, access=access_op))
getset_inits.write(gen_template_rw_prop_init.substitute(name=self.name, member=pname))
methods_code = StringIO()
methods_inits = StringIO()
sorted_methods = list(self.methods.items())
sorted_methods.sort()
for mname, m in sorted_methods:
methods_code.write(m.gen_code(all_classes))
methods_inits.write(m.get_tab_entry())
baseptr = "NULL"
if self.bases and self.bases[0] in all_classes:
baseptr = "&pyopencv_" + all_classes[self.bases[0]].name + "_Type"
code = gen_template_type_impl.substitute(name=self.name, wname=self.wname, cname=self.cname,
getset_code=getset_code.getvalue(), getset_inits=getset_inits.getvalue(),
methods_code=methods_code.getvalue(), methods_inits=methods_inits.getvalue(),
baseptr=baseptr, extra_specials="")
return code
class ConstInfo(object):
def __init__(self, name, val):
self.cname = name.replace(".", "::")
self.name = re.sub(r"^cv\.", "", name).replace(".", "_")
if self.name.startswith("Cv"):
self.name = self.name[2:]
self.name = re.sub(r"([a-z])([A-Z])", r"\1_\2", self.name)
self.name = self.name.upper()
self.value = val
def handle_ptr(tp):
if tp.startswith('Ptr_'):
tp = 'Ptr<' + "::".join(tp.split('_')[1:]) + '>'
return tp
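# For example (illustrative inputs), handle_ptr("Ptr_FeatureDetector") returns
# "Ptr<FeatureDetector>", and "Ptr_cv_Algorithm" becomes "Ptr<cv::Algorithm>",
# because every underscore-separated component after "Ptr_" is joined with "::".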
class ArgInfo(object):
def __init__(self, arg_tuple):
self.tp = handle_ptr(arg_tuple[0])
self.name = arg_tuple[1]
self.defval = arg_tuple[2]
self.isarray = False
self.arraylen = 0
self.arraycvt = None
self.inputarg = True
self.outputarg = False
self.returnarg = False
for m in arg_tuple[3]:
if m == "/O":
self.inputarg = False
self.outputarg = True
self.returnarg = True
elif m == "/IO":
self.inputarg = True
self.outputarg = True
self.returnarg = True
elif m.startswith("/A"):
self.isarray = True
self.arraylen = m[2:].strip()
elif m.startswith("/CA"):
self.isarray = True
self.arraycvt = m[2:].strip()
self.py_inputarg = False
self.py_outputarg = False
def isbig(self):
return self.tp == "Mat" or self.tp == "vector_Mat"# or self.tp.startswith("vector")
def crepr(self):
return "ArgInfo(\"%s\", %d)" % (self.name, self.outputarg)
class FuncVariant(object):
def __init__(self, classname, name, decl, isconstructor):
self.classname = classname
self.name = self.wname = name
self.isconstructor = isconstructor
if self.isconstructor:
if self.wname.startswith("Cv"):
self.wname = self.wname[2:]
else:
self.wname = self.classname
self.rettype = handle_ptr(decl[1])
if self.rettype == "void":
self.rettype = ""
self.args = []
self.array_counters = {}
for a in decl[3]:
ainfo = ArgInfo(a)
if ainfo.isarray and not ainfo.arraycvt:
c = ainfo.arraylen
c_arrlist = self.array_counters.get(c, [])
if c_arrlist:
c_arrlist.append(ainfo.name)
else:
self.array_counters[c] = [ainfo.name]
self.args.append(ainfo)
self.init_pyproto()
def init_pyproto(self):
# string representation of argument list, with '[', ']' symbols denoting optional arguments, e.g.
# "src1, src2[, dst[, mask]]" for cv.add
argstr = ""
# list of all input arguments of the Python function, with the argument numbers:
# [("src1", 0), ("src2", 1), ("dst", 2), ("mask", 3)]
# we keep an argument number to find the respective argument quickly, because
# some of the arguments of the C function may not be present in the Python function (such as array counters)
# or even go in a different order ("heavy" output parameters of the C function
# become the first optional input parameters of the Python function, and thus they are placed right after
# non-optional input parameters)
arglist = []
# the list of "heavy" output parameters. Heavy parameters are the parameters
# that can be expensive to allocate each time, such as vectors and matrices (see isbig).
outarr_list = []
# the list of output parameters. Also includes input/output parameters.
outlist = []
firstoptarg = 1000000
argno = -1
for a in self.args:
argno += 1
if a.name in self.array_counters:
continue
if a.tp in ignored_arg_types:
continue
if a.returnarg:
outlist.append((a.name, argno))
if (not a.inputarg) and a.isbig():
outarr_list.append((a.name, argno))
continue
if not a.inputarg:
continue
if not a.defval:
arglist.append((a.name, argno))
else:
firstoptarg = min(firstoptarg, len(arglist))
# if there are some array output parameters before the first default parameter, they
# are added as optional parameters before the first optional parameter
if outarr_list:
arglist += outarr_list
outarr_list = []
arglist.append((a.name, argno))
if outarr_list:
firstoptarg = min(firstoptarg, len(arglist))
arglist += outarr_list
firstoptarg = min(firstoptarg, len(arglist))
noptargs = len(arglist) - firstoptarg
argnamelist = [aname for aname, argno in arglist]
argstr = ", ".join(argnamelist[:firstoptarg])
argstr = "[, ".join([argstr] + argnamelist[firstoptarg:])
argstr += "]" * noptargs
if self.rettype:
outlist = [("retval", -1)] + outlist
elif self.isconstructor:
assert outlist == []
outlist = [("self", -1)]
if self.isconstructor:
classname = self.classname
if classname.startswith("Cv"):
classname=classname[2:]
outstr = "<%s object>" % (classname,)
elif outlist:
outstr = ", ".join([o[0] for o in outlist])
else:
outstr = "None"
self.py_docstring = "%s(%s) -> %s" % (self.wname, argstr, outstr)
self.py_noptargs = noptargs
self.py_arglist = arglist
for aname, argno in arglist:
self.args[argno].py_inputarg = True
for aname, argno in outlist:
if argno >= 0:
self.args[argno].py_outputarg = True
self.py_outlist = outlist
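# To illustrate with the add-like signature from the comment above (ignoring any
# extra parameters a real cv.add may have): required inputs src1 and src2, a
# "heavy" output dst, and an optional input mask produce
#   py_docstring == "add(src1, src2[, dst[, mask]]) -> dst"
# with firstoptarg == 2, so dst and mask become the two optional Python arguments.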
class FuncInfo(object):
def __init__(self, classname, name, cname, isconstructor):
self.classname = classname
self.name = name
self.cname = cname
self.isconstructor = isconstructor
self.variants = []
def add_variant(self, decl):
self.variants.append(FuncVariant(self.classname, self.name, decl, self.isconstructor))
def get_wrapper_name(self):
name = self.name
if self.classname:
classname = self.classname + "_"
if "[" in name:
name = "getelem"
else:
classname = ""
return "pyopencv_" + classname + name
def get_wrapper_prototype(self):
full_fname = self.get_wrapper_name()
if self.classname and not self.isconstructor:
self_arg = "self"
else:
self_arg = ""
return "static PyObject* %s(PyObject* %s, PyObject* args, PyObject* kw)" % (full_fname, self_arg)
def get_tab_entry(self):
docstring_list = []
have_empty_constructor = False
for v in self.variants:
s = v.py_docstring
if (not v.py_arglist) and self.isconstructor:
have_empty_constructor = True
if s not in docstring_list:
docstring_list.append(s)
# if there are just 2 constructors: default one and some other,
# we simplify the notation.
# Instead of ClassName(args ...) -> object or ClassName() -> object
# we write ClassName([args ...]) -> object
if have_empty_constructor and len(self.variants) == 2:
idx = self.variants[1].py_arglist != []
s = self.variants[idx].py_docstring
p1 = s.find("(")
p2 = s.rfind(")")
docstring_list = [s[:p1+1] + "[" + s[p1+1:p2] + "]" + s[p2:]]
return Template(' {"$py_funcname", (PyCFunction)$wrap_funcname, METH_VARARGS | METH_KEYWORDS, "$py_docstring"},\n'
).substitute(py_funcname = self.variants[0].wname, wrap_funcname=self.get_wrapper_name(),
py_docstring = " or ".join(docstring_list))
def gen_code(self, all_classes):
proto = self.get_wrapper_prototype()
code = "%s\n{\n" % (proto,)
selfinfo = ClassInfo("")
ismethod = self.classname != "" and not self.isconstructor
# full name is needed for error diagnostic in PyArg_ParseTupleAndKeywords
fullname = self.name
if self.classname:
selfinfo = all_classes[self.classname]
if not self.isconstructor:
amp = "&" if selfinfo.issimple else ""
if selfinfo.isalgorithm:
code += gen_template_check_self_algo.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp)
else:
get = "" if selfinfo.issimple else ".get()"
code += gen_template_check_self.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp, get=get)
fullname = selfinfo.wname + "." + fullname
all_code_variants = []
declno = -1
for v in self.variants:
code_decl = ""
code_ret = ""
code_cvt_list = []
code_args = "("
all_cargs = []
parse_arglist = []
# declare all the C function arguments,
# add necessary conversions from Python objects to code_cvt_list,
# form the function/method call,
# and record each argument's (type mapping, parse name) pair in all_cargs
for a in v.args:
if a.tp in ignored_arg_types:
defval = a.defval
if not defval and a.tp.endswith("*"):
defval = 0
assert defval
if not code_args.endswith("("):
code_args += ", "
code_args += defval
all_cargs.append([[None, ""], ""])
continue
tp1 = tp = a.tp
amp = ""
defval0 = ""
if tp.endswith("*"):
tp = tp1 = tp[:-1]
amp = "&"
if tp.endswith("*"):
defval0 = "0"
tp1 = tp.replace("*", "_ptr")
if tp1.endswith("*"):
print("Error: type with star: a.tp=%s, tp=%s, tp1=%s" % (a.tp, tp, tp1))
sys.exit(-1)
amapping = simple_argtype_mapping.get(tp, (tp, "O", defval0))
parse_name = a.name
if a.py_inputarg:
if amapping[1] == "O":
code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,)
parse_name = "pyobj_" + a.name
if a.tp == 'char':
code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)"% (a.name, a.name, a.crepr()))
else:
code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr()))
all_cargs.append([amapping, parse_name])
defval = a.defval
if not defval:
defval = amapping[2]
# "tp arg = tp();" is equivalent to "tp arg;" in the case of complex types
if defval == tp + "()" and amapping[1] == "O":
defval = ""
if a.outputarg and not a.inputarg:
defval = ""
if defval:
code_decl += " %s %s=%s;\n" % (amapping[0], a.name, defval)
else:
code_decl += " %s %s;\n" % (amapping[0], a.name)
if not code_args.endswith("("):
code_args += ", "
code_args += amp + a.name
code_args += ")"
if self.isconstructor:
code_decl += " pyopencv_%s_t* self = 0;\n" % selfinfo.name
if selfinfo.issimple:
templ_prelude = gen_template_simple_call_constructor_prelude
templ = gen_template_simple_call_constructor
else:
templ_prelude = gen_template_call_constructor_prelude
templ = gen_template_call_constructor
code_prelude = templ_prelude.substitute(name=selfinfo.name, cname=selfinfo.cname)
code_fcall = templ.substitute(name=selfinfo.name, cname=selfinfo.cname, args=code_args)
else:
code_prelude = ""
code_fcall = ""
if v.rettype:
code_decl += " " + v.rettype + " retval;\n"
code_fcall += "retval = "
if ismethod:
code_fcall += "_self_->" + self.cname
else:
code_fcall += self.cname
code_fcall += code_args
if code_cvt_list:
code_cvt_list = [""] + code_cvt_list
# add info about the return value, if any, to all_cargs. If there is a non-void return value,
# it is encoded in v.py_outlist as ("retval", -1) pair.
# As [-1] in Python accesses the last element of a list, we automatically handle the return value by
# adding the necessary info to the end of all_cargs list.
if v.rettype:
tp = v.rettype
tp1 = tp.replace("*", "_ptr")
amapping = simple_argtype_mapping.get(tp, (tp, "O", "0"))
all_cargs.append(amapping)
if v.args and v.py_arglist:
# form the format spec for PyArg_ParseTupleAndKeywords
fmtspec = "".join([all_cargs[argno][0][1] for aname, argno in v.py_arglist])
if v.py_noptargs > 0:
fmtspec = fmtspec[:-v.py_noptargs] + "|" + fmtspec[-v.py_noptargs:]
fmtspec += ":" + fullname
# form the argument parse code that:
# - declares the list of keyword parameters
# - calls PyArg_ParseTupleAndKeywords
# - converts complex arguments from PyObject's to native OpenCV types
code_parse = gen_template_parse_args.substitute(
kw_list = ", ".join(['"' + aname + '"' for aname, argno in v.py_arglist]),
fmtspec = fmtspec,
parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]),
code_cvt = " &&\n ".join(code_cvt_list))
else:
code_parse = "if(PyObject_Size(args) == 0 && (kw == NULL || PyObject_Size(kw) == 0))"
if len(v.py_outlist) == 0:
code_ret = "Py_RETURN_NONE"
elif len(v.py_outlist) == 1:
if self.isconstructor:
code_ret = "return (PyObject*)self"
else:
aname, argno = v.py_outlist[0]
code_ret = "return pyopencv_from(%s)" % (aname,)
else:
# there is more than 1 return parameter; form the tuple out of them
fmtspec = "N"*len(v.py_outlist)
backcvt_arg_list = []
for aname, argno in v.py_outlist:
amapping = all_cargs[argno][0]
backcvt_arg_list.append("%s(%s)" % (amapping[2], aname))
code_ret = "return Py_BuildValue(\"(%s)\", %s)" % \
(fmtspec, ", ".join(["pyopencv_from(" + aname + ")" for aname, argno in v.py_outlist]))
all_code_variants.append(gen_template_func_body.substitute(code_decl=code_decl,
code_parse=code_parse, code_prelude=code_prelude, code_fcall=code_fcall, code_ret=code_ret))
if len(all_code_variants)==1:
# if the function/method has only 1 signature, then just put it
code += all_code_variants[0]
else:
# try to execute each signature
code += " PyErr_Clear();\n\n".join([" {\n" + v + " }\n" for v in all_code_variants])
code += "\n return NULL;\n}\n\n"
return code
class PythonWrapperGenerator(object):
def __init__(self):
self.clear()
def clear(self):
self.classes = {}
self.funcs = {}
self.consts = {}
self.code_include = StringIO()
self.code_types = StringIO()
self.code_funcs = StringIO()
self.code_func_tab = StringIO()
self.code_type_reg = StringIO()
self.code_const_reg = StringIO()
self.class_idx = 0
def add_class(self, stype, name, decl):
classinfo = ClassInfo(name, decl)
classinfo.decl_idx = self.class_idx
self.class_idx += 1
if classinfo.name in self.classes:
print("Generator error: class %s (cname=%s) already exists" \
% (classinfo.name, classinfo.cname))
sys.exit(-1)
self.classes[classinfo.name] = classinfo
if classinfo.bases and not classinfo.isalgorithm:
classinfo.isalgorithm = self.classes[classinfo.bases[0]].isalgorithm
def add_const(self, name, decl):
constinfo = ConstInfo(name, decl[1])
if constinfo.name in self.consts:
print("Generator error: constant %s (cname=%s) already exists" \
% (constinfo.name, constinfo.cname))
sys.exit(-1)
self.consts[constinfo.name] = constinfo
def add_func(self, decl):
classname = bareclassname = ""
name = decl[0]
dpos = name.rfind(".")
if dpos >= 0 and name[:dpos] not in ["cv", "cv.ocl"]:
classname = bareclassname = re.sub(r"^cv\.", "", name[:dpos])
name = name[dpos+1:]
dpos = classname.rfind(".")
if dpos >= 0:
bareclassname = classname[dpos+1:]
classname = classname.replace(".", "_")
cname = name
name = re.sub(r"^cv\.", "", name)
name = name.replace(".", "_")
isconstructor = cname == bareclassname
cname = cname.replace(".", "::")
isclassmethod = False
customname = False
for m in decl[2]:
if m == "/S":
isclassmethod = True
elif m.startswith("="):
name = m[1:]
customname = True
func_map = self.funcs
if not classname or isconstructor:
pass
elif isclassmethod:
if not customname:
name = classname + "_" + name
cname = classname + "::" + cname
classname = ""
else:
classinfo = self.classes.get(classname, ClassInfo(""))
if not classinfo.name:
print("Generator error: the class for method %s is missing" % (name,))
sys.exit(-1)
func_map = classinfo.methods
func = func_map.get(name, FuncInfo(classname, name, cname, isconstructor))
func.add_variant(decl)
if len(func.variants) == 1:
func_map[name] = func
def gen_const_reg(self, constinfo):
self.code_const_reg.write("PUBLISH2(%s,%s);\n" % (constinfo.name, constinfo.cname))
def save(self, path, name, buf):
f = open(path + "/" + name, "wt")
f.write(buf.getvalue())
f.close()
def gen(self, srcfiles, output_path):
self.clear()
parser = hdr_parser.CppHeaderParser()
# step 1: scan the headers and build more descriptive maps of classes, consts, functions
for hdr in srcfiles:
self.code_include.write( '#include "{}"\n'.format(hdr[hdr.rindex('opencv2/'):]) )
decls = parser.parse(hdr)
for decl in decls:
name = decl[0]
if name.startswith("struct") or name.startswith("class"):
# class/struct
p = name.find(" ")
stype = name[:p]
name = name[p+1:].strip()
self.add_class(stype, name, decl)
elif name.startswith("const"):
# constant
self.add_const(name.replace("const ", "").strip(), decl)
else:
# function
self.add_func(decl)
# step 2: generate code for the classes and their methods
classlist = list(self.classes.items())
classlist.sort()
for name, classinfo in classlist:
if classinfo.ismap:
self.code_types.write(gen_template_map_type_cvt.substitute(name=name, cname=classinfo.cname))
else:
if classinfo.issimple:
templ = gen_template_simple_type_decl
else:
templ = gen_template_type_decl
self.code_types.write(templ.substitute(name=name, wname=classinfo.wname, cname=classinfo.cname,
cname1=("cv::Algorithm" if classinfo.isalgorithm else classinfo.cname)))
# register classes in the same order as they have been declared.
# this way, base classes will be registered in Python before their derivatives.
classlist1 = [(classinfo.decl_idx, name, classinfo) for name, classinfo in classlist]
classlist1.sort()
for decl_idx, name, classinfo in classlist1:
code = classinfo.gen_code(self.classes)
self.code_types.write(code)
if not classinfo.ismap:
self.code_type_reg.write("MKTYPE2(%s);\n" % (classinfo.name,) )
# step 3: generate the code for all the global functions
funclist = list(self.funcs.items())
funclist.sort()
for name, func in funclist:
code = func.gen_code(self.classes)
self.code_funcs.write(code)
self.code_func_tab.write(func.get_tab_entry())
# step 4: generate the code for constants
constlist = list(self.consts.items())
constlist.sort()
for name, constinfo in constlist:
self.gen_const_reg(constinfo)
# That's it. Now save all the files
self.save(output_path, "pyopencv_generated_include.h", self.code_include)
self.save(output_path, "pyopencv_generated_funcs.h", self.code_funcs)
self.save(output_path, "pyopencv_generated_func_tab.h", self.code_func_tab)
self.save(output_path, "pyopencv_generated_const_reg.h", self.code_const_reg)
self.save(output_path, "pyopencv_generated_types.h", self.code_types)
self.save(output_path, "pyopencv_generated_type_reg.h", self.code_type_reg)
if __name__ == "__main__":
srcfiles = hdr_parser.opencv_hdr_list
dstdir = "/Users/vp/tmp"
if len(sys.argv) > 1:
dstdir = sys.argv[1]
if len(sys.argv) > 2:
srcfiles = sys.argv[2:]
generator = PythonWrapperGenerator()
generator.gen(srcfiles, dstdir)
|
|
#!/usr/bin/env python
# filter_qc 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import os
import sys
import subprocess
import shlex
import common
import logging
import re
from pprint import pprint, pformat
from multiprocessing import cpu_count
import json
logger = logging.getLogger(__name__)
logger.propagate = False
logger.setLevel(logging.INFO)
PICARD_PATH = "/".join([
os.getenv('PICARD_HOME', "."),
"picard.jar"
])
def flagstat_parse(fname):
with open(fname, 'r') as flagstat_file:
if not flagstat_file:
return None
flagstat_lines = flagstat_file.read().splitlines()
qc_dict = {
# values are regular expressions,
# will be replaced with read counts [hiq (QC-passed), lowq (QC-failed)]
'in_total': 'in total',
'duplicates': 'duplicates',
'mapped': 'mapped',
'paired_in_sequencing': 'paired in sequencing',
'read1': 'read1',
'read2': 'read2',
'properly_paired': 'properly paired',
'with_self_mate_mapped': 'with itself and mate mapped',
'singletons': 'singletons',
# i.e. at the end of the line
'mate_mapped_different_chr': 'with mate mapped to a different chr$',
# RE so must escape
'mate_mapped_different_chr_hiQ':
r'with mate mapped to a different chr \(mapQ>=5\)'
}
for (qc_key, qc_pattern) in qc_dict.items():
qc_metrics = next(re.split(qc_pattern, line)
for line in flagstat_lines
if re.search(qc_pattern, line))
(hiq, lowq) = qc_metrics[0].split(' + ')
qc_dict[qc_key] = [int(hiq.rstrip()), int(lowq.rstrip())]
return qc_dict
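# For example, a standard samtools flagstat line like
#   "2826233 + 0 in total (QC-passed reads + QC-failed reads)"
# matches the 'in_total' pattern above and ends up stored as
#   qc_dict['in_total'] == [2826233, 0]
# i.e. [QC-passed count, QC-failed count].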
def dup_parse(fname):
with open(fname, 'r') as dup_file:
if not dup_file:
return None
lines = iter(dup_file.read().splitlines())
for line in lines:
if line.startswith('## METRICS CLASS'):
headers = next(lines).rstrip('\n').lower()
metrics = next(lines).rstrip('\n')
break
headers = headers.split('\t')
metrics = metrics.split('\t')
headers.pop(0)
metrics.pop(0)
dup_qc = dict(zip(headers, metrics))
return dup_qc
def pbc_parse(fname):
with open(fname, 'r') as pbc_file:
if not pbc_file:
return None
lines = pbc_file.read().splitlines()
line = lines[0].rstrip('\n')
# PBC File output:
# TotalReadPairs <tab>
# DistinctReadPairs <tab>
# OneReadPair <tab>
# TwoReadPairs <tab>
# NRF=Distinct/Total <tab>
# PBC1=OnePair/Distinct <tab>
# PBC2=OnePair/TwoPair
headers = ['TotalReadPairs',
'DistinctReadPairs',
'OneReadPair',
'TwoReadPairs',
'NRF',
'PBC1',
'PBC2']
metrics = line.split('\t')
pbc_qc = dict(zip(headers, metrics))
return pbc_qc
def main(input_bam, fastqs, samtools_params, debug):
if len(fastqs) > 1:
paired_end = True
else:
paired_end = False
# create a file handler
handler = logging.FileHandler('filter_qc.log')
if debug:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
logger.addHandler(handler)
# strip only a trailing ".bam" extension; rstrip('.bam') would also eat any trailing 'b'/'a'/'m'/'.' characters
raw_bam_basename = re.sub(r'\.bam$', '', input_bam.split('/')[-1])
subprocess.check_output('set -x; ls -l', shell=True)
filt_bam_prefix = raw_bam_basename + ".filt.srt"
filt_bam_filename = filt_bam_prefix + ".bam"
if paired_end:
# =============================
# Remove unmapped, mate unmapped
# not primary alignment, reads failing platform
# Remove low MAPQ reads
# Only keep properly paired reads
# Obtain name sorted BAM file
# ==================
tmp_filt_bam_prefix = "tmp.%s" % (filt_bam_prefix) # was tmp.prefix.nmsrt
tmp_filt_bam_filename = tmp_filt_bam_prefix + ".bam"
out, err = common.run_pipe([
# filter: -F 1804 FLAG bits to exclude; -f 2 FLAG bits to require;
# -q 30 exclude MAPQ < 30; -u uncompressed output
# exclude FLAG 1804: unmapped, next segment unmapped, secondary
# alignments, not passing platform q, PCR or optical duplicates
# require FLAG 2: properly aligned
"samtools view -F 1804 -f 2 %s -u %s" % (samtools_params, input_bam),
# sort: -n sort by name; - take input from stdin;
# out to specified filename
# Will produce name sorted BAM
"samtools sort -n -@%d -o %s" % (cpu_count(), tmp_filt_bam_filename)])
#logger.info("samtools view -F 1804 -f 2 %s -u %s" % (samtools_params, input_bam))
#logger.info("samtools sort -n - %s" % (tmp_filt_bam_prefix))
#logger.info(err)
if err:
logger.error("samtools error: %s" % (err))
# Remove orphan reads (pair was removed)
# and read pairs mapping to different chromosomes
# Obtain position sorted BAM
subprocess.check_output('set -x; ls -l', shell=True)
out, err = common.run_pipe([
# fill in mate coordinates, ISIZE and mate-related flags
# fixmate requires name-sorted alignment; -r removes secondary and
# unmapped (redundant here because already done above?)
# - send output to stdout
"samtools fixmate -r %s -" % (tmp_filt_bam_filename),
# repeat filtering after mate repair
"samtools view -F 1804 -f 2 -u -",
# produce the coordinate-sorted BAM
"samtools sort -@%d -o %s" % (cpu_count(), filt_bam_filename)])
#logger.info("samtools fixmate -r %s -" % (tmp_filt_bam_filename))
#logger.info("samtools view -F 1804 -f 2 -u -")
#logger.info("samtools sort - %s" % (filt_bam_prefix))
#logger.info(err)
subprocess.check_output('set -x; ls -l', shell=True)
else: # single-end data
# =============================
# Remove unmapped, mate unmapped
# not primary alignment, reads failing platform
# Remove low MAPQ reads
# Obtain name sorted BAM file
# ==================
with open(filt_bam_filename, 'w') as fh:
samtools_filter_command = (
"samtools view -F 1804 %s -b %s"
% (samtools_params, input_bam)
)
logger.info(samtools_filter_command)
subprocess.check_call(
shlex.split(samtools_filter_command),
stdout=fh)
subprocess.check_output('set -x; ls -l', shell=True)
# ========================
# Mark duplicates
# ======================
tmp_filt_bam_filename = raw_bam_basename + ".dupmark.bam"
dup_file_qc_filename = raw_bam_basename + ".dup.qc"
picard_string = ' '.join([
"java -Xmx4G -jar",
PICARD_PATH,
"MarkDuplicates",
"INPUT=%s" % (filt_bam_filename),
"OUTPUT=%s" % (tmp_filt_bam_filename),
"METRICS_FILE=%s" % (dup_file_qc_filename),
"VALIDATION_STRINGENCY=LENIENT",
"ASSUME_SORTED=true",
"REMOVE_DUPLICATES=false"
])
logger.info(picard_string)
subprocess.check_output(shlex.split(picard_string))
subprocess.check_output('set -x; ls -l', shell=True)
os.rename(tmp_filt_bam_filename, filt_bam_filename)
subprocess.check_output('set -x; ls -l', shell=True)
if paired_end:
final_bam_prefix = raw_bam_basename + ".filt.srt.nodup.final"
else:
final_bam_prefix = raw_bam_basename + ".filt.nodup.srt.final"
final_bam_filename = final_bam_prefix + ".bam" # To be stored
final_bam_index_filename = final_bam_filename + ".bai" # To be stored
# QC file
final_bam_file_mapstats_filename = final_bam_prefix + ".flagstat.qc"
if paired_end:
samtools_dedupe_command = \
"samtools view -F 1804 -f 2 -b %s" % (filt_bam_filename)
else:
samtools_dedupe_command = \
"samtools view -F 1804 -b %s" % (filt_bam_filename)
# ============================
# Remove duplicates
# Index final position sorted BAM
# ============================
with open(final_bam_filename, 'w') as fh:
logger.info(samtools_dedupe_command)
subprocess.check_call(
shlex.split(samtools_dedupe_command),
stdout=fh)
# Index final bam file
samtools_index_command = \
"samtools index %s %s" % (final_bam_filename, final_bam_index_filename)
logger.info(samtools_index_command)
subprocess.check_output(shlex.split(samtools_index_command))
# Generate mapping statistics
with open(final_bam_file_mapstats_filename, 'w') as fh:
flagstat_command = "samtools flagstat %s" % (final_bam_filename)
logger.info(flagstat_command)
subprocess.check_call(shlex.split(flagstat_command), stdout=fh)
# =============================
# Compute library complexity
# =============================
# Sort by name
# convert to bedPE and obtain fragment coordinates
# sort by position and strand
# Obtain unique count statistics
pbc_file_qc_filename = final_bam_prefix + ".pbc.qc"
# PBC File output
# TotalReadPairs [tab]
# DistinctReadPairs [tab]
# OneReadPair [tab]
# TwoReadPairs [tab]
# NRF=Distinct/Total [tab]
# PBC1=OnePair/Distinct [tab]
# PBC2=OnePair/TwoPair
if paired_end:
steps = [
"samtools sort -n -@%d %s" % (cpu_count(), filt_bam_filename),
"bamToBed -bedpe -i stdin",
r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$4,$6,$9,$10}'"""]
else:
steps = [
"bamToBed -i %s" % (filt_bam_filename),
r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$3,$6}'"""]
steps.extend([
# TODO this should be implemented as an explicit list of allowable
# names, so that mapping can be done to a complete reference
"grep -v 'chrM'",
"sort",
"uniq -c",
r"""awk 'BEGIN{mt=0;m0=0;m1=0;m2=0} ($1==1){m1=m1+1} ($1==2){m2=m2+1} {m0=m0+1} {mt=mt+$1} END{printf "%d\t%d\t%d\t%d\t%f\t%f\t%f\n",mt,m0,m1,m2,m0/mt,m1/m0,m1/m2}'"""
])
out, err = common.run_pipe(steps, pbc_file_qc_filename)
if err:
logger.error("PBC file error: %s" % (err))
logger.info("Uploading results files to the project")
print (final_bam_filename)
print (final_bam_index_filename)
print (dup_file_qc_filename)
print (pbc_file_qc_filename)
#filtered_bam = dxpy.upload_local_file(final_bam_filename)
#filtered_bam_index = dxpy.upload_local_file(final_bam_index_filename)
#filtered_mapstats = \
# dxpy.upload_local_file(final_bam_file_mapstats_filename)
#dup_file = dxpy.upload_local_file(dup_file_qc_filename)
#pbc_file = dxpy.upload_local_file(pbc_file_qc_filename)
flagstat_qc = flagstat_parse(final_bam_file_mapstats_filename)
dup_qc = dup_parse(dup_file_qc_filename)
pbc_qc = pbc_parse(pbc_file_qc_filename)
logger.info("dup_qc: %s" % (dup_qc))
logger.info("pbc_qc: %s" % (pbc_qc))
# Return links to the output files
output = {
#"filtered_bam": dxpy.dxlink(filtered_bam),
#"filtered_bam_index": dxpy.dxlink(filtered_bam_index),
#"filtered_mapstats": dxpy.dxlink(filtered_mapstats),
#"dup_file_qc": dxpy.dxlink(dup_file),
#"pbc_file_qc": dxpy.dxlink(pbc_file),
"paired_end": paired_end,
'n_filtered_mapped_reads': flagstat_qc.get('mapped')[0],
"NRF": pbc_qc.get('NRF'),
"PBC1": pbc_qc.get('PBC1'),
"PBC2": pbc_qc.get('PBC2'),
"duplicate_fraction": dup_qc.get('percent_duplication')
}
with open('filter_qc.json', 'w') as f:
json.dump(output, f, sort_keys=True, indent=4, separators=(',', ': '))
logger.info("Exiting with output:\n%s" % (pformat(output)))
return output
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2:], '-q 30', False)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for warping image and point data between coordinate systems."""
from concurrent import futures
from typing import Dict, Optional, Sequence, Tuple
from connectomics.common import bounding_box
from connectomics.common import box_generator
from connectomics.segmentation import labels
# pylint:disable=g-import-not-at-top
try:
from cvx2 import latest as cvx2
except ImportError:
import cv2 as cvx2 # pytype:disable=import-error
import numpy as np
from scipy import interpolate
from scipy import ndimage
import skimage.exposure
from sofima import map_utils
# pylint:enable=g-import-not-at-top
def _cvx2_interpolation(inter_scheme: str):
inter_map = {
'nearest': cvx2.INTER_NEAREST,
'linear': cvx2.INTER_LINEAR,
'cubic': cvx2.INTER_CUBIC,
'lanczos': cvx2.INTER_LANCZOS4
}
return inter_map[inter_scheme]
def _relabel_segmentation(data, orig_to_low, old_uids):
new_uids = frozenset(np.unique(data.astype(np.uint64)))
# No new IDs are introduced by the warping.
diff_ids = (new_uids - old_uids) - {0}
assert not diff_ids, f'Found unexpected new IDs: {diff_ids}'
orig_ids, low_ids = zip(*orig_to_low)
return labels.relabel(
data.astype(np.uint64), np.array(low_ids, dtype=np.uint64),
np.array(orig_ids, dtype=np.uint64))
def warp_subvolume(image: np.ndarray,
image_box: bounding_box.BoundingBoxBase,
coord_map: np.ndarray,
map_box: bounding_box.BoundingBoxBase,
stride: float,
out_box: bounding_box.BoundingBoxBase,
interpolation: Optional[str] = None,
offset: float = 0.) -> np.ndarray:
"""Warps a subvolume of data according to a coordinate map.
Args:
image: [n, z, y, x] data to warp; valid data types are those supported by
OpenCV's `remap` as well as uint64, which is treated as segmentation data
image_box: bounding box identifying the part of the volume from which the
image data was extracted
coord_map: [2, z, y, x] xy 'inverse' coordinate map in relative format (each
entry in the map specifies the source coordinate in 'image' from which to
read data)
map_box: bounding box identifying the part of the volume from which the
coordinate map was extracted
stride: length in pixels of the image corresponding to a single unit (pixel)
of the coordinate map
out_box: bounding box for the warped data
interpolation: interpolation scheme to use; defaults to nearest neighbor for
uint64 data, and Lanczos for other types
offset: (deprecated, do not use) non-zero values necessary to reproduce some
old renders
Returns:
warped image covering 'out_box'
"""
# Segmentation warping.
if image.dtype == np.uint64:
interpolation = cvx2.INTER_NEAREST
image, orig_to_low = labels.make_contiguous(image)
assert np.max(image) < 2**31
assert np.min(image) >= 0
image = image.astype(np.int32)
old_uids = frozenset(np.unique(image))
# Image warping.
else:
orig_to_low = None
if interpolation is None:
interpolation = cvx2.INTER_LANCZOS4
elif isinstance(interpolation, str):
interpolation = _cvx2_interpolation(interpolation)
orig_dtype = image.dtype
if image.dtype == np.uint32:
if image.max() >= 2**16:
raise ValueError(
'Image warping supported up to uint16 only. For segmentation data, '
'use uint64.')
image = image.astype(np.uint16)
skipped_sections = frozenset(
np.where(np.all(np.isnan(coord_map), axis=(0, 2, 3)))[0])
# Convert values within the coordinate map so that they are
# within the local coordinate system of 'image'.
abs_map = map_utils.to_absolute(coord_map, stride)
abs_map += (map_box.start[:2] * stride - image_box.start[:2] +
offset).reshape(2, 1, 1, 1)
# Coordinates of the map nodes within the local coordinate
# system of 'out_box'.
map_y, map_x = np.ogrid[:coord_map.shape[2], :coord_map.shape[3]]
map_y = (map_y + map_box.start[1]) * stride - out_box.start[1] + offset
map_x = (map_x + map_box.start[0]) * stride - out_box.start[0] + offset
map_points = (map_y.ravel(), map_x.ravel())
warped = np.zeros(
shape=[image.shape[0]] + list(out_box.size[::-1]), dtype=image.dtype)
out_y, out_x = np.mgrid[:out_box.size[1], :out_box.size[0]]
try:
maptype = cvx2.CVX_16SC2
except AttributeError:
maptype = cvx2.CV_16SC2
for z in range(0, image.shape[1]):
if z in skipped_sections:
continue
dense_x = interpolate.RegularGridInterpolator(
map_points, abs_map[0, z, ...], bounds_error=False, fill_value=None)
dense_y = interpolate.RegularGridInterpolator(
map_points, abs_map[1, z, ...], bounds_error=False, fill_value=None)
# dxy: [0 .. out_box.size] -> [coord within image]
dx = dense_x((out_y, out_x)).astype(np.float32)
dy = dense_y((out_y, out_x)).astype(np.float32)
dx, dy = cvx2.convertMaps(
dx,
dy,
dstmap1type=maptype,
nninterpolation=(interpolation == cvx2.INTER_NEAREST))
for c in range(0, image.shape[0]):
warped[c, z, ...] = cvx2.remap(
image[c, z, ...], dx, dy, interpolation=interpolation)
# Map IDs back to the original space, which might be beyond the range of
# int32.
if orig_to_low is not None:
warped = _relabel_segmentation(warped, orig_to_low, old_uids)
else:
warped = warped.astype(orig_dtype)
return warped
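# Minimal shape sketch (illustrative values, not executed here): one uint8
# section warped with an all-zero (identity) relative map of stride 10:
#   img = np.zeros((1, 1, 40, 40), np.uint8)  # [n, z, y, x]
#   image_box = bounding_box.BoundingBox(start=(0, 0, 0), size=(40, 40, 1))
#   rel_map = np.zeros((2, 1, 4, 4), np.float32)  # 4 x 4 nodes * stride 10 = 40 px
#   map_box = bounding_box.BoundingBox(start=(0, 0, 0), size=(4, 4, 1))
#   warped = warp_subvolume(img, image_box, rel_map, map_box, stride=10.0,
#                           out_box=image_box)  # -> shape (1, 1, 40, 40)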
def ndimage_warp(
image: np.ndarray,
coord_map: np.ndarray,
stride: Sequence[float],
work_size: Sequence[int],
overlap: Sequence[int],
order=1,
map_coordinates=ndimage.map_coordinates,
image_box: Optional[bounding_box.BoundingBoxBase] = None,
map_box: Optional[bounding_box.BoundingBoxBase] = None,
out_box: Optional[bounding_box.BoundingBoxBase] = None) -> np.ndarray:
"""Warps a subvolume of data using ndimage.map_coordinates.
Args:
image: [z, ] y, x data to warp
coord_map: [N, [z,] y, x] coordinate map
stride: [z,] y, x length in pixels of the image corresponding to a single
pixel of the coordinate map
work_size: xy[z] size of the subvolume to warp at a time; use smaller sizes
to limit RAM usage
overlap: xy[z] overlap between the subvolumes within which to do warping
order: interpolation order to use (passed to ndimage.map_coordinates)
map_coordinates: a callable with the signature of ndimage.map_coordinates to
use for warping
image_box: bounding box for the image data
map_box: bounding box for the coordinate map; if specified, image_box has to
also be defined; if not specified, coord_map's origin is assumed to lie at
the origin of 'image'
out_box: bounding box for which to generate warped data; if not specified,
assumed to be the same as image_box
Returns:
warped image
"""
shape = coord_map.shape[1:] # ignore xy[z] channel
dim = len(shape)
assert dim == len(stride)
assert dim == len(overlap)
assert dim == len(work_size)
assert dim == image.ndim
orig_to_low = None
if image.dtype == np.uint64:
image, orig_to_low = labels.make_contiguous(image)
old_uids = frozenset(np.unique(image))
order = 0
src_map = map_utils.to_absolute(coord_map, stride)
if map_box is not None:
if image_box is None:
raise ValueError('image_box has to be specified when map_box is used.')
src_map += (map_box.start[:dim] * stride[::-1] -
image_box.start[:dim]).reshape(dim, 1, 1, 1)
sub_dim = 0
image_size_xyz = image.shape[::-1]
if dim == 2:
work_size = list(work_size) + [1]
overlap = list(overlap) + [0]
image_size_xyz = list(image_size_xyz) + [1]
sub_dim = 1
if out_box is not None:
warped = np.zeros(shape=out_box.size[::-1], dtype=image.dtype)
else:
warped = np.zeros_like(image)
out_box = bounding_box.BoundingBox(start=(0, 0, 0), size=image_size_xyz)
calc = box_generator.BoxGenerator(
outer_box=bounding_box.BoundingBox(start=(0, 0, 0), size=out_box.size),
box_size=work_size,
box_overlap=overlap,
back_shift_small_boxes=True)
if map_box is not None:
assert out_box is not None
offset = (map_box.start * stride[::-1] - out_box.start)[::-1]
else:
offset = (0, 0, 0)
for i in range(calc.num_boxes):
in_sub_box = calc.generate(i)[1]
sel = [
np.s_[start:end] for start, end in zip(in_sub_box.start[::-1][sub_dim:],
in_sub_box.end[::-1][sub_dim:])
]
src_coords = np.mgrid[sel]
src_coords = [(c - o) / s for c, s, o in zip(src_coords, stride, offset)]
dense_coords = [
map_coordinates(eval_coords, src_coords, order=1)
for eval_coords in src_map[::-1]
]
out_sub_box = calc.index_to_cropped_box(i)
# Warp image data for the current subvolume.
sub_warped = map_coordinates(image, dense_coords, order=order)
rel_box = out_sub_box.translate(-in_sub_box.start)
warped[out_sub_box.to_slice3d()[sub_dim:]] = sub_warped[
rel_box.to_slice3d()[sub_dim:]]
if orig_to_low is not None:
warped = _relabel_segmentation(warped, orig_to_low, old_uids)
return warped.astype(image.dtype)
def render_tiles(
tiles: Dict[Tuple[int, int], np.ndarray],
coord_maps: Dict[Tuple[int, int], np.ndarray],
stride: Tuple[int, int] = (20, 20),
margin: int = 50,
parallelism: int = 1,
width: Optional[int] = None,
height: Optional[int] = None,
use_clahe: bool = False,
clahe_kwargs: ... = None,
margin_overrides: Optional[Dict[Tuple[int, int], Tuple[int, int, int,
int]]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""Warps a collection of tiles into a larger image.
All values in the 'tiles' and 'positions' maps are assumed to
have the same shape.
Args:
tiles: map from (x, y) tile coordinates to tile image content
coord_maps: map from (x, y) tile coordinates to coordinate map for the
corresponding tile; the map is expected to have shape [2,1,my,mx] where mx
and my are the horizontal/vertical size of the tile, divided by the stride
stride: stride of the coordinate map in pixels
margin: number of pixels at the tile edges to exclude from rendering
parallelism: number of threads used to render the tiles
width: width of the target image in pixels; inferred from 'tiles' when not
provided
height: height of the target image in pixels; inferred from 'tiles' when not
provided
use_clahe: whether to apply CLAHE prior to warping
clahe_kwargs: passed to skimage.exposure.equalize_adapthist
margin_overrides: optional map from (x, y) tile coordinates to a tuple of
(top, bottom, left, right) margin sizes in pixels; overrides the global
default provided in 'margin'.
Returns:
tuple of:
image with the warped tiles,
binary array with the same shape as the image;
'true' pixels in the latter array indicate locations that have been filled
with tile content during warping; both arrays are (height, width)-shaped
"""
if stride[0] != stride[1]:
raise NotImplementedError(
'Currently only equal strides in XY are supported.')
any_tile = next(iter(tiles.values()))
img_yx = any_tile.shape
image_box = bounding_box.BoundingBox(
start=(0, 0, 0), size=(img_yx[1], img_yx[0], 1))
map_yx = next(iter(coord_maps.values())).shape[-2:]
map_box = bounding_box.BoundingBox(
start=(0, 0, 0), size=(map_yx[1], map_yx[0], 1))
# Infer target image size if necessary.
if width is None or height is None:
max_x, max_y = 0, 0
for x, y in tiles.keys():
max_x = max(x, max_x)
max_y = max(y, max_y)
height, width = img_yx[0] * (max_y + 1), img_yx[1] * (max_x + 1)
ret = np.zeros((height, width), dtype=any_tile.dtype)
ret_mask = np.zeros((height, width), dtype=bool)
if clahe_kwargs is None:
clahe_kwargs = {}
def _render_tile(tile_x, tile_y, coord_map):
img = tiles.get((tile_x, tile_y), None)
if img is None:
return
tg_box = map_utils.outer_box(coord_map, map_box, stride[0])
# Add context to avoid rounding issues.
tg_box = tg_box.adjusted_by(start=(-1, -1, 0), end=(1, 1, 0))
inverted_map = map_utils.invert_map(coord_map, map_box, tg_box, stride[0])
inverted_map = map_utils.fill_missing(inverted_map, extrapolate=True)
# Margin removal here is necessary because tiles are sometimes a bit
# deformed over the first few pixels. Cutting based on actual tile-tile
# overlaps works, but will leave holes at the corners.
mask = np.zeros_like(img)
if margin_overrides is not None and (tile_x, tile_y) in margin_overrides:
mo = margin_overrides[tile_x, tile_y]
mask[mo[0]:-(mo[1] + 1), mo[2]:-(mo[3] + 1)] = 1
else:
mask[margin:-(margin + 1), margin:-(margin + 1)] = 1
if use_clahe:
img = (skimage.exposure.equalize_adapthist(img, **clahe_kwargs) *
np.iinfo(img.dtype).max).astype(img.dtype)
to_warp = np.concatenate(
[img[np.newaxis, np.newaxis, ...], mask[np.newaxis, np.newaxis, ...]],
axis=0)
out_box = image_box.translate(((tg_box.start[0] + 1) * stride[1],
(tg_box.start[1] + 1) * stride[0], 0))
out_box = bounding_box.BoundingBox(
start=out_box.start,
size=(tg_box.size[0] * stride[1], tg_box.size[1] * stride[0], 1))
warped_img, warped_mask = warp_subvolume(
to_warp, image_box, inverted_map, tg_box, stride[0], out_box=out_box)
warped_img = warped_img[0, ...]
warped_mask = warped_mask[0, ...].astype(bool)
# Position in the global coordinate space is relative to the default tile
# position.
y0 = img_yx[0] * tile_y + out_box.start[1]
x0 = img_yx[1] * tile_x + out_box.start[0]
# Trim warped content if necessary.
if x0 < 0:
warped_img = warped_img[:, -x0:]
warped_mask = warped_mask[:, -x0:]
x0 = 0
if y0 < 0:
warped_img = warped_img[-y0:, :]
warped_mask = warped_mask[-y0:, :]
y0 = 0
out = ret[y0:y0 + warped_img.shape[0], x0:x0 + warped_img.shape[1]]
os = out.shape
warped_mask = warped_mask[:os[0], :os[1]]
warped_img = warped_img[:os[0], :os[1]]
ret_mask[y0:y0 + warped_img.shape[0],
x0:x0 + warped_img.shape[1]][warped_mask] = True
# If we failed to render any locations in warped_img, do not copy them to
# the canvas.
warped_mask &= warped_img > 0
out[warped_mask] = warped_img[warped_mask]
if parallelism > 1:
fs = set()
with futures.ThreadPoolExecutor(max_workers=parallelism) as exc:
for (x, y), coord_map in coord_maps.items():
fs.add(
exc.submit(_render_tile, tile_x=x, tile_y=y, coord_map=coord_map))
for f in futures.as_completed(fs):
f.result()
else:
for (x, y), coord_map in coord_maps.items():
_render_tile(tile_x=x, tile_y=y, coord_map=coord_map)
return ret, ret_mask
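# Usage sketch (illustrative values; identity relative maps): two 100x100 tiles
# side by side with a 20-px map stride, i.e. 5x5 coordinate maps per tile:
#   tiles = {(0, 0): np.full((100, 100), 128, np.uint8),
#            (1, 0): np.full((100, 100), 128, np.uint8)}
#   maps = {key: np.zeros((2, 1, 5, 5), np.float32) for key in tiles}
#   image, mask = render_tiles(tiles, maps, stride=(20, 20), margin=5)
#   # image.shape == mask.shape == (100, 200)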
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in houses:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can.
- Requests and responses are still limited to 1MB each, so if you have large
entities or try to fetch or put many of them at once, your requests may fail.
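For example (an illustrative sketch, not taken from the original docs), prefer a
single batched fetch:
  houses = models.House.all().fetch(500)
over iterating the query object directly:
  for a_house in models.House.all():
    ...
since iteration issues repeated Next calls in small batches behind the scenes
(see default_result_count below).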
"""
import google
import os
import pickle
import random
import sha
import sys
import thread
import threading
import yaml
from google.appengine.api import datastore
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
class Error(Exception):
"""Base class for exceptions in this module."""
class ConfigurationError(Error):
"""Exception for configuration errors."""
class UnknownJavaServerError(Error):
"""Exception for exceptions returned from a Java remote_api handler."""
def GetUserAgent():
"""Determines the value of the 'User-agent' header to use for HTTP requests.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
"""
product_tokens = []
product_tokens.append("Google-remote_api/1.0")
product_tokens.append(appengine_rpc.GetPlatformToken())
python_version = ".".join(str(i) for i in sys.version_info)
product_tokens.append("Python/%s" % python_version)
return " ".join(product_tokens)
def GetSourceName():
return "Google-remote_api-1.0"
class TransactionData(object):
"""Encapsulates data about an individual transaction."""
def __init__(self, thread_id):
self.thread_id = thread_id
self.preconditions = {}
self.entities = {}
class RemoteStub(object):
"""A stub for calling services on a remote server over HTTP.
You can use this to stub out any service that the remote server supports.
"""
def __init__(self, server, path, _test_stub_map=None):
"""Constructs a new RemoteStub that communicates with the specified server.
Args:
server: An instance of a subclass of
google.appengine.tools.appengine_rpc.AbstractRpcServer.
path: The path to the handler this stub should send requests to.
"""
self._server = server
self._path = path
self._test_stub_map = _test_stub_map
def _PreHookHandler(self, service, call, request, response):
pass
def _PostHookHandler(self, service, call, request, response):
pass
def MakeSyncCall(self, service, call, request, response):
self._PreHookHandler(service, call, request, response)
try:
test_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
if test_stub:
test_stub.MakeSyncCall(service, call, request, response)
else:
self._MakeRealSyncCall(service, call, request, response)
finally:
self._PostHookHandler(service, call, request, response)
def _MakeRealSyncCall(self, service, call, request, response):
request_pb = remote_api_pb.Request()
request_pb.set_service_name(service)
request_pb.set_method(call)
request_pb.mutable_request().set_contents(request.Encode())
response_pb = remote_api_pb.Response()
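# Serialize the wrapped request, POST it to the remote handler, then unpack the
# wrapped response: application errors and pickled exceptions are re-raised
# locally; otherwise the payload is decoded into the caller's response proto.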
encoded_request = request_pb.Encode()
encoded_response = self._server.Send(self._path, encoded_request)
response_pb.ParseFromString(encoded_response)
if response_pb.has_application_error():
error_pb = response_pb.application_error()
raise apiproxy_errors.ApplicationError(error_pb.code(),
error_pb.detail())
elif response_pb.has_exception():
raise pickle.loads(response_pb.exception().contents())
elif response_pb.has_java_exception():
raise UnknownJavaServerError("An unknown error has occurred in the "
"Java remote_api handler for this call.")
else:
response.ParseFromString(response_pb.response().contents())
def CreateRPC(self):
return apiproxy_rpc.RPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
"""A specialised stub for accessing the App Engine datastore remotely.
A specialised stub is required because there are some datastore operations
that preserve state between calls. This stub makes queries possible.
Transactions are emulated by buffering mutations locally and sending them,
together with read preconditions, to the server in a single call at commit time.
"""
def __init__(self, server, path, default_result_count=20,
_test_stub_map=None):
"""Constructor.
Args:
server: The server name to connect to.
path: The URI path on the server.
default_result_count: The number of items to fetch, by default, in a
datastore Query or Next operation. This affects the batch size of
query iterators.
"""
super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
self.default_result_count = default_result_count
self.__queries = {}
self.__transactions = {}
self.__next_local_cursor = 1
self.__local_cursor_lock = threading.Lock()
self.__next_local_tx = 1
self.__local_tx_lock = threading.Lock()
def MakeSyncCall(self, service, call, request, response):
assert service == 'datastore_v3'
explanation = []
assert request.IsInitialized(explanation), explanation
handler = getattr(self, '_Dynamic_' + call, None)
if handler:
handler(request, response)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
response)
assert response.IsInitialized(explanation), explanation
def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
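# Run the query remotely, then remember it under a locally generated cursor id
# so that _Dynamic_Next can emulate cursors by re-running the query with an
# advanced offset and a reduced limit.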
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'RunQuery', query, query_result)
if cursor_id is None:
self.__local_cursor_lock.acquire()
try:
cursor_id = self.__next_local_cursor
self.__next_local_cursor += 1
finally:
self.__local_cursor_lock.release()
if query_result.more_results():
query.set_offset(query.offset() + query_result.result_size())
if query.has_limit():
query.set_limit(query.limit() - query_result.result_size())
self.__queries[cursor_id] = query
else:
self.__queries[cursor_id] = None
query_result.mutable_cursor().set_cursor(cursor_id)
def _Dynamic_Next(self, next_request, query_result):
cursor_id = next_request.cursor().cursor()
if cursor_id not in self.__queries:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Cursor %d not found' % cursor_id)
query = self.__queries[cursor_id]
if query is None:
query_result.set_more_results(False)
return
else:
if next_request.has_count():
query.set_count(next_request.count())
else:
query.clear_count()
self._Dynamic_RunQuery(query, query_result, cursor_id)
def _Dynamic_Get(self, get_request, get_response):
txid = None
if get_request.has_transaction():
txid = get_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
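# Inside a transaction, only fetch keys that are not already buffered locally,
# and record a hash of each fetched entity as a precondition for commit.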
keys = [(k, k.Encode()) for k in get_request.key_list()]
new_request = datastore_pb.GetRequest()
for key, enckey in keys:
if enckey not in txdata.entities:
new_request.add_key().CopyFrom(key)
else:
new_request = get_request
if new_request.key_size() > 0:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Get', new_request, get_response)
if txid is not None:
newkeys = new_request.key_list()
entities = get_response.entity_list()
for key, entity in zip(newkeys, entities):
entity_hash = None
if entity.has_entity():
entity_hash = sha.new(entity.entity().Encode()).digest()
txdata.preconditions[key.Encode()] = (key, entity_hash)
new_response = datastore_pb.GetResponse()
it = iter(get_response.entity_list())
for key, enckey in keys:
if enckey in txdata.entities:
cached_entity = txdata.entities[enckey][1]
if cached_entity:
new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
else:
new_response.add_entity()
else:
new_entity = it.next()
if new_entity.has_entity():
assert new_entity.entity().key() == key
new_response.add_entity().CopyFrom(new_entity)
else:
new_response.add_entity()
get_response.CopyFrom(new_response)
def _Dynamic_Put(self, put_request, put_response):
if put_request.has_transaction():
entities = put_request.entity_list()
requires_id = lambda x: x.id() == 0 and not x.has_name()
new_ents = [e for e in entities
if requires_id(e.key().path().element_list()[-1])]
id_request = remote_api_pb.PutRequest()
if new_ents:
for ent in new_ents:
e = id_request.add_entity()
e.mutable_key().CopyFrom(ent.key())
e.mutable_entity_group()
id_response = datastore_pb.PutResponse()
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'GetIDs', id_request, id_response)
assert id_request.entity_size() == id_response.key_size()
for key, ent in zip(id_response.key_list(), new_ents):
ent.mutable_key().CopyFrom(key)
ent.mutable_entity_group().add_element().CopyFrom(
key.path().element(0))
txid = put_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
for entity in entities:
txdata.entities[entity.key().Encode()] = (entity.key(), entity)
put_response.add_key().CopyFrom(entity.key())
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Put', put_request, put_response)
def _Dynamic_Delete(self, delete_request, response):
if delete_request.has_transaction():
txid = delete_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
for key in delete_request.key_list():
txdata.entities[key.Encode()] = (key, None)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Delete', delete_request, response)
def _Dynamic_BeginTransaction(self, request, transaction):
self.__local_tx_lock.acquire()
try:
txid = self.__next_local_tx
self.__transactions[txid] = TransactionData(thread.get_ident())
self.__next_local_tx += 1
finally:
self.__local_tx_lock.release()
transaction.set_handle(txid)
def _Dynamic_Commit(self, transaction, transaction_response):
txid = transaction.handle()
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
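# Build a single TransactionRequest from the buffered state: each recorded
# precondition guards against concurrent modification, and buffered entities
# become puts (or deletes when the buffered value is None).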
tx = remote_api_pb.TransactionRequest()
for key, hash in txdata.preconditions.values():
precond = tx.add_precondition()
precond.mutable_key().CopyFrom(key)
if hash:
precond.set_hash(hash)
puts = tx.mutable_puts()
deletes = tx.mutable_deletes()
for key, entity in txdata.entities.values():
if entity:
puts.add_entity().CopyFrom(entity)
else:
deletes.add_key().CopyFrom(key)
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'Transaction',
tx, datastore_pb.PutResponse())
def _Dynamic_Rollback(self, transaction, transaction_response):
txid = transaction.handle()
self.__local_tx_lock.acquire()
try:
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
finally:
self.__local_tx_lock.release()
def _Dynamic_CreateIndex(self, index, id_response):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_UpdateIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_DeleteIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
ALL_SERVICES = set([
'capability_service',
'datastore_v3',
'images',
'mail',
'memcache',
'taskqueue',
'urlfetch',
'xmpp',
])
def ConfigureRemoteApi(app_id,
path,
auth_func,
servername=None,
rpc_server_factory=appengine_rpc.HttpRpcServer,
rtok=None,
secure=False,
services=None,
default_auth_domain=None):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Either servername must be provided or app_id must not be None. If app_id
is None and a servername is provided, this function will send a request
to the server to retrieve the app_id.
Args:
app_id: The app_id of your app, as declared in app.yaml.
path: The path to the remote_api handler for your app
(for example, '/remote_api').
auth_func: A function that takes no arguments and returns a
(username, password) tuple. This will be called if your application
requires authentication to access the remote_api handler (it should!)
and you do not already have a valid auth cookie.
servername: The hostname your app is deployed on. Defaults to
<app_id>.appspot.com.
rpc_server_factory: A factory to construct the rpc server for the datastore.
rtok: The validation token to send with app_id lookups. If None, a random
token is used.
secure: Use SSL when communicating with the server.
services: A list of services to set up stubs for. If specified, only those
services are configured; by default all supported services are configured.
default_auth_domain: The authentication domain to use by default.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
ConfigurationError: if there is an error configuring the remote API stubs.
"""
if not servername and not app_id:
raise ConfigurationError('app_id or servername required')
if not servername:
servername = '%s.appspot.com' % (app_id,)
server = rpc_server_factory(servername, auth_func, GetUserAgent(),
GetSourceName(), debug_data=False, secure=secure)
if not app_id:
if not rtok:
random.seed()
rtok = str(random.random())[2:]
urlargs = {'rtok': rtok}
response = server.Send(path, payload=None, **urlargs)
if not response.startswith('{'):
raise ConfigurationError(
'Invalid response received from server: %s' % response)
app_info = yaml.load(response)
if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
raise ConfigurationError('Error parsing app_id lookup response')
if app_info['rtok'] != rtok:
raise ConfigurationError('Token validation failed during app_id lookup. '
'(sent %s, got %s)' % (repr(rtok),
repr(app_info['rtok'])))
app_id = app_info['app_id']
if services is not None:
services = set(services)
unsupported = services.difference(ALL_SERVICES)
if unsupported:
raise ConfigurationError('Unsupported service(s): %s'
% (', '.join(unsupported),))
else:
services = set(ALL_SERVICES)
os.environ['APPLICATION_ID'] = app_id
if default_auth_domain:
os.environ['AUTH_DOMAIN'] = default_auth_domain
elif 'AUTH_DOMAIN' not in os.environ:
os.environ['AUTH_DOMAIN'] = 'gmail.com'
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if 'datastore_v3' in services:
services.remove('datastore_v3')
datastore_stub = RemoteDatastoreStub(server, path)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
stub = RemoteStub(server, path)
for service in services:
apiproxy_stub_map.apiproxy.RegisterStub(service, stub)
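# A minimal usage sketch (assumed, not from the original docs): configure only
# the datastore and memcache stubs against a deployed app, over SSL.
#
#   ConfigureRemoteApi('my-app', '/remote_api', auth_func,
#                      services=['datastore_v3', 'memcache'], secure=True)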
def MaybeInvokeAuthentication():
"""Sends an empty request through to the configured end-point.
If authentication is necessary, this will cause the rpc_server to invoke
interactive authentication.
"""
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if isinstance(datastore_stub, RemoteStub):
datastore_stub._server.Send(datastore_stub._path, payload=None)
else:
raise ConfigurationError('remote_api is not configured.')
ConfigureRemoteDatastore = ConfigureRemoteApi
|
|
from astropy import units as u
from panoptes.pocs.camera.gphoto.base import AbstractGPhotoCamera
from panoptes.utils import error
from panoptes.utils.time import current_time
from panoptes.utils.utils import get_quantity_value
class Camera(AbstractGPhotoCamera):
def __init__(self, readout_time: float = 1.0, file_extension: str = 'cr2', connect: bool = True,
*args, **kwargs):
"""Create a camera object for a Canon EOS DSLR.
Args:
readout_time (float): The time it takes to read out the file from the
camera, default 1.0 second.
file_extension (str): The file extension to use, default `cr2`.
connect (bool): Connect to camera on startup, default True.
"""
kwargs['readout_time'] = readout_time
kwargs['file_extension'] = file_extension
super().__init__(*args, **kwargs)
self.logger.debug("Creating Canon DSLR GPhoto2 camera")
if connect:
self.connect()
@property
def bit_depth(self):
return 12 * u.bit
@property
def egain(self):
return 1.5 * (u.electron / u.adu)
def connect(self):
"""Connect to Canon DSLR.
Gets the serial number from the camera and sets various settings.
"""
self.logger.debug('Connecting to Canon gphoto2 camera')
# Get serial number
_serial_number = self.get_property('serialnumber')
if not _serial_number:
raise error.CameraNotFound(f"Camera not responding: {self}")
self._serial_number = _serial_number
# Properties to be set upon init.
prop2index = {
'/main/capturesettings/autoexposuremode': 3, # 3 - Manual; 4 - Bulb
'/main/capturesettings/drivemode': 0, # Single exposure
'/main/capturesettings/focusmode': 0, # Manual (don't try to focus)
'/main/imgsettings/imageformat': 9, # RAW
'/main/imgsettings/imageformatsd': 9, # RAW
'/main/settings/capturetarget': 0, # Capture to RAM, for download
'/main/settings/reviewtime': 0, # Screen off after taking pictures
'/main/imgsettings/iso': 1, # ISO 100
'/main/capturesettings/shutterspeed': 0, # Bulb
}
owner_name = 'PANOPTES'
artist_name = self.get_config('pan_id', default=owner_name)
copy_right = f'{owner_name}_{current_time().datetime:%Y}'
prop2value = {
'artist': artist_name,
'copyright': copy_right,
'ownername': owner_name,
}
self.set_properties(prop2index=prop2index, prop2value=prop2value)
# TODO check this works on all Canon models.
self.model = self.get_property('d402')
self._connected = True
def _start_exposure(self,
seconds=None,
filename=None,
dark=False,
header=None,
iso=100,
*args, **kwargs):
"""Start the exposure.
Tested With:
* Canon EOS 100D
Args:
seconds (u.second, optional): Length of exposure.
filename (str, optional): Image is saved to this filename.
header (dict or Header, optional): The metadata to be added as FITS headers.
iso (int, optional): The ISO setting to use for the exposure, default 100.
"""
# Make sure we have just the value, no units
seconds = get_quantity_value(seconds)
shutterspeed_idx = self.get_shutterspeed_index(seconds=seconds, return_minimum=True)
cmd_args = [
f'--set-config', f'iso={iso}',
f'--filename', f'{filename}',
f'--set-config-index', f'shutterspeed={shutterspeed_idx}',
f'--wait-event=1s',
]
if shutterspeed_idx == 0:
# Bulb setting.
cmd_args.extend([
f'--set-config-index', 'eosremoterelease=2',
f'--wait-event={int(seconds):d}s',
f'--set-config-index', 'eosremoterelease=4',
f'--wait-event-and-download=2s',
])
else:
# Known shutterspeed value.
cmd_args.extend([
f'--capture-image-and-download',
])
try:
self.command(cmd_args)
except error.InvalidCommand as e:
self.logger.warning(e)
else:
readout_args = (filename, header)
return readout_args
@classmethod
def get_shutterspeed_index(cls, seconds: float, return_minimum: bool = False):
"""Looks up the appropriate shutterspeed setting for the given seconds.
If the given seconds does not match a set shutterspeed, the 'bulb' setting
is returned.
"""
seconds = get_quantity_value(seconds, unit='second')
# TODO derive these from `load_properties`.
# The index corresponds to what gphoto2 expects.
shutter_speeds = {
"bulb": "bulb",
"30": 30,
"25": 25,
"20": 20,
"15": 15,
"13": 13,
"10.3": 10.3,
"8": 8,
"6.3": 6.3,
"5": 5,
"4": 4,
"3.2": 3.2,
"2.5": 2.5,
"2": 2,
"1.6": 1.6,
"1.3": 1.3,
"1": 1,
"0.8": 0.8,
"0.6": 0.6,
"0.5": 0.5,
"0.4": 0.4,
"0.3": 0.3,
"1/4": 1 / 4,
"1/5": 1 / 5,
"1/6": 1 / 6,
"1/8": 1 / 8,
"1/10": 1 / 10,
"1/13": 1 / 13,
"1/15": 1 / 15,
"1/20": 1 / 20,
"1/25": 1 / 25,
"1/30": 1 / 30,
"1/40": 1 / 40,
"1/50": 1 / 50,
"1/60": 1 / 60,
"1/80": 1 / 80,
"1/100": 1 / 100,
"1/125": 1 / 125,
"1/160": 1 / 160,
"1/200": 1 / 200,
"1/250": 1 / 250,
"1/320": 1 / 320,
"1/400": 1 / 400,
"1/500": 1 / 500,
"1/640": 1 / 640,
"1/800": 1 / 800,
"1/1000": 1 / 1000,
"1/1250": 1 / 1250,
"1/1600": 1 / 1600,
"1/2000": 1 / 2000,
"1/2500": 1 / 2500,
"1/3200": 1 / 3200,
"1/4000": 1 / 4000,
}
try:
# First check by key.
return list(shutter_speeds.keys()).index(seconds)
except ValueError:
# Then check by value.
try:
# Check minimum of everything after 'bulb'.
if return_minimum and seconds < min(list(shutter_speeds.values())[1:]):
return len(shutter_speeds) - 1
else:
return list(shutter_speeds.values()).index(seconds)
except ValueError:
return 0
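# Illustrative lookups (not from the original source; assumes plain numeric
# seconds are passed in):
#   Camera.get_shutterspeed_index(30)  -> 1, matched by value against "30"
#   Camera.get_shutterspeed_index(7)   -> 0, no match so fall back to bulb
#   Camera.get_shutterspeed_index(1 / 8000, return_minimum=True)
#       -> index of the last (fastest) entry, "1/4000"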
|
|
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models as db_models
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
class _TestBlockDeviceMappingObject(object):
def fake_bdm(self, instance=None):
instance = instance or {}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'uuid': uuids.bdm,
'instance_uuid': instance.get('uuid') or uuids.instance,
'attachment_id': None,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1
})
if instance:
fake_bdm['instance'] = instance
return fake_bdm
def test_save(self):
fake_bdm = self.fake_bdm()
with mock.patch.object(db, 'block_device_mapping_update',
return_value=fake_bdm) as bdm_update_mock:
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
bdm_object.volume_id = 'fake_volume_id'
bdm_object.save()
bdm_update_mock.assert_called_once_with(
self.context, 123, {'volume_id': 'fake_volume_id'},
legacy=False)
def test_save_instance_changed(self):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.instance = objects.Instance()
self.assertRaises(exception.ObjectActionError,
bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_update', return_value=None)
def test_save_not_found(self, bdm_update):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
self.assertRaises(exception.BDMNotFound, bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_id(self, get_by_vol_id):
# NOTE(danms): Include two results to make sure the first was picked.
# An invalid second item shouldn't be touched -- if it is, it'll
# fail from_db_object().
get_by_vol_id.return_value = [self.fake_bdm(),
None]
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id')
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertFalse(vol_bdm.obj_attr_is_set(attr))
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_id_not_found(self, get_by_vol_id):
get_by_vol_id.return_value = None
self.assertRaises(exception.VolumeBDMNotFound,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id')
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_instance_uuid_mismatch(self, get_by_vol_id):
fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
get_by_vol_id.return_value = [fake_bdm_vol]
self.assertRaises(exception.InvalidVolume,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id',
instance_uuid='fake-instance')
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_id_with_expected(self, get_by_vol_id):
get_by_vol_id.return_value = [self.fake_bdm(
fake_instance.fake_db_instance())]
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id', expected_attrs=['instance'])
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertTrue(vol_bdm.obj_attr_is_set(attr))
get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
['instance'])
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_returned_single(self, get_all):
fake_bdm_vol = self.fake_bdm()
get_all.return_value = [fake_bdm_vol]
vol_bdm = objects.BlockDeviceMapping.get_by_volume(
self.context, 'fake-volume-id')
self.assertEqual(fake_bdm_vol['id'], vol_bdm.id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
def test_get_by_volume_returned_multiple(self, get_all):
fake_bdm_vol1 = self.fake_bdm()
fake_bdm_vol2 = self.fake_bdm()
get_all.return_value = [fake_bdm_vol1, fake_bdm_vol2]
self.assertRaises(exception.VolumeBDMIsMultiAttach,
objects.BlockDeviceMapping.get_by_volume,
self.context, 'fake-volume-id')
@mock.patch.object(db,
'block_device_mapping_get_by_instance_and_volume_id')
def test_get_by_instance_and_volume_id(self, mock_get):
fake_inst = fake_instance.fake_db_instance()
mock_get.return_value = self.fake_bdm(fake_inst)
obj_bdm = objects.BlockDeviceMapping
vol_bdm = obj_bdm.get_by_volume_and_instance(
self.context, 'fake-volume-id', 'fake-instance-id')
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertFalse(vol_bdm.obj_attr_is_set(attr))
@mock.patch.object(db,
'block_device_mapping_get_by_instance_and_volume_id')
def test_get_by_instance_and_volume_id_with_expected(self, mock_get):
fake_inst = fake_instance.fake_db_instance()
mock_get.return_value = self.fake_bdm(fake_inst)
obj_bdm = objects.BlockDeviceMapping
vol_bdm = obj_bdm.get_by_volume_and_instance(
self.context, 'fake-volume-id', fake_inst['uuid'],
expected_attrs=['instance'])
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertTrue(vol_bdm.obj_attr_is_set(attr))
mock_get.assert_called_once_with(self.context, 'fake-volume-id',
fake_inst['uuid'], ['instance'])
@mock.patch.object(db,
'block_device_mapping_get_by_instance_and_volume_id')
def test_get_by_instance_and_volume_id_not_found(self, mock_get):
mock_get.return_value = None
obj_bdm = objects.BlockDeviceMapping
self.assertRaises(exception.VolumeBDMNotFound,
obj_bdm.get_by_volume_and_instance,
self.context, 'fake-volume-id', 'fake-instance-id')
def _test_create_mocked(self, update_or_create=False):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'attachment_id': None}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
with test.nested(
mock.patch.object(
db, 'block_device_mapping_create', return_value=fake_bdm),
mock.patch.object(
db, 'block_device_mapping_update_or_create',
return_value=fake_bdm),
) as (bdm_create_mock, bdm_update_or_create_mock):
bdm = objects.BlockDeviceMapping(context=self.context, **values)
if update_or_create:
method = bdm.update_or_create
else:
method = bdm.create
method()
if update_or_create:
bdm_update_or_create_mock.assert_called_once_with(
self.context, values, legacy=False)
else:
bdm_create_mock.assert_called_once_with(
self.context, values, legacy=False)
def test_create(self):
self._test_create_mocked()
def test_update_or_create(self):
self._test_create_mocked(update_or_create=True)
def test_create_fails(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
bdm.create()
self.assertRaises(exception.ObjectActionError,
bdm.create)
def test_create_fails_instance(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'instance': objects.Instance()}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
self.assertRaises(exception.ObjectActionError,
bdm.create)
def test_destroy(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume', 'id': 1,
'instance_uuid': uuids.instance, 'device_name': 'fake'}
with mock.patch.object(db, 'block_device_mapping_destroy') as bdm_del:
bdm = objects.BlockDeviceMapping(context=self.context, **values)
bdm.destroy()
bdm_del.assert_called_once_with(self.context, values['id'])
def test_is_image_true(self):
bdm = objects.BlockDeviceMapping(context=self.context,
source_type='image')
self.assertTrue(bdm.is_image)
def test_is_image_false(self):
bdm = objects.BlockDeviceMapping(context=self.context,
source_type='snapshot')
self.assertFalse(bdm.is_image)
def test_is_volume_true(self):
bdm = objects.BlockDeviceMapping(context=self.context,
destination_type='volume')
self.assertTrue(bdm.is_volume)
def test_is_volume_false(self):
bdm = objects.BlockDeviceMapping(context=self.context,
destination_type='local')
self.assertFalse(bdm.is_volume)
def test_obj_load_attr_not_instance(self):
"""Tests that lazy-loading something other than the instance field
results in an error.
"""
bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
self.assertRaises(exception.ObjectActionError,
bdm.obj_load_attr, 'invalid')
def test_obj_load_attr_orphaned(self):
"""Tests that lazy-loading the instance field on an orphaned BDM
results in an error.
"""
bdm = objects.BlockDeviceMapping(context=None, **self.fake_bdm())
self.assertRaises(exception.OrphanedObjectError, bdm.obj_load_attr,
'instance')
@mock.patch.object(objects.Instance, 'get_by_uuid',
return_value=objects.Instance(uuid=uuids.instance))
def test_obj_load_attr_instance(self, mock_inst_get_by_uuid):
"""Tests lazy-loading the instance field."""
bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
self.assertEqual(mock_inst_get_by_uuid.return_value, bdm.instance)
mock_inst_get_by_uuid.assert_called_once_with(
self.context, bdm.instance_uuid)
def test_obj_make_compatible_pre_1_17(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance, 'tag': 'fake-tag'}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
data = lambda x: x['nova_object.data']
primitive = data(bdm.obj_to_primitive(target_version='1.17'))
self.assertIn('tag', primitive)
primitive = data(bdm.obj_to_primitive(target_version='1.16'))
self.assertNotIn('tag', primitive)
self.assertIn('volume_id', primitive)
def test_obj_make_compatible_pre_1_18(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'attachment_id': uuids.attachment_id}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
data = lambda x: x['nova_object.data']
primitive = data(bdm.obj_to_primitive(target_version='1.17'))
self.assertNotIn('attachment_id', primitive)
self.assertIn('volume_id', primitive)
def test_obj_make_compatible_pre_1_19(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance, 'uuid': uuids.bdm}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
data = lambda x: x['nova_object.data']
primitive = data(bdm.obj_to_primitive(target_version='1.18'))
self.assertNotIn('uuid', primitive)
self.assertIn('volume_id', primitive)
def test_obj_make_compatible_pre_1_20(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'volume_type': 'fake-lvm-1'}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
data = lambda x: x['nova_object.data']
primitive = data(bdm.obj_to_primitive(target_version='1.19'))
self.assertNotIn('volume_type', primitive)
self.assertIn('volume_id', primitive)
class TestBlockDeviceMappingUUIDMigration(test.TestCase):
def setUp(self):
super(TestBlockDeviceMappingUUIDMigration, self).setUp()
self.context = context.RequestContext('fake-user-id',
'fake-project-id')
self.orig_create_uuid = \
objects.BlockDeviceMapping._create_uuid
@staticmethod
@db_api.pick_context_manager_writer
def _create_legacy_bdm(context, deleted=False):
# Create a BDM with no uuid
values = {'instance_uuid': uuids.instance_uuid}
bdm_ref = db_models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save(context.session)
if deleted:
bdm_ref.soft_delete(context.session)
return bdm_ref
@mock.patch.object(objects.BlockDeviceMapping, '_create_uuid')
def test_populate_uuid(self, mock_create_uuid):
mock_create_uuid.side_effect = self.orig_create_uuid
self._create_legacy_bdm(self.context)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance_uuid)
# UUID should have been populated
uuid = bdms[0].uuid
self.assertIsNotNone(uuid)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance_uuid)
# UUID should not have changed
self.assertEqual(uuid, bdms[0].uuid)
self.assertEqual(1, mock_create_uuid.call_count)
def test_create_uuid_race(self):
# If threads read a legacy BDM object concurrently, we can end up
# calling _create_uuid multiple times. Assert that calling _create_uuid
# multiple times yields the same uuid.
# NOTE(mdbooth): _create_uuid handles all forms of race, including any
# amount of overlapping. I have not attempted to write unit tests for
# all potential execution orders. This test is sufficient to
# demonstrate that the compare-and-swap works correctly, and we trust
# the correctness of the database for the rest.
db_bdm = self._create_legacy_bdm(self.context)
uuid1 = objects.BlockDeviceMapping._create_uuid(self.context,
db_bdm['id'])
bdm = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance_uuid)[0]
self.assertEqual(uuid1, bdm.uuid)
# We would only ever call this twice if we raced
# This is also testing that the compare-and-swap doesn't overwrite an
# existing uuid if we hit that race.
uuid2 = objects.BlockDeviceMapping._create_uuid(self.context,
bdm['id'])
self.assertEqual(uuid1, uuid2)
def _assert_online_migration(self, expected_total, expected_done,
limit=10):
total, done = objects.BlockDeviceMapping.populate_uuids(
self.context, limit)
self.assertEqual(expected_total, total)
self.assertEqual(expected_done, done)
def test_online_migration(self):
self._assert_online_migration(0, 0)
# Create 2 BDMs, one with a uuid and one without
self._create_legacy_bdm(self.context)
db_api.block_device_mapping_create(self.context,
{'uuid': uuids.bdm2, 'instance_uuid': uuids.instance_uuid},
legacy=False)
# Run the online migration. We should find 1 and update 1
self._assert_online_migration(1, 1)
# Fetch the BDMs and check we didn't modify the uuid of bdm2
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance_uuid)
bdm_uuids = [bdm.uuid for bdm in bdms]
self.assertIn(uuids.bdm2, bdm_uuids)
self.assertNotIn(None, bdm_uuids)
# Run the online migration again to see nothing was processed
self._assert_online_migration(0, 0)
# Assert that we assign a uuid to a deleted bdm.
self._create_legacy_bdm(self.context, deleted=True)
self._assert_online_migration(1, 1)
# Test that we don't migrate more than the limit
for i in range(0, 3):
self._create_legacy_bdm(self.context)
self._assert_online_migration(2, 2, limit=2)
class TestBlockDeviceMappingObject(test_objects._LocalTest,
_TestBlockDeviceMappingObject):
pass
class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest,
_TestBlockDeviceMappingObject):
pass
class _TestBlockDeviceMappingListObject(object):
def fake_bdm(self, bdm_id, boot_index=-1, instance_uuid=uuids.instance):
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': bdm_id,
'boot_index': boot_index,
'instance_uuid': instance_uuid,
'attachment_id': None,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
})
return fake_bdm
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids')
def test_bdms_by_instance_uuid(self, get_all_by_inst_uuids):
fakes = [self.fake_bdm(123), self.fake_bdm(456)]
get_all_by_inst_uuids.return_value = fakes
bdms_by_uuid = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
self.context, [uuids.instance])
self.assertEqual([uuids.instance], list(bdms_by_uuid.keys()))
self.assertIsInstance(
bdms_by_uuid[uuids.instance], objects.BlockDeviceMappingList)
for faked, got in zip(fakes, bdms_by_uuid[uuids.instance]):
self.assertIsInstance(got, objects.BlockDeviceMapping)
self.assertEqual(faked['id'], got.id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids')
def test_bdms_by_instance_uuid_no_result(self, get_all_by_inst_uuids):
get_all_by_inst_uuids.return_value = None
bdms_by_uuid = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
self.context, [uuids.instance])
self.assertEqual({}, bdms_by_uuid)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids')
def test_get_by_instance_uuids(self, get_all_by_inst_uuids):
fakes = [self.fake_bdm(123), self.fake_bdm(456)]
get_all_by_inst_uuids.return_value = fakes
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuids(
self.context, [uuids.instance])
for faked, got in zip(fakes, bdm_list):
self.assertIsInstance(got, objects.BlockDeviceMapping)
self.assertEqual(faked['id'], got.id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance_uuids')
def test_get_by_instance_uuids_no_result(self, get_all_by_inst_uuids):
get_all_by_inst_uuids.return_value = None
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuids(
self.context, [uuids.instance])
self.assertEqual(0, len(bdm_list))
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_get_by_instance_uuid(self, get_all_by_inst):
fakes = [self.fake_bdm(123), self.fake_bdm(456)]
get_all_by_inst.return_value = fakes
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance)
for faked, got in zip(fakes, bdm_list):
self.assertIsInstance(got, objects.BlockDeviceMapping)
self.assertEqual(faked['id'], got.id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_get_by_instance_uuid_no_result(self, get_all_by_inst):
get_all_by_inst.return_value = None
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance)
self.assertEqual(0, len(bdm_list))
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_root_bdm(self, get_all_by_inst):
fakes = [self.fake_bdm(123), self.fake_bdm(456, boot_index=0)]
get_all_by_inst.return_value = fakes
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance)
self.assertEqual(456, bdm_list.root_bdm().id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_root_bdm_empty_bdm_list(self, get_all_by_inst):
get_all_by_inst.return_value = None
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.instance)
self.assertIsNone(bdm_list.root_bdm())
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_root_bdm_undefined(self, get_all_by_inst):
fakes = [
self.fake_bdm(123, instance_uuid=uuids.instance_1),
self.fake_bdm(456, instance_uuid=uuids.instance_2)
]
get_all_by_inst.return_value = fakes
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, uuids.bdm_instance)
self.assertRaises(exception.UndefinedRootBDM, bdm_list.root_bdm)
class TestBlockDeviceMappingListObject(test_objects._LocalTest,
_TestBlockDeviceMappingListObject):
pass
class TestRemoteBlockDeviceMappingListObject(
test_objects._RemoteTest, _TestBlockDeviceMappingListObject):
pass
class TestBlockDeviceUtils(test.NoDBTestCase):
def test_make_list_from_dicts(self):
ctx = context.get_admin_context()
dicts = [{'id': 1}, {'id': 2}]
objs = block_device_obj.block_device_make_list_from_dicts(ctx,
dicts)
self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList)
self.assertEqual(2, len(objs))
self.assertEqual(1, objs[0].id)
self.assertEqual(2, objs[1].id)
def test_make_list_from_dicts_empty(self):
ctx = context.get_admin_context()
objs = block_device_obj.block_device_make_list_from_dicts(ctx, [])
self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList)
self.assertEqual(0, len(objs))
|
|
from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, transaction
from django.db.transaction import commit_on_success, commit_manually, TransactionManagementError
from django.test import TransactionTestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils.unittest import skipIf
from .models import Mod, M2mA, M2mB
class TestTransactionClosing(TransactionTestCase):
"""
Tests to make sure that transactions are properly closed
when they should be, and aren't left pending after operations
have been performed in them. Refs #9964.
"""
def test_raw_committed_on_success(self):
"""
Make sure a transaction consisting of raw SQL execution gets
committed by the commit_on_success decorator.
"""
@commit_on_success
def raw_sql():
"Write a record using raw sql under a commit_on_success decorator"
cursor = connection.cursor()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (17,18)")
raw_sql()
# Rollback so that if the decorator didn't commit, the record is unwritten
transaction.rollback()
try:
# Check that the record is in the DB
obj = Mod.objects.get(pk=17)
self.assertEqual(obj.fld, 18)
except Mod.DoesNotExist:
self.fail("transaction with raw sql not committed")
def test_commit_manually_enforced(self):
"""
Make sure that under commit_manually, even "read-only" transactions require closure
(commit or rollback), and a transaction left pending is treated as an error.
"""
@commit_manually
def non_committer():
"Execute a managed transaction with read-only operations and fail to commit"
_ = Mod.objects.count()
self.assertRaises(TransactionManagementError, non_committer)
def test_commit_manually_commit_ok(self):
"""
Test that under commit_manually, a committed transaction is accepted by the transaction
management mechanisms
"""
@commit_manually
def committer():
"""
Perform a database query, then commit the transaction
"""
_ = Mod.objects.count()
transaction.commit()
try:
committer()
except TransactionManagementError:
self.fail("Commit did not clear the transaction state")
def test_commit_manually_rollback_ok(self):
"""
Test that under commit_manually, a rolled-back transaction is accepted by the transaction
management mechanisms
"""
@commit_manually
def roller_back():
"""
Perform a database query, then rollback the transaction
"""
_ = Mod.objects.count()
transaction.rollback()
try:
roller_back()
except TransactionManagementError:
self.fail("Rollback did not clear the transaction state")
def test_commit_manually_enforced_after_commit(self):
"""
Test that under commit_manually, if a transaction is committed and an operation is
performed later, we still require the new transaction to be closed
"""
@commit_manually
def fake_committer():
"Query, commit, then query again, leaving with a pending transaction"
_ = Mod.objects.count()
transaction.commit()
_ = Mod.objects.count()
self.assertRaises(TransactionManagementError, fake_committer)
@skipUnlessDBFeature('supports_transactions')
def test_reuse_cursor_reference(self):
"""
Make sure transaction closure is enforced even when the queries are performed
through a single cursor reference retrieved in the beginning
(this is to show why it is wrong to set the transaction dirty only when a cursor
is fetched from the connection).
"""
@commit_on_success
def reuse_cursor_ref():
"""
Fetch a cursor, perform a query, rollback to close the transaction,
then write a record (in a new transaction) using the same cursor object
(reference). All this under commit_on_success, so the second insert should
be committed.
"""
cursor = connection.cursor()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (1,2)")
transaction.rollback()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (1,2)")
reuse_cursor_ref()
# Rollback so that if the decorator didn't commit, the record is unwritten
transaction.rollback()
try:
# Check that the record is in the DB
obj = Mod.objects.get(pk=1)
self.assertEqual(obj.fld, 2)
except Mod.DoesNotExist:
self.fail("After ending a transaction, cursor use no longer sets dirty")
def test_failing_query_transaction_closed(self):
"""
Make sure that under commit_on_success, a transaction is rolled back even if
the first database-modifying operation fails.
This is prompted by http://code.djangoproject.com/ticket/6669 (and based on sample
code posted there to exemplify the problem): Before Django 1.3,
transactions were only marked "dirty" by the save() function after it successfully
wrote the object to the database.
"""
from django.contrib.auth.models import User
@transaction.commit_on_success
def create_system_user():
"Create a user in a transaction"
user = User.objects.create_user(username='system', password='iamr00t', email='root@SITENAME.com')
# Redundant, just makes sure the user id was read back from DB
Mod.objects.create(fld=user.id)
# Create a user
create_system_user()
try:
# The second call to create_system_user should fail for violating a unique constraint
# (it's trying to re-create the same user)
create_system_user()
except:
pass
else:
raise ImproperlyConfigured('Unique constraint not enforced on django.contrib.auth.models.User')
try:
# Try to read the database. If the last transaction was indeed closed,
# this should cause no problems
_ = User.objects.all()[0]
except:
self.fail("A transaction consisting of a failed operation was not closed.")
@override_settings(DEBUG=True)
def test_failing_query_transaction_closed_debug(self):
"""
Regression for #6669. Same test as above, with DEBUG=True.
"""
self.test_failing_query_transaction_closed()
class TestManyToManyAddTransaction(TransactionTestCase):
def test_manyrelated_add_commit(self):
"Test for https://code.djangoproject.com/ticket/16818"
a = M2mA.objects.create()
b = M2mB.objects.create(fld=10)
a.others.add(b)
# We're in a TransactionTestCase and have not changed transaction
# behavior from default of "autocommit", so this rollback should not
# actually do anything. If it does in fact undo our add, that's a bug
# that the bulk insert was not auto-committed.
transaction.rollback()
self.assertEqual(a.others.count(), 1)
class SavepointTest(TransactionTestCase):
@skipUnlessDBFeature('uses_savepoints')
def test_savepoint_commit(self):
@commit_manually
def work():
mod = Mod.objects.create(fld=1)
pk = mod.pk
sid = transaction.savepoint()
mod1 = Mod.objects.filter(pk=pk).update(fld=10)
transaction.savepoint_commit(sid)
mod2 = Mod.objects.get(pk=pk)
transaction.commit()
self.assertEqual(mod2.fld, 10)
work()
@skipIf(connection.vendor == 'mysql' and \
connection.features._mysql_storage_engine == 'MyISAM',
"MyISAM MySQL storage engine doesn't support savepoints")
@skipUnlessDBFeature('uses_savepoints')
def test_savepoint_rollback(self):
@commit_manually
def work():
mod = Mod.objects.create(fld=1)
pk = mod.pk
sid = transaction.savepoint()
mod1 = Mod.objects.filter(pk=pk).update(fld=20)
transaction.savepoint_rollback(sid)
mod2 = Mod.objects.get(pk=pk)
transaction.commit()
self.assertEqual(mod2.fld, 1)
work()
|
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.access import get_active_user
from netforce.database import get_connection
from netforce import utils
class Contact(Model):
_name = "contact"
_string = "Contact"
_audit_log = True
_export_field = "name"
_key = ["code"]
_fields = {
"user_id": fields.Many2One("base.user", "User"),
"type": fields.Selection([["person", "Individual"], ["org", "Organization"]], "Contact Type", required=True, search=True),
"customer": fields.Boolean("Customer", search=True),
"supplier": fields.Boolean("Supplier", search=True),
"name": fields.Char("Name", required=True, search=True, translate=True, size=256),
"code": fields.Char("Code", search=True, required=True),
"phone": fields.Char("Phone", search=True),
"fax": fields.Char("Fax"),
"website": fields.Char("Website"),
"industry": fields.Char("Industry"), # XXX: deprecated
"employees": fields.Char("Employees"),
"revenue": fields.Char("Annual Revenue"),
"description": fields.Text("Description"),
"tax_no": fields.Char("Tax ID Number"),
"tax_branch_no" : fields.Char("Tax Branch Id"),
"bank_account_no": fields.Char("Bank Account Number"),
"bank_account_name": fields.Char("Bank Account Name"),
"bank_account_details": fields.Char("Bank Account Details"),
"active": fields.Boolean("Active"),
"account_receivable_id": fields.Many2One("account.account", "Account Receivable", multi_company=True),
"tax_receivable_id": fields.Many2One("account.tax.rate", "Account Receivable Tax"),
"account_payable_id": fields.Many2One("account.account", "Account Payable", multi_company=True),
"tax_payable_id": fields.Many2One("account.tax.rate", "Account Payable Tax"),
"currency_id": fields.Many2One("currency", "Default Currency"),
"payables_due": fields.Decimal("Payables Due"),
"payables_overdue": fields.Decimal("Payables Overdue"),
"receivables_due": fields.Decimal("Receivables Due"),
"receivables_overdue": fields.Decimal("Receivables Overdue"),
"payable_credit": fields.Decimal("Payable Credit", function="get_credit", function_multi=True),
"receivable_credit": fields.Decimal("Receivable Credit", function="get_credit", function_multi=True),
"invoices": fields.One2Many("account.invoice", "contact_id", "Invoices"),
"sale_price_list_id": fields.Many2One("price.list", "Sales Price List", condition=[["type", "=", "sale"]]),
"purchase_price_list_id": fields.Many2One("price.list", "Purchasing Price List", condition=[["type", "=", "purchase"]]),
"categ_id": fields.Many2One("contact.categ", "Contact Category"),
"payment_terms": fields.Char("Payment Terms"),
"opports": fields.One2Many("sale.opportunity", "contact_id", "Open Opportunities", condition=[["state", "=", "open"]]),
"addresses": fields.One2Many("address", "contact_id", "Addresses"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"bank_accounts": fields.One2Many("bank.account", "contact_id", "Bank Accounts"),
"last_name": fields.Char("Last Name"),
"first_name": fields.Char("First Name"),
"first_name2": fields.Char("First Name (2)"),
"first_name3": fields.Char("First Name (3)"),
"title": fields.Char("Title"),
"position": fields.Char("Position"),
"report_to_id": fields.Many2One("contact", "Reports To"),
"mobile": fields.Char("Mobile"),
"email": fields.Char("Email", search=True),
"home_phone": fields.Char("Home Phone"),
"other_phone": fields.Char("Other Phone"),
"assistant": fields.Char("Assistant"),
"assistant_phone": fields.Char("Assistant Phone"),
"birth_date": fields.Date("Birth Date"),
"department": fields.Char("Department"),
"job_templates": fields.Many2Many("job.template", "Job Template"),
"projects": fields.One2Many("project", "contact_id", "Projects"),
"documents": fields.One2Many("document", "contact_id", "Documents"),
"assigned_to_id": fields.Many2One("base.user", "Assigned To"),
"lead_source": fields.Char("Lead source"),
"inquiry_type": fields.Char("Type of inquiry"),
"relations": fields.One2Many("contact.relation", "from_contact_id", "Relations", function="_get_relations"),
"contact_id": fields.Many2One("contact", "Parent"), # XXX: not used any more, just there for migration
"emails": fields.One2Many("email.message", "name_id", "Emails"),
"default_address_id": fields.Many2One("address", "Default Address", function="get_default_address"),
"sale_orders": fields.One2Many("sale.order", "contact_id", "Sales Orders"),
"country_id": fields.Many2One("country", "Country", search=True),
"region": fields.Char("Region"), # XXX: deprecated
"service_items": fields.One2Many("service.item", "contact_id", "Service Items", condition=[["parent_id", "=", None]]),
"contracts": fields.One2Many("service.contract", "contact_id", "Contracts"),
"branch": fields.Char("Branch"), # XXX: add by Cash
"industry_id": fields.Many2One("industry", "Industry", search=True),
"region_id": fields.Many2One("region", "Region", search=True),
"commission_po_percent": fields.Decimal("Commission Purchase Percentage"),
"business_area_id": fields.Many2One("business.area", "Business Area", search=True),
"fleet_size_id": fields.Many2One("fleet.size", "Fleet Size", search=True),
"groups": fields.Many2Many("contact.group", "Groups", search=True),
"sale_journal_id": fields.Many2One("account.journal", "Sales Journal"),
"purchase_journal_id": fields.Many2One("account.journal", "Purchase Journal"),
"pay_in_journal_id": fields.Many2One("account.journal", "Receipts Journal"),
"pay_out_journal_id": fields.Many2One("account.journal", "Disbursements Journal"),
"pick_in_journal_id": fields.Many2One("stock.journal", "Goods Receipt Journal"),
"pick_out_journal_id": fields.Many2One("stock.journal", "Goods Issue Journal"),
"coupons": fields.One2Many("sale.coupon", "contact_id", "Coupons"),
"companies": fields.Many2Many("company", "Companies"),
"request_product_groups": fields.Many2Many("product.group","Request Product Groups",reltable="m2m_contact_request_product_groups",relfield="contact_id",relfield_other="group_id"),
"exclude_product_groups": fields.Many2Many("product.group","Exclude Product Groups",reltable="m2m_contact_exclude_product_groups",relfield="contact_id",relfield_other="group_id"),
"picture": fields.File("Picture"),
"users": fields.One2Many("base.user","contact_id","Users"),
"ship_free": fields.Boolean("Free Shipping"),
}
def _get_number(self, context={}):
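# Draw candidate codes from the "contact" sequence until one is found that is
# not already used by an existing contact.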
seq_id = get_model("sequence").find_sequence(type="contact")
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id, context=context)
res = self.search([["code", "=", num]])
if not res:
return num
get_model("sequence").increment_number(seq_id, context=context)
_defaults = {
"active": True,
"type": "person",
"code": _get_number,
}
_order = "name"
_constraints=["check_email"]
def create(self, vals, **kw):
if not vals.get("type"):
if vals.get("name"):
vals["type"] = "org"
elif vals.get("last_name"):
vals["type"] = "person"
if vals.get("type") == "person":
if vals.get("first_name"):
vals["name"] = vals["first_name"] + " " + vals["last_name"]
else:
vals["name"] = vals["last_name"]
new_id = super().create(vals, **kw)
return new_id
def write(self, ids, vals, set_name=True, **kw):
super().write(ids, vals, **kw)
if set_name:
for obj in self.browse(ids):
if obj.type == "person":
if obj.first_name:
name = obj.first_name + " " + obj.last_name
else:
name = obj.last_name
obj.write({"name": name}, set_name=False)
def get_credit(self, ids, context={}):
print("contact.get_credit", ids)
currency_id = context.get("currency_id")
print("currency_id", currency_id)
vals = {}
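# Receivable/payable credit is derived from the balances of customer and
# supplier deposit accounts, restricted to this contact via the context.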
for obj in self.browse(ids):
ctx={
"contact_id": obj.id,
}
r_credit = 0
p_credit = 0
for acc in get_model("account.account").search_browse([["type","=","cust_deposit"]],context=ctx):
r_credit-=acc.balance
for acc in get_model("account.account").search_browse([["type","=","sup_deposit"]],context=ctx):
p_credit+=acc.balance
vals[obj.id] = {
"receivable_credit": r_credit,
"payable_credit": p_credit, # TODO
}
return vals
def get_address_str(self, ids, context={}):
obj = self.browse(ids[0])
if not obj.addresses:
return ""
addr = obj.addresses[0]
return addr.name_get()[0][1]
def _get_relations(self, ids, context={}):
cond = ["or", ["from_contact_id", "in", ids], ["to_contact_id", "in", ids]]
rels = get_model("contact.relation").search_read(cond, ["from_contact_id", "to_contact_id"])
vals = {}
for rel in rels:
from_id = rel["from_contact_id"][0]
to_id = rel["to_contact_id"][0]
vals.setdefault(from_id, []).append(rel["id"])
vals.setdefault(to_id, []).append(rel["id"])
return vals
def get_address(self, ids, pref_type=None, context={}):
obj = self.browse(ids)[0]
for addr in obj.addresses:
if pref_type and addr.type != pref_type:
continue
return addr.id
if obj.addresses:
return obj.addresses[0].id
return None
def get_default_address(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
addr_id = None
for addr in obj.addresses:
if addr.type == "billing":
addr_id = addr.id
break
if not addr_id and obj.addresses:
addr_id = obj.addresses[0].id
vals[obj.id] = addr_id
print("XXX", vals)
return vals
def check_email(self,ids,context={}):
for obj in self.browse(ids):
if not obj.email:
continue
if not utils.check_email_syntax(obj.email):
raise Exception("Invalid email for contact '%s'"%obj.name)
def find_address(self,ids,addr_vals,context={}):
obj=self.browse(ids[0])
addr_id=None
for addr in obj.addresses:
if "address" in addr_vals and addr_vals["address"]!=addr.address:
continue
if "address2" in addr_vals and addr_vals["address2"]!=addr.address2:
continue
if "city" in addr_vals and addr_vals["city"]!=addr.city:
continue
if "postal_code" in addr_vals and addr_vals["postal_code"]!=addr.postal_code:
continue
if "country_id" in addr_vals and addr_vals["country_id"]!=addr.country_id.id:
continue
if "province_id" in addr_vals and addr_vals["province_id"]!=addr.province_id.id:
continue
if "district_id" in addr_vals and addr_vals["district_id"]!=addr.district_id.id:
continue
if "subdistrict_id" in addr_vals and addr_vals["subdistrict_id"]!=addr.subdistrict_id.id:
continue
if "phone" in addr_vals and addr_vals["phone"]!=addr.phone:
continue
if "first_name" in addr_vals and addr_vals["phone"]!=addr.first_name:
continue
if "last_name" in addr_vals and addr_vals["last_name"]!=addr.last_name:
continue
addr_id=addr.id
break
return addr_id
def add_address(self,ids,addr_vals,context={}):
addr_id=self.find_address(ids,addr_vals,context=context)
if not addr_id:
vals=addr_vals.copy()
vals["contact_id"]=ids[0]
addr_id=get_model("address").create(vals)
return addr_id
Contact.register()
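# Usage sketch (illustrative only; the contact id and address values are
# hypothetical, and the get_model() registry is assumed to expose this model
# as "contact"):
#
#     contact_id = get_model("contact").create({"last_name": "Smith"})
#     addr_id = get_model("contact").add_address([contact_id], {
#         "address": "1 Main St",
#         "city": "Bangkok",
#     })
#
# add_address() reuses an existing matching address when find_address()
# locates one, and only creates a new "address" record otherwise.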
|
|
"""
@package mi.instrument.sunburst.test.test_driver
@file marine-integrations/mi/instrument/sunburst/driver.py
@author Kevin Stiemke
@brief Common test case code for SAMI instrument drivers
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import time
import datetime
import ntplib
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import InstrumentCommandException
# MI imports.
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import GO_ACTIVE_TIMEOUT
from mi.idk.unit_test import DriverProtocolState
from mi.idk.unit_test import DriverEvent
from mi.idk.unit_test import ResourceAgentState
from interface.objects import AgentCommand  # used by the qualification tests (assert_start_autosample / assert_sample_autosample)
from mi.core.instrument.port_agent_client import PortAgentPacket
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import DataParticleValue
from mi.instrument.sunburst.driver import Prompt, SamiRegularStatusDataParticle, SamiBatteryVoltageDataParticle, \
SamiThermistorVoltageDataParticle
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.driver import SamiRegularStatusDataParticleKey
from mi.instrument.sunburst.driver import SamiBatteryVoltageDataParticleKey
from mi.instrument.sunburst.driver import SamiThermistorVoltageDataParticleKey
from mi.instrument.sunburst.driver import SamiProtocolState
from mi.instrument.sunburst.driver import SamiProtocolEvent
from mi.instrument.sunburst.driver import SamiProtocol
from mi.instrument.sunburst.driver import SAMI_UNIX_OFFSET
from mi.instrument.sunburst.driver import SamiParameter
class CallStatisticsContainer:
"""
Class to collect call statistics
"""
def __init__(self, unit_test):
self.call_count = 0
self.call_times = []
self.unit_test = unit_test
def side_effect(self):
"""
Call side effect
"""
self.call_count += 1
self.call_times.append(time.time())
log.debug('side effect count = %s', self.call_count)
def assert_call_count(self, call_count):
"""
Verify call count matches expectations
:param call_count: Expected call count
"""
self.unit_test.assertEqual(call_count, self.call_count, 'call count %s != %s' %
(call_count, self.call_count))
def assert_timing(self, delay):
"""
Verify delays between calls
:param delay: expected delay between calls
"""
for call_counter in range(self.call_count):
if call_counter > 0:
call_delay = self.call_times[call_counter] - self.call_times[call_counter - 1]
log.debug('call %s delay = %s', call_counter, call_delay)
time_diff = call_delay - delay
self.unit_test.assertTrue(-.5 < time_diff < .5,
'call delay %s: call delay %s != delay %s' %
(call_counter, call_delay, delay))
class PumpStatisticsContainer(CallStatisticsContainer):
"""
Class to collect pump call statistics
"""
def __init__(self, unit_test, pump_command):
self.pump_command = pump_command
CallStatisticsContainer.__init__(self, unit_test)
def side_effect(self, *args, **kwargs):
"""
Call side effect
:param args: pump command passed on call
:param kwargs: not used
"""
log.debug('args = %s, kwargs = %s', args, kwargs)
if args == self.pump_command:
CallStatisticsContainer.side_effect(self)
TIME_THRESHOLD = 2
###
# Driver parameters for the tests
###
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In Python, mixin classes are classes that are not designed to stand on #
# their own; they are inherited by other classes, generally via multiple #
# inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
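# A minimal illustration of the pattern (not driver code): the mixin only
# supplies shared constants and assert helpers, and a concrete test class
# combines it with a test base class via multiple inheritance, e.g.
#
#     class SamiUnitTest(InstrumentDriverUnitTestCase, SamiMixin):
#         ...
#
# exactly as the test classes below do.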
class SamiMixin(DriverTestMixin):
"""
Mixin class used for storing SAMI instrument data particle constants and common data
assertion methods.
Should be subclassed in the specific test driver
"""
###
# Instrument output (driver input) Definitions
###
# Regular Status Message (response to S0 command)
VALID_STATUS_MESSAGE = ':CDDD74E10041000003000000000236F8' + SAMI_NEWLINE
# Error records (valid error codes are between 0x00 and 0x11)
VALID_ERROR_CODE = '?0B' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_regular_status_parameters = {
# SAMI Regular Status Messages (S0)
SamiRegularStatusDataParticleKey.ELAPSED_TIME_CONFIG: {TYPE: int, VALUE: 0xCDDD74E1, REQUIRED: True},
SamiRegularStatusDataParticleKey.CLOCK_ACTIVE: {TYPE: bool, VALUE: True, REQUIRED: True},
SamiRegularStatusDataParticleKey.RECORDING_ACTIVE: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.RECORD_END_ON_TIME: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.RECORD_MEMORY_FULL: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.RECORD_END_ON_ERROR: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.DATA_DOWNLOAD_OK: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.FLASH_MEMORY_OPEN: {TYPE: bool, VALUE: True, REQUIRED: True},
SamiRegularStatusDataParticleKey.BATTERY_LOW_PRESTART: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.BATTERY_LOW_MEASUREMENT: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.BATTERY_LOW_BANK: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.BATTERY_LOW_EXTERNAL: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE1_FAULT: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE2_FAULT: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE3_FAULT: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.FLASH_ERASED: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.POWER_ON_INVALID: {TYPE: bool, VALUE: False, REQUIRED: True},
SamiRegularStatusDataParticleKey.NUM_DATA_RECORDS: {TYPE: int, VALUE: 0x000003, REQUIRED: True},
SamiRegularStatusDataParticleKey.NUM_ERROR_RECORDS: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
SamiRegularStatusDataParticleKey.NUM_BYTES_STORED: {TYPE: int, VALUE: 0x000236, REQUIRED: True},
SamiRegularStatusDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0xF8, REQUIRED: True}
}
_battery_voltage_parameters = {
SamiBatteryVoltageDataParticleKey.BATTERY_VOLTAGE: {TYPE: int, VALUE: 0x0CD8, REQUIRED: True}
}
_thermistor_voltage_parameters = {
SamiThermistorVoltageDataParticleKey.THERMISTOR_VOLTAGE: {TYPE: int, VALUE: 0x067B, REQUIRED: True}
}
def assert_particle_battery_voltage(self, data_particle, verify_values=False):
"""
Verify battery voltage particle
@param data_particle: SamiBatteryVoltageDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SamiBatteryVoltageDataParticleKey,
self._battery_voltage_parameters)
self.assert_data_particle_header(data_particle,
SamiBatteryVoltageDataParticle._data_particle_type)
self.assert_data_particle_parameters(data_particle,
self._battery_voltage_parameters,
verify_values)
def assert_particle_thermistor_voltage(self, data_particle, verify_values=False):
"""
Verify thermistor voltage particle
@param data_particle: SamiThermistorVoltageDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SamiThermistorVoltageDataParticleKey,
self._thermistor_voltage_parameters)
self.assert_data_particle_header(data_particle,
SamiThermistorVoltageDataParticle._data_particle_type)
self.assert_data_particle_parameters(data_particle,
self._thermistor_voltage_parameters,
verify_values)
def assert_particle_regular_status(self, data_particle, verify_values=False):
"""
Verify regular_status particle
@param data_particle: SamiRegularStatusDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SamiRegularStatusDataParticleKey,
self._regular_status_parameters)
self.assert_data_particle_header(data_particle,
SamiRegularStatusDataParticle._data_particle_type)
self.assert_data_particle_parameters(data_particle,
self._regular_status_parameters,
verify_values)
@staticmethod
def send_port_agent_packet(protocol, data):
ts = ntplib.system_to_ntp_time(time.time())
port_agent_packet = PortAgentPacket()
port_agent_packet.attach_data(data)
port_agent_packet.attach_timestamp(ts)
port_agent_packet.pack_header()
# Push the response into the driver
protocol.got_data(port_agent_packet)
protocol.got_raw(port_agent_packet)
log.debug('Sent port agent packet containing: %r', data)
def send_newline_side_effect(self, protocol):
def inner(data):
"""
Return response to command
:param data: command
:return: length of response
"""
my_response = '\r'
if my_response is not None:
log.debug("my_send: data: %r, my_response: %r", data, my_response)
time.sleep(.1)
self.send_port_agent_packet(protocol, my_response)
return len(my_response)
return inner
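# Hypothetical wiring sketch (attribute names assumed, not taken from this
# file): the closure returned above is intended to stand in for the
# connection's send() so that every command written to the instrument is
# answered with a newline, e.g.
#
#     protocol._connection = Mock()
#     protocol._connection.send = Mock(
#         side_effect=self.send_newline_side_effect(protocol))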
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, though more complex code can be exercised using #
# Mock objects. However, if you find yourself mocking too much, maybe it #
# is better as an integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class SamiUnitTest(InstrumentDriverUnitTestCase, SamiMixin):
def sleep_for_realsies(self, seconds):
now = time.time()
while time.time() < (now+seconds):
time.sleep(.4)
def assert_waiting_discover(self, driver):
self.assert_initialize_driver(driver, initial_protocol_state=SamiProtocolState.WAITING)
class DiscoverWaitingStatisticsContainer(CallStatisticsContainer):
"""
Class to collect discover waiting statistics
"""
def discover_waiting_side_effect(self):
"""
Side effect of discover waiting method call
:return: protocol and agent states
"""
DiscoverWaitingStatisticsContainer.side_effect(self)
return SamiProtocolState.WAITING, ResourceAgentState.BUSY
stats = DiscoverWaitingStatisticsContainer(self)
driver._protocol._discover = Mock(side_effect=stats.discover_waiting_side_effect)
(protocol_state, (agent_state, result)) = driver._protocol._handler_waiting_discover()
self.assertEqual(protocol_state,
SamiProtocolState.UNKNOWN,
'protocol state %s != %s'
% (protocol_state, SamiProtocolState.UNKNOWN))
self.assertEqual(agent_state,
ResourceAgentState.ACTIVE_UNKNOWN,
'agent state %s != %s'
% (agent_state, ResourceAgentState.ACTIVE_UNKNOWN))
log.debug('discover call count = %s', stats.call_count)
log.debug('call times = %s', stats.call_times)
stats.assert_call_count(6)
stats.assert_timing(20)
def assert_autosample_timing(self, driver):
self.assert_initialize_driver(driver, initial_protocol_state=SamiProtocolState.COMMAND)
driver._protocol._protocol_fsm.current_state = SamiProtocolState.COMMAND
stats = CallStatisticsContainer(self)
driver._protocol._take_regular_sample = \
Mock(side_effect=stats.side_effect)
for param in driver._protocol._param_dict.get_keys():
log.debug('startup param = %s', param)
driver._protocol._param_dict.set_default(param)
driver._protocol._param_dict.set_value(SamiParameter.AUTO_SAMPLE_INTERVAL, 1)
driver._protocol._setup_scheduler_config()
(driver._protocol._protocol_fsm.current_state, (agent_state, result)) = \
driver._protocol._handler_command_start_autosample()
## Don't take sample upon entering autosample state
driver._protocol._queued_commands.reset()
self.sleep_for_realsies(3)
(driver._protocol._protocol_fsm.current_state, (agent_state, result)) = \
driver._protocol._handler_autosample_stop()
stats.assert_call_count(3)
stats.assert_timing(1)
stats.call_count = 0
stats.call_times = []
self.sleep_for_realsies(2)
stats.assert_call_count(0)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction #
# by making direct calls via zeromq. #
# - Common integration tests test the driver through the instrument agent #
# and are common to all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class SamiIntegrationTest(InstrumentDriverIntegrationTestCase):
def setUp(self):
InstrumentDriverIntegrationTestCase.setUp(self)
def assert_particle_count(self, particle_type, particle_count, timeout):
start_time = time.time()
end_time = start_time + timeout
while True:
num_samples = len(self.get_sample_events(particle_type))
elapsed = time.time() - start_time
if num_samples >= particle_count:
rate = elapsed / num_samples
log.debug('Found %d samples, elapsed time: %d, approx data rate: %d seconds/sample',
num_samples, elapsed, rate)
break
self.assertGreater(end_time, time.time(), msg="Timeout waiting for sample")
time.sleep(1)
## Have to override because battery and thermistor do not have port time stamps
def assert_data_particle_header(self, data_particle, stream_name, require_instrument_timestamp=False):
"""
Verify a data particle header is formatted properly
@param data_particle version 1 data particle
@param stream_name version 1 data particle
@param require_instrument_timestamp should we verify the instrument timestamp exists
"""
sample_dict = self.convert_data_particle_to_dict(data_particle)
log.debug("SAMPLEDICT: %s", sample_dict)
self.assertTrue(sample_dict[DataParticleKey.STREAM_NAME], stream_name)
self.assertTrue(sample_dict[DataParticleKey.PKT_FORMAT_ID], DataParticleValue.JSON_DATA)
self.assertTrue(sample_dict[DataParticleKey.PKT_VERSION], 1)
self.assertIsInstance(sample_dict[DataParticleKey.VALUES], list)
self.assertTrue(sample_dict.get(DataParticleKey.PREFERRED_TIMESTAMP))
self.assertIsNotNone(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP), float)
# It is highly unlikely that we should have a particle without a port agent timestamp,
# at least that's the current assumption.
## self.assertIsNotNone(sample_dict.get(DataParticleKey.PORT_TIMESTAMP))
## self.assertIsInstance(sample_dict.get(DataParticleKey.PORT_TIMESTAMP), float)
if require_instrument_timestamp:
self.assertIsNotNone(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP), float)
def assert_time_sync(self, status_particle):
status_dict = self.get_data_particle_values_as_dict(status_particle)
elapsed_time_config = status_dict.get(SamiRegularStatusDataParticleKey.ELAPSED_TIME_CONFIG)
current_sami_time = SamiProtocol._current_sami_time()
log.debug("elapsed_time_config = %s", elapsed_time_config)
log.debug("current_sami_time = %s", current_sami_time)
time_difference = current_sami_time - elapsed_time_config
log.debug("time difference = %s", time_difference)
sami_now_seconds = current_sami_time - SAMI_UNIX_OFFSET.total_seconds()
sami_now = datetime.datetime.utcfromtimestamp(sami_now_seconds)
log.debug('utc time = %s', datetime.datetime.utcnow())
log.debug('sami_now = %s', sami_now)
self.assertTrue(time_difference <= TIME_THRESHOLD,
"Time threshold exceeded, time_difference = %s, time_threshold = %s" % (
time_difference, TIME_THRESHOLD))
def test_bad_command(self):
self.assert_initialize_driver()
self.assert_driver_command_exception('bad_command', exception_class=InstrumentCommandException)
def test_time_sync(self):
self.assert_initialize_driver()
time.sleep(10)
self.clear_events()
request_status_time = time.time()
self.assert_driver_command(SamiProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(SamiRegularStatusDataParticle._data_particle_type, self.assert_time_sync, timeout=10)
receive_status_time = time.time()
status_time = receive_status_time - request_status_time
log.debug("status_time = %s", status_time)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ION #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class SamiQualificationTest(InstrumentDriverQualificationTestCase):
## Have to override because battery and thermistor do not have port time stamps
def assert_data_particle_header(self, data_particle, stream_name, require_instrument_timestamp=False):
"""
Verify a data particle header is formatted properly
@param data_particle version 1 data particle
@param stream_name version 1 data particle
@param require_instrument_timestamp should we verify the instrument timestamp exists
"""
sample_dict = self.convert_data_particle_to_dict(data_particle)
log.debug("SAMPLEDICT: %s", sample_dict)
self.assertTrue(sample_dict[DataParticleKey.STREAM_NAME], stream_name)
self.assertTrue(sample_dict[DataParticleKey.PKT_FORMAT_ID], DataParticleValue.JSON_DATA)
self.assertTrue(sample_dict[DataParticleKey.PKT_VERSION], 1)
self.assertIsInstance(sample_dict[DataParticleKey.VALUES], list)
self.assertTrue(sample_dict.get(DataParticleKey.PREFERRED_TIMESTAMP))
self.assertIsNotNone(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP), float)
# It is highly unlikely that we should have a particle without a port agent timestamp,
# at least that's the current assumption.
## self.assertIsNotNone(sample_dict.get(DataParticleKey.PORT_TIMESTAMP))
## self.assertIsInstance(sample_dict.get(DataParticleKey.PORT_TIMESTAMP), float)
if require_instrument_timestamp:
self.assertIsNotNone(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP), float)
## Have to override because the driver enters a sample state as soon as autosample mode is entered by design.
def assert_start_autosample(self, timeout=GO_ACTIVE_TIMEOUT):
"""
Enter autosample mode from command
"""
res_state = self.instrument_agent_client.get_resource_state()
self.assertEqual(res_state, DriverProtocolState.COMMAND)
# Begin streaming.
cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
self.instrument_agent_client.execute_resource(cmd, timeout=timeout)
self.assert_state_change(ResourceAgentState.STREAMING, SamiProtocolState.AUTOSAMPLE, timeout=timeout)
def assert_sample_autosample(self, sample_data_assert, sample_queue,
timeout=GO_ACTIVE_TIMEOUT, sample_count=3):
"""
Test instrument driver execute interface to start and stop streaming
mode. Overridden because the SAMI drivers enter a sample state as soon as autosample mode is entered by design.
:param sample_data_assert: method to test samples
:param sample_queue: type of sample
:param timeout: timeout for sample retrieval
:param sample_count: number of samples expected
"""
res_state = self.instrument_agent_client.get_resource_state()
self.assertEqual(res_state, DriverProtocolState.COMMAND)
# Begin streaming.
cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
self.instrument_agent_client.execute_resource(cmd, timeout=timeout)
# Wait for driver to exit sample state
self.assert_particle_async(sample_queue, sample_data_assert, 1, timeout)
state = self.instrument_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.STREAMING)
sample_count -= 1
self.assert_particle_async(sample_queue, sample_data_assert, sample_count, timeout)
# Halt streaming.
self.assert_stop_autosample()
def setUp(self):
InstrumentDriverQualificationTestCase.setUp(self)
## Not applicable to this driver
def test_discover(self):
pass
def test_boot_prompt_escape(self):
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
# Cause boot prompt by entering L5A command without a config string
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(10)
self.tcp_client.send_data(SAMI_NEWLINE)
boot_prompt = self.tcp_client.expect(Prompt.BOOT_PROMPT)
self.assertTrue(boot_prompt)
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, SamiProtocolState.COMMAND, 60)
|
|
# -*- coding: utf-8 -*-
# -- Dual Licence ----------------------------------------------------------
############################################################################
# GPL License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
# --------------------------------------------------------------------------
############################################################################
# BSD 3-Clause License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are #
# met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
############################################################################
# iotivity note: this is a forked copy of the UnpackBuilder elsewhere
# in the tree, which instead of defining a Builder, adds a method UnpackAll.
# The Unpack Builder can be used for unpacking archives (eg Zip, TGZ, BZ, ... ).
# The emitter of the Builder reads the archive data and creates the returned file list;
# the builder extracts the archive. The environment variable "UNPACK" stores a dictionary
# for configuring the different extractors (subdict "EXTRACTOR"):
# {
# PRIORITY => a value for setting the extractor order (lower numbers = extractor is used earlier)
# SUFFIX => defines a list with file suffixes, which should be handled with this extractor
# EXTRACTSUFFIX => suffix of the extract command
# EXTRACTFLAGS => a string parameter for the RUN command for extracting the data
# EXTRACTCMD => full extract command of the builder
# RUN => the main program which will be started (if the parameter is empty, the extractor will be ignored)
# LISTCMD => the listing command for the emitter
# LISTFLAGS => the string options for the RUN command for showing a list of files
# LISTSUFFIX => suffix of the list command
# LISTEXTRACTOR => an optional Python function that is called on each output line of the
# LISTCMD to extract file & dir names; the function takes four parameters (environment,
# total line count, line number, line content) and must return a string with the file /
# dir path (other return types are ignored)
# }
# Other options in the UNPACK dictionary are:
# STOPONEMPTYFILE => bool variable for stopping if the file has empty size (default True)
# VIWEXTRACTOUTPUT => shows the output messages of the extraction command (default False)
# EXTRACTDIR => path in that the data will be extracted (default #)
#
# A file is handled by the first extractor whose suffix matches; the extractor list can be extended for other file types.
# The order of the extractor dictionary determines the listing & extract command, eg the file extension .tar.gz should come
# before .gz, because a .tar.gz archive is extracted in one shot.
#
# Under *nix system these tools are supported: tar, bzip2, gzip, unzip
# Under Windows only 7-Zip (http://www.7-zip.org/) is supported
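# Example usage (a sketch; the paths are hypothetical): once this tool has
# been loaded, an SConscript can unpack an archive through the UnpackAll
# method registered in generate() below. UnpackAll(target, source) is a
# no-op if the target path already exists:
#
#     Import('env')
#     env.UnpackAll(os.path.join('extlibs', 'foo'), 'downloads/foo.tar.gz')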
import subprocess, os
import SCons.Errors, SCons.Warnings
import SCons.Util
# enables Scons warning for this builder
class UnpackWarning(SCons.Warnings.Warning) :
pass
SCons.Warnings.enableWarningClass(UnpackWarning)
# extractor function for Tar output
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_tar( env, count, no, i ) :
return i.split()[-1]
# extractor function for GZip output,
# ignore the first line
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_gzip( env, count, no, i ) :
if no == 0 :
return None
return i.split()[-1]
# extractor function for Unzip output,
# ignore the first & last two lines
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_unzip( env, count, no, i ) :
if no < 3 or no >= count - 2 :
return None
return i.split()[-1]
# extractor function for 7-Zip
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_win_7zip( env, count, no, i ) :
item = i.split()
if no > 8 and no < count - 2 :
return item[-1]
return None
# returns the extractor item for handling the source file
# @param source input source file
# @param env environment object
# @return extractor entry or None on non existing
def __getExtractor( source, env ) :
# we check each unpacker and get the correct list command first, run the command and
# replace the target filelist with the list values; we sort the extractors by their priority
for unpackername, extractor in sorted(env["UNPACK"]["EXTRACTOR"].items(), key = lambda kv : (kv[1]["PRIORITY"], kv[0])):
# the RUN command must be a non-empty string; otherwise the extractor configuration is broken and we stop with an error
if not SCons.Util.is_String(extractor["RUN"]) :
raise SCons.Errors.StopError("run command of the unpack builder for [%s] archives is not a string" % (unpackername))
if not len(extractor["RUN"]) :
raise SCons.Errors.StopError("run command of the unpack builder for [%s] archives is not set - can not extract files" % (unpackername))
if not SCons.Util.is_String(extractor["LISTFLAGS"]) :
raise SCons.Errors.StopError("list flags of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["LISTCMD"]) :
raise SCons.Errors.StopError("list command of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["EXTRACTFLAGS"]) :
raise SCons.Errors.StopError("extract flags of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["EXTRACTCMD"]) :
raise SCons.Errors.StopError("extract command of the unpack builder for [%s] archives is not a string" % (unpackername))
# check the source file suffix and if the first is found, run the list command
if not SCons.Util.is_List(extractor["SUFFIX"]) :
raise SCons.Errors.StopError("suffix list of the unpack builder for [%s] archives is not a list" % (unpackername))
for suffix in extractor["SUFFIX"] :
if str(source[0]).lower()[-len(suffix):] == suffix.lower() :
return extractor
return None
# creates the extractor output message
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message( s, target, source, env ) :
print("extract [%s] ..." % (source[0]))
# action function for extracting the data
# @param target extracted files
# @param source packed archive file
# @env environment object
def __action( target, source, env ) :
extractor = __getExtractor([File(source)], env)
if not extractor :
raise SCons.Errors.StopError( "can not find any extractor value for the source file [%s]" % (source[0]) )
# if the extract command is empty, we create an error
if len(extractor["EXTRACTCMD"]) == 0 :
raise SCons.Errors.StopError( "the extractor command for the source file [%s] is empty" % (source[0]) )
# build it now (we need the shell, because some programs need it)
handle = None
cmd = env.subst(extractor["EXTRACTCMD"], source=source, target=target)
if env["UNPACK"]["VIWEXTRACTOUTPUT"] :
handle = subprocess.Popen( cmd, shell=True )
else :
devnull = open(os.devnull, "wb")
handle = subprocess.Popen( cmd, shell=True, stdout=devnull )
if handle.wait() != 0 :
raise SCons.Errors.BuildError( "error running extractor [%s] on the source [%s]" % (cmd, source[0]) )
# emitter function for getting the files
# within the archive
# @param target extracted files
# @param source packed archive file
# @env environment object
def __emitter( target, source, env ) :
extractor = __getExtractor(source, env)
if not extractor :
raise SCons.Errors.StopError( "can not find any extractor value for the source file [%s]" % (source[0]) )
# we do a little trick here, because in some cases we do not have a physical
# file yet (eg we download a packed archive), so we cannot list the archive
# or know the targets. On physical files we can do this with the LISTCMD, but
# on non-physical files we rely on the user knowing the target files, so we
# inject this knowledge into the returned target list.
if "UNPACKLIST" in env:
if not SCons.Util.is_List(env["UNPACKLIST"]) and not SCons.Util.is_String(env["UNPACKLIST"]) :
raise SCons.Errors.StopError( "manual target list of [%s] must be a string or list" % (source[0]) )
if not env["UNPACKLIST"] :
raise SCons.Errors.StopError( "manual target list of [%s] need not be empty" % (source[0]) )
return env["UNPACKLIST"], source
# we check if the source file exists, because we would like to read the data
if not source[0].exists() :
raise SCons.Errors.StopError( "source file [%s] must be exist" % (source[0]) )
# create the list command, run it in a subprocess, and pipe the output to a variable;
# we need the shell for reading data from stdout
cmd = env.subst(extractor["LISTCMD"], source=source, target=target)
handle = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE )
target = handle.stdout.readlines()
handle.communicate()
if handle.returncode != 0 :
raise SCons.Errors.StopError("error on running list command [%s] of the source file [%s]" % (cmd, source[0]) )
# if the listing output exists and the LISTEXTRACTOR is a callable structure
# we run it on each line of the output, and if the callable returns
# a string we push it back into the target list
try :
if callable(extractor["LISTEXTRACTOR"]) :
target = filter(lambda s: SCons.Util.is_String(s), [extractor["LISTEXTRACTOR"](env, len(target), no, i) for no, i in enumerate(target)])
except Exception as e :
raise SCons.Errors.StopError( "%s" % (e) )
# this line removes duplicated names - we need it, otherwise a cyclic dependency error will occur,
# because the list process can create redundant entries (an archive can not store redundant content under one filepath)
target = [i.strip() for i in list(set(target))]
if not target :
SCons.Warnings.warn(UnpackWarning, "emitter file list on target [%s] is empty, please check your extractor list function [%s]" % (source[0], cmd) )
# we prepend the extract dir to each target path if it is not absolute
if env["UNPACK"]["EXTRACTDIR"] != "." :
target = [i if os.path.isabs(i) else os.path.join(env["UNPACK"]["EXTRACTDIR"], i) for i in target]
return target, source
def __unpack_all(env, target, source) :
if os.path.exists(target):
return
print("Unpacking %s ..." % source)
__action(target, source, env)
# generate function that adds the builder to the environment
# @env environment object
def generate( env ) :
# setup environment variable
toolset = {
"STOPONEMPTYFILE" : True,
"VIWEXTRACTOUTPUT" : False,
"EXTRACTDIR" : os.curdir,
"EXTRACTOR" : {
"TARGZ" : {
"PRIORITY" : 0,
"SUFFIX" : [".tar.gz", ".tgz", ".tar.gzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"TARBZ" : {
"PRIORITY" : 0,
"SUFFIX" : [".tar.bz", ".tbz", ".tar.bz2", ".tar.bzip2", ".tar.bzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"BZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".bz", "bzip", ".bz2", ".bzip2"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"GZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".gz", ".gzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"TAR" : {
"PRIORITY" : 1,
"SUFFIX" : [".tar"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"ZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".zip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
}
}
}
# read tools for Windows system
if env["PLATFORM"] != "darwin" and "win" in env["PLATFORM"] :
if env.WhereIs('7z', env.get('PATH')):
toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARGZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} l -sii -ttar -y -so"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} x -sii -ttar -y -o${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARBZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARBZ']['RUN']} l -sii -ttar -y -so"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARBZ']['RUN']} x -sii -ttar -y -o${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["BZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["BZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["BZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["BZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["GZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["GZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["ZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["ZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-y -o${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TAR"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["TAR"]["LISTSUFFIX"] = "-y -ttar -so"
toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-y -ttar -o${UNPACK['EXTRACTDIR']}"
else:
print('''
*********************** Error ************************
* *
* Please make sure that 7-zip is in your System PATH *
* *
******************************************************
''')
# other Windows tools that can handle these archive files could be added here,
# but I don't know which ones can handle all file types
# read the tools on *nix systems and set the default parameters
elif env["PLATFORM"] in ["darwin", "linux", "posix", "msys"] :
if env.WhereIs("unzip") :
toolset["EXTRACTOR"]["ZIP"]["RUN"] = "unzip"
toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_unzip
toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "-l"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "-oqq"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-d ${UNPACK['EXTRACTDIR']}"
if env.WhereIs("tar") :
toolset["EXTRACTOR"]["TAR"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "tvf"
toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "xf"
toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "xfz"
toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "tvfz"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "xfj"
toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "tvfj"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
if env.WhereIs("bzip2") :
toolset["EXTRACTOR"]["BZIP"]["RUN"] = "bzip2"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "-df"
if env.WhereIs("gzip") :
toolset["EXTRACTOR"]["GZIP"]["RUN"] = "gzip"
toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_gzip
toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "-l"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "-df"
else :
raise SCons.Errors.StopError("Unpack tool detection on this platform [%s] unkown" % (env["PLATFORM"]))
# the target_factory must be an "Entry", because the target list can contain
# files and dirs, so we cannot specify the target_factory explicitly
env.Replace(UNPACK = toolset)
#env["BUILDERS"]["UnpackAll"] = SCons.Builder.Builder(
# action=__action,
# emitter=__emitter,
# target_factory=SCons.Node.FS.Entry,
# source_factory=SCons.Node.FS.File,
# single_source=True,
# PRINT_CMD_LINE_FUNC=__message
#)
env.AddMethod(__unpack_all, 'UnpackAll')
# exists function of the builder
# @param env environment object
# @return true
def exists(env) :
return 1
Import('env')
generate(env)
|
|
# Purpose: Script containing Settings for the Model
#
# Info: Change the Parameters at the top of the script to change how the Agent interacts
#
# Developed as part of the Software Agents Course at City University
#
# Dev: Dan Dixey and Enrico Lopedoto
#
# Updated: 10/3/2016
#
import json
import os
import numpy as np
model_version = 3
# Case 1 - Default Evaluation
# Complete: 1, 2
case_one = dict(trials=500,
completed=100,
crashed=-100,
open=1,
alpha=0.75,
epsilon=0.15,
gamma=0.99,
nb_actions=5,
model=model_version,
epsilon_decay=0.9,
epsilon_action=6000,
change_values=[],
train=True)
# Case 2 - Change Gamma values
# Complete: 1, 2, 3
case_two = dict(trials=500,
completed=100,
crashed=-100,
open=1,
alpha=0.75,
epsilon=0.15,
gamma=np.arange(0.1, 1.1, 0.1),
nb_actions=5,
model=model_version,
epsilon_decay=0.9,
epsilon_action=6000,
change_values=['gamma'],
train=True)
# Case 3 - Change Learning Rates
# Complete: 1, 2
# Important to Note: DQN implementation does not use Alpha
case_three = dict(trials=200,
completed=500,
crashed=-100,
open=5,
alpha=np.arange(0.1, 1.1, 0.1),
epsilon=0.75,
gamma=0.7,
nb_actions=5,
model=model_version,
epsilon_decay=0.9,
epsilon_action=6000,
change_values=['alpha'],
train=True)
# Case 4 - Different policies (epsilon)
# Complete: 1, 2, 3
case_four = dict(trials=550,
completed=100,
crashed=-100,
open=1,
alpha=0.65,
epsilon=np.arange(0.1, 1.1, 0.1),
gamma=0.7,
nb_actions=5,
model=model_version,
epsilon_decay=0.9,
epsilon_action=6000,
change_values=['epsilon'],
train=True)
# Case 5 - different Reward functions
# Complete: 1, 2
case_five = dict(trials=550,
completed=np.arange(50, 235, 20),
crashed=np.arange(-50, -235, -20),
open=np.arange(-5, 5),
alpha=0.75,
epsilon=0.15,
gamma=0.7,
nb_actions=5,
model=model_version,
epsilon_decay=0.9,
epsilon_action=6000,
change_values=['completed',
'crashed',
'open'],
train=True)
# Case Dictionary
case_lookup = dict(case_one=case_one,
case_two=case_two,
case_three=case_three,
case_four=case_four,
case_five=case_five)
def save_results(case, settings, results):
"""
Save all results to a JSON file
:param case: str
:param settings: dict
:param results: list
:return: None
"""
path = os.path.join(os.getcwd(),
                    'Results',
                    case,
                    'Model{}.json'.format(settings['model']))
with open(path, 'w') as f:
    f.write(json.dumps(results))
def load_results(directory, model):
"""
Loading the Settings File
:param directory: str
:param model: int
:return: dict
"""
path = os.path.join(directory, 'Model{}.json'.format(model))
with open(path, 'r') as f:
    return json.loads(f.read())
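# Round-trip sketch (illustrative data): saving a result list and reading it
# back with the two helpers above.
#
#     save_results('case_one', case_one, [1, 2, 3])
#     data = load_results(os.path.join(os.getcwd(), 'Results', 'case_one'),
#                         case_one['model'])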
def check_files(settings, case, value_iter):
"""
In case the Train File Stops...
:param settings: dict
:param case: str
:param value_iter: int
:return: Boolean
"""
name = 'model_{}_case_{}_iter_{}'.format(
settings['model'],
case.split('_')[1],
value_iter)
path = os.path.join(os.getcwd(), 'Results', case) + \
'/Model{}'.format(settings['model']) + '.json'
results_file = os.path.isfile(path)
# Depending on Model No. Check if Model Memory is Saved
if settings['model'] < 3:
path = os.path.join(
os.getcwd(), 'Model/NN_Model/', name + '.pkl')
model_saved = os.path.isfile(path)
else:
path = os.path.join(
os.getcwd(),
'Model/NN_Model/',
name + '_weights.h5')
model_saved = os.path.isfile(path)
if results_file and model_saved:
continue_on = True
else:
continue_on = False
return continue_on, name
def get_indicies(data, ind=0):
"""
Get the number of Iterations Required for Dictionary
:param data: dict
:param ind: int
:return: tuple(int, dict)
"""
if len(data['change_values']) > 0:
return 10, get_settings(data)
else:
return 1, data
# Get New Dictionary values
def get_settings(dictionary=None, ind=0):
"""
Get Next value in dictionary
:param dictionary: dict
:param ind: int
:return: dict
"""
new_dict = dictionary.copy()
for each_value in dictionary['change_values']:
new_dict[each_value] = dictionary[each_value][ind]
return new_dict
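# Illustrative sketch (the surrounding training driver is assumed and not
# shown here): a run script would consume these helpers roughly as follows.
#
#     case = 'case_two'
#     n_iters, settings = get_indicies(case_lookup[case])
#     for i in range(n_iters):
#         run_settings = get_settings(case_lookup[case], ind=i)
#         # ... train the agent with run_settings ...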
results = dict(time_chart=[],
final_location=[],
best_test=[],
q_plot=[],
model_names=[],
q_matrix=[],
paths=[])
t_array = [] # Storing Time to Complete
f_array = [] # Storing Final Locations
b_array = [] # Storing Full Control
path = []
|
|
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sippy.SipGenericHF import SipGenericHF
from sippy.Security.SipNonce import HashOracle, DGST_MD5, DGST_MD5SESS, \
DGST_SHA256, DGST_SHA256SESS, DGST_SHA512, DGST_SHA512SESS
from hashlib import md5, sha256
from time import time
from Crypto.Hash import SHA512
class sha512_256(object):
d = None
def __init__(self):
self.d = SHA512.new(truncate = '256')
def update(self, arg):
self.d.update(arg)
def digest(self):
return self.d.digest()
def hexdigest(self):
return self.d.hexdigest()
_HASH_FUNC = {None:(md5, DGST_MD5), 'MD5':(md5, DGST_MD5), 'MD5-sess':(md5, DGST_MD5SESS), \
'SHA-256':(sha256, DGST_SHA256), 'SHA-256-sess':(sha256, DGST_SHA256SESS), \
'SHA-512-256':(sha512_256, DGST_SHA512), 'SHA-512-256-sess':(sha512_256, DGST_SHA512SESS)}
class SipAuthorization(SipGenericHF):
hf_names = ('authorization',)
username = None
uri = None
realm = None
nonce = None
response = None
qop = None
cnonce = None
nc = None
algorithm = None
otherparams = None
ho = HashOracle()
def __init__(self, body = None, username = None, uri = None, realm = None, nonce = None, response = None, \
cself = None):
SipGenericHF.__init__(self, body)
if body != None:
return
self.parsed = True
if cself != None:
self.username = cself.username
self.uri = cself.uri
self.realm = cself.realm
self.nonce = cself.nonce
self.response = cself.response
self.qop = cself.qop
self.cnonce = cself.cnonce
self.nc = cself.nc
self.algorithm = cself.algorithm
self.otherparams = cself.otherparams[:]
return
self.username = username
self.uri = uri
self.realm = realm
self.nonce = nonce
self.response = response
self.otherparams = []
def parse(self):
self.otherparams = []
for name, value in [x.strip(', ').split('=', 1) for x in self.body.split(' ', 1)[1].split(',')]:
ci_name = name.lower()
if ci_name == 'username':
self.username = value.strip('"')
elif ci_name == 'uri':
self.uri = value.strip('"')
elif ci_name == 'realm':
self.realm = value.strip('"')
elif ci_name == 'nonce':
self.nonce = value.strip('"')
elif ci_name == 'response':
self.response = value.strip('"')
elif ci_name == 'qop':
self.qop = value.strip('"')
elif ci_name == 'cnonce':
self.cnonce = value.strip('"')
elif ci_name == 'nc':
self.nc = value.strip('"')
elif ci_name == 'algorithm':
self.algorithm = value.strip('"')
else:
self.otherparams.append((name, value))
self.parsed = True
def genAuthResponse(self, password, method, body):
HA1 = DigestCalcHA1(self.algorithm, self.username, self.realm, password, \
self.nonce, self.cnonce)
self.response = DigestCalcResponse(self.algorithm, HA1, self.nonce, \
self.nc, self.cnonce, self.qop, method, self.uri, body)
def __str__(self):
if not self.parsed:
return self.body
rval = 'Digest username="%s",realm="%s",nonce="%s",uri="%s",response="%s"' % \
(self.username, self.realm, self.nonce, self.uri, self.response)
if self.algorithm != None:
rval += ',algorithm=%s' % (self.algorithm,)
if self.qop != None:
rval += ',qop=%s,nc=%s,cnonce="%s"' % (self.qop, self.nc, self.cnonce)
for param in self.otherparams:
rval += ',%s=%s' % param
return rval
def getCopy(self):
if not self.parsed:
return self.__class__(self.body)
return self.__class__(cself = self)
def verify(self, password, method, body = None):
if not self.parsed:
self.parse()
HA1 = DigestCalcHA1(self.algorithm, self.username, self.realm, password, self.nonce, self.cnonce)
return self.verifyHA1(HA1, method, body)
def verifyHA1(self, HA1, method, body):
if not self.parsed:
self.parse()
if self.algorithm not in _HASH_FUNC:
return False
if self.qop != None and self.qop not in ('auth', 'auth-int'):
return False
algmask = _HASH_FUNC[self.algorithm][1]
if not self.ho.validate_challenge(self.nonce, (algmask,)):
return False
response = DigestCalcResponse(self.algorithm, HA1, self.nonce, self.nc, \
self.cnonce, self.qop, method, self.uri, body)
return response == self.response
def getCanName(self, name, compact = False):
return 'Authorization'
def IsDigestAlgSupported(algorithm):
return (algorithm in _HASH_FUNC)
def NameList2AlgMask(nlist):
return tuple([_HASH_FUNC[x][1] for x in nlist])
def DigestCalcHA1(pszAlg, pszUserName, pszRealm, pszPassword, pszNonce, pszCNonce):
delim = ':'.encode()
hashfunc = _HASH_FUNC[pszAlg][0]
m = hashfunc()
m.update(pszUserName.encode())
m.update(delim)
m.update(pszRealm.encode())
m.update(delim)
m.update(pszPassword.encode())
HA1 = m.hexdigest().encode()
if pszAlg and pszAlg.endswith('-sess'):
m = hashfunc()
m.update(HA1)
m.update(delim)
m.update(pszNonce.encode())
m.update(delim)
m.update(pszCNonce.encode())
HA1 = m.hexdigest().encode()
return HA1
def DigestCalcResponse(pszAlg, HA1, pszNonce, pszNonceCount, pszCNonce, pszQop, pszMethod, pszDigestUri, pszHEntity):
delim = ':'.encode()
hashfunc = _HASH_FUNC[pszAlg][0]
m = hashfunc()
m.update(pszMethod.encode())
m.update(delim)
m.update(pszDigestUri.encode())
if pszQop == "auth-int":
m.update(delim)
if pszHEntity is None:
pszHEntity = ''
m1 = hashfunc()
m1.update(pszHEntity.encode())
HA_pszHEntity = m1.hexdigest()
m.update(HA_pszHEntity.encode())
HA2 = m.hexdigest()
m = hashfunc()
m.update(HA1)
m.update(delim)
m.update(pszNonce.encode())
m.update(delim)
if pszNonceCount and pszCNonce and pszQop:
m.update(pszNonceCount.encode())
m.update(delim)
m.update(pszCNonce.encode())
m.update(delim)
m.update(pszQop.encode())
m.update(delim)
m.update(HA2.encode())
response = m.hexdigest()
return response
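# Self-check sketch (not part of the original module): exercises the two
# helpers above with the well-known example values from RFC 2617 section 3.5
# (qop=auth, algorithm MD5), whose expected response digest is published in
# the RFC.
if __name__ == '__main__':
    _nonce = 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
    _ha1 = DigestCalcHA1('MD5', 'Mufasa', 'testrealm@host.com',
                         'Circle Of Life', _nonce, '0a4f113b')
    _resp = DigestCalcResponse('MD5', _ha1, _nonce, '00000001', '0a4f113b',
                               'auth', 'GET', '/dir/index.html', None)
    assert _resp == '6629fae49393a05397450978507c4ef1', _resp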
|
|
# coding=utf-8
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic Native IPMI power manager.
"""
import os
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.drivers import utils as driver_utils
pyghmi = importutils.try_import('pyghmi')
if pyghmi:
from pyghmi import exceptions as pyghmi_exception
from pyghmi.ipmi import command as ipmi_command
opts = [
cfg.IntOpt('retry_timeout',
default=60,
help=_('Maximum time in seconds to retry IPMI operations. '
'There is a tradeoff when setting this value. Setting '
'this too low may cause older BMCs to crash and require '
'a hard reset. However, setting too high can cause the '
'sync power state periodic task to hang when there are '
'slow or unresponsive BMCs.')),
cfg.IntOpt('min_command_interval',
default=5,
help=_('Minimum time, in seconds, between IPMI operations '
'sent to a server. There is a risk with some hardware '
'that setting this too low may cause the BMC to crash. '
'Recommended setting is 5 seconds.')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='ipmi')
LOG = logging.getLogger(__name__)
REQUIRED_PROPERTIES = {'ipmi_address': _("IP of the node's BMC. Required."),
'ipmi_password': _("IPMI password. Required."),
'ipmi_username': _("IPMI username. Required.")}
OPTIONAL_PROPERTIES = {
'ipmi_force_boot_device': _("Whether Ironic should specify the boot "
"device to the BMC each time the server "
"is turned on, eg. because the BMC is not "
"capable of remembering the selected boot "
"device across power cycles; default value "
"is False. Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'hd',
boot_devices.PXE: 'network',
boot_devices.CDROM: 'cdrom',
boot_devices.BIOS: 'setup',
}
def _parse_driver_info(node):
"""Gets the bmc access info for the given node.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when the IPMI terminal port is not an
integer.
"""
info = node.driver_info or {}
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
bmc_info = {}
bmc_info['address'] = info.get('ipmi_address')
bmc_info['username'] = info.get('ipmi_username')
bmc_info['password'] = info.get('ipmi_password')
bmc_info['force_boot_device'] = info.get('ipmi_force_boot_device', False)
# get additional info
bmc_info['uuid'] = node.uuid
# terminal port must be an integer
port = info.get('ipmi_terminal_port')
if port is not None:
port = utils.validate_network_port(port, 'ipmi_terminal_port')
bmc_info['port'] = port
return bmc_info
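# A minimal sketch of the driver_info contract enforced above; FakeNode is
# a hypothetical stand-in, not a real ironic Node object:
#
#   class FakeNode(object):
#       uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
#       driver_info = {'ipmi_address': '10.0.0.1',
#                      'ipmi_username': 'admin',
#                      'ipmi_password': 'secret'}
#
#   bmc_info = _parse_driver_info(FakeNode())
#   # bmc_info['force_boot_device'] defaults to False and
#   # bmc_info['port'] is None unless 'ipmi_terminal_port' is set.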
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(CONF.tempdir, file_name)
def _power_on(driver_info):
"""Turn the power on for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
    :raises: PowerStateFailure when an invalid power state is returned
from ipmi.
"""
msg = _("IPMI power on failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('on', wait)
except pyghmi_exception.IpmiException as e:
error = msg % {'node_id': driver_info['uuid'], 'error': e}
LOG.error(error)
raise exception.IPMIFailure(error)
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
else:
error = _("bad response: %s") % ret
LOG.error(msg, {'node_id': driver_info['uuid'], 'error': error})
raise exception.PowerStateFailure(pstate=states.POWER_ON)
def _power_off(driver_info):
"""Turn the power off for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_OFF, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
    :raises: PowerStateFailure when an invalid power state is returned
from ipmi.
"""
msg = _("IPMI power off failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('off', wait)
except pyghmi_exception.IpmiException as e:
error = msg % {'node_id': driver_info['uuid'], 'error': e}
LOG.error(error)
raise exception.IPMIFailure(error)
state = ret.get('powerstate')
if state == 'off':
return states.POWER_OFF
else:
error = _("bad response: %s") % ret
LOG.error(msg, {'node_id': driver_info['uuid'], 'error': error})
raise exception.PowerStateFailure(pstate=states.POWER_OFF)
def _reboot(driver_info):
"""Reboot this node.
If the power is off, turn it on. If the power is on, reset it.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
    :raises: PowerStateFailure when an invalid power state is returned
from ipmi.
"""
msg = _("IPMI power reboot failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('boot', wait)
except pyghmi_exception.IpmiException as e:
error = msg % {'node_id': driver_info['uuid'], 'error': e}
LOG.error(error)
raise exception.IPMIFailure(error)
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
else:
error = _("bad response: %s") % ret
LOG.error(msg, {'node_id': driver_info['uuid'], 'error': error})
raise exception.PowerStateFailure(pstate=states.REBOOT)
def _power_status(driver_info):
"""Get the power status for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, POWER_OFF or ERROR defined in
:class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
"""
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_power()
except pyghmi_exception.IpmiException as e:
msg = (_("IPMI get power state failed for node %(node_id)s "
"with the following error: %(error)s") %
{'node_id': driver_info['uuid'], 'error': e})
LOG.error(msg)
raise exception.IPMIFailure(msg)
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
elif state == 'off':
return states.POWER_OFF
else:
# NOTE(linggao): Do not throw an exception here because it might
# return other valid values. It is up to the caller to decide
# what to do.
LOG.warning(_LW("IPMI get power state for node %(node_id)s returns the"
" following details: %(detail)s"),
{'node_id': driver_info['uuid'], 'detail': ret})
return states.ERROR
def _get_sensors_data(driver_info):
"""Get sensors data.
:param driver_info: node's driver info
:raises: FailedToGetSensorData when getting the sensor data fails.
    :returns: a dict of sensor data grouped by sensor type.
"""
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_sensor_data()
except Exception as e:
LOG.error(_LE("IPMI get sensor data failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.FailedToGetSensorData(
node=driver_info['uuid'], error=e)
if not ret:
return {}
sensors_data = {}
for reading in ret:
# ignore the sensor data which has no sensor reading value
if not reading.value:
continue
sensors_data.setdefault(
reading.type,
{})[reading.name] = {
'Sensor Reading': '%s %s' % (reading.value, reading.units),
'Sensor ID': reading.name,
'States': str(reading.states),
'Units': reading.units,
'Health': str(reading.health)}
return sensors_data
def _parse_raw_bytes(raw_bytes):
"""Parse raw bytes string.
:param raw_bytes: a string of hexadecimal raw bytes, e.g. '0x00 0x01'.
:returns: a tuple containing the arguments for pyghmi call as integers,
(IPMI net function, IPMI command, list of command's data).
:raises: InvalidParameterValue when an invalid value is specified.
"""
try:
bytes_list = [int(x, base=16) for x in raw_bytes.split()]
return bytes_list[0], bytes_list[1], bytes_list[2:]
except ValueError:
raise exception.InvalidParameterValue(_(
"Invalid raw bytes string: '%s'") % raw_bytes)
except IndexError:
raise exception.InvalidParameterValue(_(
"Raw bytes string requires two bytes at least."))
def _send_raw(driver_info, raw_bytes):
"""Send raw bytes to the BMC."""
netfn, command, data = _parse_raw_bytes(raw_bytes)
LOG.debug("Sending raw bytes %(bytes)s to node %(node_id)s",
{'bytes': raw_bytes, 'node_id': driver_info['uuid']})
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ipmicmd.xraw_command(netfn, command, data=data)
except pyghmi_exception.IpmiException as e:
msg = (_("IPMI send raw bytes '%(bytes)s' failed for node %(node_id)s"
" with the following error: %(error)s") %
{'bytes': raw_bytes, 'node_id': driver_info['uuid'],
'error': e})
LOG.error(msg)
raise exception.IPMIFailure(msg)
class NativeIPMIPower(base.PowerInterface):
"""The power driver using native python-ipmi library."""
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required ipmi credentials
are missing.
"""
_parse_driver_info(task.node)
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: power state POWER_ON, POWER_OFF or ERROR defined in
:class:`ironic.common.states`.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: IPMIFailure when the native ipmi call fails.
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: a power state that will be set on the task's node.
:raises: IPMIFailure when the native ipmi call fails.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when an invalid power state
            is specified.
        :raises: PowerStateFailure when an invalid power state is returned
from ipmi.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
driver_utils.ensure_next_boot_device(task, driver_info)
_power_on(driver_info)
elif pstate == states.POWER_OFF:
_power_off(driver_info)
else:
raise exception.InvalidParameterValue(
_("set_power_state called with an invalid power state: %s."
) % pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: IPMIFailure when the native ipmi call fails.
:raises: MissingParameterValue when required ipmi credentials
are missing.
        :raises: PowerStateFailure when an invalid power state is returned
from ipmi.
"""
driver_info = _parse_driver_info(task.node)
driver_utils.ensure_next_boot_device(task, driver_info)
_reboot(driver_info)
class NativeIPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: MissingParameterValue when required ipmi credentials
are missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(_BOOT_DEVICES_MAP.keys())
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
or required ipmi credentials are missing.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: IPMIFailure on an error from pyghmi.
"""
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
if task.node.driver_info.get('ipmi_force_boot_device', False):
driver_utils.force_persistent_boot(task,
device,
persistent)
            # Reset persistent to False in case the BMC does not support
            # persistent boot devices or we do not have admin rights.
persistent = False
driver_info = _parse_driver_info(task.node)
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
bootdev = _BOOT_DEVICES_MAP[device]
ipmicmd.set_bootdev(bootdev, persist=persistent)
except pyghmi_exception.IpmiException as e:
LOG.error(_LE("IPMI set boot device failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=e)
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: MissingParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from pyghmi.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
driver_info = task.node.driver_info
driver_internal_info = task.node.driver_internal_info
if (driver_info.get('ipmi_force_boot_device', False) and
driver_internal_info.get('persistent_boot_device') and
driver_internal_info.get('is_next_boot_persistent', True)):
return {
'boot_device': driver_internal_info['persistent_boot_device'],
'persistent': True
}
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None}
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_bootdev()
# FIXME(lucasagomes): pyghmi doesn't seem to handle errors
# consistently, for some errors it raises an exception
# others it just returns a dictionary with the error.
if 'error' in ret:
raise pyghmi_exception.IpmiException(ret['error'])
except pyghmi_exception.IpmiException as e:
LOG.error(_LE("IPMI get boot device failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=e)
response['persistent'] = ret.get('persistent')
bootdev = ret.get('bootdev')
if bootdev:
response['boot_device'] = next((dev for dev, hdev in
_BOOT_DEVICES_MAP.items()
if hdev == bootdev), None)
return response
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :returns: a dict of sensor data grouped by sensor type.
"""
driver_info = _parse_driver_info(task.node)
return _get_sensors_data(driver_info)
class NativeIPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses pyghmi and shellinabox."""
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required IPMI credentials or
the IPMI terminal port are missing
:raises: InvalidParameterValue when the IPMI terminal port is not
an integer.
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when the IPMI terminal port is not an
integer.
:raises: ConsoleError if unable to start the console process.
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
console_cmd = ("/:%(uid)s:%(gid)s:HOME:pyghmicons %(bmc)s"
" %(user)s"
" %(passwd_file)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'bmc': driver_info['address'],
'user': driver_info['username'],
'passwd_file': pw_file})
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
console_cmd)
except exception.ConsoleError:
with excutils.save_and_reraise_exception():
ironic_utils.unlink_without_raise(path)
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a TaskManager instance containing the node to act on.
:raises: ConsoleError if unable to stop the console process.
"""
try:
console_utils.stop_shellinabox_console(task.node.uuid)
finally:
password_file = _console_pwfile_path(task.node.uuid)
ironic_utils.unlink_without_raise(password_file)
def get_console(self, task):
"""Get the type and connection information about the console.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required IPMI credentials or
the IPMI terminal port are missing
:raises: InvalidParameterValue when the IPMI terminal port is not
an integer.
"""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
class VendorPassthru(base.VendorInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
:param task: a task from TaskManager.
:param method: method to be validated
:param kwargs: info for action.
:raises: InvalidParameterValue when an invalid parameter value is
specified.
:raises: MissingParameterValue if a required parameter is missing.
"""
if method == 'send_raw':
raw_bytes = kwargs.get('raw_bytes')
if not raw_bytes:
raise exception.MissingParameterValue(_(
'Parameter raw_bytes (string of bytes) was not '
'specified.'))
_parse_raw_bytes(raw_bytes)
_parse_driver_info(task.node)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def send_raw(self, task, http_method, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from native IPMI call.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
driver_info = _parse_driver_info(task.node)
_send_raw(driver_info, raw_bytes)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def bmc_reset(self, task, http_method, warm=True):
"""Reset BMC via IPMI command.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param warm: boolean parameter to decide on warm or cold reset.
:raises: IPMIFailure on an error from native IPMI call.
:raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified.
"""
driver_info = _parse_driver_info(task.node)
# NOTE(yuriyz): pyghmi 0.8.0 does not have a method for BMC reset
command = '0x03' if warm else '0x02'
raw_command = '0x06 ' + command
_send_raw(driver_info, raw_command)
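# NOTE: the raw payloads composed above map to the standard IPMI
# Application commands Cold Reset (netfn 0x06, cmd 0x02) and Warm Reset
# (netfn 0x06, cmd 0x03); e.g. bmc_reset(task, 'POST', warm=False) ends up
# calling _send_raw(driver_info, '0x06 0x02').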
|
|
"""
Filter effects structure.
"""
from __future__ import absolute_import, unicode_literals
import attr
import io
import logging
from psd_tools.psd.base import BaseElement, ListElement
from psd_tools.utils import (
read_fmt,
write_fmt,
read_length_block,
write_length_block,
is_readable,
write_bytes,
read_pascal_string,
write_pascal_string,
)
logger = logging.getLogger(__name__)
@attr.s(repr=False, slots=True)
class FilterEffects(ListElement):
"""
List-like FilterEffects structure. See :py:class:`FilterEffect`.
.. py:attribute:: version
"""
version = attr.ib(default=1, type=int)
@classmethod
def read(cls, fp, **kwargs):
version = read_fmt('I', fp)[0]
assert version in (1, 2, 3), 'Invalid version %d' % (version)
items = []
while is_readable(fp, 8):
with io.BytesIO(read_length_block(fp, fmt='Q', padding=4)) as f:
items.append(FilterEffect.read(f))
return cls(version=version, items=items)
def write(self, fp, **kwargs):
written = write_fmt(fp, 'I', self.version)
for item in self:
written += write_length_block(
fp, lambda f: item.write(f), fmt='Q', padding=4
)
return written
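# A minimal round-trip sketch (illustrative; assumes ListElement defaults
# to an empty item list):
#
#   with io.BytesIO() as f:
#       FilterEffects(version=1).write(f)
#       f.seek(0)
#       assert FilterEffects.read(f).version == 1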
@attr.s(repr=False, slots=True)
class FilterEffect(BaseElement):
"""
FilterEffect structure.
.. py:attribute:: uuid
.. py:attribute:: version
.. py:attribute:: rectangle
.. py:attribute:: depth
.. py:attribute:: max_channels
.. py:attribute:: channels
List of :py:class:`FilterEffectChannel`.
.. py:attribute:: extra
See :py:class:`FilterEffectExtra`.
"""
uuid = attr.ib(default=None)
version = attr.ib(default=None)
rectangle = attr.ib(default=None)
depth = attr.ib(default=None)
max_channels = attr.ib(default=None)
channels = attr.ib(default=None)
extra = attr.ib(default=None)
@classmethod
def read(cls, fp, **kwargs):
uuid = read_pascal_string(fp, encoding='ascii', padding=1)
version = read_fmt('I', fp)[0]
assert version <= 1, 'Invalid version %d' % (version)
with io.BytesIO(read_length_block(fp, fmt='Q')) as f:
rectangle, depth, max_channels, channels = cls._read_body(f)
# Documentation is incorrect here.
extra = FilterEffectExtra.read(fp) if is_readable(fp) else None
return cls(
uuid, version, rectangle, depth, max_channels, channels, extra
)
@classmethod
def _read_body(cls, fp):
rectangle = read_fmt('4i', fp)
depth, max_channels = read_fmt('2I', fp)
channels = []
for _ in range(max_channels + 2):
channels.append(FilterEffectChannel.read(fp))
return rectangle, depth, max_channels, channels
def write(self, fp, **kwargs):
written = write_pascal_string(
fp, self.uuid, encoding='ascii', padding=1
)
written += write_fmt(fp, 'I', self.version)
def writer(f):
return self._write_body(f)
written += write_length_block(fp, writer, fmt='Q')
if self.extra is not None:
written += self.extra.write(fp)
return written
def _write_body(self, fp):
written = write_fmt(fp, '4i', *self.rectangle)
written += write_fmt(fp, '2I', self.depth, self.max_channels)
for channel in self.channels:
written += channel.write(fp)
return written
@attr.s(repr=False, slots=True)
class FilterEffectChannel(BaseElement):
"""
FilterEffectChannel structure.
.. py:attribute:: is_written
.. py:attribute:: compression
.. py:attribute:: data
"""
is_written = attr.ib(default=0)
compression = attr.ib(default=None)
data = attr.ib(default=b'')
@classmethod
def read(cls, fp, **kwargs):
is_written = read_fmt('I', fp)[0]
if is_written == 0:
return cls(is_written=is_written)
data = read_length_block(fp, fmt='Q')
if len(data) == 0:
return cls(is_written=is_written)
with io.BytesIO(data) as f:
compression = read_fmt('H', f)[0]
data = f.read()
return cls(is_written, compression, data)
def write(self, fp, **kwargs):
written = write_fmt(fp, 'I', self.is_written)
if self.is_written == 0:
return written
def writer(f):
if self.compression is None:
return 0
length = write_fmt(f, 'H', self.compression)
length += write_bytes(f, self.data)
return length
written += write_length_block(fp, writer, fmt='Q')
return written
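# Wire-format sketch for the channel structure above: an unwritten channel
# is a single zero uint32, while a written channel is a uint32 flag
# followed by a uint64 length-prefixed block holding a uint16 compression
# code and the raw channel bytes, e.g.:
#
#   with io.BytesIO() as f:
#       FilterEffectChannel(is_written=1, compression=0,
#                           data=b'\x00').write(f)
#       f.seek(0)
#       assert FilterEffectChannel.read(f).data == b'\x00'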
@attr.s(repr=False, slots=True)
class FilterEffectExtra(BaseElement):
"""
FilterEffectExtra structure.
.. py:attribute:: is_written
.. py:attribute:: rectangle
.. py:attribute:: compression
.. py:attribute:: data
"""
is_written = attr.ib(default=0)
rectangle = attr.ib(factory=lambda: [0, 0, 0, 0], converter=list)
compression = attr.ib(default=0, type=int)
data = attr.ib(default=b'', type=bytes)
@classmethod
def read(cls, fp):
is_written = read_fmt('B', fp)[0]
if not is_written:
return cls(is_written=is_written)
rectangle = read_fmt('4i', fp)
compression = 0
data = b''
with io.BytesIO(read_length_block(fp, fmt='Q')) as f:
compression = read_fmt('H', f)[0]
data = f.read()
return cls(is_written, rectangle, compression, data)
def write(self, fp):
written = write_fmt(fp, 'B', self.is_written)
def writer(f):
length = write_fmt(f, 'H', self.compression)
length += write_bytes(f, self.data)
return length
if self.is_written:
written += write_fmt(fp, '4i', *self.rectangle)
written += write_length_block(fp, writer, fmt='Q')
return written
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from glance.common import config
from glance.common import exception
from glance import context
import glance.tests.functional.db as db_tests
from glance.tests import utils as test_utils
def build_namespace_fixture(**kwargs):
namespace = {
'namespace': u'MyTestNamespace',
'display_name': u'test-display-name',
'description': u'test-description',
'visibility': u'public',
'protected': 0,
'owner': u'test-owner'
}
namespace.update(kwargs)
return namespace
def build_resource_type_fixture(**kwargs):
resource_type = {
'name': u'MyTestResourceType',
'protected': 0
}
resource_type.update(kwargs)
return resource_type
def build_association_fixture(**kwargs):
association = {
'name': u'MyTestResourceType',
'properties_target': 'test-properties-target',
'prefix': 'test-prefix'
}
association.update(kwargs)
return association
def build_object_fixture(**kwargs):
# Full testing of required and schema done via rest api tests
    obj = {
        'namespace_id': 1,
        'name': u'test-object-name',
        'description': u'test-object-description',
        'required': u'fake-required-properties-list',
        'json_schema': u'{fake-schema}'
    }
    obj.update(kwargs)
    return obj
def build_property_fixture(**kwargs):
# Full testing of required and schema done via rest api tests
    prop = {
        'namespace_id': 1,
        'name': u'test-property-name',
        'json_schema': u'{fake-schema}'
    }
    prop.update(kwargs)
    return prop
def build_tag_fixture(**kwargs):
# Full testing of required and schema done via rest api tests
tag = {
'namespace_id': 1,
'name': u'test-tag-name',
}
tag.update(kwargs)
return tag
def build_tags_fixture(tag_name_list):
tag_list = []
for tag_name in tag_name_list:
tag_list.append({'name': tag_name})
return tag_list
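# The builders above overlay kwargs on a baseline dict, so a test can
# override a single field while keeping the rest of the fixture, e.g.:
#
#   ns = build_namespace_fixture(namespace=u'OtherNamespace')
#   assert ns['visibility'] == u'public'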
class TestMetadefDriver(test_utils.BaseTestCase):
"""Test Driver class for Metadef tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestMetadefDriver, self).setUp()
config.parse_args(args=[])
context_cls = context.RequestContext
self.adm_context = context_cls(is_admin=True,
auth_token='user:user:admin')
self.context = context_cls(is_admin=False,
auth_token='user:user:user')
self.db_api = db_tests.get_db(self.config)
db_tests.reset_db(self.db_api)
def _assert_saved_fields(self, expected, actual):
for k in expected.keys():
self.assertEqual(expected[k], actual[k])
class MetadefNamespaceTests(object):
def test_namespace_create(self):
fixture = build_namespace_fixture()
created = self.db_api.metadef_namespace_create(self.context, fixture)
self.assertIsNotNone(created)
self._assert_saved_fields(fixture, created)
def test_namespace_create_duplicate(self):
fixture = build_namespace_fixture()
created = self.db_api.metadef_namespace_create(self.context, fixture)
self.assertIsNotNone(created)
self._assert_saved_fields(fixture, created)
self.assertRaises(exception.Duplicate,
self.db_api.metadef_namespace_create,
self.context, fixture)
def test_namespace_get(self):
fixture = build_namespace_fixture()
created = self.db_api.metadef_namespace_create(self.context, fixture)
self.assertIsNotNone(created)
self._assert_saved_fields(fixture, created)
found = self.db_api.metadef_namespace_get(
self.context, created['namespace'])
self.assertIsNotNone(found, "Namespace not found.")
def test_namespace_get_all_with_resource_types_filter(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture = build_association_fixture()
created = self.db_api.metadef_resource_type_association_create(
self.context, ns_created['namespace'], fixture)
self.assertIsNotNone(created, "Could not create an association.")
rt_filters = {'resource_types': fixture['name']}
found = self.db_api.metadef_namespace_get_all(
self.context, filters=rt_filters, sort_key='created_at')
self.assertEqual(1, len(found))
for item in found:
self._assert_saved_fields(ns_fixture, item)
def test_namespace_update(self):
delta = {'owner': u'New Owner'}
fixture = build_namespace_fixture()
created = self.db_api.metadef_namespace_create(self.context, fixture)
self.assertIsNotNone(created['namespace'])
self.assertEqual(fixture['namespace'], created['namespace'])
delta_dict = copy.deepcopy(created)
delta_dict.update(delta.copy())
updated = self.db_api.metadef_namespace_update(
self.context, created['id'], delta_dict)
self.assertEqual(delta['owner'], updated['owner'])
def test_namespace_delete(self):
fixture = build_namespace_fixture()
created = self.db_api.metadef_namespace_create(self.context, fixture)
self.assertIsNotNone(created, "Could not create a Namespace.")
self.db_api.metadef_namespace_delete(
self.context, created['namespace'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_namespace_get,
self.context, created['namespace'])
def test_namespace_delete_with_content(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self._assert_saved_fields(fixture_ns, created_ns)
# Create object content for the namespace
fixture_obj = build_object_fixture()
created_obj = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], fixture_obj)
self.assertIsNotNone(created_obj)
# Create property content for the namespace
fixture_prop = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], fixture_prop)
self.assertIsNotNone(created_prop)
# Create associations
fixture_assn = build_association_fixture()
created_assn = self.db_api.metadef_resource_type_association_create(
self.context, created_ns['namespace'], fixture_assn)
self.assertIsNotNone(created_assn)
deleted_ns = self.db_api.metadef_namespace_delete(
self.context, created_ns['namespace'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_namespace_get,
self.context, deleted_ns['namespace'])
class MetadefPropertyTests(object):
def test_property_create(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_prop = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], fixture_prop)
self._assert_saved_fields(fixture_prop, created_prop)
def test_property_create_duplicate(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_prop = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], fixture_prop)
self._assert_saved_fields(fixture_prop, created_prop)
self.assertRaises(exception.Duplicate,
self.db_api.metadef_property_create,
self.context, created_ns['namespace'], fixture_prop)
def test_property_get(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture_ns, created_ns)
fixture_prop = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], fixture_prop)
found_prop = self.db_api.metadef_property_get(
self.context, created_ns['namespace'], created_prop['name'])
self._assert_saved_fields(fixture_prop, found_prop)
def test_property_get_all(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture1 = build_property_fixture(namespace_id=ns_created['id'])
created_p1 = self.db_api.metadef_property_create(
self.context, ns_created['namespace'], fixture1)
self.assertIsNotNone(created_p1, "Could not create a property.")
fixture2 = build_property_fixture(namespace_id=ns_created['id'],
name='test-prop-2')
created_p2 = self.db_api.metadef_property_create(
self.context, ns_created['namespace'], fixture2)
self.assertIsNotNone(created_p2, "Could not create a property.")
found = self.db_api.metadef_property_get_all(
self.context, ns_created['namespace'])
self.assertEqual(2, len(found))
def test_property_update(self):
delta = {'name': u'New-name', 'json_schema': u'new-schema'}
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
prop_fixture = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], prop_fixture)
self.assertIsNotNone(created_prop, "Could not create a property.")
delta_dict = copy.deepcopy(created_prop)
delta_dict.update(delta.copy())
updated = self.db_api.metadef_property_update(
self.context, created_ns['namespace'],
created_prop['id'], delta_dict)
self.assertEqual(delta['name'], updated['name'])
self.assertEqual(delta['json_schema'], updated['json_schema'])
def test_property_delete(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
prop_fixture = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], prop_fixture)
self.assertIsNotNone(created_prop, "Could not create a property.")
self.db_api.metadef_property_delete(
self.context, created_ns['namespace'], created_prop['name'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_property_get,
self.context, created_ns['namespace'],
created_prop['name'])
def test_property_delete_namespace_content(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
prop_fixture = build_property_fixture(namespace_id=created_ns['id'])
created_prop = self.db_api.metadef_property_create(
self.context, created_ns['namespace'], prop_fixture)
self.assertIsNotNone(created_prop, "Could not create a property.")
self.db_api.metadef_property_delete_namespace_content(
self.context, created_ns['namespace'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_property_get,
self.context, created_ns['namespace'],
created_prop['name'])
class MetadefObjectTests(object):
def test_object_create(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_object = build_object_fixture(namespace_id=created_ns['id'])
created_object = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], fixture_object)
self._assert_saved_fields(fixture_object, created_object)
def test_object_create_duplicate(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_object = build_object_fixture(namespace_id=created_ns['id'])
created_object = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], fixture_object)
self._assert_saved_fields(fixture_object, created_object)
self.assertRaises(exception.Duplicate,
self.db_api.metadef_object_create,
self.context, created_ns['namespace'],
fixture_object)
def test_object_get(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture_ns)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture_ns, created_ns)
fixture_object = build_object_fixture(namespace_id=created_ns['id'])
created_object = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], fixture_object)
found_object = self.db_api.metadef_object_get(
self.context, created_ns['namespace'], created_object['name'])
self._assert_saved_fields(fixture_object, found_object)
def test_object_get_all(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(self.context,
ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture1 = build_object_fixture(namespace_id=ns_created['id'])
created_o1 = self.db_api.metadef_object_create(
self.context, ns_created['namespace'], fixture1)
self.assertIsNotNone(created_o1, "Could not create an object.")
fixture2 = build_object_fixture(namespace_id=ns_created['id'],
name='test-object-2')
created_o2 = self.db_api.metadef_object_create(
self.context, ns_created['namespace'], fixture2)
self.assertIsNotNone(created_o2, "Could not create an object.")
found = self.db_api.metadef_object_get_all(
self.context, ns_created['namespace'])
        self.assertEqual(2, len(found))
def test_object_update(self):
delta = {'name': u'New-name', 'json_schema': u'new-schema',
'required': u'new-required'}
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
object_fixture = build_object_fixture(namespace_id=created_ns['id'])
created_object = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], object_fixture)
self.assertIsNotNone(created_object, "Could not create an object.")
delta_dict = {}
delta_dict.update(delta.copy())
updated = self.db_api.metadef_object_update(
self.context, created_ns['namespace'],
created_object['id'], delta_dict)
self.assertEqual(delta['name'], updated['name'])
self.assertEqual(delta['json_schema'], updated['json_schema'])
def test_object_delete(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
object_fixture = build_object_fixture(namespace_id=created_ns['id'])
created_object = self.db_api.metadef_object_create(
self.context, created_ns['namespace'], object_fixture)
self.assertIsNotNone(created_object, "Could not create an object.")
self.db_api.metadef_object_delete(
self.context, created_ns['namespace'], created_object['name'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_object_get,
self.context, created_ns['namespace'],
created_object['name'])
class MetadefResourceTypeTests(object):
def test_resource_type_get_all(self):
resource_types_orig = self.db_api.metadef_resource_type_get_all(
self.context)
fixture = build_resource_type_fixture()
self.db_api.metadef_resource_type_create(self.context, fixture)
resource_types = self.db_api.metadef_resource_type_get_all(
self.context)
test_len = len(resource_types_orig) + 1
self.assertEqual(test_len, len(resource_types))
class MetadefResourceTypeAssociationTests(object):
def test_association_create(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created)
self._assert_saved_fields(ns_fixture, ns_created)
assn_fixture = build_association_fixture()
assn_created = self.db_api.metadef_resource_type_association_create(
self.context, ns_created['namespace'], assn_fixture)
self.assertIsNotNone(assn_created)
self._assert_saved_fields(assn_fixture, assn_created)
def test_association_create_duplicate(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created)
self._assert_saved_fields(ns_fixture, ns_created)
assn_fixture = build_association_fixture()
assn_created = self.db_api.metadef_resource_type_association_create(
self.context, ns_created['namespace'], assn_fixture)
self.assertIsNotNone(assn_created)
self._assert_saved_fields(assn_fixture, assn_created)
self.assertRaises(exception.Duplicate,
self.db_api.
metadef_resource_type_association_create,
self.context, ns_created['namespace'], assn_fixture)
def test_association_delete(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture = build_association_fixture()
created = self.db_api.metadef_resource_type_association_create(
self.context, ns_created['namespace'], fixture)
self.assertIsNotNone(created, "Could not create an association.")
created_resource = self.db_api.metadef_resource_type_get(
self.context, fixture['name'])
self.assertIsNotNone(created_resource, "resource_type not created")
self.db_api.metadef_resource_type_association_delete(
self.context, ns_created['namespace'], created_resource['name'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_resource_type_association_get,
self.context, ns_created['namespace'],
created_resource['name'])
def test_association_get_all_by_namespace(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(
self.context, ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture = build_association_fixture()
created = self.db_api.metadef_resource_type_association_create(
self.context, ns_created['namespace'], fixture)
self.assertIsNotNone(created, "Could not create an association.")
found = (
self.db_api.metadef_resource_type_association_get_all_by_namespace(
self.context, ns_created['namespace']))
self.assertEqual(1, len(found))
for item in found:
self._assert_saved_fields(fixture, item)
class MetadefTagTests(object):
def test_tag_create(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_tag = build_tag_fixture(namespace_id=created_ns['id'])
created_tag = self.db_api.metadef_tag_create(
self.context, created_ns['namespace'], fixture_tag)
self._assert_saved_fields(fixture_tag, created_tag)
def test_tag_create_duplicate(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
fixture_tag = build_tag_fixture(namespace_id=created_ns['id'])
created_tag = self.db_api.metadef_tag_create(
self.context, created_ns['namespace'], fixture_tag)
self._assert_saved_fields(fixture_tag, created_tag)
self.assertRaises(exception.Duplicate,
self.db_api.metadef_tag_create,
self.context, created_ns['namespace'],
fixture_tag)
def test_tag_create_tags(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3'])
created_tags = self.db_api.metadef_tag_create_tags(
self.context, created_ns['namespace'], tags)
actual = set([tag['name'] for tag in created_tags])
expected = set(['Tag1', 'Tag2', 'Tag3'])
self.assertEqual(expected, actual)
def test_tag_create_duplicate_tags_1(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3', 'Tag2'])
self.assertRaises(exception.Duplicate,
self.db_api.metadef_tag_create_tags,
self.context, created_ns['namespace'],
tags)
def test_tag_create_duplicate_tags_2(self):
fixture = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture, created_ns)
tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3'])
self.db_api.metadef_tag_create_tags(self.context,
created_ns['namespace'], tags)
dup_tag = build_tag_fixture(namespace_id=created_ns['id'],
name='Tag3')
self.assertRaises(exception.Duplicate,
self.db_api.metadef_tag_create,
self.context, created_ns['namespace'], dup_tag)
def test_tag_get(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture_ns)
self.assertIsNotNone(created_ns)
self._assert_saved_fields(fixture_ns, created_ns)
fixture_tag = build_tag_fixture(namespace_id=created_ns['id'])
created_tag = self.db_api.metadef_tag_create(
self.context, created_ns['namespace'], fixture_tag)
found_tag = self.db_api.metadef_tag_get(
self.context, created_ns['namespace'], created_tag['name'])
self._assert_saved_fields(fixture_tag, found_tag)
def test_tag_get_all(self):
ns_fixture = build_namespace_fixture()
ns_created = self.db_api.metadef_namespace_create(self.context,
ns_fixture)
self.assertIsNotNone(ns_created, "Could not create a namespace.")
self._assert_saved_fields(ns_fixture, ns_created)
fixture1 = build_tag_fixture(namespace_id=ns_created['id'])
created_tag1 = self.db_api.metadef_tag_create(
self.context, ns_created['namespace'], fixture1)
self.assertIsNotNone(created_tag1, "Could not create tag 1.")
fixture2 = build_tag_fixture(namespace_id=ns_created['id'],
name='test-tag-2')
created_tag2 = self.db_api.metadef_tag_create(
self.context, ns_created['namespace'], fixture2)
self.assertIsNotNone(created_tag2, "Could not create tag 2.")
found = self.db_api.metadef_tag_get_all(
self.context, ns_created['namespace'], sort_key='created_at')
self.assertEqual(2, len(found))
def test_tag_update(self):
delta = {'name': u'New-name'}
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(self.context,
fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
tag_fixture = build_tag_fixture(namespace_id=created_ns['id'])
created_tag = self.db_api.metadef_tag_create(
self.context, created_ns['namespace'], tag_fixture)
self.assertIsNotNone(created_tag, "Could not create a tag.")
delta_dict = {}
delta_dict.update(delta.copy())
updated = self.db_api.metadef_tag_update(
self.context, created_ns['namespace'],
created_tag['id'], delta_dict)
self.assertEqual(delta['name'], updated['name'])
def test_tag_delete(self):
fixture_ns = build_namespace_fixture()
created_ns = self.db_api.metadef_namespace_create(
self.context, fixture_ns)
self.assertIsNotNone(created_ns['namespace'])
tag_fixture = build_tag_fixture(namespace_id=created_ns['id'])
created_tag = self.db_api.metadef_tag_create(
self.context, created_ns['namespace'], tag_fixture)
self.assertIsNotNone(created_tag, "Could not create a tag.")
self.db_api.metadef_tag_delete(
self.context, created_ns['namespace'], created_tag['name'])
self.assertRaises(exception.NotFound,
self.db_api.metadef_tag_get,
self.context, created_ns['namespace'],
created_tag['name'])
class MetadefDriverTests(MetadefNamespaceTests,
MetadefResourceTypeTests,
MetadefResourceTypeAssociationTests,
MetadefPropertyTests,
MetadefObjectTests,
MetadefTagTests):
# collection class
pass
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test the fileview interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from future.builtins import range
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import text
from grr_response_server.databases import db
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui import gui_test_lib
from grr_response_server.gui.api_plugins import vfs as api_vfs
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import fixture_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class TestFileView(gui_test_lib.GRRSeleniumTest):
"""Test the fileview interface."""
def setUp(self):
super(TestFileView, self).setUp()
# Prepare our fixture.
self.client_id, self.unapproved_client_id = self.SetupClients(2)
with test_lib.FakeTime(test_lib.FIXED_TIME):
fixture_test_lib.ClientFixture(self.client_id)
self.content_1, self.content_2 = gui_test_lib.CreateFileVersions(
self.client_id)
self.content_1_hash = rdf_objects.SHA256HashID.FromData(
self.content_1).AsBytes()
self.content_2_hash = rdf_objects.SHA256HashID.FromData(
self.content_2).AsBytes()
self.RequestAndGrantClientApproval(self.client_id)
def testOpeningVfsOfUnapprovedClientRedirectsToHostInfoPage(self):
self.Open("/#/clients/%s/vfs/" % self.unapproved_client_id)
# As we don't have an approval for unapproved_client_id, we should be
# redirected to the host info page.
self.WaitUntilEqual("/#/clients/%s/host-info" % self.unapproved_client_id,
self.GetCurrentUrlPath)
self.WaitUntil(self.IsTextPresent,
"You do not have an approval for this client.")
def testPageTitleChangesAccordingToSelectedFile(self):
self.Open("/#/clients/%s/vfs/" % self.client_id)
self.WaitUntilEqual("GRR | %s | /" % self.client_id, self.GetPageTitle)
# Select a folder in the tree.
self.Click("css=#_fs i.jstree-icon")
self.Click("css=#_fs-os i.jstree-icon")
self.Click("css=#_fs-os-c i.jstree-icon")
self.Click("link=Downloads")
self.WaitUntilEqual("GRR | %s | /fs/os/c/Downloads/" % self.client_id,
self.GetPageTitle)
# Select a file from the table.
self.Click("css=tr:contains(\"a.txt\")")
self.WaitUntilEqual("GRR | %s | /fs/os/c/Downloads/a.txt" % self.client_id,
self.GetPageTitle)
def testSwitchingBetweenFilesRefreshesFileHashes(self):
vfs_test_lib.CreateFile(
db.ClientPath.OS(self.client_id, ["c", "Downloads", "a.txt"]),
content=self.content_1)
vfs_test_lib.CreateFile(
db.ClientPath.OS(self.client_id, ["c", "Downloads", "b.txt"]),
content=self.content_2)
# Open a URL pointing to file "a".
self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt?tab=download" %
self.client_id)
self.WaitUntil(
self.IsElementPresent, "css=tr:contains('Sha256') td:contains('%s')" %
text.Hexify(self.content_1_hash))
# Click on a file table row with file "b". Information in the download
# tab should get rerendered and we should see Sha256 value corresponding
# to file "b".
self.Click("css=tr:contains(\"b.txt\")")
self.WaitUntil(
self.IsElementPresent, "css=tr:contains('Sha256') td:contains('%s')" %
text.Hexify(self.content_2_hash))
def testSwitchingBetweenFileVersionsRefreshesDownloadTab(self):
with test_lib.FakeTime(gui_test_lib.TIME_0):
vfs_test_lib.CreateFile(
db.ClientPath.OS(self.client_id, ["c", "Downloads", "a.txt"]),
content=self.content_1)
with test_lib.FakeTime(gui_test_lib.TIME_1):
vfs_test_lib.CreateFile(
db.ClientPath.OS(self.client_id, ["c", "Downloads", "a.txt"]),
content=self.content_2)
# Open a URL corresponding to a HEAD version of the file.
self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt?tab=download" %
self.client_id)
# Make sure displayed hash value is correct.
self.WaitUntil(
self.IsElementPresent, "css=tr:contains('Sha256') td:contains('%s')" %
text.Hexify(self.content_2_hash))
# Select the previous file version.
self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
gui_test_lib.DateString(gui_test_lib.TIME_0))
# Make sure displayed hash value gets updated.
self.WaitUntil(
self.IsElementPresent, "css=tr:contains('Sha256') td:contains('%s')" %
text.Hexify(self.content_1_hash))
def testVersionDropDownChangesFileContentAndDownloads(self):
"""Test the fileview interface."""
self.Open("/#/clients/%s" % self.client_id)
# Go to Browse VFS.
self.Click("css=a[grrtarget='client.vfs']")
self.Click("css=#_fs i.jstree-icon")
self.Click("css=#_fs-os i.jstree-icon")
self.Click("css=#_fs-os-c i.jstree-icon")
# Test file versioning.
self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
self.Click("link=Downloads")
# Verify that we have the latest version in the table by default.
self.assertIn(
gui_test_lib.DateString(gui_test_lib.TIME_2),
self.GetText("css=tr:contains(\"a.txt\")"))
# Click on the row.
self.Click("css=tr:contains(\"a.txt\")")
self.WaitUntilContains("a.txt", self.GetText, "css=div#main_bottomPane h1")
self.WaitUntilContains("HEAD", self.GetText,
"css=.version-dropdown > option[selected]")
self.WaitUntilContains(
gui_test_lib.DateString(gui_test_lib.TIME_2), self.GetText,
"css=.version-dropdown > option:nth(1)")
# Check the data in this file.
self.Click("css=li[heading=TextView]")
self.WaitUntilContains("Goodbye World", self.GetText,
"css=div.monospace pre")
downloaded_files = []
def FakeDownloadHandle(unused_self, args, token=None):
_ = token # Avoid unused variable linter warnings.
downloaded_files.append((args.file_path, args.timestamp))
return api_call_handler_base.ApiBinaryStream(
filename=os.path.basename(args.file_path),
content_generator=range(42))
with utils.Stubber(api_vfs.ApiGetFileBlobHandler, "Handle",
FakeDownloadHandle):
# Try to download the file.
self.Click("css=li[heading=Download]")
self.WaitUntilContains(
gui_test_lib.DateTimeString(gui_test_lib.TIME_2), self.GetText,
"css=grr-file-download-view")
self.Click("css=button:contains(\"Download\")")
# Select the previous version.
self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
gui_test_lib.DateString(gui_test_lib.TIME_1))
# Now we should have a different time.
self.WaitUntilContains(
gui_test_lib.DateTimeString(gui_test_lib.TIME_1), self.GetText,
"css=grr-file-download-view")
self.Click("css=button:contains(\"Download\")")
self.WaitUntil(self.IsElementPresent, "css=li[heading=TextView]")
    # The FakeDownloadHandle method is actually called four times, since
    # a file download first sends a HEAD request to check user access.
self.WaitUntil(lambda: len(downloaded_files) == 4)
# Both files should be the same...
self.assertEqual(downloaded_files[0][0], u"fs/os/c/Downloads/a.txt")
self.assertEqual(downloaded_files[2][0], u"fs/os/c/Downloads/a.txt")
# But from different times. The downloaded file timestamp is only accurate
# to the nearest second.
self.assertEqual(downloaded_files[0][1], None)
self.assertAlmostEqual(
downloaded_files[2][1],
gui_test_lib.TIME_1,
delta=rdfvalue.Duration.From(1, rdfvalue.SECONDS))
self.Click("css=li[heading=TextView]")
# Make sure the file content has changed. This version has "Hello World" in
# it.
self.WaitUntilContains("Hello World", self.GetText, "css=div.monospace pre")
def testHexViewer(self):
content = b"ls\000hello world\'\000-l"
vfs_test_lib.CreateFile(
db.ClientPath.OS(self.client_id, ["proc", "10", "cmdline"]),
content=content)
self.Open("/#clients/%s/vfs/fs/os/proc/10/" % self.client_id)
self.Click("css=td:contains(\"cmdline\")")
self.Click("css=li[heading=HexView]:not(.disabled)")
self.WaitUntilEqual(
text.Hexify(content), self.GetText, "css=table.hex-area tr:first td")
# The string inside the file is null-character-delimited. How a null
# character is displayed depends on the Angular version: it was ignored
# in 1.6.5 but is displayed as a square in 1.6.6. We therefore make the
# checks in a null-character-presentation-independent way.
self.WaitUntil(self.IsElementPresent,
"css=table.content-area td.data:contains('ls')")
self.WaitUntil(self.IsElementPresent,
"css=table.content-area td.data:contains('hello world')")
self.WaitUntil(self.IsElementPresent,
"css=table.content-area td.data:contains('-l')")
def testSearchInputFiltersFileList(self):
# Open VFS view for client 1.
self.Open("/#c=%s&main=VirtualFileSystemView&t=_fs-os-c" % self.client_id)
# Navigate to the bin directory for this client.
self.Click("link=bin %s" % self.client_id)
# We need to await the initial file listing for the current directory,
# since the infinite table will only issue one request at a time.
# We could use WaitUntilNot to check that "Loading..." is not visible
# anymore, but this could cause problems if "Loading..." is not shown yet.
self.WaitUntilEqual("bash", self.GetText,
"css=table.file-list tr:nth(1) span")
self.WaitUntilEqual("bsd-csh", self.GetText,
"css=table.file-list tr:nth(2) span")
# Filter the table for bash (should match both bash and rbash)
self.Type("css=input.file-search", "bash", end_with_enter=True)
self.WaitUntilEqual("bash", self.GetText,
"css=table.file-list tr:nth(1) span")
self.WaitUntilEqual("rbash", self.GetText,
"css=table.file-list tr:nth(2) span")
self.WaitUntilEqual(2, self.GetCssCount,
"css=#content_rightPane table.file-list tbody > tr")
# If we anchor cat at the start, we should only receive one result item.
self.Type("css=input.file-search", "^cat", end_with_enter=True)
self.WaitUntilEqual("cat", self.GetText,
"css=table.file-list tr:nth(1) span")
self.assertEqual(
1,
self.GetCssCount("css=#content_rightPane table.file-list tbody > tr"))
self.Click("css=tr:nth(1)")
self.WaitUntilContains("cat", self.GetText, "css=#main_bottomPane h1")
self.WaitUntil(self.IsTextPresent, "1026267") # st_inode.
# Let's download it.
self.Click("css=li[heading=Download]")
# TODO(user): refactor the test so that the call below doesn't trigger
# an HTTP 500.
with self.DisableHttpErrorChecks():
self.Click("css=button:contains(\"Collect from the client\")")
def testExportToolHintIsDisplayed(self):
self.Open("/#/clients/%s/vfs/" % self.client_id)
self.Click("css=li#_fs i.jstree-icon")
self.Click("css=li#_fs-os i.jstree-icon")
self.Click("css=li#_fs-os-c i.jstree-icon")
self.Click("css=li#_fs-os-c-Downloads i.jstree-themeicon")
# Click on the row and on the Download tab.
self.Click("css=tr:contains(\"a.txt\")")
self.Click("css=li[heading=Download]:not(:disabled)")
# Check that export tool download hint is displayed.
self.WaitUntil(
self.IsTextPresent, "/usr/bin/grr_api_shell "
"'http://localhost:8000/' "
"--exec_code 'grrapi.Client(\"%s\")."
"File(r\"\"\"fs/os/c/Downloads/a.txt\"\"\").GetBlob()."
"WriteToFile(\"./a.txt\")'" % self.client_id)
def testTimestampsAreCorrectlyDisplayedInFileDetails(self):
self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt" % self.client_id)
self.WaitUntil(
self.IsElementPresent,
"css=tr:contains('SIZE') grr-timestamp:contains('%s')" %
gui_test_lib.TIME_2)
self.Click("css=td:contains('SIZE') i.fa-plus")
self.WaitUntil(
self.IsElementPresent,
"css=tr:contains('SIZE') grr-timestamp:contains('%s')" %
gui_test_lib.TIME_2)
self.WaitUntil(
self.IsElementPresent,
"css=tr:contains('SIZE') grr-timestamp:contains('%s')" %
gui_test_lib.TIME_1)
if __name__ == "__main__":
app.run(test_lib.main)
|
|
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.provenance import *
from dispel4py.new.processor import *
import time
import random
import numpy
import traceback
from dispel4py.base import create_iterative_chain, GenericPE, ConsumerPE, IterativePE, SimpleFunctionPE
from dispel4py.new.simple_process import process_and_return
import socket
import json
import ujson
# Python 2 standard-library modules used by the ProvenanceRecorder
# subclasses further below (assumed not re-exported by the star imports).
import httplib
import urllib
from urlparse import urlparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
import networkx as nx
import os
from copy import deepcopy
from dateutil.parser import parse as parse_date
sns.set(style="white")
class Start(GenericPE):
def __init__(self):
GenericPE.__init__(self)
self._add_input('iterations')
self._add_output('output')
#self.prov_cluster="myne"
def _process(self,inputs):
if 'iterations' in inputs:
inp=inputs['iterations']
self.write('output',inp,metadata={'iterations':inp})
#Uncomment this line to associate this PE to the mycluster provenance-cluster
#self.prov_cluster ='mycluster'
class Source(GenericPE):
def __init__(self,sr,index):
GenericPE.__init__(self)
self._add_input('iterations')
self._add_output('output')
self.sr=sr
self.var_index=index
#self.prov_cluster="myne"
self.parameters={'sampling_rate':sr}
#Uncomment this line to associate this PE to the mycluster provenance-cluster
#self.prov_cluster ='mycluster'
def _process(self,inputs):
if 'iterations' in inputs:
iteration=inputs['iterations'][0]
#Streams out values at 1/self.sr sampling rate, until iteration>0
while (iteration>0):
val=random.uniform(0,100)
time.sleep(1/self.sr)
iteration-=1
self.write('output',(self.name,val),metadata={'val':val,'var_index':self.var_index,'iteration':iteration})
class MaxClique(GenericPE):
def __init__(self,threshold):
GenericPE.__init__(self)
self._add_input('matrix',grouping=[2])
self._add_output('graph')
self._add_output('clique')
self.threshold=threshold
#self.prov_cluster="myne"
self.parameters={'threshold':threshold}
#Uncomment this line to associate this PE to the mycluster provenance-cluster
#self.prov_cluster ='mycluster'
def _process(self,inputs):
if 'matrix' in inputs:
matrix=inputs['matrix'][0]
batch=inputs['matrix'][1]
low_values_indices = matrix < self.threshold # Where values are low
matrix[low_values_indices] = 0
#self.log(matrix)
self.log(batch)
self.write('graph',matrix,metadata={'matrix':str(matrix),'batch':batch})
self.write('clique',matrix,metadata={'clique':str(matrix),'batch':batch},ignore_inputs=False)
G = nx.from_numpy_matrix(matrix)
plt.figure(batch)
nx.draw(G)
fig1 = plt.gcf()
plt.close(fig1)
#H = nx.from_numpy_matrix(matrix)
#plt.figure(2)
#nx.draw(H)
#plt.close()
class CompMatrix(GenericPE):
def __init__(self,variables_number):
GenericPE.__init__(self)
self._add_output('output')
self.size=variables_number
self.parameters={'variables_number':variables_number}
self.data={}
#Uncomment this line to associate this PE to the mycluster provenance-cluster
#self.prov_cluster ='mycluster'
def _process(self,data):
for x in data:
if data[x][1] not in self.data:
#prepares the data to visualise the xcor matrix of a specific batch number.
self.data[data[x][1]]={}
self.data[data[x][1]]['matrix']=numpy.identity(self.size)
self.data[data[x][1]]['ro_count']=0
self.data[data[x][1]]['matrix'][(data[x][2][1],data[x][2][0])]=data[x][0]
self.update_prov_state('batch_'+str(data[x][1]),self.data[data[x][1]]['matrix'],metadata={'matrix':str(self.data[data[x][1]]['matrix'])},dep=['batch_'+str(data[x][1])],ignore_inputs=False)
self.data[data[x][1]]['ro_count']+=1
if self.data[data[x][1]]['ro_count']==(self.size*(self.size-1))/2:
matrix=self.data[data[x][1]]['matrix']
d = pd.DataFrame(data=matrix,
columns=range(0,self.size),index=range(0,self.size))
mask = numpy.zeros_like(d, dtype=numpy.bool)
mask[numpy.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(d, mask=mask, cmap=cmap, vmax=1,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
#sns.plt.show()
#self.log(matrix)
self.write('output',(matrix,data[x][1],self.name),metadata={'matrix':str(d),'batch':str(data[x][1])},dep=['batch_'+str(data[x][1])])
class CorrCoef(GenericPE):
def __init__(self,batch_size,index):
GenericPE.__init__(self)
self._add_input('input1',grouping=[0])
self._add_input('input2',grouping=[0])
self._add_output('output')
self.index1=0
self.index2=0
self.batch1=[]
self.batch2=[]
self.size=batch_size
self.parameters={'batch_size':batch_size}
self.index=index
self.batchnum=1
def _process(self, inputs):
index=None
val=None
try:
val = inputs['input1'][1]
self.batch1.append(val)
#self.log("Variables= "+str(inputs['input1'][0]))
#if len(self.batch1)>=self.size:
contributesto=(len(self.batch1)-1)/self.size+self.batchnum
#Uncomment to record entities in the Provenance State
self.update_prov_state('batch1_'+str(contributesto),self.batch1,metadata={'name':'batch1_'+str(contributesto),'batch1':str(self.batch1)}, ignore_inputs=False,dep=['batch1_'+str(contributesto)])
except KeyError:
#traceback.print_exc(file=sys.stderr)
val = inputs['input2'][1]
self.batch2.append(val)
#self.log("Variables= "+str(inputs['input2'][0]))
#if len(self.batch2)>=self.size:
contributesto=(len(self.batch2)-1)/self.size+self.batchnum
#Uncomment to record Element in the Provenance State
self.update_prov_state('batch2_'+str(contributesto),self.batch2,metadata={'name':'batch2_'+str(contributesto),'batch2':str(self.batch2)}, ignore_inputs=False, dep=['batch2_'+str(contributesto)])
#self.update_prov_state(None,,ignore_dep=False)
if len(self.batch2)>=self.size and len(self.batch1)>=self.size:
array1=numpy.array(self.batch1[0:self.size])
array2=numpy.array(self.batch2[0:self.size])
ro=numpy.corrcoef([array1,array2])
# stream out the correlation coefficient, the sequence number of the batch and the indexes of the sources.
#Uncomment to reference entities in the Provenance State
self.write('output',(ro[0][1],self.batchnum,self.index,self.name),metadata={'batchnum':self.batchnum,'ro':ro[0][1],'array1':str(array1),'array2':str(array2),'source_index':self.index},dep=['batch1_'+str(self.batchnum),'batch2_'+str(self.batchnum)])
#Uncomment to reference entities in the Data Flow
#self.write('output',(ro[0][1],self.batchnum,self.index),metadata={'batchnum':self.batchnum,'ro':str(ro[0][1]),'array1':str(array1),'array2':str(array2),'source_index':self.index})
self.batchnum+=1
#self.log(self.batchnum)
self.batch1=self.batch1[(self.size):len(self.batch1)]
self.batch2=self.batch2[(self.size):len(self.batch2)]
# number of projections = iterations/batch_size at speed defined by sampling rate
variables_number=2
sampling_rate=10000
batch_size=20
iterations=20
input_data = {"Start": [{"iterations": [iterations]}]}
# Instantiates the Workflow Components
# and generates the graph based on parameters
def createWf():
graph = WorkflowGraph()
mat=CompMatrix(variables_number)
mat.prov_cluster='record2'
mc = MaxClique(-0.01)
mc.prov_cluster='record0'
start=Start()
start.prov_cluster='record0'
sources={}
mc.numprocesses=1
mat.numprocesses=1
for i in range(0,variables_number):
sources[i] = Source(sampling_rate,i)
sources[i].prov_cluster='record0'
#'+str(i%variables_number)
#+str(i%7)
sources[i].numprocesses=1
#sources[i].name="Source"+str(i)
for h in range(0,variables_number):
graph.connect(start,'output',sources[h],'iterations')
for j in range(h+1,variables_number):
cc=CorrCoef(batch_size,(h,j))
cc.prov_cluster='record1'
#+str(h%variables_number)
mat._add_input('input'+'_'+str(h)+'_'+str(j),grouping=[3])
graph.connect(sources[h],'output',cc,'input1')
graph.connect(sources[j],'output',cc,'input2')
graph.connect(cc,'output',mat,'input'+'_'+str(h)+'_'+str(j))
cc.numprocesses=1
graph.connect(mat,'output',mc,'matrix')
return graph
#from dispel4py.visualisation import display
#display(graph)
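#Illustrative sketch (added example, not part of the original script): the
#bare graph from createWf() can be run without provenance via the simple
#processor imported at the top of this file.
#
# graph = createWf()
# process_and_return(graph, input_data)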
print ("Preparing for: "+str(iterations/batch_size)+" projections" )
#Store via sensors
ProvenanceRecorder.REPOS_URL='http://127.0.0.1:8082/workflow/insert'
#Store via service
ProvenancePE.REPOS_URL='http://127.0.0.1:8082/workflow/insert'
#Store to local path
ProvenancePE.PROV_PATH=os.environ['PROV_PATH']
#Size of the provenance bulk before storage
ProvenancePE.BULK_SIZE=1
#ProvenancePE.REPOS_URL='http://climate4impact.eu/prov/workflow/insert'
class ProvenanceSpecs(ProvenancePE):
def __init__(self):
ProvenancePE.__init__(self)
self.streammeta=[]
self.count=1
def extractItemMetadata(self, data, port='output'):
return {'this':data}
class ProvenanceOnWriteOnly(ProvenancePE):
def __init__(self):
ProvenancePE.__init__(self)
self.streammeta=[]
self.count=1
def apply_state_reset_policy(self,event,value):
if (event=='state'):
#self.log(event)
self.provon=False
super(ProvenanceOnWriteOnly,self).apply_state_reset_policy(event,value)
#self.provon=False
class ProvenanceRecorderToService(ProvenanceRecorder):
def __init__(self, name='ProvenanceRecorderToService', toW3C=False):
ProvenanceRecorder.__init__(self)
self.name = name
self.numprocesses=2
self.convertToW3C = toW3C
def _postprocess(self):
self.connection.close()
def _preprocess(self):
self.provurl = urlparse(ProvenanceRecorder.REPOS_URL)
self.connection = httplib.HTTPConnection(
self.provurl.netloc)
def sendToService(self,prov):
params = urllib.urlencode({'prov': ujson.dumps(prov)})
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json"}
self.connection.request(
"POST",
self.provurl.path,
params,
headers)
response = self.connection.getresponse()
self.log("Postprocress: " +
str((response.status, response.reason, response)))
self.connection.close()
def process(self, inputs):
try:
for x in inputs:
prov = inputs[x]
if "_d4p" in prov:
prov = prov["_d4p"]
elif "provenance" in prov:
prov = prov["provenance"]
#self.log(prov)
self.sendToService(prov)
except:
self.log(traceback.format_exc())
class ProvenanceRecorderToFile(ProvenanceRecorder):
def __init__(self, name='ProvenanceRecorderToFile', toW3C=False):
ProvenanceRecorder.__init__(self)
self.name = name
self.numprocesses=3
self.convertToW3C = toW3C
def process(self, inputs):
try:
for x in inputs:
#self.log(x)
prov = inputs[x]
#if isinstance(prov, list) and "data" in prov[0]:
# prov = prov[0]["data"]
#el
if "_d4p" in prov:
prov = prov["_d4p"]
elif "provenance" in prov:
prov = prov["provenance"]
filep = open(
os.environ['PROV_PATH'] +
"/bulk_" +
getUniqueId(),
"wr")
ujson.dump(prov, filep)
filep.close()
except:
self.log(traceback.format_exc())
class ProvenanceSummaryToService(ProvenanceRecorderToService):
def __init__(self, name='ProvenanceSummaryToService', toW3C=False):
ProvenanceRecorderToService.__init__(self)
self.name = name
#self.numprocesses=3
self.convertToW3C = toW3C
self.doc_count = 0
self.document={}
self.streamsstart=[]
self.streamsend=[]
self.document.update({'streams':[{'content':[{},{}]}]})
self.document.update({'startTime':None})
self.document.update({'endTime':None})
self.document.update({'derivationIds':[]})
self.document.update({'parameters':[]})
self.contente=[]
self.contents=[]
self.derivationIndex={}
self.content=[]
self.locationss=[]
self.locationse=[]
self.update=False
def postprocess(self):
if self.update>0:
self.sendToService(self.document)
def process(self, inputs):
try:
out = None
for x in inputs:
prov = inputs[x]
if isinstance(prov, list) and "data" in prov[0]:
prov = prov[0]["data"]
elif "_d4p" in prov:
prov = prov["_d4p"]
#self.log(x)
self.sendToService(prov)
return None
elif "provenance" in prov:
prov = prov["provenance"]
if isinstance(prov, list):
for x in prov:
self.doc_count+=1
#self.log(x)
for key in x:
if isinstance(x[key], list):
continue
if self.doc_count==1 and (key!='startTime') and (key!='endTime'):
self.document.update({key:x[key]})
self.document.update({'_id':x['prov_cluster']+"_"+x['runId']})
self.document.update({'instanceId':x['prov_cluster']+"_"+x['runId']})
#
if (self.document['startTime'] == None) or parse_date(self.document['startTime']) > parse_date(x['startTime']):
#self.log("Adj time to: "+str(x['endTime']))
self.document.update({'startTime':x['startTime']})
self.streamsstart=x['streams']
elif (self.document['endTime'] == None) or parse_date(self.document['endTime']) < parse_date(x['endTime']):
self.document.update({'endTime':x['endTime']})
self.streamsend=x['streams']
self.document.update(x['parameters'])
for d in x['derivationIds']:
if d['prov_cluster'] not in self.derivationIndex:
derivation = {'DerivedFromDatasetID':
'Data_'+d['prov_cluster']+"_"+self.document['runId']
}
self.derivationIndex.update({d['prov_cluster']:derivation})
for d in self.streamsstart:
if 'location' in d and d['location']!='':
self.locationss.append(d['location'])
for c in d['content']:
self.contents.append(c)
for d in self.streamsend:
if 'location' in d and d['location']!='':
self.locationse.append(d['location'])
for c in d['content']:
self.contente.append(c)
if len(self.contents)>0:
self.update=True
self.document['streams'][0]['content'][0]=self.contents
self.document['streams'][0].update({'id':'Data_'+self.document['prov_cluster']+"_"+self.document['runId'],'location':self.locationss})
if len(self.contente)>0:
self.update=True
self.document['streams'][0]['content'][1]=self.contente
self.document['streams'][0].update({'id':'Data_'+self.document['prov_cluster']+"_"+self.document['runId'],'location':self.locationse})
self.document['streams'][0]['content']=self.document['streams'][0]['content'][0]+self.document['streams'][0]['content'][1]
for x in self.derivationIndex:
self.document['derivationIds'].append(self.derivationIndex[x])
if self.update:
#Self.log(self.document)
# del self.document['streamsstart']
# del self.document['streamsend']
self.sendToService(self.document)
self.update=False
self.contente=[]
self.contents=[]
#for key in self.document:
# del key
#self.document.update({'streamsstart':[]})
#self.document.update({'streamsend':[]})
# self.document.update({'startTime':None})
# self.document.update({'endTime':None})
# self.document.update({'derivationIds':[]})
except:
self.log(traceback.format_exc())
class ProvenanceTimedSensorToService(ProvenanceRecorderToService):
INTERVAL_S=4
WINDOW_S=10
def __init__(self, name='ProvenanceTimedSensorToService', toW3C=False):
ProvenanceRecorderToService.__init__(self)
self.name = name
#self.numprocesses=3
self.convertToW3C = toW3C
self.doc_count = 0
self.document={}
self.streamsstart=[]
self.streamsend=[]
self.document.update({'streams':[]})
self.document.update({'startTime':None})
self.document.update({'endTime':None})
self.document.update({'derivationIds':[]})
self.document.update({'parameters':[]})
self.contente=[]
self.contents=[]
self.derivationIndex={}
self.content=[]
self.locationss=[]
self.locationse=[]
self.update=True
self.current=time.time()
self.last=time.time()
self.update=True
def postprocess(self):
if self.update>0:
self.sendToService(self.document)
def process(self, inputs):
try:
out = None
for x in inputs:
prov = inputs[x]
if isinstance(prov, list) and "data" in prov[0]:
prov = prov[0]["data"]
elif "_d4p" in prov:
prov = prov["_d4p"]
self.sendToService(prov)
return None
elif "provenance" in prov:
prov = prov["provenance"]
self.current=time.time()
if self.update or (self.current-self.last)>ProvenanceTimedSensorToService.INTERVAL_S:
print(self.current - self.last)
self.last=time.time()
if isinstance(prov, list):
for x in prov:
self.doc_count+=1
for key in x:
if isinstance(x[key], list):
continue
else:
self.document.update({key:x[key]})
self.document.update({'_id':x['prov_cluster']+"_"+x['runId']})
self.document['streams'][0]['content']=self.document['streams'][0]['content']+x['streams']
for d in x['derivationIds']:
if d['prov_cluster'] not in self.derivationIndex:
derivation = {'DerivedFromDatasetID':
'Data_'+d['prov_cluster']+"_"+self.document['runId']
}
self.derivationIndex.update({d['prov_cluster']:derivation})
#self.document['streams'][0]['content']=self.document['streams'][0]['content'][0]+self.document['streams'][0]['content'][1]
for x in self.derivationIndex:
self.document['derivationIds'].append(self.derivationIndex[x])
index=0
for x in self.document['streams']:
x['id']='Data_'+self.document['prov_cluster']+"_"+self.document['runId']+'_'+str(index)
index+=1
self.current = time.time()
self.sendToService(self.document)
self.update=False
except:
self.log(traceback.format_exc())
class ProvenanceRecorderToFileBulk(ProvenanceRecorder):
def __init__(self, name='ProvenanceRecorderToFileBulk', toW3C=False):
ProvenanceRecorder.__init__(self)
self.name = name
self.numprocesses=3
self.convertToW3C = toW3C
self.bulk = []
def postprocess(self):
try:
if len(self.bulk)>0:
filep = open(os.environ['PROV_PATH'] + "/bulk_" + getUniqueId(), "w")
ujson.dump(self.bulk, filep)
filep.close()
self.bulk[:]=[]
#del self.bulk[:]
#self.bulk = []
except:
self.log(traceback.format_exc())
def process(self, inputs):
try:
out = None
for x in inputs:
prov = inputs[x]
if isinstance(prov, list) and "data" in prov[0]:
prov = prov[0]["data"]
elif "_d4p" in prov:
prov = prov["_d4p"]
self.bulk.append(prov)
#self.log(os.environ['PBS_NODEFILE'])
#self.log(socket.gethostname())
if len(self.bulk) == 100:
filep = open(
os.environ['PROV_PATH'] +
"/bulk_" +
getUniqueId(),
"w")
ujson.dump(self.bulk, filep)
filep.close()
self.bulk[:]=[]
# for x in self.bulk:
# del x
except:
self.log(traceback.format_exc())
def createGraphWithProv():
graph=createWf()
#Location of the remote repository for runtime updates of the lineage traces. Shared among ProvenanceRecorder subtypes
# Randomly generated unique identifier for the current run
rid=os.environ['RUN_ID']
# Finally, provenance enhanced graph is prepared:
##Initialise provenance storage in files:
#profile_prov_run(graph,None,provImpClass=(ProvenancePE,),componentsType={'CorrCoef':(ProvenancePE,)},username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='file')
# skip_rules={'CorrCoef':{'ro':{'$lt':0}}})
#Initialise provenance storage to service:
#profile_prov_run(graph,None,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='service')
#skip_rules={'CorrCoef':{'ro':{'$lt':0}}})
#clustersRecorders={'record0':ProvenanceRecorderToFileBulk,'record1':ProvenanceRecorderToFileBulk,'record2':ProvenanceRecorderToFileBulk,'record6':ProvenanceRecorderToFileBulk,'record3':ProvenanceRecorderToFileBulk,'record4':ProvenanceRecorderToFileBulk,'record5':ProvenanceRecorderToFileBulk}
#Initialise provenance storage to sensors and Files:
#profile_prov_run(graph,ProvenanceRecorderToFile,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
#clustersRecorders=clustersRecorders)
#Initialise provenance storage to sensors and service:
#profile_prov_run(graph,ProvenanceRecorderToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
#Summary view on each component
#profile_prov_run(graph,ProvenanceTimedSensorToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
#Configuring provenance feedback-loop
#profile_prov_run(graph,ProvenanceTimedSensorToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor',feedbackPEs=['Source','MaxClique'])
#Initialise provenance storage and associate a Provenance type with specific components:
#profile_prov_run(graph,provImpClass=ProvenancePE,componentsType={'Source':(ProvenanceStock,)},username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='service')
#
return graph
#.. and visualised..
import argparse
from dispel4py.new.multi_process import process
args = argparse.Namespace()
args.num = 424
args.simple = False
num=1
#print("PROV TO SENSOR")
print("PROV TO FILE")
#print("NO PROV")
graph = createGraphWithProv()
#graph = createWf()
#global gtime
#gtime = time.time()
from dispel4py.visualisation import *
display(graph)
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for dealing with encryption keys used with cloud APIs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import base64
import binascii
from hashlib import sha256
import re
import sys
import six
from gslib.exception import CommandException
from gslib.lazy_wrapper import LazyWrapper
MAX_DECRYPTION_KEYS = 100
VALID_CMEK_RE = LazyWrapper(
lambda: re.compile('projects/([^/]+)/'
'locations/([a-zA-Z0-9_-]{1,63})/'
'keyRings/([a-zA-Z0-9_-]{1,63})/'
'cryptoKeys/([a-zA-Z0-9_-]{1,63})$'))
class CryptoKeyType(object):
"""Enum of valid types of encryption keys used with cloud API requests."""
CSEK = 'CSEK'
CMEK = 'CMEK'
class CryptoKeyWrapper(object):
"""Class describing a crypto key used with cloud API requests.
This class should be instantiated via the `CryptoKeyWrapperFromKey` method.
"""
def __init__(self, crypto_key):
"""Initialize the CryptoKeyWrapper.
Args:
crypto_key: Base64-encoded string of a CSEK, or the name of a Cloud KMS
CMEK.
Raises:
CommandException: The specified crypto key was neither a CMEK key name nor
a valid base64-encoded string of a CSEK.
"""
self.crypto_key = crypto_key
# Base64-encoded CSEKs always have a length of 44 characters, whereas
# fully-qualified CMEK names are guaranteed to be longer than 45 characters.
if len(crypto_key) == 44:
self.crypto_type = CryptoKeyType.CSEK
self.crypto_alg = 'AES256' # Only currently supported algorithm for CSEK.
try:
self.crypto_key_sha256 = Base64Sha256FromBase64EncryptionKey(crypto_key)
except:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CSEK, '
'but it was not a valid 44-character base64 string. Please '
'double-check your configuration and ensure the key is correct.')
else: # CMEK
try:
ValidateCMEK(crypto_key)
except CommandException as e:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CMEK, '
'but the key failed validation:\n%s' % e.reason)
self.crypto_type = CryptoKeyType.CMEK
self.crypto_alg = None
self.crypto_key_sha256 = None
def CryptoKeyWrapperFromKey(crypto_key):
"""Returns a CryptoKeyWrapper for crypto_key, or None for no key."""
return CryptoKeyWrapper(crypto_key) if crypto_key else None
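# Illustrative sketch (added example, not part of the original module): a
# 44-character base64 string is treated as a CSEK, while a fully-qualified
# Cloud KMS name is treated as a CMEK. The key values below are hypothetical.
#
#   csek = base64.b64encode(b'0' * 32).decode('ascii')  # 44 chars
#   assert CryptoKeyWrapperFromKey(csek).crypto_type == CryptoKeyType.CSEK
#
#   cmek = ('projects/my-project/locations/global/'
#           'keyRings/my-ring/cryptoKeys/my-key')
#   assert CryptoKeyWrapperFromKey(cmek).crypto_type == CryptoKeyType.CMEK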
def FindMatchingCSEKInBotoConfig(key_sha256, boto_config):
"""Searches boto_config for a CSEK with the given base64-encoded SHA256 hash.
Args:
key_sha256: (str) Base64-encoded SHA256 hash of the AES256 encryption key.
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string if a match is found, None
otherwise.
"""
if six.PY3:
if not isinstance(key_sha256, bytes):
key_sha256 = key_sha256.encode('ascii')
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'encryption_key', None))
if (keywrapper is not None and
keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
for i in range(MAX_DECRYPTION_KEYS):
key_number = i + 1
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'decryption_key%s' % str(key_number), None))
if keywrapper is None:
# Reading 100 config values can take ~1ms in testing. To avoid adding
# this tax, stop reading keys as soon as we encounter a non-existent
# entry (in lexicographic order).
break
elif (keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
def GetEncryptionKeyWrapper(boto_config):
"""Returns a CryptoKeyWrapper for the configured encryption key.
Reads in the value of the "encryption_key" attribute in boto_config, and if
present, verifies it is a valid base64-encoded string and returns a
CryptoKeyWrapper for it.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
CryptoKeyWrapper for the specified encryption key, or None if no encryption
key was specified in boto_config.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
return CryptoKeyWrapper(encryption_key) if encryption_key else None
def Base64Sha256FromBase64EncryptionKey(csek_encryption_key):
if six.PY3:
if not isinstance(csek_encryption_key, bytes):
csek_encryption_key = csek_encryption_key.encode('ascii')
decoded_bytes = base64.decodestring(csek_encryption_key)
key_sha256 = _CalculateSha256FromString(decoded_bytes)
sha256_bytes = binascii.unhexlify(key_sha256)
sha256_base64 = base64.encodestring(sha256_bytes)
return sha256_base64.replace(b'\n', b'')
def ValidateCMEK(key):
if not key:
raise CommandException('KMS key is empty.')
if key.startswith('/'):
raise CommandException(
'KMS key should not start with leading slash (/): "%s"' % key)
if not VALID_CMEK_RE().match(key):
raise CommandException(
'Invalid KMS key name: "%s".\nKMS keys should follow the format '
'"projects/<project-id>/locations/<location>/keyRings/<keyring>/'
'cryptoKeys/<key-name>"' % key)
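# Illustrative sketch (added example): ValidateCMEK accepts only
# fully-qualified Cloud KMS key names; the names below are hypothetical.
#
#   ValidateCMEK('projects/p/locations/us/keyRings/r/cryptoKeys/k')  # passes
#   ValidateCMEK('my-key')  # raises CommandException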
def _CalculateSha256FromString(input_string):
sha256_hash = sha256()
sha256_hash.update(input_string)
return sha256_hash.hexdigest()
def _GetAndVerifyBase64EncryptionKey(boto_config):
"""Reads the encryption key from boto_config and ensures it is base64-encoded.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string, or None if no encryption key
exists in configuration.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
if encryption_key:
# Ensure the key has a valid encoding.
try:
base64.decodestring(encryption_key)
except:
raise CommandException(
'Configured encryption_key is not a valid base64 string. Please '
'double-check your configuration and ensure the key is valid and in '
'base64 format.')
return encryption_key
|
|
"""
Defines classes to represent each Babel type in Python. These classes should
be used to validate Python objects and normalize them for a given type.
The data types defined here should not be specific to an RPC or serialization
format.
This module should be dropped into a project that requires the use of Babel. In
the future, this could be imported from a pre-installed Python package, rather
than being added to a project.
EDITING THIS FILE? Please modify the version in the babelapi repo.
"""
from abc import ABCMeta, abstractmethod
import datetime
import math
import numbers
import re
import six
if six.PY3:
_binary_types = (bytes, memoryview)
else:
_binary_types = (bytes, buffer)
class ValidationError(Exception):
"""Raised when a value doesn't pass validation by its validator."""
def __init__(self, message, parent=None):
"""
Args:
message (str): Error message detailing validation failure.
parent (str): Adds the parent as the closest reference point for
the error. Use :meth:`add_parent` to add more.
"""
super(ValidationError, self).__init__(message)
self.message = message
self._parents = []
if parent:
self._parents.append(parent)
def add_parent(self, parent):
"""
Args:
parent (str): Adds the parent to the top of the tree of references
that lead to the validator that failed.
"""
self._parents.append(parent)
def __str__(self):
"""
Returns:
str: A descriptive message of the validation error that may also
include the path to the validator that failed.
"""
if self._parents:
return '{}: {}'.format('.'.join(self._parents[::-1]), self.message)
else:
return self.message
def __repr__(self):
# Not a perfect repr, but includes the error location information.
return 'ValidationError(%r)' % str(self)
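# Illustrative sketch (added example, not in the original): parents are
# appended innermost-first and joined outermost-first by __str__.
#
#   err = ValidationError('too long', parent='name')
#   err.add_parent('user')
#   str(err)  # -> 'user.name: too long'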
def generic_type_name(v):
"""Return a descriptive type name that isn't Python specific. For example,
an int value will return 'integer' rather than 'int'."""
if isinstance(v, numbers.Integral):
# Must come before real numbers check since integrals are reals too
return 'integer'
elif isinstance(v, numbers.Real):
return 'float'
elif isinstance(v, (tuple, list)):
return 'list'
elif isinstance(v, six.string_types):
return 'string'
elif v is None:
return 'null'
else:
return type(v).__name__
class Validator(object):
"""All primitive and composite data types should be a subclass of this."""
__metaclass__ = ABCMeta
@abstractmethod
def validate(self, val):
"""Validates that val is of this data type.
Returns: A normalized value if validation succeeds.
Raises: ValidationError
"""
pass
def has_default(self):
return False
def get_default(self):
raise AssertionError('No default available.')
class Primitive(Validator):
"""A basic type that is defined by Babel."""
pass
class Boolean(Primitive):
def validate(self, val):
if not isinstance(val, bool):
raise ValidationError('%r is not a valid boolean' % val)
return val
class Integer(Primitive):
"""
Do not use this class directly. Extend it and specify a 'minimum' and
'maximum' value as class variables for a more restrictive integer range.
"""
minimum = None
maximum = None
def __init__(self, min_value=None, max_value=None):
"""
A more restrictive minimum or maximum value can be specified than the
range inherent to the defined type.
"""
if min_value is not None:
assert isinstance(min_value, numbers.Integral), \
'min_value must be an integral number'
assert min_value >= self.minimum, \
'min_value cannot be less than the minimum value for this ' \
'type (%d < %d)' % (min_value, self.minimum)
self.minimum = min_value
if max_value is not None:
assert isinstance(max_value, numbers.Integral), \
'max_value must be an integral number'
assert max_value <= self.maximum, \
'max_value cannot be greater than the maximum value for ' \
'this type (%d > %d)' % (max_value, self.maximum)
self.maximum = max_value
def validate(self, val):
if not isinstance(val, numbers.Integral):
raise ValidationError('expected integer, got %s'
% generic_type_name(val))
elif not (self.minimum <= val <= self.maximum):
raise ValidationError('%d is not within range [%d, %d]'
% (val, self.minimum, self.maximum))
return val
def __repr__(self):
return '%s()' % self.__class__.__name__
class Int32(Integer):
minimum = -2**31
maximum = 2**31 - 1
class UInt32(Integer):
minimum = 0
maximum = 2**32 - 1
class Int64(Integer):
minimum = -2**63
maximum = 2**63 - 1
class UInt64(Integer):
minimum = 0
maximum = 2**64 - 1
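# Illustrative sketch (added example): the sized subclasses enforce their
# inherent ranges, and constructor arguments may only narrow them.
#
#   Int32().validate(2**31 - 1)         # -> 2147483647
#   Int32().validate(2**31)             # raises ValidationError
#   UInt32(max_value=100).validate(50)  # -> 50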
class Real(Primitive):
"""
Do not use this class directly. Extend it and optionally set a 'minimum'
and 'maximum' value to enforce a range that's a subset of the Python float
implementation. Python floats are doubles.
"""
minimum = None
maximum = None
def __init__(self, min_value=None, max_value=None):
"""
A more restrictive minimum or maximum value can be specified than the
range inherent to the defined type.
"""
if min_value is not None:
assert isinstance(min_value, numbers.Real), \
'min_value must be a real number'
if not isinstance(min_value, float):
try:
min_value = float(min_value)
except OverflowError:
raise AssertionError('min_value is too small for a float')
if self.minimum is not None and min_value < self.minimum:
raise AssertionError('min_value cannot be less than the '
'minimum value for this type (%f < %f)' %
(min_value, self.minimum))
self.minimum = min_value
if max_value is not None:
assert isinstance(max_value, numbers.Real), \
'max_value must be a real number'
if not isinstance(max_value, float):
try:
max_value = float(max_value)
except OverflowError:
raise AssertionError('max_value is too large for a float')
if self.maximum is not None and max_value > self.maximum:
raise AssertionError('max_value cannot be greater than the '
'maximum value for this type (%f > %f)' %
(max_value, self.maximum))
self.maximum = max_value
def validate(self, val):
if not isinstance(val, numbers.Real):
raise ValidationError('expected real number, got %s' %
generic_type_name(val))
if not isinstance(val, float):
# This checks for the case where a number is passed in with a
# magnitude larger than supported by float64.
try:
val = float(val)
except OverflowError:
raise ValidationError('too large for float')
if math.isnan(val) or math.isinf(val):
raise ValidationError('%f values are not supported' % val)
if self.minimum is not None and val < self.minimum:
raise ValidationError('%f is not greater than %f' %
(val, self.minimum))
if self.maximum is not None and val > self.maximum:
raise ValidationError('%f is not less than %f' %
(val, self.maximum))
return val
def __repr__(self):
return '%s()' % self.__class__.__name__
class Float32(Real):
# Maximum and minimums from the IEEE 754-1985 standard
minimum = -3.40282 * 10**38
maximum = 3.40282 * 10**38
class Float64(Real):
pass
class String(Primitive):
"""Represents a unicode string."""
def __init__(self, min_length=None, max_length=None, pattern=None):
if min_length is not None:
assert isinstance(min_length, numbers.Integral), \
'min_length must be an integral number'
assert min_length >= 0, 'min_length must be >= 0'
if max_length is not None:
assert isinstance(max_length, numbers.Integral), \
'max_length must be an integral number'
assert max_length > 0, 'max_length must be > 0'
if min_length is not None and max_length is not None:
assert max_length >= min_length, 'max_length must be >= min_length'
if pattern is not None:
assert isinstance(pattern, six.string_types), \
'pattern must be a string'
self.min_length = min_length
self.max_length = max_length
self.pattern = pattern
self.pattern_re = None
if pattern:
try:
self.pattern_re = re.compile(pattern)
except re.error as e:
raise AssertionError('Regex {!r} failed: {}'.format(
pattern, e.args[0]))
def validate(self, val):
"""
A unicode string of the correct length and pattern will pass validation.
In PY2, we enforce that a str type must be valid utf-8, and a unicode
string will be returned.
"""
if not isinstance(val, six.string_types):
raise ValidationError("'%s' expected to be a string, got %s"
% (val, generic_type_name(val)))
if not six.PY3 and isinstance(val, str):
try:
val = val.decode('utf-8')
except UnicodeDecodeError:
raise ValidationError("'%s' was not valid utf-8")
if self.max_length is not None and len(val) > self.max_length:
raise ValidationError("'%s' must be at most %d characters, got %d"
% (val, self.max_length, len(val)))
if self.min_length is not None and len(val) < self.min_length:
raise ValidationError("'%s' must be at least %d characters, got %d"
% (val, self.min_length, len(val)))
if self.pattern and not self.pattern_re.match(val):
raise ValidationError("'%s' did not match pattern '%s'"
% (val, self.pattern))
return val
class Binary(Primitive):
def __init__(self, min_length=None, max_length=None):
if min_length is not None:
assert isinstance(min_length, numbers.Integral), \
'min_length must be an integral number'
assert min_length >= 0, 'min_length must be >= 0'
if max_length is not None:
assert isinstance(max_length, numbers.Integral), \
'max_length must be an integral number'
assert max_length > 0, 'max_length must be > 0'
if min_length is not None and max_length is not None:
assert max_length >= min_length, 'max_length must be >= min_length'
self.min_length = min_length
self.max_length = max_length
def validate(self, val):
if not isinstance(val, _binary_types):
raise ValidationError("expected binary type, got %s"
% generic_type_name(val))
elif self.max_length is not None and len(val) > self.max_length:
raise ValidationError("'%s' must have at most %d bytes, got %d"
% (val, self.max_length, len(val)))
elif self.min_length is not None and len(val) < self.min_length:
raise ValidationError("'%s' has fewer than %d bytes, got %d"
% (val, self.min_length, len(val)))
return val
class Timestamp(Primitive):
"""Note that while a format is specified, it isn't used in validation
since a native Python datetime object is preferred. The format, however,
can and should be used by serializers."""
def __init__(self, format):
"""format must be composed of format codes that the C standard (1989)
supports, most notably in its strftime() function."""
assert isinstance(format, six.text_type), 'format must be a string'
self.format = format
def validate(self, val):
if not isinstance(val, datetime.datetime):
raise ValidationError('expected timestamp, got %s'
% generic_type_name(val))
elif val.tzinfo is not None and \
val.tzinfo.utcoffset(val).total_seconds() != 0:
raise ValidationError('timestamp should have either a UTC '
'timezone or none set at all')
return val
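# Illustrative sketch (added example): naive datetimes pass validation, as
# do timezone-aware ones with a zero UTC offset.
#
#   Timestamp(u'%a, %d %b %Y %H:%M:%S').validate(datetime.datetime.utcnow())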
class List(Primitive):
"""Assumes list contents are homogeneous with respect to types."""
def __init__(self, item_validator, min_items=None, max_items=None):
"""Every list item will be validated with item_validator."""
self.item_validator = item_validator
if min_items is not None:
assert isinstance(min_items, numbers.Integral), \
'min_items must be an integral number'
assert min_items >= 0, 'min_items must be >= 0'
if max_items is not None:
assert isinstance(max_items, numbers.Integral), \
'max_items must be an integral number'
assert max_items > 0, 'max_items must be > 0'
if min_items is not None and max_items is not None:
assert max_items >= min_items, 'max_items must be >= min_items'
self.min_items = min_items
self.max_items = max_items
def validate(self, val):
if not isinstance(val, (tuple, list)):
raise ValidationError('%r is not a valid list' % val)
elif self.max_items is not None and len(val) > self.max_items:
raise ValidationError('%r has more than %s items'
% (val, self.max_items))
elif self.min_items is not None and len(val) < self.min_items:
raise ValidationError('%r has fewer than %s items'
% (val, self.min_items))
return [self.item_validator.validate(item) for item in val]
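# Illustrative sketch (added example): validators compose, e.g. a bounded
# list of non-empty strings; the names here are only for illustration.
#
#   tags = List(String(min_length=1, max_length=32), max_items=10)
#   tags.validate(['a', 'b'])  # -> ['a', 'b']
#   tags.validate(['', 'b'])   # raises ValidationError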
class Composite(Validator):
"""Validator for user-defined types."""
pass
class Struct(Composite):
def __init__(self, definition):
"""
Args:
definition (class): A generated class representing a Babel struct
from a spec. Must have a _fields_ attribute with the following
structure:
_fields_ = [(field_name, validator), ...]
where
field_name: Name of the field (str).
validator: Validator object.
"""
self.definition = definition
def validate(self, val):
"""
For a val to pass validation, val must be of the correct type and have
all required fields present.
"""
self.validate_type_only(val)
self.validate_fields_only(val)
return val
def validate_fields_only(self, val):
"""
To pass field validation, no required field should be missing.
This method assumes that the contents of each field have already been
validated on assignment, so it's merely a presence check.
FIXME(kelkabany): Since the definition object does not maintain a list
of which fields are required, all fields are scanned.
"""
for field_name, _ in self.definition._all_fields_:
if not hasattr(val, field_name):
raise ValidationError("missing required field '%s'" %
field_name)
def validate_type_only(self, val):
"""
Use this when you only want to validate that the type of an object
is correct, but not yet validate each field.
"""
# Since the definition maintains the list of fields for serialization,
# we're okay with a subclass that might have extra information. This
# makes it easier to return one subclass for two routes, one of which
# relies on the parent class.
if not isinstance(val, self.definition):
raise ValidationError('expected type %s, got %s' %
(self.definition.__name__, generic_type_name(val)))
def has_default(self):
return not self.definition._has_required_fields
def get_default(self):
assert not self.definition._has_required_fields, 'No default available.'
return self.definition()
class StructTree(Struct):
"""Validator for structs with enumerated subtypes.
NOTE: validate_fields_only() validates the fields known to this base
struct, but does not do any validation specific to the subtype.
"""
def __init__(self, definition):
super(StructTree, self).__init__(definition)
class Union(Composite):
def __init__(self, definition):
"""
Args:
definition (class): A generated class representing a Babel union
from a spec. Must have a _tagmap attribute with the following
structure:
_tagmap = {field_name: validator, ...}
where
field_name (str): Tag name.
validator (Validator): Tag value validator.
"""
self.definition = definition
def validate(self, val):
"""
For a val to pass validation, it must have a _tag set. This assumes
that the object validated that _tag is a valid tag, and that any
associated value has also been validated.
"""
self.validate_type_only(val)
if not hasattr(val, '_tag') or val._tag is None:
raise ValidationError('no tag set')
return val
def validate_type_only(self, val):
"""
Use this when you only want to validate that the type of an object
is correct, but not yet validate each field.
We check whether val is a Python parent class of the definition. This
is because Union subtyping works in the opposite direction of Python
inheritance. For example, if a union U2 extends U1 in Python, this
validator will accept U1 in places where U2 is expected.
"""
if not issubclass(self.definition, type(val)):
raise ValidationError('expected type %s or subtype, got %s' %
(self.definition.__name__, generic_type_name(val)))
class Void(Primitive):
def validate(self, val):
if val is not None:
raise ValidationError('expected NoneType, got %s' %
generic_type_name(val))
def has_default(self):
return True
def get_default(self):
return None
class Nullable(Validator):
def __init__(self, validator):
assert isinstance(validator, (Primitive, Composite)), \
'validator must be for a primitive or composite type'
assert not isinstance(validator, Nullable), \
'nullables cannot be stacked'
assert not isinstance(validator, Void), \
'void cannot be made nullable'
self.validator = validator
def validate(self, val):
if val is None:
return
else:
return self.validator.validate(val)
def validate_type_only(self, val):
"""Use this only if Nullable is wrapping a Composite."""
if val is None:
return
else:
return self.validator.validate_type_only(val)
def has_default(self):
return True
def get_default(self):
return None
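# Illustrative sketch (added example): Nullable wraps another validator and
# passes None through unchanged.
#
#   opt_name = Nullable(String(min_length=1))
#   opt_name.validate(None)   # -> None
#   opt_name.validate('bob')  # -> 'bob'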
class FunctionStyle(object):
def __init__(self, ident):
self.ident = ident
def __repr__(self):
return "FunctionStyle.{}".format(self.ident)
FunctionStyle.RPC = FunctionStyle("RPC")
FunctionStyle.UPLOAD = FunctionStyle("UPLOAD")
FunctionStyle.DOWNLOAD = FunctionStyle("DOWNLOAD")
class FunctionSignature(object):
def __init__(self, style, request_type, response_type, error_type):
self.style = style
self.request_type = request_type
self.response_type = response_type
self.error_type = error_type
def __repr__(self):
return "FunctionSignature{!r}".format((
self.style, self.request_type, self.response_type, self.error_type))
|
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dopamine.agents.rainbow.rainbow_agent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.utils import test_utils
import numpy as np
import tensorflow as tf
class ProjectDistributionTest(tf.test.TestCase):
def testInconsistentSupportsAndWeightsParameters(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2], [0.1, 0.2, 0.3, 0.2]], dtype=tf.float32)
target_support = tf.constant([4, 5, 6, 7, 8], dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'are incompatible'):
rainbow_agent.project_distribution(supports, weights, target_support)
def testInconsistentSupportsAndWeightsWithPlaceholders(self):
supports = [[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]]
supports_ph = tf.compat.v1.placeholder(tf.float32, None)
weights = [[0.1, 0.2, 0.3, 0.2], [0.1, 0.2, 0.3, 0.2]]
weights_ph = tf.compat.v1.placeholder(tf.float32, None)
target_support = [4, 5, 6, 7, 8]
target_support_ph = tf.compat.v1.placeholder(tf.float32, None)
projection = rainbow_agent.project_distribution(
supports_ph, weights_ph, target_support_ph, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'assertion failed'):
sess.run(
projection,
feed_dict={
supports_ph: supports,
weights_ph: weights,
target_support_ph: target_support
})
def testInconsistentSupportsAndTargetSupportParameters(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant([4, 5, 6], dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'are incompatible'):
rainbow_agent.project_distribution(supports, weights, target_support)
def testInconsistentSupportsAndTargetSupportWithPlaceholders(self):
supports = [[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]]
supports_ph = tf.compat.v1.placeholder(tf.float32, None)
weights = [[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]]
weights_ph = tf.compat.v1.placeholder(tf.float32, None)
target_support = [4, 5, 6]
target_support_ph = tf.compat.v1.placeholder(tf.float32, None)
projection = rainbow_agent.project_distribution(
supports_ph, weights_ph, target_support_ph, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'assertion failed'):
sess.run(
projection,
feed_dict={
supports_ph: supports,
weights_ph: weights,
target_support_ph: target_support
})
def testZeroDimensionalTargetSupport(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant(3, dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'Index out of range'):
rainbow_agent.project_distribution(supports, weights, target_support)
def testZeroDimensionalTargetSupportWithPlaceholders(self):
supports = [[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]]
supports_ph = tf.compat.v1.placeholder(tf.float32, None)
weights = [[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]]
weights_ph = tf.compat.v1.placeholder(tf.float32, None)
target_support = 3
target_support_ph = tf.compat.v1.placeholder(tf.float32, None)
projection = rainbow_agent.project_distribution(
supports_ph, weights_ph, target_support_ph, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
projection,
feed_dict={
supports_ph: supports,
weights_ph: weights,
target_support_ph: target_support
})
def testMultiDimensionalTargetSupport(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant([[3]], dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'out of bounds'):
rainbow_agent.project_distribution(supports, weights, target_support)
def testMultiDimensionalTargetSupportWithPlaceholders(self):
supports = [[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]]
supports_ph = tf.compat.v1.placeholder(tf.float32, None)
weights = [[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]]
weights_ph = tf.compat.v1.placeholder(tf.float32, None)
target_support = [[3]]
target_support_ph = tf.compat.v1.placeholder(tf.float32, None)
projection = rainbow_agent.project_distribution(
supports_ph, weights_ph, target_support_ph, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
projection,
feed_dict={
supports_ph: supports,
weights_ph: weights,
target_support_ph: target_support
})
def testProjectWithNonMonotonicTargetSupport(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant([8, 7, 6, 5, 4], dtype=tf.float32)
projection = rainbow_agent.project_distribution(
supports, weights, target_support, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'assertion failed'):
sess.run(projection)
def testProjectNewSupportHasInconsistentDeltas(self):
supports = tf.constant([[0, 2, 4, 6, 8], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant([3, 4, 6, 7, 8], dtype=tf.float32)
projection = rainbow_agent.project_distribution(
supports, weights, target_support, validate_args=True)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'assertion failed'):
sess.run(projection)
def testProjectSingleIdenticalDistribution(self):
supports = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.float32)
expected_weights = [0.1, 0.2, 0.1, 0.3, 0.3]
weights = tf.constant([expected_weights], dtype=tf.float32)
target_support = tf.constant([0, 1, 2, 3, 4], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose([expected_weights], projection_)
def testProjectSingleDifferentDistribution(self):
supports = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.float32)
weights = tf.constant([[0.1, 0.2, 0.1, 0.3, 0.3]], dtype=tf.float32)
target_support = tf.constant([3, 4, 5, 6, 7], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
expected_projection = [[0.7, 0.3, 0.0, 0.0, 0.0]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose(expected_projection, projection_)
def testProjectFromNonMonotonicSupport(self):
supports = tf.constant([[4, 3, 2, 1, 0]], dtype=tf.float32)
weights = tf.constant([[0.1, 0.2, 0.1, 0.3, 0.3]], dtype=tf.float32)
target_support = tf.constant([3, 4, 5, 6, 7], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
expected_projection = [[0.9, 0.1, 0.0, 0.0, 0.0]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose(expected_projection, projection_)
def testExampleFromCodeComments(self):
supports = tf.constant([[0, 2, 4, 6, 8], [1, 3, 4, 5, 6]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.2, 0.5, 0.1, 0.1]],
dtype=tf.float32)
target_support = tf.constant([4, 5, 6, 7, 8], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
expected_projections = [[0.8, 0.0, 0.1, 0.0, 0.1],
[0.8, 0.1, 0.1, 0.0, 0.0]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose(expected_projections, projection_)
def testProjectBatchOfDifferentDistributions(self):
supports = tf.constant(
[[0, 2, 4, 6, 8], [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.1, 0.3, 0.3],
[0.1, 0.2, 0.3, 0.2, 0.2]],
dtype=tf.float32)
target_support = tf.constant([3, 4, 5, 6, 7], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
    expected_projections = [[0.3, 0.3, 0.0, 0.2, 0.2],
                            [0.7, 0.3, 0.0, 0.0, 0.0],
                            [0.1, 0.2, 0.3, 0.2, 0.2]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose(expected_projections, projection_)
def testUsingPlaceholders(self):
supports = [[0, 2, 4, 6, 8], [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]]
supports_ph = tf.compat.v1.placeholder(tf.float32, None)
weights = [[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.2, 0.1, 0.3, 0.3],
[0.1, 0.2, 0.3, 0.2, 0.2]]
weights_ph = tf.compat.v1.placeholder(tf.float32, None)
target_support = [3, 4, 5, 6, 7]
target_support_ph = tf.compat.v1.placeholder(tf.float32, None)
projection = rainbow_agent.project_distribution(supports_ph, weights_ph,
target_support_ph)
    expected_projections = [[0.3, 0.3, 0.0, 0.2, 0.2],
                            [0.7, 0.3, 0.0, 0.0, 0.0],
                            [0.1, 0.2, 0.3, 0.2, 0.2]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(
projection,
feed_dict={
supports_ph: supports,
weights_ph: weights,
target_support_ph: target_support
})
self.assertAllClose(expected_projections, projection_)
def testProjectBatchOfDifferentDistributionsWithLargerDelta(self):
supports = tf.constant(
[[0, 2, 4, 6, 8], [8, 9, 10, 12, 14]], dtype=tf.float32)
weights = tf.constant(
[[0.1, 0.2, 0.2, 0.2, 0.3], [0.1, 0.2, 0.4, 0.1, 0.2]],
dtype=tf.float32)
target_support = tf.constant([0, 4, 8, 12, 16], dtype=tf.float32)
projection = rainbow_agent.project_distribution(supports, weights,
target_support)
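    # With a target delta of 4, each source atom's mass is split linearly
    # between its two neighbouring target atoms (which is what the expected
    # values below imply): e.g. in the first row the atom at 2, weight 0.2,
    # sits midway between targets 0 and 4 and contributes 0.1 to each.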
expected_projections = [[0.2, 0.4, 0.4, 0.0, 0.0],
[0.0, 0.0, 0.45, 0.45, 0.1]]
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
projection_ = sess.run(projection)
self.assertAllClose(expected_projections, projection_)
class RainbowAgentTest(tf.test.TestCase):
def setUp(self):
super(RainbowAgentTest, self).setUp()
self._num_actions = 4
self._num_atoms = 5
self._vmax = 7.
self._min_replay_history = 32
self._epsilon_decay_period = 90
self.observation_shape = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE
self.observation_dtype = dqn_agent.NATURE_DQN_DTYPE
self.stack_size = dqn_agent.NATURE_DQN_STACK_SIZE
self.zero_state = np.zeros(
(1,) + self.observation_shape + (self.stack_size,))
def _create_test_agent(self, sess):
stack_size = self.stack_size
# This dummy network allows us to deterministically anticipate that
# action 0 will be selected by an argmax.
# In Rainbow we are dealing with a distribution over Q-values,
# which are represented as num_atoms bins, ranging from -vmax to vmax.
# The output layer will have num_actions * num_atoms elements,
# so each group of num_atoms weights represent the logits for a
# particular action. By setting 1s everywhere, except for the first
# num_atoms (representing the logits for the first action), which are
# set to np.arange(num_atoms), we are ensuring that the first action
# places higher weight on higher Q-values; this results in the first
# action being chosen.
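    # Concretely, with num_actions=4 and num_atoms=5 the first kernel row
    # built below is [0, 1, 2, 3, 4, 1, 1, ..., 1] (20 values), and the
    # remaining stack_size - 1 rows are all ones.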
class MockRainbowNetwork(tf.keras.Model):
"""Custom tf.keras.Model used in tests."""
def __init__(self, num_actions, num_atoms, support, **kwargs):
super(MockRainbowNetwork, self).__init__(**kwargs)
self.num_actions = num_actions
self.num_atoms = num_atoms
self.support = support
first_row = np.tile(np.ones(self.num_atoms), self.num_actions - 1)
first_row = np.concatenate((np.arange(self.num_atoms), first_row))
bottom_rows = np.tile(
np.ones(self.num_actions * self.num_atoms), (stack_size - 1, 1))
weights_initializer = np.concatenate(([first_row], bottom_rows))
self.layer = tf.keras.layers.Dense(
self.num_actions * self.num_atoms,
kernel_initializer=tf.constant_initializer(weights_initializer),
bias_initializer=tf.ones_initializer())
def call(self, state):
inputs = tf.constant(
np.zeros((state.shape[0], stack_size)), dtype=tf.float32)
net = self.layer(inputs)
logits = tf.reshape(net, [-1, self.num_actions, self.num_atoms])
probabilities = tf.keras.activations.softmax(logits)
qs = tf.reduce_sum(self.support * probabilities, axis=2)
return atari_lib.RainbowNetworkType(qs, logits, probabilities)
agent = rainbow_agent.RainbowAgent(
sess=sess,
network=MockRainbowNetwork,
num_actions=self._num_actions,
num_atoms=self._num_atoms,
vmax=self._vmax,
min_replay_history=self._min_replay_history,
epsilon_fn=lambda w, x, y, z: 0.0, # No exploration.
epsilon_eval=0.0,
epsilon_decay_period=self._epsilon_decay_period)
# This ensures non-random action choices (since epsilon_eval = 0.0) and
# skips the train_step.
agent.eval_mode = True
sess.run(tf.compat.v1.global_variables_initializer())
return agent
def testCreateAgentWithDefaults(self):
# Verifies that we can create and train an agent with the default values.
with tf.compat.v1.Session() as sess:
agent = rainbow_agent.RainbowAgent(sess, num_actions=4)
sess.run(tf.compat.v1.global_variables_initializer())
observation = np.ones([84, 84, 1])
agent.begin_episode(observation)
agent.step(reward=1, observation=observation)
agent.end_episode(reward=1)
def testShapesAndValues(self):
with tf.compat.v1.Session() as sess:
agent = self._create_test_agent(sess)
self.assertEqual(agent._support.shape[0], self._num_atoms)
self.assertEqual(
self.evaluate(tf.reduce_min(agent._support)), -self._vmax)
self.assertEqual(self.evaluate(tf.reduce_max(agent._support)), self._vmax)
self.assertEqual(agent._net_outputs.logits.shape,
(1, self._num_actions, self._num_atoms))
self.assertEqual(agent._net_outputs.probabilities.shape,
agent._net_outputs.logits.shape)
self.assertEqual(agent._replay_net_outputs.logits.shape[1],
self._num_actions)
self.assertEqual(agent._replay_net_outputs.logits.shape[2],
self._num_atoms)
self.assertEqual(agent._replay_next_target_net_outputs.logits.shape[1],
self._num_actions)
self.assertEqual(agent._replay_next_target_net_outputs.logits.shape[2],
self._num_atoms)
self.assertEqual(agent._net_outputs.q_values.shape,
(1, self._num_actions))
def testBeginEpisode(self):
"""Tests the functionality of agent.begin_episode.
Specifically, the action returned and its effect on the state.
"""
with tf.compat.v1.Session() as sess:
agent = self._create_test_agent(sess)
# We fill up the state with 9s. On calling agent.begin_episode the state
# should be reset to all 0s.
agent.state.fill(9)
first_observation = np.ones(self.observation_shape + (1,))
self.assertEqual(agent.begin_episode(first_observation), 0)
# When the all-1s observation is received, it will be placed at the end of
# the state.
expected_state = self.zero_state
expected_state[:, :, :, -1] = np.ones((1,) + self.observation_shape)
self.assertAllEqual(agent.state, expected_state)
self.assertAllEqual(agent._observation, first_observation[:, :, 0])
# No training happens in eval mode.
self.assertEqual(agent.training_steps, 0)
# This will now cause training to happen.
agent.eval_mode = False
# Having a low replay memory add_count will prevent any of the
# train/prefetch/sync ops from being called.
agent._replay.memory.add_count = 0
second_observation = np.ones(self.observation_shape + (1,)) * 2
agent.begin_episode(second_observation)
# The agent's state will be reset, so we will only be left with the all-2s
# observation.
expected_state[:, :, :, -1] = np.full((1,) + self.observation_shape, 2)
self.assertAllEqual(agent.state, expected_state)
self.assertAllEqual(agent._observation, second_observation[:, :, 0])
# training_steps is incremented since we set eval_mode to False.
self.assertEqual(agent.training_steps, 1)
def testStepEval(self):
"""Tests the functionality of agent.step() in eval mode.
Specifically, the action returned, and confirms that no training happens.
"""
with tf.compat.v1.Session() as sess:
agent = self._create_test_agent(sess)
base_observation = np.ones(self.observation_shape + (1,))
# This will reset state and choose a first action.
agent.begin_episode(base_observation)
# We mock the replay buffer to verify how the agent interacts with it.
agent._replay = test_utils.MockReplayBuffer()
self.evaluate(tf.compat.v1.global_variables_initializer())
expected_state = self.zero_state
num_steps = 10
for step in range(1, num_steps + 1):
# We make observation a multiple of step for testing purposes (to
# uniquely identify each observation).
observation = base_observation * step
self.assertEqual(agent.step(reward=1, observation=observation), 0)
stack_pos = step - num_steps - 1
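        # stack_pos is where this observation will sit (as a negative index
        # from the end) in the state after the final step; frames older than
        # stack_size will have been shifted out by then, so only the last
        # stack_size observations are recorded in expected_state.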
if stack_pos >= -self.stack_size:
expected_state[:, :, :, stack_pos] = np.full(
(1,) + self.observation_shape, step)
self.assertAllEqual(agent.state, expected_state)
self.assertAllEqual(
agent._last_observation,
np.ones(self.observation_shape) * (num_steps - 1))
self.assertAllEqual(agent._observation, observation[:, :, 0])
# No training happens in eval mode.
self.assertEqual(agent.training_steps, 0)
# No transitions are added in eval mode.
self.assertEqual(agent._replay.add.call_count, 0)
def testStepTrain(self):
"""Test the functionality of agent.step() in train mode.
Specifically, the action returned, and confirms training is happening.
"""
with tf.compat.v1.Session() as sess:
agent = self._create_test_agent(sess)
agent.eval_mode = False
base_observation = np.ones(self.observation_shape + (1,))
# We mock the replay buffer to verify how the agent interacts with it.
agent._replay = test_utils.MockReplayBuffer()
self.evaluate(tf.compat.v1.global_variables_initializer())
# This will reset state and choose a first action.
agent.begin_episode(base_observation)
expected_state = self.zero_state
num_steps = 10
for step in range(1, num_steps + 1):
# We make observation a multiple of step for testing purposes (to
# uniquely identify each observation).
observation = base_observation * step
self.assertEqual(agent.step(reward=1, observation=observation), 0)
stack_pos = step - num_steps - 1
if stack_pos >= -self.stack_size:
expected_state[:, :, :, stack_pos] = np.full(
(1,) + self.observation_shape, step)
self.assertAllEqual(agent.state, expected_state)
self.assertAllEqual(
agent._last_observation,
np.full(self.observation_shape, num_steps - 1))
self.assertAllEqual(agent._observation, observation[:, :, 0])
# We expect one more than num_steps because of the call to begin_episode.
self.assertEqual(agent.training_steps, num_steps + 1)
self.assertEqual(agent._replay.add.call_count, num_steps)
agent.end_episode(reward=1)
self.assertEqual(agent._replay.add.call_count, num_steps + 1)
def testStoreTransitionWithUniformSampling(self):
with tf.compat.v1.Session() as sess:
agent = rainbow_agent.RainbowAgent(
sess, num_actions=4, replay_scheme='uniform')
dummy_frame = np.zeros((84, 84))
      # Add three transitions; their priorities are default, 10., and default.
agent._store_transition(dummy_frame, 0, 0, False)
agent._store_transition(dummy_frame, 0, 0, False, 10.)
agent._store_transition(dummy_frame, 0, 0, False)
returned_priorities = agent._replay.memory.get_priority(
np.arange(self.stack_size - 1, self.stack_size + 2, dtype=np.int32))
expected_priorities = [1., 10., 1.]
self.assertAllEqual(returned_priorities, expected_priorities)
  def testStoreTransitionWithPrioritizedSampling(self):
with tf.compat.v1.Session() as sess:
agent = rainbow_agent.RainbowAgent(
sess, num_actions=4, replay_scheme='prioritized')
dummy_frame = np.zeros((84, 84))
      # Add three transitions; their priorities are default, 10., and default.
agent._store_transition(dummy_frame, 0, 0, False)
agent._store_transition(dummy_frame, 0, 0, False, 10.)
agent._store_transition(dummy_frame, 0, 0, False)
returned_priorities = agent._replay.memory.get_priority(
np.arange(self.stack_size - 1, self.stack_size + 2, dtype=np.int32))
expected_priorities = [1., 10., 10.]
self.assertAllEqual(returned_priorities, expected_priorities)
if __name__ == '__main__':
tf.compat.v1.disable_v2_behavior()
tf.test.main()
|
from __future__ import division
import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import F, Q, Sum, Count
from django.utils import timezone
from django.utils.text import slugify
from django.contrib.auth.models import User
from django.conf import settings
from django_countries.fields import CountryField
from utils.database import BooleanSum
from utils.models import Marker
# Managers
class MatchManager(models.Manager):
"""
Manager that provides upcoming matches.
"""
def upcoming(self, competition=None):
return self.get_queryset(competition=competition).filter(
match_datetime__gte=timezone.now()
).order_by('match_datetime')
def latest(self, competition=None):
return self.get_queryset(competition=competition).filter(
match_datetime__lt=timezone.now()
).order_by('-match_datetime')
def get_queryset(self, competition=None):
qs = super(MatchManager, self).get_queryset()
if competition:
return qs.filter(
group__stage__comp_season__competition=competition)
return qs
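# Illustrative usage of the manager (with `comp` any Competition instance):
#   Match.objects.upcoming(comp)  -> unplayed matches, soonest first
#   Match.objects.latest(comp)    -> played matches, most recent first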
# Models
class Season(models.Model):
year_from = models.PositiveSmallIntegerField(db_index=True)
year_to = models.PositiveSmallIntegerField()
class Meta:
unique_together = ('year_from', 'year_to')
def __unicode__(self):
return u'%s' % (self.name)
@property
def name(self):
return '%s/%s' % (self.year_from, str(self.year_to)[-2:])
@staticmethod
def curr_year():
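        # Seasons roll over on 1 July: before that date the "current" season
        # is still the one that started the previous calendar year.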
today = timezone.now().today()
middle = datetime.datetime(today.year, 7, 1)
if today < middle:
return today.year - 1
return today.year
class Club(Marker):
name = models.CharField(max_length=100)
short_name = models.CharField(
max_length=15, blank=True,
help_text="For use as common name, ex. Vardar, Hypo, FCM...")
initials = models.CharField(
max_length=3, blank=True,
help_text="For use in matches, ex. GYO, VAR, BUD...")
country = CountryField()
ehf_id = models.IntegerField('EHF id', unique=True)
website = models.URLField(blank=True)
twitter = models.URLField(blank=True)
facebook = models.URLField(blank=True)
logo = models.ImageField(upload_to='clubs', blank=True, null=True)
players = models.ManyToManyField(
'Player', through='PlayerContract', blank=True)
coaches = models.ManyToManyField(
'Coach', through='CoachContract', blank=True)
fans = models.ManyToManyField(User, related_name='fav_clubs')
def __unicode__(self):
return u'%s' % (self.name)
def get_absolute_url(self):
return reverse('data:club_detail',
kwargs={'pk': self.pk, 'slug': slugify(self.name)})
def get_canonical_url(self):
return reverse('data:club_detail', kwargs={'pk': self.pk})
def get_current_team(self):
today = timezone.now().today()
middle = datetime.datetime(today.year, 7, 1)
season_year = today.year
if today < middle:
season_year -= 1
return self.playercontract_set.select_related(
'player', 'season').filter(
season__year_from=season_year, departure_month=None)
def get_current_coaches(self):
today = timezone.now().today()
middle = datetime.datetime(today.year, 7, 1)
season_year = today.year
if today < middle:
season_year -= 1
return self.coachcontract_set.filter(
season__year_from=season_year, departure_month=None)
def get_matches(self):
query = Q(home_team=self) | Q(away_team=self)
return Match.objects.filter(query).select_related(
).order_by('-match_datetime')
def get_matches_with_rival(self, club_id):
query = Q(home_team=club_id) | Q(away_team=club_id)
return self.get_matches().filter(query)
def get_competitions(self):
return self.grouptable_set.select_related().order_by(
'-group__stage__comp_season__season__year_from',
'-group__stage__comp_season__competition__level',
'-group__stage__order')
def get_scorer_list(self, year):
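        # BooleanSum comes from the project's utils.database module; it is
        # assumed here to aggregate a boolean column by counting True values.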
return Player.objects.filter(
matchplayerstats__club=self,
matchplayerstats__match__group__stage__comp_season__season__year_from=year
).annotate(sum_goals=Sum('matchplayerstats__goals'),
yellows=BooleanSum('matchplayerstats__yellow_card'),
two_mins=Sum('matchplayerstats__two_minutes'),
reds=BooleanSum('matchplayerstats__red_card')
).order_by('-sum_goals')
def address_lines(self):
if self.address:
return self.address.split(',')
return []
def has_logo(self):
if self.logo:
return True
return False
has_logo.boolean = True
has_logo.short_description = 'Has logo?'
def admin_thumbnail(self):
if self.logo:
return u'<img src="%s" />' % (self.logo.url)
else:
return u'No image.'
admin_thumbnail.short_description = 'Logo preview'
admin_thumbnail.allow_tags = True
@property
def logo_url(self):
if self.logo:
return self.logo.url
else:
return settings.STATIC_URL + 'img/no_logo.png'
@property
def display_name(self):
return self.short_name or self.name
class ClubName(models.Model):
club = models.ForeignKey(Club)
season = models.ForeignKey(Season)
name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.season)
class Person(models.Model):
MALE = 'M'
FEMALE = 'F'
GENDER_CHOICES = ((FEMALE, 'Female'), (MALE, 'Male'))
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
country = CountryField()
birth_date = models.DateField(blank=True, null=True)
birth_place = models.CharField(max_length=50, blank=True, null=True)
gender = models.CharField(
max_length=1, choices=GENDER_CHOICES, default=FEMALE)
photo = models.ImageField(upload_to='people', blank=True, null=True)
class Meta:
abstract = True
@property
def full_name(self):
"Returns the person's full name."
return '%s %s' % (self.first_name, self.last_name)
@property
def age(self):
today = timezone.now().today()
born = self.birth_date
adjust = ((today.month, today.day) < (born.month, born.day))
return today.year - born.year - adjust
@property
def photo_url(self):
if self.photo:
return self.photo.url
else:
return u'https://placehold.it/320x400&text=No+Image'
def has_photo(self):
if self.photo:
return True
return False
has_photo.boolean = True
has_photo.short_description = 'Has photo?'
def admin_thumbnail(self):
if self.photo:
return u'<img style="height: 300px;" src="%s" />' % (
self.photo.url)
else:
return u'No image.'
admin_thumbnail.short_description = 'Photo preview'
admin_thumbnail.allow_tags = True
class Player(Person):
UNKNOWN = 'U'
GOALKEEPER = 'GK'
LINE_PLAYER = 'LP'
LEFT_WING = 'LW'
RIGHT_WING = 'RW'
LEFT_BACK = 'LB'
RIGHT_BACK = 'RB'
MIDDLE_BACK = 'MB'
BACK = 'B'
WING = 'W'
POSITION_CHOICES = (
(GOALKEEPER, 'Goalkeeper'),
(LINE_PLAYER, 'Line player'),
(LEFT_WING, 'Left wing'),
(RIGHT_WING, 'Right wing'),
(LEFT_BACK, 'Left back'),
(RIGHT_BACK, 'Right back'),
(MIDDLE_BACK, 'Middle back'),
(BACK, 'Back'),
(WING, 'Wing'),
(UNKNOWN, 'Unknown')
)
LEFT_HAND = 'L'
RIGHT_HAND = 'R'
HAND_CHOICES = (
(LEFT_HAND, 'Left'),
(RIGHT_HAND, 'Right'),
(UNKNOWN, 'Unknown')
)
ehf_id = models.IntegerField('EHF id', unique=True)
position = models.CharField(
max_length=2, choices=POSITION_CHOICES, default=UNKNOWN)
height = models.PositiveSmallIntegerField(
blank=True, default=0,
help_text="Please indicate height in centimeters.")
main_hand = models.CharField(
max_length=1, choices=HAND_CHOICES, default=UNKNOWN)
retired = models.BooleanField(default=False)
retirement_date = models.DateField(null=True, blank=True)
fans = models.ManyToManyField(User, related_name='fav_players')
def __unicode__(self):
return u'%s' % (self.full_name)
def get_absolute_url(self):
return reverse('data:player_detail',
kwargs={'pk': self.pk, 'slug': slugify(self.full_name)})
def get_canonical_url(self):
return reverse('data:player_detail', kwargs={'pk': self.pk})
@property
def is_goalkeeper(self):
return self.position == Player.GOALKEEPER
@property
def current_contract(self):
today = timezone.now().today()
middle = datetime.datetime(today.year, 7, 1)
season_year = today.year
if today < middle:
season_year -= 1
contracts = self.playercontract_set.select_related(
'club', 'season').filter(
season__year_from=season_year, departure_month=None)
if len(contracts) == 1:
return contracts[0]
else:
return None
class PlayerName(models.Model):
player = models.ForeignKey(Player)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
def __unicode__(self):
return u'%s - %s %s' % (self.player, self.first_name, self.last_name)
@property
def full_name(self):
"Returns the person's full name."
return '%s %s' % (self.first_name, self.last_name)
class Contract(models.Model):
JANUARY = 1
FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
MONTH_CHOICES = (
(JANUARY, 'January'),
(FEBRUARY, 'February'),
(MARCH, 'March'),
(APRIL, 'April'),
(MAY, 'May'),
(JUNE, 'June'),
(JULY, 'July'),
(AUGUST, 'August'),
(SEPTEMBER, 'September'),
(OCTOBER, 'October'),
(NOVEMBER, 'November'),
(DECEMBER, 'December')
)
club = models.ForeignKey(Club)
season = models.ForeignKey(Season)
departure_month = models.IntegerField(
choices=MONTH_CHOICES, blank=True, null=True,
help_text="Only if the person left before the end of the season.")
arrival_month = models.IntegerField(
choices=MONTH_CHOICES, blank=True, null=True,
help_text="Only if the person arrived after the start of the season.")
class Meta:
abstract = True
def player_contract_filename(instance, filename):
"""
Construct the filepath to store contract photos.
Ex: /contracts/2013/cid_pid.ext
"""
ext = filename.split('.')[-1]
new_file = '%s_%s.%s' % (instance.club_id, instance.player_id, ext)
return '/'.join([
'contracts', str(instance.season.year_from), new_file
])
class PlayerContract(Contract):
player = models.ForeignKey(Player)
shirt_number = models.PositiveSmallIntegerField(blank=True, default=0)
photo = models.ImageField(
upload_to=player_contract_filename, blank=True, null=True)
def __unicode__(self):
return u'%s (%s) in %s (%s)' % (
self.player.full_name, self.player.position,
self.club.name, self.season.name)
@property
def photo_url(self):
if self.photo:
return self.photo.url
else:
return self.player.photo_url
@property
def career_photo_url(self):
if self.photo:
return self.photo.url
else:
return u'https://placehold.it/320x400&text=No+Image'
def is_current(self):
today = timezone.now().today()
        middle = datetime.datetime(today.year, 7, 1)
season_year = today.year
if today < middle:
season_year -= 1
return self.season.year_from == season_year
def get_stats(self):
qs = MatchPlayerStats.objects.filter(
player=self.player,
club=self.club,
match__group__stage__comp_season__season=self.season)
return qs.aggregate(
num_matches=Count('match'),
goals=Sum('goals'),
saves=Sum('saves'),
yellows=BooleanSum('yellow_card'),
two_mins=Sum('two_minutes'),
reds=BooleanSum('red_card')
)
class Coach(Person):
player = models.OneToOneField(Player, blank=True, null=True,
on_delete=models.SET_NULL)
class Meta:
verbose_name_plural = 'coaches'
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class CoachContract(Contract):
HEAD = 'H'
ASSISTANT = 'A'
ROLE_CHOICES = (
(HEAD, 'Head coach'),
(ASSISTANT, 'Assistant coach')
)
coach = models.ForeignKey(Coach)
role = models.CharField(max_length=1, choices=ROLE_CHOICES, default=HEAD)
def __unicode__(self):
return u'%s in %s (%s)' % (
self.coach.full_name, self.club.name, self.season.name)
class Category(models.Model):
name = models.CharField(max_length=100)
display_order = models.SmallIntegerField(default=0)
competitions = models.ManyToManyField('Competition', blank=True)
class Meta:
verbose_name_plural = 'categories'
ordering = ['display_order']
def __unicode__(self):
return u'%s' % (self.name)
def get_competitions(self):
return self.competitions.order_by('level')
class Competition(models.Model):
name = models.CharField(max_length=50)
short_name = models.CharField(max_length=5)
website = models.URLField(blank=True)
country = CountryField()
is_international = models.BooleanField(default=False)
level = models.PositiveSmallIntegerField(default=1)
logo = models.ImageField(upload_to='comps', blank=True, null=True)
seasons = models.ManyToManyField(Season, through='CompetitionSeason')
def __unicode__(self):
return u'%s' % (self.name)
def get_absolute_url(self):
return reverse('data:comp_detail',
kwargs={'pk': self.pk, 'slug': slugify(self.name)})
@property
def logo_url(self):
if self.logo:
return self.logo.url
else:
return settings.STATIC_URL + 'img/no_logo.png'
def has_logo(self):
if self.logo:
return True
return False
has_logo.boolean = True
has_logo.short_description = 'Has logo?'
def admin_thumbnail(self):
if self.logo:
return u'<img src="%s" />' % (self.logo.url)
else:
return u'No image.'
admin_thumbnail.short_description = 'Logo preview'
admin_thumbnail.allow_tags = True
def get_season_or_latest(self, year=None):
year = year or timezone.now().year
cs = self.competitionseason_set.filter(season__year_to=year)
if not cs:
cs = self.competitionseason_set.order_by(
'-season__year_to')
if cs:
return cs[0]
return None
def get_seasons(self):
return self.seasons.order_by('-year_from')
def get_participations(self):
return Club.objects.filter(
grouptable__group__stage__comp_season__competition=self
).annotate(times=Count('grouptable__group__stage__comp_season',
distinct=True)).order_by('-times')
def get_top_scorers(self):
return Player.objects.filter(
matchplayerstats__match__group__stage__comp_season__competition=self
).annotate(goals=Sum('matchplayerstats__goals')).order_by('-goals')
class CompetitionSeason(models.Model):
competition = models.ForeignKey(Competition)
season = models.ForeignKey(Season)
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
class Meta:
unique_together = ('competition', 'season')
def __unicode__(self):
return u'%s %s' % (self.competition.name, self.season.name)
def get_absolute_url(self):
return reverse('data:comp_season',
kwargs={'year': self.season.year_from,
'comp_id': self.competition.id})
def get_stats_url(self):
return reverse('data:comp_season_stats',
kwargs={'year': self.season.year_from,
'comp_id': self.competition.id})
def get_teams_url(self):
return reverse('data:comp_season_teams',
kwargs={'year': self.season.year_from,
'comp_id': self.competition.id})
def get_teams(self):
return Club.objects.filter(
grouptable__group__stage__comp_season=self
).order_by('name').distinct()
def get_player_stats(self):
return Player.objects.filter(
matchplayerstats__match__group__stage__comp_season=self).annotate(
sum_goals=Sum('matchplayerstats__goals'),
yellows=BooleanSum('matchplayerstats__yellow_card'),
two_mins=Sum('matchplayerstats__two_minutes'),
reds=BooleanSum('matchplayerstats__red_card'),
matches=Count('matchplayerstats')
).order_by('-sum_goals')
class Stage(models.Model):
"""Represents a round or stage in a competition"""
KNOCKOUT = 'KO'
ROUND_ROBIN = 'RR'
KO_GROUPS = 'KG'
TYPE_CHOICES = (
(KNOCKOUT, 'Knockout'),
(ROUND_ROBIN, 'Round robin'),
(KO_GROUPS, 'Knockout groups')
)
comp_season = models.ForeignKey(CompetitionSeason,
verbose_name='Competition Season')
order = models.PositiveSmallIntegerField('Stage order')
name = models.CharField(max_length=30)
short_name = models.CharField(max_length=5)
is_qualification = models.BooleanField(default=False)
type = models.CharField(max_length=2, choices=TYPE_CHOICES)
class Meta:
ordering = ['order']
def __unicode__(self):
return u'%s %s, %s' % (
self.comp_season.season, self.comp_season.competition, self.name)
def get_absolute_url(self):
return reverse('data:stage_detail',
kwargs={'pk': self.pk,
'comp_id': self.comp_season.competition.id,
'year': self.comp_season.season.year_from
})
def get_teams(self):
return Club.objects.filter(
grouptable__group__stage=self
).order_by('name')
class Group(models.Model):
stage = models.ForeignKey(Stage)
order = models.PositiveSmallIntegerField('Group order')
name = models.CharField(max_length=30)
teams = models.ManyToManyField(Club, through='GroupTable')
class Meta:
ordering = ['order']
def __unicode__(self):
return u'%s - %s' % (self.stage, self.name)
def get_table(self):
return GroupTable.objects.filter(
group=self).select_related('team', 'group').order_by('position')
def get_matches(self):
return self.match_set.select_related(
'home_team', 'away_team').order_by('match_datetime')
class GroupTable(models.Model):
group = models.ForeignKey(Group)
team = models.ForeignKey(Club)
position = models.PositiveSmallIntegerField(default=0)
start_points = models.SmallIntegerField(
default=0, help_text="Additional points to add or take away.")
class Meta:
unique_together = ('group', 'team')
ordering = ['position']
def __unicode__(self):
return u'%s %s' % (self.group, self.team)
def query(self):
return Q(home_team=self.team) | Q(away_team=self.team)
def get_matches(self):
return self.group.get_matches().filter(self.query())
@property
def table_stats(self):
stats = {'wins': 0, 'losses': 0, 'draws': 0,
'goals_for': 0, 'goals_against': 0}
        # Only count matches where both teams have a recorded (non-zero) score.
matches = self.get_matches().filter(score_home__gt=0, score_away__gt=0)
stats['num_matches'] = len(matches)
# Calculate stats from the queryset.
for m in matches:
stats['draws'] += m.score_home == m.score_away
if m.home_team == self.team:
stats['wins'] += m.score_home > m.score_away
stats['losses'] += m.score_home < m.score_away
stats['goals_for'] += m.score_home
stats['goals_against'] += m.score_away
else:
stats['wins'] += m.score_away > m.score_home
stats['losses'] += m.score_away < m.score_home
stats['goals_for'] += m.score_away
stats['goals_against'] += m.score_home
stats['points'] = stats['wins'] * 2 + stats['draws'] * 1
stats['points'] += self.start_points
return stats
@property
def num_matches(self):
return self.group.match_set.filter(self.query()).count()
@property
def wins(self):
home = self.team.home_matches.filter(
group=self.group, score_home__gt=F('score_away')).count()
away = self.team.away_matches.filter(
group=self.group, score_away__gt=F('score_home')).count()
total = home + away
return (total, home, away)
@property
def draws(self):
home = self.team.home_matches.filter(
group=self.group, score_home=F('score_away')).count()
away = self.team.away_matches.filter(
group=self.group, score_away=F('score_home')).count()
total = home + away
return (total, home, away)
@property
def losses(self):
home = self.team.home_matches.filter(
group=self.group, score_home__lt=F('score_away')).count()
away = self.team.away_matches.filter(
group=self.group, score_away__lt=F('score_home')).count()
total = home + away
return (total, home, away)
@property
def goals_for(self):
home = self.team.home_matches.filter(
group=self.group).aggregate(
for_home=Sum('score_home'))['for_home']
away = self.team.away_matches.filter(
group=self.group).aggregate(
for_away=Sum('score_away'))['for_away']
total = int(home or 0) + int(away or 0)
return (total, home, away)
@property
def goals_against(self):
home = self.team.home_matches.filter(
group=self.group).aggregate(
against_home=Sum('score_away'))['against_home']
away = self.team.away_matches.filter(
group=self.group).aggregate(
against_away=Sum('score_home'))['against_away']
total = int(home or 0) + int(away or 0)
return (total, home, away)
@property
def points(self):
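        # Two points per win and one per draw (the usual handball scheme),
        # plus any manual start_points adjustment.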
return self.wins[0] * 2 + self.draws[0] * 1 + self.start_points
class Match(models.Model):
group = models.ForeignKey(Group)
home_team = models.ForeignKey(Club, related_name='home_matches')
away_team = models.ForeignKey(Club, related_name='away_matches')
match_datetime = models.DateTimeField()
arena = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100, blank=True)
spectators = models.PositiveIntegerField(blank=True, null=True)
score_home = models.PositiveSmallIntegerField(blank=True, null=True)
score_away = models.PositiveSmallIntegerField(blank=True, null=True)
report_url = models.URLField(blank=True)
week = models.SmallIntegerField(default=0)
referees = models.ManyToManyField('Referee', blank=True)
delegates = models.ManyToManyField('Delegate', blank=True)
objects = MatchManager()
class Meta:
verbose_name_plural = 'matches'
def __unicode__(self):
return u'%s vs %s on %s' % (
self.home_team, self.away_team, self.match_datetime)
def get_absolute_url(self):
return reverse('data:match_detail', kwargs={'pk': self.pk})
@property
def display_name(self):
return (self.home_team.display_name + ' v ' +
self.away_team.display_name)
@property
def display_result(self):
if self.score_home or self.score_away:
return str(self.score_home) + ':' + str(self.score_away)
return '?:?'
@property
def display_halftime(self):
try:
home_ht = self.get_home_stats().halftime_score
away_ht = self.get_away_stats().halftime_score
        except AttributeError:  # team stats may be missing (None)
return '?:?'
return str(home_ht) + ':' + str(away_ht)
@property
def is_future(self):
now = timezone.now()
return self.match_datetime > now
@property
def is_draw(self):
if self.score_home or self.score_away:
return self.score_home == self.score_away
return False
@property
def is_home_win(self):
if self.score_home or self.score_away:
return self.score_home > self.score_away
return False
@property
def is_away_win(self):
if self.score_home or self.score_away:
return self.score_away > self.score_home
return False
def get_home_stats(self):
try:
return self.matchteamstats_set.get(club=self.home_team)
except MatchTeamStats.DoesNotExist:
return None
def get_home_player_stats(self):
q = self.matchplayerstats_set.filter(club=self.home_team)
return q.select_related('player').order_by('-goals')
def get_away_stats(self):
try:
return self.matchteamstats_set.get(club=self.away_team)
except MatchTeamStats.DoesNotExist:
return None
def get_away_player_stats(self):
q = self.matchplayerstats_set.filter(club=self.away_team)
return q.select_related('player').order_by('-goals')
class MatchTeamStats(models.Model):
match = models.ForeignKey(Match)
club = models.ForeignKey(Club)
halftime_score = models.PositiveSmallIntegerField(blank=True, null=True)
finaltime_score = models.PositiveSmallIntegerField(blank=True, null=True)
score_pt = models.PositiveSmallIntegerField(
'Score after playing time', blank=True, null=True)
score_et1 = models.PositiveSmallIntegerField(
'Score after ET1', blank=True, null=True)
score_et2 = models.PositiveSmallIntegerField(
'Score after ET2', blank=True, null=True)
score_7m = models.PositiveSmallIntegerField(
'Score after 7m shootout', blank=True, null=True)
given_7m = models.PositiveSmallIntegerField(
'7m given', blank=True, null=True)
goals_7m = models.PositiveSmallIntegerField(
'7m scored', blank=True, null=True)
timeout1 = models.CharField(max_length=10, blank=True)
timeout2 = models.CharField(max_length=10, blank=True)
timeout3 = models.CharField(max_length=10, blank=True)
yellow_card = models.BooleanField(default=False)
two_minutes = models.PositiveSmallIntegerField(blank=True, null=True)
red_card = models.BooleanField(default=False)
class Meta:
verbose_name_plural = 'team stats'
unique_together = ('match', 'club')
def __unicode__(self):
return u'%s in %s' % (self.club, self.match)
@property
def first_half_ratio(self):
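        # Share (in percent) of the final score already on the board at half
        # time; finaltime_score falls back to 1 to avoid dividing by zero.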
return round(
(self.halftime_score or 0)/(self.finaltime_score or 1) * 100)
@property
def second_half_ratio(self):
return round(
(self.second_half_score or 0)/(self.finaltime_score or 1) * 100)
@property
def second_half_score(self):
return self.finaltime_score - self.halftime_score
@property
def penalty_ratio(self):
try:
return round(self.goals_7m/self.given_7m * 100)
        except (TypeError, ZeroDivisionError):  # values may be None or zero
return 0
@property
def penalty_miss_ratio(self):
try:
missed = self.given_7m - self.goals_7m
return round(missed/self.given_7m * 100)
        except (TypeError, ZeroDivisionError):  # values may be None or zero
return 0
class MatchPlayerStats(models.Model):
match = models.ForeignKey(Match)
club = models.ForeignKey(Club)
player = models.ForeignKey(Player)
goals = models.PositiveSmallIntegerField(blank=True, null=True)
goals_7m = models.PositiveSmallIntegerField(blank=True, null=True)
goals_shots = models.PositiveSmallIntegerField(
'Shots made', blank=True, null=True)
saves = models.PositiveSmallIntegerField(blank=True, null=True)
saves_7m = models.PositiveSmallIntegerField(blank=True, null=True)
saves_shots = models.PositiveSmallIntegerField(
'Shots received', blank=True, null=True)
yellow_card = models.BooleanField(default=False)
two_minutes = models.PositiveSmallIntegerField(blank=True, null=True)
red_card = models.BooleanField(default=False)
# playing_time = models.FloatField(default=0)
class Meta:
verbose_name_plural = 'player stats'
unique_together = ('match', 'club', 'player')
def __unicode__(self):
return u'%s in %s' % (self.player, self.match)
class Referee(models.Model):
name = models.CharField(max_length=100)
country = CountryField()
pair = models.ForeignKey('self',
blank=True,
null=True,
on_delete=models.SET_NULL)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.country)
class Delegate(models.Model):
name = models.CharField(max_length=100)
country = CountryField()
def __unicode__(self):
return u'%s (%s)' % (self.name, self.country)
|