gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.contrib.tpu.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
# Whether this test binary is the GPU/TPU variant, inferred from the binary
# name on the command line (e.g. a target ending in "test_gpu"/"test_tpu").
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
  """A decorator for generating test cases of a test method or a test class.

  Args:
    combinations: a list of dictionaries created using combine() and times().

  Restrictions:
    -- the "mode" argument can be either "eager" or "graph".  It's "graph" by
       default.
    -- arguments of the test method must match by name to get the
       corresponding value of the combination.  Tests must accept all
       arguments except the "mode", "required_tpu" and "required_gpus".
    -- "distribution" argument is special and optional.  It is meant for
       passing instances of DistributionStrategy.  Each instance is to be
       passed as via `NamedDistribution`.  If using "distribution",
       "required_gpus" and "required_tpu" should be specified via the
       NamedDistribution instance, rather than as separate arguments.
    -- "required_tpu" argument is special and optional.  If not `None`, then
       the test will be skipped if TPUs aren't available.
    -- "required_gpus" argument is special and optional.  If not `None`, then
       the test will be skipped if the specified number of GPUs aren't
       available.

  Returns:
    a decorator that will cause the test method or the test class to be run
    under the specified conditions.

  Raises:
    ValueError - if "mode" argument wasn't either "eager" or "graph" or if
      other arguments were not accepted by the test method.
  """

  def decorator(test_method_or_class):
    """The decorator to be returned."""

    # Generate good test names that can be used with --test_filter.
    named_combinations = []
    for combination in combinations:
      # We use OrderedDicts in `combine()` and `times()` to ensure stable
      # order of keys in each dictionary.
      assert isinstance(combination, OrderedDict)
      # Build a name such as "_mode_graph_requiredgpus_1" by concatenating
      # the alphanumeric characters of each key/value pair.
      name = "".join([
          "_{}_{}".format(
              "".join(filter(str.isalnum, key)),
              "".join(filter(str.isalnum, str(value))))
          for key, value in combination.items()
      ])
      # `testcase_name` is what absl.parameterized uses to name each case.
      named_combinations.append(
          OrderedDict(
              list(combination.items()) + [("testcase_name",
                                            "_test{}".format(name))]))

    if isinstance(test_method_or_class, type):
      # Decorating a class: replace every test method with one generated
      # method per combination, mirroring absl.parameterized's class
      # handling.
      class_object = test_method_or_class
      class_object._test_method_ids = test_method_ids = {}
      for name, test_method in six.iteritems(class_object.__dict__.copy()):
        if (name.startswith(unittest.TestLoader.testMethodPrefix) and
            isinstance(test_method, types.FunctionType)):
          delattr(class_object, name)
          methods = {}
          # NOTE: relies on private absl.testing.parameterized helpers to
          # register the generated methods and their test ids.
          parameterized._update_class_dict_for_param_test_case(
              class_object.__name__, methods, test_method_ids, name,
              parameterized._ParameterizedTestIter(
                  _augment_with_special_arguments(test_method),
                  named_combinations, parameterized._NAMED, name))
          for method_name, method in six.iteritems(methods):
            setattr(class_object, method_name, method)

      return class_object
    else:
      # Decorating a single test method.
      test_method = _augment_with_special_arguments(test_method_or_class)
      return parameterized.named_parameters(*named_combinations)(test_method)

  return decorator
def _augment_with_special_arguments(test_method):
  """Wraps `test_method` to interpret the special combination arguments.

  The wrapper pops "mode", "required_tpu" and "required_gpus" from the
  combination kwargs, skips the test when the hardware requirements are not
  met, and runs the body under the requested graph/eager mode.
  """

  def decorated(self, **kwargs):
    """A wrapped test method that treats some arguments in a special way."""
    mode = kwargs.pop("mode", "graph")
    distribution = kwargs.get("distribution", None)
    required_tpu = kwargs.pop("required_tpu", False)
    required_gpus = kwargs.pop("required_gpus", None)

    if distribution:
      # Hardware requirements must come from the NamedDistribution itself,
      # never from separate arguments.
      assert required_gpus is None, (
          "Do not use `required_gpus` and `distribution` together.")
      assert required_tpu is False, (
          "Do not use `required_tpu` and `distribution` together.")
      required_gpus = distribution.required_gpus
      required_tpu = distribution.required_tpu

    if required_tpu and not TPU_TEST:
      self.skipTest("Test requires a TPU, but it's not available.")
    if not required_tpu and TPU_TEST:
      self.skipTest("Test that doesn't require a TPU.")

    # GPU test binaries run only GPU tests, and CPU binaries only CPU tests.
    if not required_gpus:
      if GPU_TEST:
        self.skipTest("Test that doesn't require GPUs.")
    elif context.num_gpus() < required_gpus:
      # TODO(priyag): Consider allowing tests in graph mode using soft
      # placement.
      self.skipTest(
          "{} GPUs are not available for this test. {} GPUs are available".
          format(required_gpus, context.num_gpus()))

    # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
    # that the user might have specified.  `kwargs` still has `mode`, which
    # the test is allowed to accept or ignore.
    requested_arguments = tf_inspect.getfullargspec(test_method).args
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_arguments + ["mode"]))
    if missing_arguments:
      raise ValueError("The test is missing arguments {} .".format(
          missing_arguments))

    # Pass only the arguments the test method actually declares.
    kwargs_to_pass = {}
    for arg in requested_arguments:
      if arg == "self":
        kwargs_to_pass[arg] = self
      else:
        kwargs_to_pass[arg] = kwargs[arg]

    if mode == "eager":
      with context.eager_mode():
        if distribution:
          # `strategy` invokes the distribution factory, so each test run
          # gets a freshly constructed strategy.
          kwargs_to_pass["distribution"] = distribution.strategy
        test_method(**kwargs_to_pass)
    elif mode == "graph":
      with ops.Graph().as_default(), context.graph_mode():
        if distribution:
          kwargs_to_pass["distribution"] = distribution.strategy
        test_method(**kwargs_to_pass)
    else:
      raise ValueError(
          "'mode' has to be either 'eager' or 'graph' and not {}".format(
              mode))

  return decorated
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their
  product can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
      or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination.  Keys in the dictionaries
    are the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    # Base case of the recursion: a single empty combination.
    return [OrderedDict()]
  by_key = lambda item: item[0]
  ordered = sorted(kwargs.items(), key=by_key)
  key, values = ordered[0]
  remainder = dict(ordered[1:])
  # A scalar option is treated as a single-possibility list.
  if not isinstance(values, list):
    values = [values]
  tails = combine(**remainder)
  results = []
  for value in values:
    for tail in tails:
      merged = list(tail.items()) + [(key, value)]
      # Keep every produced dictionary sorted by key for stable naming.
      results.append(OrderedDict(sorted(merged, key=by_key)))
  return results
def times(*combined):
  """Generate a product of N sets of combinations.

  times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])

  Args:
    *combined: N lists of dictionaries that specify combinations.

  Returns:
    a list of dictionaries for each combination.

  Raises:
    ValueError: if some of the inputs have overlapping keys.
  """
  assert combined
  head, tail = combined[0], combined[1:]
  if not tail:
    # Single input: nothing to multiply.
    return head
  rest = times(*tail)
  product = []
  for left in head:
    for right in rest:
      if set(left.keys()).intersection(set(right.keys())):
        raise ValueError("Keys need to not overlap: {} vs {}".format(
            left.keys(), right.keys()))
      product.append(OrderedDict(list(left.items()) + list(right.items())))
  return product
class NamedObject(object):
  """A class that translates an object into a good test name."""

  def __init__(self, name, obj):
    # `_name` is what shows up in generated test names; `_obj` is the
    # wrapped value everything else is forwarded to.
    self._name = name
    self._obj = obj

  def __getattr__(self, attr):
    """Forward attribute access to the wrapped object."""
    return getattr(self._obj, attr)

  def __call__(self, *args, **kwargs):
    """Forward calls to the wrapped object."""
    return self._obj(*args, **kwargs)

  def __repr__(self):
    """The given name doubles as the test-friendly representation."""
    return self._name
class NamedDistribution(object):
  """Translates DistributionStrategy and its data into a good name."""

  def __init__(self, name, distribution_fn, required_gpus=None,
               required_tpu=False):
    # Strategy construction is deferred: `distribution_fn` is only invoked
    # when the `strategy` property is read.
    self._name = name
    self._distribution_fn = distribution_fn
    self._required_gpus = required_gpus
    self._required_tpu = required_tpu

  def __repr__(self):
    """The given name doubles as the test-friendly representation."""
    return self._name

  @property
  def strategy(self):
    # A fresh strategy object is built on every access.
    return self._distribution_fn()

  @property
  def required_gpus(self):
    return self._required_gpus

  @property
  def required_tpu(self):
    return self._required_tpu
def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs):
  """Returns a zero-argument callable that constructs a TPUStrategy.

  Args:
    steps_per_run: forwarded to `TPUStrategy`.
    use_single_core: if True, build a `DeviceAssignment` that restricts the
      strategy to a single TPU core.
    **kwargs: additional keyword arguments forwarded to `TPUStrategy`.
  """

  def _create_tpu_strategy():
    # An empty string resolves the locally configured TPU — TODO confirm
    # against TPUClusterResolver's contract.
    resolver = cluster_resolver.TPUClusterResolver("")
    topology = tpu_lib.initialize_tpu_system(resolver)
    device_assignment = None
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          topology, core_assignment=device_assignment_lib.
          SINGLE_CORE_ASSIGNMENT)
    strategy = tpu_lib.TPUStrategy(resolver, steps_per_run=steps_per_run,
                                   device_assignment=device_assignment,
                                   **kwargs)
    return strategy

  return _create_tpu_strategy
# pylint: disable=g-long-lambda
# Shared NamedDistribution instances.  Strategy construction is deferred via
# lambdas/creator functions; `NamedDistribution.strategy` invokes the factory
# on every access, so each test gets a newly built strategy object.
default_strategy = NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = NamedDistribution(
    "OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = NamedDistribution(
    "OneDeviceGPU", lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
# TPU variants differ in steps_per_run and whether a single core is used.
tpu_strategy = NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2),
    required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
    "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1),
    required_tpu=True)
tpu_strategy_one_core = NamedDistribution(
    "TPUOneCore", _get_tpu_strategy_creator(
        steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = NamedDistribution(
    "TPUOneStepOneCore", _get_tpu_strategy_creator(
        steps_per_run=1, use_single_core=True),
    required_tpu=True)
# Contrib MirroredStrategy over various device sets.
mirrored_strategy_with_one_cpu = NamedDistribution(
    "Mirrored1CPU",
    lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
    "Mirrored1GPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
    required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
# Core (non-contrib) MirroredStrategy counterparts of the above.
core_mirrored_strategy_with_one_cpu = NamedDistribution(
    "CoreMirrored1CPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
    "CoreMirrored1GPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "CoreMirroredCPUAndGPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
    "CoreMirrored2GPUs",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
parameter_server_strategy_with_two_gpus = NamedDistribution(
    "ParameterServer2GPUs",
    lambda: parameter_server_strategy.ParameterServerStrategy(
        num_gpus_per_worker=2),
    required_gpus=2)
# V1 (tf.train) optimizers, wrapped in NamedObject so generated test names
# stay readable.  The lambdas defer optimizer construction to test time.
gradient_descent_optimizer_v1_fn = NamedObject(
    "GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject("AdamV1",
                                   lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

# contrib optimizer_v2 variants.
gradient_descent_optimizer_v2_fn = NamedObject(
    "GradientDescentV2",
    lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
    "AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
    "AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1.0))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]

# Keras optimizer_v2 variants.
gradient_descent_optimizer_keras_v2_fn = NamedObject(
    "GradientDescentKerasV2",
    lambda: gradient_descent_keras_v2.SGD(0.2))
adagrad_optimizer_keras_v2_fn = NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
rmsprop_optimizer_keras_v2_fn = NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# The two execution modes accepted by `combine(mode=...)`.
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  # Name the strategy list before handing it to combine() for readability.
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
  """DistributionStrategies and V2 Optimizers."""
  # Same strategy set as the V1 variant, paired with the V2 optimizers.
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v2)
|
|
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from UserList import UserList
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes, cpython_only
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up

    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # once the referent is gone the proxy must raise ReferenceError
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)   # whence=1: relative to current position
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)  # whence=2: relative to end of file
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(b"\x01\x02")
        self.f.close()
        a = array(b'b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        # only the first n slots of the buffer are overwritten
        self.assertEqual(array(b'b', [1, 2]), a[:n])

    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesUserList(self):
        # writelines() must accept arbitrary sequences, not just lists
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesError(self):
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)

    def test_none_args(self):
        # a None size argument means "no limit"
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
                         % (self.f.name, self.f.mode))
        # without a name the repr falls back to the file descriptor
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        # file is open for writing, so read() must fail
        self.assertRaises(ValueError, f.read, 10)
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        # readinto() requires a writable buffer, not a str
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        methods = ['fileno', 'isatty', 'seekable', 'readable', 'writable',
                   'read', 'readall', 'readline', 'readlines',
                   'tell', 'truncate', 'flush']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')
        self.f.close()
        self.assertTrue(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)
        # methods taking arguments must also fail on a closed file
        self.assertRaises(ValueError, self.f.readinto) # XXX should be TypeError?
        self.assertRaises(ValueError, self.f.readinto, bytearray(1))
        self.assertRaises(ValueError, self.f.seek)
        self.assertRaises(ValueError, self.f.seek, 0)
        self.assertRaises(ValueError, self.f.write)
        self.assertRaises(ValueError, self.f.write, b'')
        self.assertRaises(TypeError, self.f.writelines)
        self.assertRaises(ValueError, self.f.writelines, b'')

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied"
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")

    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(IOError) as cm:
            _FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)

    # A set of functions testing that we get expected behaviour if someone
    # has manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        @wraps(func)
        def wrapper(self):
            # forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        @wraps(func)
        def wrapper(self):
            # forcibly close the fd before invoking the problem function;
            # the wrapped call is then expected to fail with EBADF
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write('a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # helper: reopen TESTFN for reading with its fd already closed
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array(b'b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    # tests that create and remove their own files

    def testAbles(self):
        # readable/writable/seekable must reflect the open mode
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()
        finally:
            os.unlink(TESTFN)

    @unittest.skipIf(sys.platform == 'win32', 'no ttys on Windows')
    def testAblesOnTTY(self):
        try:
            f = _FileIO("/dev/tty", "a")
        except EnvironmentError:
            # When run in a cron job there just aren't any
            # ttys, so skip the test.  This also handles other
            # OS'es that don't support /dev/tty.
            self.skipTest('need /dev/tty')
        else:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            if sys.platform != "darwin" and \
               'bsd' not in sys.platform and \
               not sys.platform.startswith('sunos'):
                # Somehow /dev/tty appears seekable on some BSDs
                self.assertEqual(f.seekable(), False)
            self.assertEqual(f.isatty(), True)
            f.close()

    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with _FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testInvalidFd(self):
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989: fd values outside the C int range must be rejected
        import _testcapi
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may
            # be no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        # truncate() must not move the file position
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        # truncating past EOF extends the file
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # append mode must preserve existing content
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # none of these invalid constructions may emit warnings
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def test_surrogates(self):
        # Issue #8438: try to open a filename containing surrogates.
        # It should either fail because the file doesn't exist or the filename
        # can't be represented using the filesystem encoding, but not because
        # of a LookupError for the error handler "surrogateescape".
        filename = u'\udc80.txt'
        try:
            with _FileIO(filename):
                pass
        except (UnicodeEncodeError, IOError):
            pass
        # Spawn a separate Python process with a different "file system
        # default encoding", to exercise this further.
        env = dict(os.environ)
        env[b'LC_CTYPE'] = b'C'
        _, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
        if ('UnicodeEncodeError' not in out and not
                ( ('IOError: [Errno 2] No such file or directory' in out) or
                  ('IOError: [Errno 22] Invalid argument' in out) ) ):
            self.fail('Bad output: %r' % out)

    def testUnclosedFDOnException(self):
        # if FileIO.__init__ fails partway through, the fd it was given
        # must not be closed behind the caller's back
        class MyException(Exception): pass
        class MyFileIO(_FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd)  # should not raise OSError(EBADF)
def test_main():
    """Run both test classes and always clean up TESTFN afterwards."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)

if __name__ == '__main__':
    test_main()
|
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests continuous pulse functions."""
import numpy as np
from qiskit.test import QiskitTestCase
import qiskit.pulse.pulse_lib.continuous as continuous
class TestContinuousPulses(QiskitTestCase):
"""Test continuous pulses."""
def test_constant(self):
    """Test constant pulse."""
    amp = 0.5j
    samples = 50
    times = np.linspace(0, 10, samples)
    constant_arr = continuous.constant(times, amp=amp)
    # complex output, equal to `amp` at every sample
    self.assertEqual(constant_arr.dtype, np.complex_)
    np.testing.assert_equal(constant_arr, amp)
    self.assertEqual(len(constant_arr), samples)
def test_zero(self):
    """Test zero pulse."""
    times = np.linspace(0, 10, 50)
    zero_arr = continuous.zero(times)
    # complex output, identically zero at every sample
    self.assertEqual(zero_arr.dtype, np.complex_)
    np.testing.assert_equal(zero_arr, 0.0)
    self.assertEqual(len(zero_arr), 50)
def test_square(self):
    """Test square wave."""
    amp = 0.5
    period = 5
    samples = 100
    times = np.linspace(0, 10, samples)
    square_arr = continuous.square(times, amp=amp, period=period)
    # with new phase
    square_arr_phased = continuous.square(times, amp=amp, period=period, phase=np.pi/2)
    self.assertEqual(square_arr.dtype, np.complex_)
    self.assertAlmostEqual(square_arr[0], amp)
    # test constant within a half-period
    self.assertAlmostEqual(square_arr[1]-square_arr[0], 0.0)
    self.assertAlmostEqual(square_arr[25], -amp)
    # a pi/2 phase shift flips the initial sample
    self.assertAlmostEqual(square_arr_phased[0], -amp)
    # Assert bounded between -amp and amp
    self.assertTrue(np.all((-amp <= square_arr) & (square_arr <= amp)))
    self.assertEqual(len(square_arr), samples)
def test_sawtooth(self):
    """Test sawtooth wave."""
    amp = 0.5
    period = 5
    samples = 101
    times, dt = np.linspace(0, 10, samples, retstep=True)
    sawtooth_arr = continuous.sawtooth(times, amp=amp, period=period)
    # with new phase
    sawtooth_arr_phased = continuous.sawtooth(times, amp=amp,
                                              period=period, phase=np.pi/2)
    self.assertEqual(sawtooth_arr.dtype, np.complex_)
    self.assertAlmostEqual(sawtooth_arr[0], 0.0)
    # test slope: the ramp rises by 2*amp over one period
    self.assertAlmostEqual((sawtooth_arr[1]-sawtooth_arr[0])/dt, 2*amp/period)
    self.assertAlmostEqual(sawtooth_arr[24], 0.48)
    self.assertAlmostEqual(sawtooth_arr[50], 0.)
    self.assertAlmostEqual(sawtooth_arr[75], -amp)
    self.assertAlmostEqual(sawtooth_arr_phased[0], -amp)
    # Assert bounded between -amp and amp
    self.assertTrue(np.all((-amp <= sawtooth_arr) & (sawtooth_arr <= amp)))
    self.assertEqual(len(sawtooth_arr), samples)
def test_triangle(self):
    """Test triangle wave."""
    amp = 0.5
    period = 5
    samples = 101
    times, dt = np.linspace(0, 10, samples, retstep=True)
    triangle_arr = continuous.triangle(times, amp=amp, period=period)
    # with new phase
    triangle_arr_phased = continuous.triangle(times, amp=amp,
                                              period=period, phase=np.pi/2)
    self.assertEqual(triangle_arr.dtype, np.complex_)
    self.assertAlmostEqual(triangle_arr[0], 0.0)
    # test slope: the rising edge covers 4*amp per period
    self.assertAlmostEqual((triangle_arr[1]-triangle_arr[0])/dt, 4*amp/period)
    # symmetric around the peak
    self.assertAlmostEqual(triangle_arr[12], 0.48)
    self.assertAlmostEqual(triangle_arr[13], 0.48)
    self.assertAlmostEqual(triangle_arr[50], 0.)
    self.assertAlmostEqual(triangle_arr_phased[0], amp)
    # Assert bounded between -amp and amp
    self.assertTrue(np.all((-amp <= triangle_arr) & (triangle_arr <= amp)))
    self.assertEqual(len(triangle_arr), samples)
def test_cos(self):
"""Test cosine wave."""
amp = 0.5
period = 5
freq = 1/period
samples = 101
times = np.linspace(0, 10, samples)
cos_arr = continuous.cos(times, amp=amp, freq=freq)
# with new phase
cos_arr_phased = continuous.cos(times, amp=amp,
freq=freq, phase=np.pi/2)
self.assertEqual(cos_arr.dtype, np.complex_)
# Assert starts at 1
self.assertAlmostEqual(cos_arr[0], amp)
self.assertAlmostEqual(cos_arr[6], 0.3644, places=2)
self.assertAlmostEqual(cos_arr[25], -amp)
self.assertAlmostEqual(cos_arr[50], amp)
self.assertAlmostEqual(cos_arr_phased[0], 0.0)
# Assert bounded between -amp and amp
self.assertTrue(np.all((-amp <= cos_arr) & (cos_arr <= amp)))
self.assertEqual(len(cos_arr), samples)
def test_sin(self):
"""Test sine wave."""
amp = 0.5
period = 5
freq = 1/period
samples = 101
times = np.linspace(0, 10, samples)
sin_arr = continuous.sin(times, amp=amp, freq=freq)
# with new phase
sin_arr_phased = continuous.sin(times, amp=0.5,
freq=1/5, phase=np.pi/2)
self.assertEqual(sin_arr.dtype, np.complex_)
# Assert starts at 1
self.assertAlmostEqual(sin_arr[0], 0.0)
self.assertAlmostEqual(sin_arr[6], 0.3427, places=2)
self.assertAlmostEqual(sin_arr[25], 0.0)
self.assertAlmostEqual(sin_arr[13], amp, places=2)
self.assertAlmostEqual(sin_arr_phased[0], amp)
# Assert bounded between -amp and amp
self.assertTrue(np.all((-amp <= sin_arr) & (sin_arr <= amp)))
self.assertEqual(len(sin_arr), samples)
def test_gaussian(self):
"""Test gaussian pulse."""
amp = 0.5
center = 10
sigma = 2
times, dt = np.linspace(0, 20, 1001, retstep=True)
gaussian_arr = continuous.gaussian(times, amp, center, sigma)
gaussian_arr_zeroed = continuous.gaussian(np.array([-1, 10]), amp, center,
sigma, zeroed_width=2*(center+1),
rescale_amp=True)
self.assertEqual(gaussian_arr.dtype, np.complex_)
center_time = np.argmax(gaussian_arr)
self.assertAlmostEqual(times[center_time], center)
self.assertAlmostEqual(gaussian_arr[center_time], amp)
self.assertAlmostEqual(gaussian_arr_zeroed[0], 0., places=6)
self.assertAlmostEqual(gaussian_arr_zeroed[1], amp)
self.assertAlmostEqual(np.sum(gaussian_arr*dt), amp*np.sqrt(2*np.pi*sigma**2), places=3)
def test_gaussian_deriv(self):
"""Test gaussian derivative pulse."""
amp = 0.5
center = 10
sigma = 2
times, dt = np.linspace(0, 20, 1000, retstep=True)
deriv_prefactor = -sigma**2/(times-center)
gaussian_deriv_arr = continuous.gaussian_deriv(times, amp, center, sigma)
gaussian_arr = gaussian_deriv_arr*deriv_prefactor
self.assertEqual(gaussian_deriv_arr.dtype, np.complex_)
self.assertAlmostEqual(continuous.gaussian_deriv(np.array([0]), amp, center, sigma)[0],
0, places=5)
self.assertAlmostEqual(np.sum(gaussian_arr*dt), amp*np.sqrt(2*np.pi*sigma**2), places=3)
def test_sech(self):
"""Test sech pulse."""
amp = 0.5
center = 20
sigma = 2
times, dt = np.linspace(0, 40, 1001, retstep=True)
sech_arr = continuous.sech(times, amp, center, sigma)
sech_arr_zeroed = continuous.sech(np.array([-1, 20]), amp, center,
sigma)
self.assertEqual(sech_arr.dtype, np.complex_)
center_time = np.argmax(sech_arr)
self.assertAlmostEqual(times[center_time], center)
self.assertAlmostEqual(sech_arr[center_time], amp)
self.assertAlmostEqual(sech_arr_zeroed[0], 0., places=2)
self.assertAlmostEqual(sech_arr_zeroed[1], amp)
self.assertAlmostEqual(np.sum(sech_arr*dt), amp*np.pi*sigma, places=3)
def test_sech_deriv(self):
"""Test sech derivative pulse."""
amp = 0.5
center = 20
sigma = 2
times = np.linspace(0, 40, 1000)
sech_deriv_arr = continuous.sech_deriv(times, amp, center, sigma)
self.assertEqual(sech_deriv_arr.dtype, np.complex_)
self.assertAlmostEqual(continuous.sech_deriv(np.array([0]), amp, center, sigma)[0],
0, places=3)
def test_gaussian_square(self):
"""Test gaussian square pulse."""
amp = 0.5
center = 10
width = 2
sigma = 0.1
times, dt = np.linspace(0, 20, 2001, retstep=True)
gaussian_square_arr = continuous.gaussian_square(times, amp, center, width, sigma)
self.assertEqual(gaussian_square_arr.dtype, np.complex_)
self.assertEqual(gaussian_square_arr[1000], amp)
# test half gaussian rise/fall
self.assertAlmostEqual(np.sum(gaussian_square_arr[:900]*dt)*2,
amp*np.sqrt(2*np.pi*sigma**2), places=2)
self.assertAlmostEqual(np.sum(gaussian_square_arr[1100:]*dt)*2,
amp*np.sqrt(2*np.pi*sigma**2), places=2)
# test for continuity at gaussian/square boundaries
gauss_rise_end_time = center-width/2
gauss_fall_start_time = center+width/2
epsilon = 0.01
rise_times, dt_rise = np.linspace(gauss_rise_end_time-epsilon,
gauss_rise_end_time+epsilon, 1001, retstep=True)
fall_times, dt_fall = np.linspace(gauss_fall_start_time-epsilon,
gauss_fall_start_time+epsilon, 1001, retstep=True)
gaussian_square_rise_arr = continuous.gaussian_square(rise_times, amp, center, width, sigma)
gaussian_square_fall_arr = continuous.gaussian_square(fall_times, amp, center, width, sigma)
# should be locally approximated by amp*dt^2/(2*sigma^2)
self.assertAlmostEqual(amp*dt_rise**2/(2*sigma**2),
gaussian_square_rise_arr[500]-gaussian_square_rise_arr[499])
self.assertAlmostEqual(amp*dt_fall**2/(2*sigma**2),
gaussian_square_fall_arr[501]-gaussian_square_fall_arr[500])
def test_drag(self):
"""Test drag pulse."""
amp = 0.5
center = 10
sigma = 0.1
beta = 0
times = np.linspace(0, 20, 2001)
# test that we recover gaussian for beta=0
gaussian_arr = continuous.gaussian(times, amp, center, sigma,
zeroed_width=2*(center+1), rescale_amp=True)
drag_arr = continuous.drag(times, amp, center, sigma, beta=beta,
zeroed_width=2*(center+1), rescale_amp=True)
self.assertEqual(drag_arr.dtype, np.complex_)
np.testing.assert_equal(drag_arr, gaussian_arr)
|
|
# -*- coding: utf-8 -*-
"""
@author: Federico Cerchiari <federicocerchiari@gmail.com>
"""
import unittest
from copy import copy
from tempy.widgets import TempyTable, TempyList, TempyPage
from tempy.tags import (Table, Tr, Td, Dl, Dt, Dd, Ul, Ol,
Li, Html, Head, Body, Thead, Tfoot)
from tempy.exceptions import WidgetDataError, WidgetError
class TestTempyTable(unittest.TestCase):
    """Tests for the TempyTable widget (a Table subclass)."""
    def setUp(self):
        """Build a 15x10 grid of integers used as the table fixture."""
        self.data = [[x * y for x in range(1, 11)] for y in range(15)]
    def verify_content(self, table):
        """Spot-check a few cells of a table populated from self.data."""
        # Check table content
        self.assertTrue(0 in table.body[0][4])
        self.assertTrue(1 in table.body[1][0])
        self.assertTrue(2 in table.body[1][1])
        self.assertTrue(16 in table.body[8][1])
    def test_empty_creation(self):
        """An empty TempyTable has a body but no child rows."""
        table = TempyTable()
        self.assertFalse(table.body.childs)
        self.assertTrue(table.body)
        # Future non-regression, TempyTable should remain a Table Tag
        self.assertIsInstance(table, Table)
    def test_skeleton_creation(self):
        """rows/cols kwargs create an empty skeleton of the right size."""
        table = TempyTable(rows=15, cols=10)
        self.assertTrue(table.body)
        # Check table sizes
        self.assertEqual(len(table.body), 15)
        self.assertEqual(len(table.body[0]), 10)
    def test_caption(self):
        """The caption kwarg ends up inside the table's caption tag."""
        table = TempyTable(caption='Test Table')
        self.assertTrue('Test Table' in table.caption)
    def test_init_from_data(self):
        """The data kwarg fills the body on construction."""
        table = TempyTable(data=self.data)
        self.assertEqual(len(table.body), 15)
        self.assertEqual(len(table.body[0]), 10)
        self.verify_content(table)
    def test_init_from_data_full(self):
        """head/foot flags move the first/last rows into Thead/Tfoot."""
        table = TempyTable(data=self.data, head=True, foot=True)
        self.assertEqual(len(table.body), 13)
        self.assertIsInstance(table.header, Thead)
        self.assertIsInstance(table.footer, Tfoot)
    def test_populate(self):
        """populate() fills, grows and (optionally) normalizes the table."""
        table = TempyTable().populate(self.data)
        # Check table sizes
        self.assertEqual(len(table.body), 15)
        self.assertEqual(len(table.body[0]), 10)
        # test add row
        new_data = copy(self.data)
        new_data.append(list(range(1, 11)))
        table.populate(new_data)
        self.assertEqual(len(table.body), 16)
        # test resize
        new_data.append(list(range(1, 12)))
        table.populate(new_data)
        self.assertEqual(len(table.body), 17)
        self.assertEqual(len(table.body[0]), 11)
        self.assertEqual(len(table.body[1]), 11)
        self.assertEqual(len(table.body[-1]), 11)
        # test non normalize:
        new_data[3].append('test2')
        table.populate(new_data, normalize=False)
        self.assertTrue('test2' in table.body[3][10])
        self.assertEqual(len(table.body[1]), 10)
        self.assertEqual(len(table.body[3]), 11)
        with self.assertRaises(IndexError):
            table.body[6][11]
        with self.assertRaises(WidgetDataError):
            table.populate(None)
    def test_clear(self):
        """clear() empties the table body."""
        table = TempyTable(data=self.data)
        table.clear()
        self.assertTrue(table.body.is_empty)
    def test_pop_row(self):
        """pop_row() removes last/indexed rows, as raw data or as tags."""
        table = TempyTable(data=self.data)
        # test pop last
        r = table.pop_row()
        self.assertEqual(r, self.data[-1])
        # test pop get tags
        r = table.pop_row(tags=True)
        test_row = Tr()(Td()(c) for c in self.data[-2])
        for cell, t_cell in zip(r, test_row):
            self.assertEqual(cell, t_cell)
        # test pop by index
        r = table.pop_row(0)
        self.assertEqual(r, self.data[0])
    def test_pop_cell(self):
        """pop_cell() removes last/indexed cells, as raw data or as tags."""
        table = TempyTable(data=self.data)
        # test pop last
        r = table.pop_cell()
        self.assertEqual(r, self.data[-1][-1])
        # test pop get tags
        r = table.pop_cell(tags=True)
        test_cell = Td()(self.data[-2][-1])
        self.assertEqual(r, test_cell)
        # test pop by index row
        r = table.pop_cell(0)
        self.assertEqual(r, self.data[0][-1])
        # test pop by index row and col
        r = table.pop_cell(0, 0)
        self.assertEqual(r, self.data[0][0])
    def test_col_class(self):
        """col_class() adds a css class to the cells of a column."""
        table = TempyTable(data=self.data)
        table.col_class('class_example')
        self.assertEqual({'class_example'}, table.childs[0].childs[0].childs[0].attrs['klass'])
        # first column of each row
        table.col_class('class_example_new', 0)
        self.assertEqual({'class_example_new', 'class_example'}, table.childs[0].childs[0].childs[0].attrs['klass'])
    def test_row_class(self):
        """row_class() adds a css class to table rows."""
        table = TempyTable(data=self.data)
        table.row_class('class_example')
        self.assertEqual({'class_example'}, table.childs[0].childs[0].attrs['klass'])
        # first row for each column
        table.row_class('class_example_new', 0)
        self.assertEqual({'class_example_new', 'class_example'}, table.childs[0].childs[0].attrs['klass'])
    def test_map_col(self):
        """map_col() applies a function to the cells of column(s)."""
        table = TempyTable(data=self.data)
        table.map_col(lambda x: x - 1)
        self.assertEqual(-1, table.childs[0].childs[0].childs[0].childs[0])
        # applies function x - 2 for second column
        table.map_col(lambda x: x - 2, 1)
        self.assertEqual(-3, table.childs[0].childs[0].childs[1].childs[0])
    def test_map_row(self):
        """map_row() applies a function to the cells of row(s)."""
        table = TempyTable(data=self.data)
        table.map_row(lambda x: x - 1)
        self.assertEqual(-1, table.childs[0].childs[0].childs[0].childs[0])
        # applies function x - 2 for first row
        table.map_row(lambda x: x - 2, 0)
        self.assertEqual(-3, table.childs[0].childs[0].childs[1].childs[0])
    def test_make_scope(self):
        """make_scope() sets the scope attribute on the given cells."""
        table = TempyTable(data=self.data)
        table.make_scope(col_scope_list=[(0, 0)])
        self.assertEqual('col', table.childs[0].childs[0].childs[0].attrs['scope'])
        table.make_scope(row_scope_list=[(0, 0)])
        self.assertEqual('row', table.childs[0].childs[0].childs[0].attrs['scope'])
    def test_is_row_within_bounds(self):
        """Row bound check passes in range and raises out of range."""
        table = TempyTable(data=self.data)
        self.assertTrue(table.is_row_within_bounds(0))
        with self.assertRaises(WidgetDataError):
            table.is_row_within_bounds(20)
    def test_is_col_within_bounds(self):
        """Column bound check passes in range and raises out of range."""
        table = TempyTable(data=self.data)
        self.assertTrue(table.is_col_within_bounds(0, table.childs[0].childs[0]))
        with self.assertRaises(WidgetDataError):
            table.is_col_within_bounds(20, table.childs[0].childs[0])
class TestTempyList(unittest.TestCase):
    """Tests for the TempyList widget (Ul/Ol/Dl list factory)."""
    def test_create_empty(self):
        """The list type can be given as a class, a string, or defaulted."""
        li = TempyList()
        self.assertIsInstance(li, Ul)
        self.assertEqual(len(li), 0)
        li = TempyList(Ol)
        self.assertIsInstance(li, Ol)
        self.assertEqual(len(li), 0)
        li = TempyList('Ol')
        self.assertIsInstance(li, Ol)
        self.assertEqual(len(li), 0)
        with self.assertRaises(WidgetError):
            li = TempyList('Wrong')
    def test_populate_empty(self):
        """populate() fills a Ul from a list and a Dl from a dict."""
        li = TempyList()
        li.populate([1, 2, 3])
        self.assertIsInstance(li, Ul)
        self.assertEqual(len(li), 3)
        self.assertIsInstance(li[0], Li)
        with self.assertRaises(WidgetDataError):
            li.populate('wrong type')
        li = TempyList(typ=Dl)
        li.populate({1: 'one', 2: 'two', 34: ['three', 'four']})
        self.assertIsInstance(li, Dl)
        # 3 Dt terms plus 4 Dd definitions (the list value yields two Dd)
        self.assertEqual(len(li), 7)
        self.assertIsInstance(li[0], Dt)
        self.assertIsInstance(li[1], Dd)
        self.assertIsInstance(li[6], Dd)
        with self.assertRaises(WidgetDataError):
            li.populate('wrong type')
    def test_create_full(self):
        """The struct kwarg populates at construction; '_typ' picks the tag."""
        li = TempyList(struct=[1, 2, 3])
        self.assertIsInstance(li, Ul)
        self.assertEqual(len(li), 3)
        self.assertIsInstance(li[0], Li)
        self.assertTrue(1 in li[0])
        li = TempyList(struct={1, 2, 3})
        self.assertIsInstance(li, Ul)
        self.assertEqual(len(li), 3)
        self.assertIsInstance(li[0], Li)
        li = TempyList(struct={1: None, 2: None, 3: None, '_typ': Ol})
        self.assertIsInstance(li, Ol)
        self.assertEqual(len(li), 3)
        self.assertIsInstance(li[0], Li)
    def test_populate_recursive(self):
        """Nested lists/dicts become nested sub-lists."""
        li = TempyList()
        li.populate({1: None, 2: ['a', 'b', 'c'], 3: {'test': [1, 2, 3]}})
        self.assertIsInstance(li, Ul)
        self.assertEqual(len(li), 3)
        self.assertIsInstance(li[0], Li)
        self.assertIsInstance(li[1][1], Ul)
        self.assertEqual(len(li[1][1]), 3)
        self.assertIsInstance(li[1][1][0], Li)
class TestTempyPage(unittest.TestCase):
    """Tests for the TempyPage widget (a complete Html page skeleton)."""
    def test_create(self):
        """A new page has head/body and a UTF-8 charset meta by default."""
        page = TempyPage()
        self.assertIsInstance(page, Html)
        self.assertEqual(len(page), 2)
        self.assertIsInstance(page.head, Head)
        self.assertIsInstance(page.body, Body)
        self.assertEqual(len(page.head.title), 1)
        self.assertEqual(page.head.charset.attrs['charset'], 'UTF-8')
    def test_charset(self):
        """set_charset() replaces the charset meta attribute."""
        page = TempyPage()
        self.assertEqual(page.head.charset.attrs['charset'], 'UTF-8')
        page.set_charset('text/html;charset=ISO-8859-1')
        self.assertEqual(page.head.charset.attrs['charset'], 'text/html;charset=ISO-8859-1')
    def test_description(self):
        """set_description() fills the description meta content."""
        page = TempyPage()
        page.set_description('test page')
        self.assertEqual(page.head.description.attrs['content'], 'test page')
    def test_keywords(self):
        """set_keywords() joins the keyword list with ', '."""
        page = TempyPage()
        kw = ['test', 'foo', 'bar']
        page.set_keywords(kw)
        self.assertEqual(page.head.keywords.attrs['content'], ', '.join(kw))
    def test_doctype(self):
        """set_doctype() switches the rendered DOCTYPE declaration."""
        page = TempyPage()
        page.set_doctype('html_strict')
        charset_string = 'HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"'
        self.assertTrue(charset_string in page.render())
    def test_title(self):
        """set_title() appends the title text to the head's title tag."""
        page = TempyPage()
        page.set_title('test title')
        self.assertEqual(page.head.title.childs[-1], 'test title')
|
|
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics import pairwise_distances_argmin
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..utils.extmath import row_norms
from ..utils import deprecated
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..exceptions import ConvergenceWarning
from . import AgglomerativeClustering
from .._config import config_context
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold=threshold, branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold=threshold, branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    if node.is_leaf:
        # Splice the two new nodes into the doubly linked list of leaves
        # in place of the node being split.
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    # Pairwise squared distances between the node's centroids.
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    # (i, j) indices of the two farthest-apart subclusters, used as seeds.
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # Tuple-wrapped index selects whole rows i and j of `dist`: the
    # distances of every subcluster to each of the two seeds.
    node1_dist, node2_dist = dist[(farthest_idx,)]
    node1_closer = node1_dist < node2_dist
    # Assign each subcluster to whichever seed it is strictly closer to
    # (ties go to node2).
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode:
    """Each node in a CFTree is called a CFNode.
    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.
    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.
    Attributes
    ----------
    subclusters_ : list
        List of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        Previous leaf node. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Next leaf node. Useful only if is_leaf is True; needed to
        retrieve the final subclusters.
    init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .
    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
    centroids_ : ndarray of shape (branching_factor + 1, n_features)
        View of ``init_centroids_``.
    squared_norm_ : ndarray of shape (branching_factor + 1,)
        View of ``init_sq_norm_``.
    """
    def __init__(self, *, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout. The extra slot (branching_factor + 1)
        # holds the overflowing subcluster until the node is split.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        # Add a subcluster and refresh the views over the occupied slots.
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed and must itself be split
        by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # ||c_i||^2 - 2<c_i, x> ranks candidates like the squared
        # distance to x, since ||x||^2 is constant across candidates.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                # Splitting the child added one subcluster here; report
                # overflow so the caller splits this node too.
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster:
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray of shape (n_features,), default=None
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray of shape (branching_factor + 1, n_features)
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray of shape (branching_factor + 1,)
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, *, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.centroid_ = self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_sq_norm = np.dot(new_centroid, new_centroid)
# The squared radius of the cluster is defined:
# r^2 = sum_i ||x_i - c||^2 / n
# with x_i the n points assigned to the cluster and c its centroid:
# c = sum_i x_i / n
# This can be expanded to:
# r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
# and therefore simplifies to:
# r^2 = sum_i ||x_i||^2 / n - ||c||^2
sq_radius = new_ss / new_n - new_sq_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_sq_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
# Because of numerical issues, this could become negative
sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
return sqrt(max(0, sq_radius))
class Birch(ClusterMixin, TransformerMixin, BaseEstimator):
"""Implements the BIRCH clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
.. versionadded:: 0.16
Parameters
----------
threshold : float, default=0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default=50
Maximum number of CF subclusters in each node. If a new samples enters
such that the number of subclusters exceed the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default=3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- :mod:`sklearn.cluster` Estimator : If a model is provided, the model
is fit treating the subclusters as new samples and the initial data
is mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default=True
Whether or not to compute labels for each fit.
copy : bool, default=True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray of shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
See Also
--------
MiniBatchKMeans : Alternative implementation that does incremental updates
of the centers' positions using mini-batches.
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(n_clusters=None)
>>> brc.fit(X)
Birch(n_clusters=None)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
"""
    @_deprecate_positional_args
    def __init__(self, *, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        # Only store hyper-parameters here; scikit-learn convention is
        # that validation happens in fit(), not in __init__.
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "fit_ is deprecated in 1.0 and will be removed in 1.2"
    )
    @property
    def fit_(self):
        # Backed by a flag set in fit(); kept only for the deprecation cycle.
        return self._deprecated_fit
    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "partial_fit_ is deprecated in 1.0 and will be removed in 1.2"
    )
    @property
    def partial_fit_(self):
        # Backed by a flag set in partial_fit(); kept only for the
        # deprecation cycle.
        return self._deprecated_partial_fit
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self
            Fitted estimator.
        """
        # TODO: Remove deprecated flags in 1.2
        # Record which public entry point was used, for the deprecated
        # fit_ / partial_fit_ properties.
        self._deprecated_fit, self._deprecated_partial_fit = True, False
        return self._fit(X, partial=False)
    def _fit(self, X, partial):
        # Shared implementation behind fit() and partial_fit(); `partial`
        # keeps an existing tree (when one exists) instead of rebuilding.
        has_root = getattr(self, 'root_', None)
        first_call = not (partial and has_root)
        X = self._validate_data(X, accept_sparse='csr', copy=self.copy,
                                reset=first_call)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        if first_call:
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold=threshold,
                                 branching_factor=branching_factor,
                                 is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold=threshold,
                                       branching_factor=branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it and grow the tree by one
                # level, with a fresh non-leaf root above the two halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold=threshold,
                                     branching_factor=branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        # Gather all leaf centroids, then run the optional global
        # clustering step over them.
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), \
            default=None
            Input data. If X is not provided, only the global clustering
            step is done.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self
            Fitted estimator.
        """
        # TODO: Remove deprecated flags in 1.2
        # Record which public entry point was used, for the deprecated
        # fit_ / partial_fit_ properties.
        self._deprecated_partial_fit, self._deprecated_fit = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            return self._fit(X, partial=True)
def _check_fit(self, X):
check_is_fitted(self)
if (hasattr(self, 'subcluster_centers_') and
X.shape[1] != self.subcluster_centers_.shape[1]):
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
    """
    Predict data using the ``centroids_`` of subclusters.

    Avoid computation of the row norms of X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels : ndarray of shape(n_samples,)
        Labelled data.
    """
    check_is_fitted(self)
    X = self._validate_data(X, accept_sparse='csr', reset=False)
    # Reuse the precomputed squared norms of the subcluster centers so
    # pairwise_distances_argmin does not have to recompute them.
    metric_kwargs = {'Y_norm_squared': self._subcluster_norms}
    with config_context(assume_finite=True):
        nearest = pairwise_distances_argmin(
            X, self.subcluster_centers_, metric_kwargs=metric_kwargs)
    return self.subcluster_labels_[nearest]
def transform(self, X):
    """
    Transform X into subcluster centroids dimension.

    Each dimension represents the distance from the sample point to each
    cluster centroid.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input data.

    Returns
    -------
    X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
        Transformed data.
    """
    check_is_fitted(self)
    # Bug fix: the original discarded the return value of _validate_data,
    # so the validated/CSR-converted array never reached
    # euclidean_distances and validation work was duplicated downstream.
    X = self._validate_data(X, accept_sparse='csr', reset=False)
    with config_context(assume_finite=True):
        return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
    """
    Global clustering for the subclusters obtained after fitting.

    Parameters
    ----------
    X : {array-like, sparse matrix}, default=None
        Original data; only used to compute ``labels_`` when
        ``compute_labels`` is enabled and X is provided.
    """
    # ``n_clusters`` may be an int, a clusterer instance, or None.
    clusterer = self.n_clusters
    centroids = self.subcluster_centers_
    compute_labels = (X is not None) and self.compute_labels
    # Preprocessing for the global clustering.
    not_enough_centroids = False
    if isinstance(clusterer, numbers.Integral):
        clusterer = AgglomerativeClustering(
            n_clusters=self.n_clusters)
        # There is no need to perform the global clustering step.
        if len(centroids) < self.n_clusters:
            not_enough_centroids = True
    elif (clusterer is not None and not
          hasattr(clusterer, 'fit_predict')):
        raise ValueError("n_clusters should be an instance of "
                         "ClusterMixin or an int")
    # To use in predict to avoid recalculation.
    self._subcluster_norms = row_norms(
        self.subcluster_centers_, squared=True)
    if clusterer is None or not_enough_centroids:
        # Fall back to one label per subcluster.
        self.subcluster_labels_ = np.arange(len(centroids))
        if not_enough_centroids:
            warnings.warn(
                "Number of subclusters found (%d) by BIRCH is less "
                "than (%d). Decrease the threshold."
                % (len(centroids), self.n_clusters), ConvergenceWarning)
    else:
        # The global clustering step that clusters the subclusters of
        # the leaves. It assumes the centroids of the subclusters as
        # samples and finds the final centroids.
        self.subcluster_labels_ = clusterer.fit_predict(
            self.subcluster_centers_)
    if compute_labels:
        self.labels_ = self.predict(X)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG and BaseOperator"""
import datetime
import enum
import logging
from dataclasses import dataclass
from inspect import Parameter, signature
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Union
import cattr
import pendulum
from dateutil import relativedelta
try:
from functools import cache
except ImportError:
from functools import lru_cache
cache = lru_cache(maxsize=None)
from pendulum.tz.timezone import Timezone
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializationError
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.connection import Connection
from airflow.models.dag import DAG
from airflow.providers_manager import ProvidersManager
from airflow.serialization.enums import DagAttributeTypes as DAT, Encoding
from airflow.serialization.helpers import serialize_template_field
from airflow.serialization.json_schema import Validator, load_dag_schema
from airflow.settings import json
from airflow.utils.code_utils import get_python_source
from airflow.utils.module_loading import import_string
from airflow.utils.task_group import TaskGroup
try:
# isort: off
from kubernetes.client import models as k8s
from airflow.kubernetes.pod_generator import PodGenerator
# isort: on
HAS_KUBERNETES = True
except ImportError:
HAS_KUBERNETES = False
if TYPE_CHECKING:
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
log = logging.getLogger(__name__)

# Built-in operator extra link class paths known to the serializer.
# Provider-supplied links are merged in lazily by get_operator_extra_links().
_OPERATOR_EXTRA_LINKS: Set[str] = {
    "airflow.operators.trigger_dagrun.TriggerDagRunLink",
    "airflow.sensors.external_task.ExternalTaskSensorLink",
    # Deprecated names, so that existing serialized dags load straight away.
    "airflow.operators.dagrun_operator.TriggerDagRunLink",
    "airflow.sensors.external_task_sensor.ExternalTaskSensorLink",
}
@cache
def get_operator_extra_links():
    """
    Returns operator extra links - both the ones that are built in and the ones that come from
    the providers.

    :return: set of extra links
    """
    # NOTE(review): this mutates the module-level _OPERATOR_EXTRA_LINKS set in
    # place; @cache only guarantees the (potentially expensive) provider
    # lookup happens once per process.
    _OPERATOR_EXTRA_LINKS.update(ProvidersManager().extra_links_class_names)
    return _OPERATOR_EXTRA_LINKS
class BaseSerialization:
    """BaseSerialization provides utils for serialization."""

    # JSON primitive types.
    _primitive_types = (int, bool, float, str)

    # Time types.
    # datetime.date and datetime.time are converted to strings.
    _datetime_types = (datetime.datetime,)

    # Object types that are always excluded in serialization.
    _excluded_types = (logging.Logger, Connection, type)

    # JSON schema used by validate_schema(); concrete subclasses set this.
    _json_schema: Optional[Validator] = None

    # Should the extra operator link be loaded via plugins when
    # de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
    # are not loaded to not run User code in Scheduler.
    _load_operator_extra_links = True

    # Constructor-parameter name -> hard-coded default; values matching their
    # default are skipped during serialization (see _value_is_hardcoded_default).
    _CONSTRUCTOR_PARAMS: Dict[str, Parameter] = {}

    SERIALIZER_VERSION = 1

    @classmethod
    def to_json(cls, var: Union[DAG, BaseOperator, dict, list, set, tuple]) -> str:
        """Stringifies DAGs and operators contained by var and returns a JSON string of var."""
        return json.dumps(cls.to_dict(var), ensure_ascii=True)

    @classmethod
    def to_dict(cls, var: Union[DAG, BaseOperator, dict, list, set, tuple]) -> dict:
        """Stringifies DAGs and operators contained by var and returns a dict of var."""
        # Don't call on this class directly - only SerializedDAG or
        # SerializedBaseOperator should be used as the "entrypoint"
        raise NotImplementedError()

    @classmethod
    def from_json(cls, serialized_obj: str) -> Union['BaseSerialization', dict, list, set, tuple]:
        """Deserializes json_str and reconstructs all DAGs and operators it contains."""
        return cls.from_dict(json.loads(serialized_obj))

    @classmethod
    def from_dict(
        cls, serialized_obj: Dict[Encoding, Any]
    ) -> Union['BaseSerialization', dict, list, set, tuple]:
        """Deserializes a python dict stored with type decorators and
        reconstructs all DAGs and operators it contains.
        """
        return cls._deserialize(serialized_obj)

    @classmethod
    def validate_schema(cls, serialized_obj: Union[str, dict]) -> None:
        """Validate serialized_obj satisfies JSON schema."""
        if cls._json_schema is None:
            raise AirflowException(f'JSON schema of {cls.__name__:s} is not set.')
        if isinstance(serialized_obj, dict):
            cls._json_schema.validate(serialized_obj)
        elif isinstance(serialized_obj, str):
            cls._json_schema.validate(json.loads(serialized_obj))
        else:
            raise TypeError("Invalid type: Only dict and str are supported.")

    @staticmethod
    def _encode(x: Any, type_: Any) -> Dict[Encoding, Any]:
        """Encode data by a JSON dict."""
        return {Encoding.VAR: x, Encoding.TYPE: type_}

    @classmethod
    def _is_primitive(cls, var: Any) -> bool:
        """Primitive types."""
        return var is None or isinstance(var, cls._primitive_types)

    @classmethod
    def _is_excluded(cls, var: Any, attrname: str, instance: Any) -> bool:
        """Types excluded from serialization."""
        if var is None:
            if not cls._is_constructor_param(attrname, instance):
                # Any instance attribute, that is not a constructor argument, we exclude None as the default
                return True
            return cls._value_is_hardcoded_default(attrname, var, instance)
        return isinstance(var, cls._excluded_types) or cls._value_is_hardcoded_default(
            attrname, var, instance
        )

    @classmethod
    def serialize_to_json(
        cls, object_to_serialize: Union[BaseOperator, DAG], decorated_fields: Set
    ) -> Dict[str, Any]:
        """Serializes an object to json"""
        serialized_object: Dict[str, Any] = {}
        keys_to_serialize = object_to_serialize.get_serialized_fields()
        for key in keys_to_serialize:
            # None is ignored in serialized form and is added back in deserialization.
            value = getattr(object_to_serialize, key, None)
            if cls._is_excluded(value, key, object_to_serialize):
                continue
            if key in decorated_fields:
                serialized_object[key] = cls._serialize(value)
            else:
                # Non-decorated fields are stored "unwrapped": drop the
                # {__type, __var} envelope and keep only the value.
                value = cls._serialize(value)
                if isinstance(value, dict) and "__type" in value:
                    value = value["__var"]
                serialized_object[key] = value
        return serialized_object

    # pylint: disable=too-many-return-statements
    @classmethod
    def _serialize(cls, var: Any) -> Any:  # Unfortunately there is no support for recursive types in mypy
        """Helper function of depth first search for serialization.

        The serialization protocol is:
        (1) keeping JSON supported types: primitives, dict, list;
        (2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization
        step decode VAR according to TYPE;
        (3) Operator has a special field CLASS to record the original class
        name for displaying in UI.
        """
        if cls._is_primitive(var):
            # enum.IntEnum is an int instance, it causes json dumps error so we use its value.
            if isinstance(var, enum.Enum):
                return var.value
            return var
        elif isinstance(var, dict):
            return cls._encode({str(k): cls._serialize(v) for k, v in var.items()}, type_=DAT.DICT)
        elif isinstance(var, list):
            return [cls._serialize(v) for v in var]
        elif HAS_KUBERNETES and isinstance(var, k8s.V1Pod):
            json_pod = PodGenerator.serialize_pod(var)
            return cls._encode(json_pod, type_=DAT.POD)
        elif isinstance(var, DAG):
            return SerializedDAG.serialize_dag(var)
        elif isinstance(var, BaseOperator):
            return SerializedBaseOperator.serialize_operator(var)
        elif isinstance(var, cls._datetime_types):
            return cls._encode(var.timestamp(), type_=DAT.DATETIME)
        elif isinstance(var, datetime.timedelta):
            return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA)
        elif isinstance(var, Timezone):
            return cls._encode(str(var.name), type_=DAT.TIMEZONE)
        elif isinstance(var, relativedelta.relativedelta):
            # Only keep public, truthy fields of the relativedelta.
            encoded = {k: v for k, v in var.__dict__.items() if not k.startswith("_") and v}
            if var.weekday and var.weekday.n:
                # Every n'th Friday for example
                encoded['weekday'] = [var.weekday.weekday, var.weekday.n]
            elif var.weekday:
                encoded['weekday'] = [var.weekday.weekday]
            return cls._encode(encoded, type_=DAT.RELATIVEDELTA)
        elif callable(var):
            # Callables are stored as their source text, not round-trippable.
            return str(get_python_source(var))
        elif isinstance(var, set):
            # FIXME: casts set to list in customized serialization in future.
            try:
                return cls._encode(sorted(cls._serialize(v) for v in var), type_=DAT.SET)
            except TypeError:
                # Mixed/unorderable element types: keep arbitrary order.
                return cls._encode([cls._serialize(v) for v in var], type_=DAT.SET)
        elif isinstance(var, tuple):
            # FIXME: casts tuple to list in customized serialization in future.
            return cls._encode([cls._serialize(v) for v in var], type_=DAT.TUPLE)
        elif isinstance(var, TaskGroup):
            return SerializedTaskGroup.serialize_task_group(var)
        else:
            log.debug('Cast type %s to str in serialization.', type(var))
            return str(var)

    # pylint: enable=too-many-return-statements

    @classmethod
    def _deserialize(cls, encoded_var: Any) -> Any:  # pylint: disable=too-many-return-statements
        """Helper function of depth first search for deserialization."""
        # JSON primitives (except for dict) are not encoded.
        if cls._is_primitive(encoded_var):
            return encoded_var
        elif isinstance(encoded_var, list):
            return [cls._deserialize(v) for v in encoded_var]
        if not isinstance(encoded_var, dict):
            raise ValueError(f"The encoded_var should be dict and is {type(encoded_var)}")
        var = encoded_var[Encoding.VAR]
        type_ = encoded_var[Encoding.TYPE]
        if type_ == DAT.DICT:
            return {k: cls._deserialize(v) for k, v in var.items()}
        elif type_ == DAT.DAG:
            return SerializedDAG.deserialize_dag(var)
        elif type_ == DAT.OP:
            return SerializedBaseOperator.deserialize_operator(var)
        elif type_ == DAT.DATETIME:
            return pendulum.from_timestamp(var)
        elif type_ == DAT.POD:
            if not HAS_KUBERNETES:
                raise RuntimeError("Cannot deserialize POD objects without kubernetes libraries installed!")
            pod = PodGenerator.deserialize_model_dict(var)
            return pod
        elif type_ == DAT.TIMEDELTA:
            return datetime.timedelta(seconds=var)
        elif type_ == DAT.TIMEZONE:
            return Timezone(var)
        elif type_ == DAT.RELATIVEDELTA:
            if 'weekday' in var:
                # Stored as [weekday] or [weekday, n]; rebuild the weekday object.
                var['weekday'] = relativedelta.weekday(*var['weekday'])  # type: ignore
            return relativedelta.relativedelta(**var)
        elif type_ == DAT.SET:
            return {cls._deserialize(v) for v in var}
        elif type_ == DAT.TUPLE:
            return tuple(cls._deserialize(v) for v in var)
        else:
            raise TypeError(f'Invalid type {type_!s} in deserialization.')

    # Shorthand deserializers used by subclasses for well-known field names.
    _deserialize_datetime = pendulum.from_timestamp
    _deserialize_timezone = pendulum.tz.timezone

    @classmethod
    def _deserialize_timedelta(cls, seconds: int) -> datetime.timedelta:
        return datetime.timedelta(seconds=seconds)

    @classmethod
    def _is_constructor_param(cls, attrname: str, instance: Any) -> bool:
        # pylint: disable=unused-argument
        return attrname in cls._CONSTRUCTOR_PARAMS

    @classmethod
    def _value_is_hardcoded_default(cls, attrname: str, value: Any, instance: Any) -> bool:
        """
        Return true if ``value`` is the hard-coded default for the given attribute.
        This takes in to account cases where the ``concurrency`` parameter is
        stored in the ``_concurrency`` attribute.
        And by using `is` here only and not `==` this copes with the case a
        user explicitly specifies an attribute with the same "value" as the
        default. (This is because ``"default" is "default"`` will be False as
        they are different strings with the same characters.)
        Also returns True if the value is an empty list or empty dict. This is done
        to account for the case where the default value of the field is None but has the
        ``field = field or {}`` set.
        """
        # pylint: disable=unused-argument
        if attrname in cls._CONSTRUCTOR_PARAMS and (
            cls._CONSTRUCTOR_PARAMS[attrname] is value or (value in [{}, []])
        ):
            return True
        return False
class DependencyDetector:
    """Detects dependencies between DAGs."""

    @staticmethod
    def detect_task_dependencies(task: BaseOperator) -> Optional['DagDependency']:
        """Detects dependencies caused by tasks"""
        task_type = task.task_type
        if task_type == "TriggerDagRunOperator":
            # This DAG triggers another DAG: edge goes from us to the target.
            return DagDependency(
                source=task.dag_id,
                target=getattr(task, "trigger_dag_id"),
                dependency_type="trigger",
                dependency_id=task.task_id,
            )
        if task_type == "ExternalTaskSensor":
            # This DAG waits on another DAG: edge goes from the external DAG to us.
            return DagDependency(
                source=getattr(task, "external_dag_id"),
                target=task.dag_id,
                dependency_type="sensor",
                dependency_id=task.task_id,
            )
        return None
class SerializedBaseOperator(BaseOperator, BaseSerialization):
    """A JSON serializable representation of operator.

    All operators are casted to SerializedBaseOperator after deserialization.
    Class specific attributes used by UI are move to object attributes.
    """

    _decorated_fields = {'executor_config'}

    # Defaults of BaseOperator.__init__ parameters; attributes that still hold
    # their default value are skipped during serialization.
    _CONSTRUCTOR_PARAMS = {
        k: v.default
        for k, v in signature(BaseOperator.__init__).parameters.items()
        if v.default is not v.empty
    }

    # Pluggable detector class, configurable via [scheduler] dependency_detector.
    dependency_detector = conf.getimport('scheduler', 'dependency_detector')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # task_type is used by UI to display the correct class type, because UI only
        # receives BaseOperator from deserialized DAGs.
        self._task_type = 'BaseOperator'
        # Move class attributes into object attributes.
        self.ui_color = BaseOperator.ui_color
        self.ui_fgcolor = BaseOperator.ui_fgcolor
        self.template_fields = BaseOperator.template_fields
        self.operator_extra_links = BaseOperator.operator_extra_links

    @property
    def task_type(self) -> str:
        # Overwrites task_type of BaseOperator to use _task_type instead of
        # __class__.__name__.
        return self._task_type

    @task_type.setter
    def task_type(self, task_type: str):
        self._task_type = task_type

    @classmethod
    def serialize_operator(cls, op: BaseOperator) -> Dict[str, Any]:
        """Serializes operator into a JSON object."""
        serialize_op = cls.serialize_to_json(op, cls._decorated_fields)
        serialize_op['_task_type'] = op.__class__.__name__
        serialize_op['_task_module'] = op.__class__.__module__
        # Used to determine if an Operator is inherited from DummyOperator
        serialize_op['_is_dummy'] = op.inherits_from_dummy_operator
        if op.operator_extra_links:
            serialize_op['_operator_extra_links'] = cls._serialize_operator_extra_links(
                op.operator_extra_links
            )
        if op.deps is not BaseOperator.deps:
            # Are the deps different to BaseOperator, if so serialize the class names!
            # For Airflow 2.0 expediency we _only_ allow built in Dep classes.
            # Fix this for 2.0.x or 2.1
            deps = []
            for dep in op.deps:
                klass = type(dep)
                module_name = klass.__module__
                if not module_name.startswith("airflow.ti_deps.deps."):
                    raise SerializationError(
                        f"Cannot serialize {(op.dag.dag_id + '.' + op.task_id)!r} with `deps` from non-core "
                        f"module {module_name!r}"
                    )
                deps.append(f'{module_name}.{klass.__name__}')
            serialize_op['deps'] = deps
        # Store all template_fields as they are if there are JSON Serializable
        # If not, store them as strings
        if op.template_fields:
            for template_field in op.template_fields:
                value = getattr(op, template_field, None)
                if not cls._is_excluded(value, template_field, op):
                    serialize_op[template_field] = serialize_template_field(value)
        return serialize_op

    @classmethod
    def deserialize_operator(cls, encoded_op: Dict[str, Any]) -> BaseOperator:
        """Deserializes an operator from a JSON object."""
        op = SerializedBaseOperator(task_id=encoded_op['task_id'])
        if "label" not in encoded_op:
            # Handle deserialization of old data before the introduction of TaskGroup
            encoded_op["label"] = encoded_op["task_id"]
        # Extra Operator Links defined in Plugins
        op_extra_links_from_plugin = {}
        # We don't want to load Extra Operator links in Scheduler
        if cls._load_operator_extra_links:  # pylint: disable=too-many-nested-blocks
            from airflow import plugins_manager

            plugins_manager.initialize_extra_operators_links_plugins()
            if plugins_manager.operator_extra_links is None:
                raise AirflowException("Can not load plugins")
            # Collect plugin-provided links registered for this operator class.
            for ope in plugins_manager.operator_extra_links:
                for operator in ope.operators:
                    if (
                        operator.__name__ == encoded_op["_task_type"]
                        and operator.__module__ == encoded_op["_task_module"]
                    ):
                        op_extra_links_from_plugin.update({ope.name: ope})
            # If OperatorLinks are defined in Plugins but not in the Operator that is being Serialized
            # set the Operator links attribute
            # The case for "If OperatorLinks are defined in the operator that is being Serialized"
            # is handled in the deserialization loop where it matches k == "_operator_extra_links"
            if op_extra_links_from_plugin and "_operator_extra_links" not in encoded_op:
                setattr(op, "operator_extra_links", list(op_extra_links_from_plugin.values()))
        for k, v in encoded_op.items():
            if k == "_downstream_task_ids":
                v = set(v)
            elif k == "subdag":
                v = SerializedDAG.deserialize_dag(v)
            elif k in {"retry_delay", "execution_timeout", "sla", "max_retry_delay"}:
                v = cls._deserialize_timedelta(v)
            elif k in encoded_op["template_fields"]:
                # Template fields were stored as-is (or stringified); keep them.
                pass
            elif k.endswith("_date"):
                v = cls._deserialize_datetime(v)
            elif k == "_operator_extra_links":
                if cls._load_operator_extra_links:
                    op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
                    # If OperatorLinks with the same name exists, Links via Plugin have higher precedence
                    op_predefined_extra_links.update(op_extra_links_from_plugin)
                else:
                    op_predefined_extra_links = {}
                v = list(op_predefined_extra_links.values())
                k = "operator_extra_links"
            elif k == "deps":
                v = cls._deserialize_deps(v)
            elif (
                k in cls._decorated_fields
                or k not in op.get_serialized_fields()  # pylint: disable=unsupported-membership-test
            ):
                v = cls._deserialize(v)
            # else use v as it is
            setattr(op, k, v)
        # Fields absent from the blob and without a constructor default are reset.
        for k in op.get_serialized_fields() - encoded_op.keys() - cls._CONSTRUCTOR_PARAMS.keys():
            setattr(op, k, None)
        # Set all the template_field to None that were not present in Serialized JSON
        for field in op.template_fields:
            if not hasattr(op, field):
                setattr(op, field, None)
        # Used to determine if an Operator is inherited from DummyOperator
        setattr(op, "_is_dummy", bool(encoded_op.get("_is_dummy", False)))
        return op

    @classmethod
    def detect_dependencies(cls, op: BaseOperator) -> Optional['DagDependency']:
        """Detects between DAG dependencies for the operator."""
        return cls.dependency_detector.detect_task_dependencies(op)

    @classmethod
    def _is_excluded(cls, var: Any, attrname: str, op: BaseOperator):
        if var is not None and op.has_dag() and attrname.endswith("_date"):
            # If this date is the same as the matching field in the dag, then
            # don't store it again at the task level.
            dag_date = getattr(op.dag, attrname, None)
            if var is dag_date or var == dag_date:
                return True
        return super()._is_excluded(var, attrname, op)

    @classmethod
    def _deserialize_deps(cls, deps: List[str]) -> Set["BaseTIDep"]:
        # Only core dep classes are allowed, mirroring the restriction
        # enforced in serialize_operator.
        instances = set()
        for qualname in set(deps):
            if not qualname.startswith("airflow.ti_deps.deps."):
                log.error("Dep class %r not registered", qualname)
                continue
            try:
                instances.add(import_string(qualname)())
            except ImportError:
                log.warning("Error importing dep %r", qualname, exc_info=True)
        return instances

    @classmethod
    def _deserialize_operator_extra_links(cls, encoded_op_links: list) -> Dict[str, BaseOperatorLink]:
        """
        Deserialize Operator Links if the Classes are registered in Airflow Plugins.
        Error is raised if the OperatorLink is not found in Plugins too.

        :param encoded_op_links: Serialized Operator Link
        :return: De-Serialized Operator Link
        """
        from airflow import plugins_manager

        plugins_manager.initialize_extra_operators_links_plugins()
        if plugins_manager.registered_operator_link_classes is None:
            raise AirflowException("Can't load plugins")
        op_predefined_extra_links = {}
        for _operator_links_source in encoded_op_links:
            # Get the key, value pair as Tuple where key is OperatorLink ClassName
            # and value is the dictionary containing the arguments passed to the OperatorLink
            #
            # Example of a single iteration:
            #
            # _operator_links_source =
            #   {
            #       'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink': {
            #           'index': 0
            #       }
            #   },
            #
            # list(_operator_links_source.items()) =
            # [
            #   (
            #       'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink',
            #       {'index': 0}
            #   )
            # ]
            #
            # list(_operator_links_source.items())[0] =
            # (
            #   'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink',
            #   {
            #       'index': 0
            #   }
            # )
            _operator_link_class_path, data = list(_operator_links_source.items())[0]
            if _operator_link_class_path in get_operator_extra_links():
                single_op_link_class = import_string(_operator_link_class_path)
            elif _operator_link_class_path in plugins_manager.registered_operator_link_classes:
                single_op_link_class = plugins_manager.registered_operator_link_classes[
                    _operator_link_class_path
                ]
            else:
                log.error("Operator Link class %r not registered", _operator_link_class_path)
                return {}
            # cattr rebuilds the link instance from its serialized kwargs.
            op_predefined_extra_link: BaseOperatorLink = cattr.structure(data, single_op_link_class)
            op_predefined_extra_links.update({op_predefined_extra_link.name: op_predefined_extra_link})
        return op_predefined_extra_links

    @classmethod
    def _serialize_operator_extra_links(cls, operator_extra_links: Iterable[BaseOperatorLink]):
        """
        Serialize Operator Links. Store the import path of the OperatorLink and the arguments
        passed to it. Example
        ``[{'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleLink': {}}]``

        :param operator_extra_links: Operator Link
        :return: Serialized Operator Link
        """
        serialize_operator_extra_links = []
        for operator_extra_link in operator_extra_links:
            op_link_arguments = cattr.unstructure(operator_extra_link)
            if not isinstance(op_link_arguments, dict):
                op_link_arguments = {}
            serialize_operator_extra_links.append(
                {
                    "{}.{}".format(
                        operator_extra_link.__class__.__module__, operator_extra_link.__class__.__name__
                    ): op_link_arguments
                }
            )
        return serialize_operator_extra_links
class SerializedDAG(DAG, BaseSerialization):
    """
    A JSON serializable representation of DAG.

    A stringified DAG can only be used in the scope of scheduler and webserver, because fields
    that are not serializable, such as functions and customer defined classes, are casted to
    strings.

    Compared with SimpleDAG: SerializedDAG contains all information for webserver.
    Compared with DagPickle: DagPickle contains all information for worker, but some DAGs are
    not pickle-able. SerializedDAG works for all DAGs.
    """

    _decorated_fields = {'schedule_interval', 'default_args', '_access_control'}

    @staticmethod
    def __get_constructor_defaults():  # pylint: disable=no-method-argument
        # Some DAG constructor parameters are stored under a private attribute
        # name; remap them before collecting defaults.
        param_to_attr = {
            'concurrency': '_concurrency',
            'description': '_description',
            'default_view': '_default_view',
            'access_control': '_access_control',
        }
        return {
            param_to_attr.get(k, k): v.default
            for k, v in signature(DAG.__init__).parameters.items()
            if v.default is not v.empty
        }

    _CONSTRUCTOR_PARAMS = __get_constructor_defaults.__func__()  # type: ignore
    del __get_constructor_defaults

    _json_schema = load_dag_schema()

    @classmethod
    def serialize_dag(cls, dag: DAG) -> dict:
        """Serializes a DAG into a JSON object."""
        try:
            serialize_dag = cls.serialize_to_json(dag, cls._decorated_fields)
            serialize_dag["tasks"] = [cls._serialize(task) for _, task in dag.task_dict.items()]
            serialize_dag["dag_dependencies"] = [
                vars(t)
                for t in (SerializedBaseOperator.detect_dependencies(task) for task in dag.task_dict.values())
                if t is not None
            ]
            serialize_dag['_task_group'] = SerializedTaskGroup.serialize_task_group(dag.task_group)
            # Edge info in the JSON exactly matches our internal structure
            serialize_dag["edge_info"] = dag.edge_info
            # has_on_*_callback are only stored if the value is True, as the default is False
            if dag.has_on_success_callback:
                serialize_dag['has_on_success_callback'] = True
            if dag.has_on_failure_callback:
                serialize_dag['has_on_failure_callback'] = True
            return serialize_dag
        except SerializationError:
            raise
        except Exception:
            # Wrap any unexpected failure so callers see a single error type.
            raise SerializationError(f'Failed to serialize dag {dag.dag_id!r}')

    @classmethod
    def deserialize_dag(cls, encoded_dag: Dict[str, Any]) -> 'SerializedDAG':
        """Deserializes a DAG from a JSON object."""
        dag = SerializedDAG(dag_id=encoded_dag['_dag_id'])
        for k, v in encoded_dag.items():
            if k == "_downstream_task_ids":
                v = set(v)
            elif k == "tasks":
                # pylint: disable=protected-access
                # Propagate the plugin-loading policy to the operator deserializer.
                SerializedBaseOperator._load_operator_extra_links = cls._load_operator_extra_links
                # pylint: enable=protected-access
                v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
                k = "task_dict"
            elif k == "timezone":
                v = cls._deserialize_timezone(v)
            elif k in {"dagrun_timeout"}:
                v = cls._deserialize_timedelta(v)
            elif k.endswith("_date"):
                v = cls._deserialize_datetime(v)
            elif k == "edge_info":
                # Value structure matches exactly
                pass
            elif k in cls._decorated_fields:
                v = cls._deserialize(v)
            # else use v as it is
            setattr(dag, k, v)
        # Set _task_group
        # pylint: disable=protected-access
        if "_task_group" in encoded_dag:
            dag._task_group = SerializedTaskGroup.deserialize_task_group(  # type: ignore
                encoded_dag["_task_group"], None, dag.task_dict
            )
        else:
            # This must be old data that had no task_group. Create a root TaskGroup and add
            # all tasks to it.
            dag._task_group = TaskGroup.create_root(dag)
            for task in dag.tasks:
                dag.task_group.add(task)
        # pylint: enable=protected-access
        # Set has_on_*_callbacks to True if they exist in Serialized blob as False is the default
        if "has_on_success_callback" in encoded_dag:
            dag.has_on_success_callback = True
        if "has_on_failure_callback" in encoded_dag:
            dag.has_on_failure_callback = True
        keys_to_set_none = dag.get_serialized_fields() - encoded_dag.keys() - cls._CONSTRUCTOR_PARAMS.keys()
        for k in keys_to_set_none:
            setattr(dag, k, None)
        setattr(dag, 'full_filepath', dag.fileloc)
        for task in dag.task_dict.values():
            task.dag = dag
            serializable_task: BaseOperator = task
            # Tasks inherit the DAG's start/end dates when they had none of their own.
            for date_attr in ["start_date", "end_date"]:
                if getattr(serializable_task, date_attr) is None:
                    setattr(serializable_task, date_attr, getattr(dag, date_attr))
            if serializable_task.subdag is not None:
                setattr(serializable_task.subdag, 'parent_dag', dag)
                serializable_task.subdag.is_subdag = True
            for task_id in serializable_task.downstream_task_ids:
                # Bypass set_upstream etc here - it does more than we want
                # noqa: E501 # pylint: disable=protected-access
                dag.task_dict[task_id]._upstream_task_ids.add(serializable_task.task_id)
        return dag

    @classmethod
    def to_dict(cls, var: Any) -> dict:
        """Stringifies DAGs and operators contained by var and returns a dict of var."""
        json_dict = {"__version": cls.SERIALIZER_VERSION, "dag": cls.serialize_dag(var)}
        # Validate Serialized DAG with Json Schema. Raises Error if it mismatches
        cls.validate_schema(json_dict)
        return json_dict

    @classmethod
    def from_dict(cls, serialized_obj: dict) -> 'SerializedDAG':
        """Deserializes a python dict in to the DAG and operators it contains."""
        ver = serialized_obj.get('__version', '<not present>')
        if ver != cls.SERIALIZER_VERSION:
            raise ValueError(f"Unsure how to deserialize version {ver!r}")
        return cls.deserialize_dag(serialized_obj['dag'])
class SerializedTaskGroup(TaskGroup, BaseSerialization):
    """A JSON serializable representation of TaskGroup."""

    @classmethod
    def serialize_task_group(cls, task_group: TaskGroup) -> Optional[Union[Dict[str, Any]]]:
        """Serializes TaskGroup into a JSON object."""
        if not task_group:
            return None
        # Children are stored as (type, payload) pairs: operators by task_id
        # (they live in the DAG's task list), nested groups recursively.
        serialize_group = {
            "_group_id": task_group._group_id,  # pylint: disable=protected-access
            "prefix_group_id": task_group.prefix_group_id,
            "tooltip": task_group.tooltip,
            "ui_color": task_group.ui_color,
            "ui_fgcolor": task_group.ui_fgcolor,
            "children": {
                label: (DAT.OP, child.task_id)
                if isinstance(child, BaseOperator)
                else (DAT.TASK_GROUP, SerializedTaskGroup.serialize_task_group(child))
                for label, child in task_group.children.items()
            },
            "upstream_group_ids": cls._serialize(list(task_group.upstream_group_ids)),
            "downstream_group_ids": cls._serialize(list(task_group.downstream_group_ids)),
            "upstream_task_ids": cls._serialize(list(task_group.upstream_task_ids)),
            "downstream_task_ids": cls._serialize(list(task_group.downstream_task_ids)),
        }
        return serialize_group

    @classmethod
    def deserialize_task_group(
        cls,
        encoded_group: Dict[str, Any],
        parent_group: Optional[TaskGroup],
        task_dict: Dict[str, BaseOperator],
    ) -> Optional[TaskGroup]:
        """Deserializes a TaskGroup from a JSON object."""
        if not encoded_group:
            return None
        group_id = cls._deserialize(encoded_group["_group_id"])
        kwargs = {
            key: cls._deserialize(encoded_group[key])
            for key in ["prefix_group_id", "tooltip", "ui_color", "ui_fgcolor"]
        }
        group = SerializedTaskGroup(group_id=group_id, parent_group=parent_group, **kwargs)
        # Operators are resolved from task_dict by id; nested groups recurse.
        group.children = {
            label: task_dict[val]
            if _type == DAT.OP  # type: ignore
            else SerializedTaskGroup.deserialize_task_group(val, group, task_dict)
            for label, (_type, val) in encoded_group["children"].items()
        }
        group.upstream_group_ids = set(cls._deserialize(encoded_group["upstream_group_ids"]))
        group.downstream_group_ids = set(cls._deserialize(encoded_group["downstream_group_ids"]))
        group.upstream_task_ids = set(cls._deserialize(encoded_group["upstream_task_ids"]))
        group.downstream_task_ids = set(cls._deserialize(encoded_group["downstream_task_ids"]))
        return group
@dataclass
class DagDependency:
    """Dataclass for representing dependencies between DAGs.

    These are calculated during serialization and attached to serialized DAGs.
    """

    source: str
    target: str
    dependency_type: str
    dependency_id: str

    @property
    def node_id(self):
        """Node ID for graph rendering"""
        return ":".join((self.dependency_type, self.source, self.target, self.dependency_id))
|
|
from __future__ import print_function
import sys, os
import re
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
import h2o.exceptions
from tests import pyunit_utils as pu
from h2o.automl import H2OAutoML
"""This test suite checks the AutoML parameters influencing the model selection pipeline"""
max_models = 5
def import_dataset(seed=0, larger=False):
    """Load the prostate dataset, binarize the target, and split 80/10/10.

    :param seed: split seed, for reproducibility.
    :param larger: when True, load the bigger `prostate_complete` variant.
    :return: a namespace with train/valid/test frames, target name and index.
    """
    file_name = "prostate_complete.csv.zip" if larger else "prostate.csv"
    frame = h2o.import_file(path=pu.locate("smalldata/prostate/{}".format(file_name)))
    target = "CAPSULE"
    frame[target] = frame[target].asfactor()
    # Split frames into train, validation, and test sets.
    splits = frame.split_frame(ratios=[.8, .1], seed=seed)
    return pu.ns(train=splits[0], valid=splits[1], test=splits[2], target=target, target_idx=1)
def get_partitioned_model_names(leaderboard):
    """Split a leaderboard's model ids into (all, base models, stacked ensembles).

    :param leaderboard: leaderboard frame; first column holds model ids and
        `nrows` gives the row count.
    :return: tuple (model_names, non_se_model_names, se_model_names), each
        preserving leaderboard order.
    """
    model_names = [leaderboard[i, 0] for i in range(leaderboard.nrows)]
    # Single O(n) partition pass; the previous `m not in se_model_names`
    # membership scan was accidentally quadratic.
    se_model_names = []
    non_se_model_names = []
    for name in model_names:
        (se_model_names if name.startswith('StackedEnsemble') else non_se_model_names).append(name)
    return model_names, non_se_model_names, se_model_names
def test_exclude_algos():
    """AutoML must not build any base model for an algo in exclude_algos."""
    print("AutoML doesn't train models for algos listed in exclude_algos")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_exclude_algos",
                       exclude_algos=['DRF', 'GLM'],
                       max_models=max_models,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train, validation_frame=data.valid)
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert all('DRF' not in name and 'GLM' not in name for name in base_models)
    assert len(ensembles) == 2
def test_include_algos():
    """AutoML must restrict itself to the algos listed in include_algos."""
    print("AutoML trains only models for algos listed in include_algos")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_include_algos",
                       include_algos=['GBM'],
                       max_models=max_models,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train, validation_frame=data.valid)
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert all('GBM' in name for name in base_models)
    assert len(ensembles) == 0, "No StackedEnsemble should have been trained if not explicitly included to the existing include_algos"
def test_include_exclude_algos():
    """include_algos and exclude_algos cannot be combined on one AutoML."""
    print("include_algos and exclude_algos parameters are mutually exclusive")
    rejected = False
    try:
        H2OAutoML(project_name="py_include_exclude_algos",
                  exclude_algos=['DRF', 'XGBoost'],
                  include_algos=['GBM'],
                  max_models=max_models,
                  seed=1)
    except AssertionError as e:
        # The rejection must carry the documented mutual-exclusion message.
        rejected = "Use either `exclude_algos` or `include_algos`, not both" in str(e)
    assert rejected, "Should have thrown AssertionError"
def test_bad_modeling_plan_using_full_syntax():
    """Malformed full-syntax modeling plans must be rejected at construction.

    Fix: each `try` block previously passed silently when H2OAutoML failed to
    raise; an `assert False` sentinel (the pattern already used in
    test_include_exclude_algos) now turns a missing validation into a failure —
    the sentinel AssertionError never matches the expected message.
    """
    try:
        H2OAutoML(modeling_plan=[
            dict(steps=['def_1'])
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "each definition must have a 'name' key" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", steps=['def_1'], alias='defaults')
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "each definition must have only 1 or 2 keys" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", alias='all_steps')
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "alias must be one of ['all', 'defaults', 'grids']" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", dummy=['def_1'])
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "steps definitions support only the following keys: name, alias, steps" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", steps=['def_1'])
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "each step must be a dict" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", steps=[dict(foo='def_1')])
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "each step must have an 'id' key" in str(e)

    try:
        H2OAutoML(modeling_plan=[
            dict(name="GBM", steps=[dict(id='def_1', weight=3/4)])
        ])
        assert False, "should have raised AssertionError"
    except AssertionError as e:
        assert "weight must be an integer" in str(e)
def test_bad_modeling_plan_using_simplified_syntax():
    """Malformed simplified-syntax modeling plans must be rejected.

    Fix: the original try/except blocks passed silently when no exception was
    raised (the bare-`pass` handlers would even swallow an in-`try` sentinel),
    so the check is factored into a helper that fails when nothing is raised.
    """
    def expect_failure(plan, exc_type):
        # Building an AutoML with `plan` must raise exactly exc_type;
        # any other exception propagates, as in the original test.
        try:
            H2OAutoML(modeling_plan=plan)
        except exc_type:
            return
        assert False, "modeling_plan=%s should have raised %s" % (plan, exc_type.__name__)

    expect_failure([['GBM']], h2o.exceptions.H2OTypeError)
    expect_failure([('GBM', 'defaults', ['def_1'])], AssertionError)
    expect_failure([('GBM', 'dummy_alias')], h2o.exceptions.H2OTypeError)
    expect_failure([('GBM', ('def_1', 'def_2'))], h2o.exceptions.H2OTypeError)
def test_modeling_plan_using_full_syntax():
    """A full (dict-based) modeling plan drives which models get built."""
    data = import_dataset()
    plan = [
        dict(name='GLM', steps=[dict(id='def_1')]),
        dict(name='GBM', alias='grids'),
        # just testing that it is parsed correctly on backend
        # (no model won't be build due to max_models)
        dict(name='DRF', steps=[dict(id='def_1', weight=333)]),
    ]
    automl = H2OAutoML(project_name="py_modeling_plan_full_syntax",
                       max_models=2,
                       modeling_plan=plan,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert len(base_models) == 2
    assert len(ensembles) == 0
    assert any('GLM' in name for name in base_models)
    assert any('GBM_grid' in name for name in base_models)
def test_modeling_plan_using_simplified_syntax():
    """A simplified (tuple-based) modeling plan drives model selection."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_modeling_plan_simple_syntax",
                       max_models=3,
                       modeling_plan=[
                           ('DRF', ['XRT', 'def_1']),
                           ('GBM', 'grids'),
                           ('StackedEnsemble', ['best'])
                       ],
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert len(base_models) == 3
    assert len(ensembles) == 1
    for token in ('DRF', 'XRT', 'GBM_grid'):
        assert any(token in name for name in base_models)
    assert any('BestOfFamily' in name for name in ensembles)
def test_modeling_plan_using_minimal_syntax():
    """Bare algo names in a modeling plan expand to the default steps."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_modeling_plan_minimal_syntax",
                       max_models=5,
                       modeling_plan=['DRF', 'GLM', ('GBM', 'grids'), 'StackedEnsemble'],
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert len(base_models) == 5
    assert len(ensembles) == 2
    for token in ('DRF', 'XRT', 'GLM', 'GBM_grid'):
        assert any(token in name for name in base_models)
    for token in ('BestOfFamily', 'AllModels'):
        assert any(token in name for name in ensembles)
def test_modeling_steps():
    """modeling_steps reflects the effective plan and can seed a new run."""
    data = import_dataset()
    first_run = H2OAutoML(project_name="py_modeling_steps",
                          max_models=5,
                          modeling_plan=['DRF',
                                         ('GLM', 'defaults'),
                                         dict(name='GBM', steps=[dict(id='grid_1', weight=77)]),
                                         'StackedEnsemble'],
                          seed=1)
    first_run.train(y=data.target, training_frame=data.train)
    print(first_run.leaderboard)
    # Default steps carry weight 10; the explicit GBM step keeps its weight.
    expected_steps = [
        dict(name='DRF', steps=[dict(id='def_1', weight=10), dict(id='XRT', weight=10)]),
        dict(name='GLM', steps=[dict(id='def_1', weight=10)]),
        dict(name='GBM', steps=[dict(id='grid_1', weight=77)]),
        dict(name='StackedEnsemble', steps=[dict(id='best', weight=10), dict(id='all', weight=10)]),
    ]
    assert first_run.modeling_steps == expected_steps
    # The exported steps must be accepted verbatim as a new modeling_plan.
    second_run = H2OAutoML(project_name="py_reinject_modeling_steps",
                           max_models=5,
                           modeling_plan=first_run.modeling_steps,
                           seed=1)
    second_run.train(y=data.target, training_frame=data.train)
    print(second_run.leaderboard)
    assert first_run.modeling_steps == second_run.modeling_steps
def test_exclude_algos_is_applied_on_top_of_modeling_plan():
    """exclude_algos filters entries out of an explicit modeling_plan.

    Fix: project_name was copy-pasted from test_modeling_plan_using_minimal_syntax;
    AutoML runs sharing a project name append to the same leaderboard, which can
    corrupt the counts asserted below, so this test now uses its own project.
    """
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_exclude_algos_on_top_of_modeling_plan",
                    max_models=5,
                    modeling_plan=['DRF', 'GLM', ('GBM', 'grids'), 'StackedEnsemble'],
                    exclude_algos=['GBM', 'StackedEnsemble'],
                    seed=1)
    aml.train(y=ds.target, training_frame=ds.train)
    _, non_se, se = get_partitioned_model_names(aml.leaderboard)
    # DRF expands to def_1+XRT, GLM adds one: 3 base models; SEs excluded.
    assert len(non_se) == 3
    assert len(se) == 0
def test_monotone_constraints():
    """monotone_constraints is forwarded to every algo that supports it."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_monotone_constraints",
                       monotone_constraints=dict(AGE=1, VOL=-1),  # constraints just for the sake of testing
                       max_models=6,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    all_names, _, _ = get_partitioned_model_names(automl.leaderboard)
    constrained = [n for n in all_names if re.match(r"GBM|XGBoost", n)]
    assert len(constrained) < len(all_names), \
        "models not supporting the constraint should not have been skipped"
    for name in constrained:
        model = h2o.get_model(name)
        actual = next(v['actual'] for k, v in model.params.items() if k == 'monotone_constraints')
        assert isinstance(actual, list)
        assert len(actual) == 2
        # Both constraints must be present with their exact signed values.
        by_key = {entry['key']: entry['value'] for entry in actual}
        assert by_key.get('AGE') == 1.0
        assert by_key.get('VOL') == -1.0
def test_monotone_constraints_can_be_passed_as_algo_parameter():
    """monotone_constraints passed via algo_parameters reaches supported algos.

    Fix: project_name was copy-pasted from test_monotone_constraints; AutoML
    runs sharing a project name append to the same leaderboard, so this test
    now uses its own project.
    """
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_monotone_constraints_as_algo_parameter",
                    algo_parameters=dict(
                        monotone_constraints=dict(AGE=1, VOL=-1),  # constraints just for the sake of testing
                        # ntrees=10,
                    ),
                    max_models=6,
                    seed=1)
    aml.train(y=ds.target, training_frame=ds.train)
    model_names, _, _ = get_partitioned_model_names(aml.leaderboard)
    models_supporting_monotone_constraints = [n for n in model_names if re.match(r"GBM|XGBoost", n)]
    assert len(models_supporting_monotone_constraints) < len(model_names), \
        "models not supporting the constraint should not have been skipped"
    for m in models_supporting_monotone_constraints:
        model = h2o.get_model(m)
        value = next(v['actual'] for n, v in model.params.items() if n == 'monotone_constraints')
        assert isinstance(value, list)
        assert len(value) == 2
        age = next((v for v in value if v['key'] == 'AGE'), None)
        assert age is not None
        assert age['value'] == 1.0
        vol = next((v for v in value if v['key'] == 'VOL'), None)
        assert vol is not None
        assert vol['value'] == -1.0
    # models_supporting_ntrees = [n for n in model_names if re.match(r"DRF|GBM|XGBoost|XRT", n)]
    # assert len(models_supporting_ntrees) > 0
    # for m in models_supporting_ntrees:
    #     model = h2o.get_model(m)
    #     value = next(v['actual'] for n, v in model.params.items() if n == 'ntrees')
    #     assert value == 10
def test_algo_parameter_can_be_applied_only_to_a_specific_algo():
    """An algo-scoped parameter (GBM__...) applies to GBM models only."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_specific_algo_param",
                       algo_parameters=dict(
                           GBM__monotone_constraints=dict(AGE=1)
                       ),
                       max_models=6,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    all_names, _, _ = get_partitioned_model_names(automl.leaderboard)
    mc_capable = [n for n in all_names if re.match(r"GBM|XGBoost", n)]
    gbm_models = [n for n in mc_capable if n.startswith('GBM')]
    assert gbm_models, "There should be at least one GBM model"
    for name in mc_capable:
        model = h2o.get_model(name)
        actual = next(v['actual'] for k, v in model.params.items() if k == 'monotone_constraints')
        if name.startswith('GBM'):
            # GBM models must carry the constraint...
            assert isinstance(actual, list)
            constraint = next((v for v in actual if v['key'] == 'AGE'), None)
            assert constraint is not None
            assert constraint['value'] == 1.0
        else:
            # ...while other constraint-capable algos must be untouched.
            assert actual is None
def test_cannot_set_unauthorized_algo_parameter():
    """A non-whitelisted algo_parameters key must be rejected by the backend.

    Fix: the test previously passed silently if train() did not raise; an
    `assert False` sentinel now makes a missing rejection fail the test
    (AssertionError is not caught by the H2OResponseError handler).
    """
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_unauthorized_algo_param",
                    algo_parameters=dict(
                        score_tree_interval=7
                    ),
                    max_models=6,
                    seed=1)
    try:
        aml.train(y=ds.target, training_frame=ds.train)
        assert False, "should have raised H2OResponseError"
    except h2o.exceptions.H2OResponseError as e:
        assert "algo_parameters: score_tree_interval" in str(e)
def test_exploitation_disabled():
    """A zero exploitation_ratio must skip the exploitation phase entirely."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_exploitation_ratio_disabled",
                       exploitation_ratio=.0,
                       max_models=6,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    # Neither exploitation step should have been scheduled.
    for marker in ('start_GBM_lr_annealing', 'start_XGBoost_lr_search'):
        assert marker not in automl.training_info
def test_exploitation_doesnt_impact_max_models():
    """Exploitation steps run without counting against max_models."""
    data = import_dataset()
    automl = H2OAutoML(project_name="py_exploitation_ratio_max_models",
                       exploitation_ratio=.1,
                       max_models=6,
                       seed=1)
    automl.train(y=data.target, training_frame=data.train)
    # Both exploitation steps ran...
    for marker in ('start_GBM_lr_annealing', 'start_XGBoost_lr_search'):
        assert marker in automl.training_info
    # ...yet the model budget is unchanged.
    _, base_models, ensembles = get_partitioned_model_names(automl.leaderboard)
    assert len(base_models) == 6
    assert len(ensembles) == 2
def test_exploitation_impacts_exploration_duration():
    """With a large exploitation_ratio, both phases fit inside max_runtime_secs."""
    data = import_dataset()
    budget_secs = 30
    automl = H2OAutoML(project_name="py_exploitation_ratio_max_runtime",
                       exploitation_ratio=.5,  # excessive ratio on purpose, due to training overheads in multinode
                       exclude_algos=['DeepLearning', 'XGBoost'],  # removing some algos for the same reason as above
                       max_runtime_secs=budget_secs,
                       seed=1,
                       # verbosity='debug'
                       )
    automl.train(y=data.target, training_frame=data.train)
    info = automl.training_info
    assert 'start_GBM_lr_annealing' in info
    # assert 'start_XGBoost_lr_search' in info
    exploitation_start = int(info['start_GBM_lr_annealing'])
    exploration_secs = exploitation_start - int(info['start_epoch'])
    exploitation_secs = int(info['start_StackedEnsemble_best']) - exploitation_start
    # can't reliably check duration ratio
    assert 0 < exploration_secs < budget_secs
    assert 0 < exploitation_secs < exploration_secs
# pyunit harness entry point: registers and runs every test in this suite.
pu.run_tests([
    test_exclude_algos,
    test_include_algos,
    test_include_exclude_algos,
    test_bad_modeling_plan_using_full_syntax,
    test_bad_modeling_plan_using_simplified_syntax,
    test_modeling_plan_using_full_syntax,
    test_modeling_plan_using_simplified_syntax,
    test_modeling_plan_using_minimal_syntax,
    test_modeling_steps,
    test_exclude_algos_is_applied_on_top_of_modeling_plan,
    test_monotone_constraints,
    test_monotone_constraints_can_be_passed_as_algo_parameter,
    test_algo_parameter_can_be_applied_only_to_a_specific_algo,
    test_cannot_set_unauthorized_algo_parameter,
    test_exploitation_disabled,
    test_exploitation_doesnt_impact_max_models,
    test_exploitation_impacts_exploration_duration,
])
|
|
import sys
import os
import gzip
import zipfile
from optparse import make_option
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import get_apps
from django.utils.itercompat import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
    """Management command that installs fixture files into a database.

    NOTE: this is Python 2 code (`except Exception, e`, the `file` builtin).
    """

    help = 'Installs the named fixture(s) in the database.'
    args = "fixture [fixture ...]"

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
                'fixtures into. Defaults to the "default" database.'),
    )

    def handle(self, *fixture_labels, **options):
        """Load each fixture label into the selected database.

        A label may be `name`, `name.format`, or `name.format.compression`;
        candidate files are searched for in app fixture directories,
        settings.FIXTURE_DIRS, and (for relative names) the current directory.
        The whole load runs in a single transaction; any error rolls it back
        and aborts.
        """
        using = options.get('database', DEFAULT_DB_ALIAS)

        connection = connections[using]
        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        models = set()

        # Pretty-print a fixture directory for messages ('' means absolute path).
        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed(using=using)
            transaction.enter_transaction_management(using=using)
            transaction.managed(True, using=using)

        class SingleZipReader(zipfile.ZipFile):
            # Adapter making a one-file zip archive readable like a plain file.
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."

            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])

        # Maps compression suffix -> opener; None (no suffix) is the plain
        # Python 2 `file` builtin.
        compression_types = {
            None: file,
            'gz': gzip.GzipFile,
            'zip': SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File

        app_module_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):
                # It's a 'models/' subpackage
                for path in app.__path__:
                    app_module_paths.append(path)
            else:
                # It's a models.py module
                app_module_paths.append(app.__file__)

        # Every app contributes a sibling 'fixtures' directory to the search path.
        app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]

        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')

            # An explicit compression suffix narrows the candidate set.
            if len(parts) > 1 and parts[-1] in compression_types:
                compression_formats = [parts[-1]]
                parts = parts[:-1]
            else:
                compression_formats = compression_types.keys()

            # Likewise for an explicit serialization format suffix.
            if len(parts) == 1:
                fixture_name = parts[0]
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
            else:
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
                        (fixture_name, format)))
                transaction.rollback(using=using)
                transaction.leave_transaction_management(using=using)
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                # '' at the end makes the current directory a fallback.
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))

                label_found = False
                # Try every database-suffix/format/compression combination.
                for combo in product([using, None], formats, compression_formats):
                    database, format, compression_format = combo
                    file_name = '.'.join(
                        p for p in [
                            fixture_name, database, format, compression_format
                        ]
                        if p
                    )

                    if verbosity > 1:
                        self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
                            (humanize(fixture_dir), file_name, fixture_name))
                    full_path = os.path.join(fixture_dir, file_name)
                    open_method = compression_types[compression_format]
                    try:
                        fixture = open_method(full_path, 'r')
                        if label_found:
                            # Two files in the same directory match the label.
                            fixture.close()
                            self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
                                (fixture_name, humanize(fixture_dir))))
                            transaction.rollback(using=using)
                            transaction.leave_transaction_management(using=using)
                            return
                        else:
                            fixture_count += 1
                            objects_in_fixture = 0
                            if verbosity > 0:
                                self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
                                    (format, fixture_name, humanize(fixture_dir)))
                            try:
                                objects = serializers.deserialize(format, fixture, using=using)
                                for obj in objects:
                                    # Only save models the router allows on this db.
                                    if router.allow_syncdb(using, obj.object.__class__):
                                        objects_in_fixture += 1
                                        models.add(obj.object.__class__)
                                        obj.save(using=using)
                                object_count += objects_in_fixture
                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback(using=using)
                                transaction.leave_transaction_management(using=using)
                                if show_traceback:
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                            (full_path, ''.join(traceback.format_exception(sys.exc_type,
                                                sys.exc_value, sys.exc_traceback)))))
                                return
                            fixture.close()

                            # If the fixture we loaded contains 0 objects, assume that an
                            # error was encountered during fixture loading.
                            if objects_in_fixture == 0:
                                sys.stderr.write(
                                    self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
                                        (fixture_name)))
                                transaction.rollback(using=using)
                                transaction.leave_transaction_management(using=using)
                                return
                    except Exception, e:
                        # Candidate file missing/unopenable: try the next combo.
                        if verbosity > 1:
                            self.stdout.write("No %s fixture '%s' in %s.\n" % \
                                (format, fixture_name, humanize(fixture_dir)))

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit(using=using)
            transaction.leave_transaction_management(using=using)

        if object_count == 0:
            if verbosity > 0:
                self.stdout.write("No fixtures found.\n")
        else:
            if verbosity > 0:
                self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (object_count, fixture_count))

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()
|
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file ensures that renpy packages will be imported in the right
# order.
from __future__ import print_function
import sys
import os
import copy
import types
import threading
import cPickle
################################################################################
# Version information
################################################################################
# Version numbers.
try:
from renpy.vc_version import vc_version; vc_version
except ImportError:
vc_version = 0
# The tuple giving the version number.
version_tuple = (6, 99, 13, vc_version)
# The name of this version.
version_name = "We came in peace."
# A string giving the version number only (7.0.1.123).
version_only = ".".join(str(i) for i in version_tuple)
# A verbose string giving the version.
version = "Ren'Py " + version_only
# Other versions.
script_version = 5003000
savegame_suffix = "-LT1.save"
bytecode_version = 1
################################################################################
# Platform Information
################################################################################
# Information about the platform we're running on. We break the platforms
# up into 5 groups - windows-like, mac-like, linux-like, android-like,
# and ios-like.
windows = False
macintosh = False
linux = False
android = False
ios = False
import platform
def get_windows_version():
    """
    When called on windows, returns the windows version as a
    (major, minor) tuple, e.g. (10, 0) for Windows 10.
    """

    import ctypes

    class OSVERSIONINFOEXW(ctypes.Structure):
        # Field order and types mirror the Win32 OSVERSIONINFOEXW struct
        # expected by RtlGetVersion; do not reorder.
        _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
                    ('dwMajorVersion', ctypes.c_ulong),
                    ('dwMinorVersion', ctypes.c_ulong),
                    ('dwBuildNumber', ctypes.c_ulong),
                    ('dwPlatformId', ctypes.c_ulong),
                    ('szCSDVersion', ctypes.c_wchar * 128),
                    ('wServicePackMajor', ctypes.c_ushort),
                    ('wServicePackMinor', ctypes.c_ushort),
                    ('wSuiteMask', ctypes.c_ushort),
                    ('wProductType', ctypes.c_byte),
                    ('wReserved', ctypes.c_byte)]

    try:
        os_version = OSVERSIONINFOEXW()
        os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
        retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))

        # On failure, assume we have a newer version of windows
        if retcode != 0:
            return (10, 0)

        return (os_version.dwMajorVersion, os_version.dwMinorVersion)

    except:
        # ctypes/Ntdll may be unavailable; assume a modern windows.
        return (10, 0)
# Pick exactly one platform flag. On windows the flag holds the (major,
# minor) version tuple (truthy) rather than True; environment variables
# identify iOS/Android builds, and anything else is treated as linux-like.
if platform.win32_ver()[0]:
    windows = get_windows_version()
elif "RENPY_IOS" in os.environ:
    ios = True
elif platform.mac_ver()[0]:
    macintosh = True
elif "ANDROID_PRIVATE" in os.environ:
    android = True
else:
    linux = True

# A flag that's true if we're on a smartphone or tablet-like platform.
mobile = android or ios

# A flag that's set to true if the game directory is bundled inside a mac app.
macapp = False
################################################################################
# Backup Data for Reload
################################################################################
# True if we're done with safe mode checks.
safe_mode_checked = False
# True if autoreload mode is enabled. This has to live here, because it
# needs to survive through an utter restart.
autoreload = False
# A dict that persists through utter restarts. Accessible to all code as
# renpy.session.
session = { }
# A list of modules beginning with "renpy" that we don't want
# to backup.
backup_blacklist = {
"renpy",
"renpy.object",
"renpy.log",
"renpy.bootstrap",
"renpy.debug",
"renpy.display",
"renpy.display.pgrender",
"renpy.display.scale",
"renpy.display.presplash",
"renpy.display.test",
"renpy.text.ftfont",
"renpy.test",
"renpy.test.testast",
"renpy.test.testexecution",
"renpy.test.testkey",
"renpy.test.testmouse",
"renpy.test.testparser",
"renpycoverage",
}
type_blacklist = (
types.ModuleType,
)
name_blacklist = {
"renpy.loadsave.autosave_not_running",
"renpy.python.unicode_re",
"renpy.python.string_re",
"renpy.python.store_dicts",
"renpy.python.store_modules",
"renpy.text.text.VERT_FORWARD",
"renpy.text.text.VERT_REVERSE",
"renpy.savelocation.scan_thread_condition",
"renpy.savelocation.disk_lock",
"renpy.character.TAG_RE",
"renpy.display.im.cache",
"renpy.display.render.blit_lock",
"renpy.display.render.IDENTITY",
"renpy.loader.auto_lock",
"renpy.display.screen.cprof",
"renpy.audio.audio.lock",
}
class Backup():
    """
    This represents a backup of all of the fields in the python modules
    comprising Ren'Py, shortly after they were imported.

    This attempts to preserve object aliasing, but not object identity. If
    renpy.mod.a is renpy.mod.b before the restore, the same will be true
    after the restore - even though renpy.mod.a will have changed identity.

    NOTE: Python 2 code (iteritems, cPickle).
    """

    def __init__(self):

        # A map from (module, field) to the id of the object in that field.
        self.variables = { }

        # A map from id(object) to objects. This is discarded after being
        # pickled.
        self.objects = { }

        # A map from module to the set of names in that module.
        self.names = { }

        # No backup is taken on mobile platforms (reload is unsupported there,
        # see reload_all).
        if mobile:
            return

        for m in sys.modules.values():
            # sys.modules can contain None placeholders; skip them.
            if m is None:
                continue

            self.backup_module(m)

        # A pickled version of self.objects.
        self.objects_pickle = cPickle.dumps(self.objects, cPickle.HIGHEST_PROTOCOL)
        self.objects = None

    def backup_module(self, mod):
        """
        Makes a backup of `mod`, which must be a Python module.
        """

        try:
            name = mod.__name__
        except:
            # Not a real module object; nothing to back up.
            return

        # Only Ren'Py's own modules are backed up.
        if not name.startswith("renpy"):
            return

        if name in backup_blacklist:
            return

        # renpy.styledata.* modules are excluded as a group.
        if name.startswith("renpy.styledata"):
            return

        # Remember the module's current name set, so restore() can delete
        # names added later.
        self.names[mod] = set(vars(mod).keys())

        for k, v in vars(mod).iteritems():

            # Dunder attributes (__name__, __file__, ...) are left alone.
            if k.startswith("__") and k.endswith("__"):
                continue

            if isinstance(v, type_blacklist):
                continue

            if name + "." + k in name_blacklist:
                continue

            idv = id(v)

            # Keying by id() is what preserves aliasing across the restore.
            self.variables[mod, k] = idv
            self.objects[idv] = v

            # If we have a problem pickling things, uncomment the next block.
            try:
                cPickle.dumps(v, cPickle.HIGHEST_PROTOCOL)
            except:
                print("Cannot pickle", name + "." + k, "=", repr(v))
                print("Reduce Ex is:", repr(v.__reduce_ex__(cPickle.HIGHEST_PROTOCOL)))

    def restore(self):
        """
        Restores the modules to a state similar to the state of the modules
        when the backup was created.
        """

        # Nothing was backed up (mobile platforms); nothing to restore.
        if not self.names:
            return

        # Remove new variables from the module.
        for mod, names in self.names.iteritems():
            modvars = vars(mod)
            for name in set(modvars.keys()) - names:
                del modvars[name]

        # Unpickling recreates the saved objects, with shared ids unpickled
        # to shared objects (aliasing preserved, identity not).
        objects = cPickle.loads(self.objects_pickle)

        for k, v in self.variables.iteritems():
            mod, field = k
            setattr(mod, field, objects[v])
# A backup of the Ren'Py modules after initial import.
backup = None
################################################################################
# Import
################################################################################
def update_path(package):
    """
    Update the __path__ of package, to import binary modules from a libexec
    directory.
    """

    components = package.__name__.split(".")

    # Binary modules live in a libexec tree rooted next to the _renpy module.
    import _renpy
    package.__path__.append(os.path.join(os.path.dirname(_renpy.__file__), *components))

    # Also find encodings, to deal with the way py2exe lays things out.
    import encodings
    package.__path__.append(os.path.join(os.path.dirname(encodings.__path__[0]), *components))
def import_all():
    """
    Imports every Ren'Py module in dependency order, extends package
    __path__s for binary modules, takes the reload backup (desktop only),
    and finishes with post_import(). The import order below is
    load-bearing; do not reorder.
    """

    # Note: If we add a new update_path, we have to add an equivalent
    # hook in the renpython hooks dir.

    import renpy  # @UnresolvedImport

    update_path(renpy)

    import renpy.arguments  # @UnresolvedImport

    import renpy.config
    import renpy.log
    import renpy.display
    import renpy.debug

    # Should probably be early, as we will add it as a base to serialized things.
    import renpy.object

    import renpy.game
    import renpy.preferences

    # Adds in the Ren'Py loader.
    import renpy.loader

    import renpy.pyanalysis

    import renpy.ast
    import renpy.atl
    import renpy.curry
    import renpy.color
    import renpy.easy
    import renpy.execution
    import renpy.loadsave
    import renpy.savelocation  # @UnresolvedImport
    import renpy.persistent
    import renpy.scriptedit
    import renpy.parser
    import renpy.python
    import renpy.script
    import renpy.statements

    import renpy.styledata  # @UnresolvedImport
    update_path(renpy.styledata)

    import renpy.style
    renpy.styledata.import_style_functions()

    # Alias kept for compatibility with older pickles/callers.
    sys.modules['renpy.styleclass'] = renpy.style

    import renpy.substitutions
    import renpy.translation
    import renpy.translation.scanstrings
    import renpy.translation.generation
    import renpy.translation.dialogue
    import renpy.translation.extract
    import renpy.translation.merge

    import renpy.display  # @UnresolvedImport @Reimport
    update_path(renpy.display)

    import renpy.display.presplash
    import renpy.display.pgrender
    import renpy.display.scale
    import renpy.display.module

    import renpy.display.render  # Most display stuff depends on this. @UnresolvedImport
    import renpy.display.core  # object @UnresolvedImport

    import renpy.text
    update_path(renpy.text)

    import renpy.text.ftfont
    import renpy.text.font
    import renpy.text.textsupport
    import renpy.text.texwrap
    import renpy.text.text
    import renpy.text.extras

    # Compatibility alias for the old module location.
    sys.modules['renpy.display.text'] = renpy.text.text

    import renpy.gl
    update_path(renpy.gl)

    import renpy.angle
    update_path(renpy.angle)

    import renpy.display.layout
    import renpy.display.viewport
    import renpy.display.transform
    import renpy.display.motion  # layout @UnresolvedImport
    import renpy.display.behavior  # layout @UnresolvedImport
    import renpy.display.transition  # core, layout @UnresolvedImport
    import renpy.display.movetransition  # core @UnresolvedImport
    import renpy.display.im
    import renpy.display.imagelike
    import renpy.display.image  # core, behavior, im, imagelike @UnresolvedImport
    import renpy.display.video
    import renpy.display.focus
    import renpy.display.anim
    import renpy.display.particle
    import renpy.display.joystick
    import renpy.display.controller
    import renpy.display.minigame
    import renpy.display.screen
    import renpy.display.dragdrop
    import renpy.display.imagemap
    import renpy.display.predict
    import renpy.display.emulator
    import renpy.display.tts
    import renpy.display.gesture
    import renpy.display.error

    # Note: For windows to work, renpy.audio.audio needs to be after
    # renpy.display.module.
    import renpy.audio
    update_path(renpy.audio)

    import renpy.audio.audio
    import renpy.audio.music
    import renpy.audio.sound

    import renpy.ui
    import renpy.screenlang

    import renpy.sl2
    update_path(renpy.sl2)

    import renpy.sl2.slast
    import renpy.sl2.slparser
    import renpy.sl2.slproperties
    import renpy.sl2.sldisplayables

    import renpy.lint
    import renpy.warp

    import renpy.editor
    import renpy.memory

    import renpy.exports
    import renpy.character  # depends on exports. @UnresolvedImport

    import renpy.add_from
    import renpy.dump

    import renpy.minstore  # depends on lots. @UnresolvedImport
    import renpy.defaultstore  # depends on everything. @UnresolvedImport

    import renpy.test
    import renpy.test.testmouse
    import renpy.test.testfocus
    import renpy.test.testkey
    import renpy.test.testast
    import renpy.test.testparser
    import renpy.test.testexecution

    import renpy.main

    # Back up the Ren'Py modules.

    global backup

    if not mobile:
        backup = Backup()

    post_import()
def post_import():
    """
    This is called after import or reload, to do further initialization
    of various modules.

    Side effects: creates the 'store' module via renpy.python, aliases it
    as renpy.store / renpy.exports.store / sys.modules['renpy.store'],
    and seeds it with the contents of renpy.defaultstore.
    """
    import renpy  # @UnresolvedImport

    # Create the store.
    renpy.python.create_store("store")

    # Import the contents of renpy.defaultstore into renpy.store, and set
    # up an alias as we do.
    renpy.store = sys.modules['store']
    renpy.exports.store = renpy.store
    sys.modules['renpy.store'] = sys.modules['store']

    # Expose subprocess under the renpy namespace as well.
    # NOTE(review): presumably so game scripts can import renpy.subprocess
    # without touching sys.path — confirm against callers.
    import subprocess
    sys.modules['renpy.subprocess'] = subprocess

    # Copy defaults without clobbering anything already in the store.
    # (.iteritems(): this file targets Python 2.)
    for k, v in renpy.defaultstore.__dict__.iteritems():
        renpy.store.__dict__.setdefault(k, v)

    # Import everything into renpy.exports, provided it isn't
    # already there.
    for k, v in globals().iteritems():
        vars(renpy.exports).setdefault(k, v)
def reload_all():
    """
    Resets all modules to the state they were in right after import_all
    returned.

    Not supported on mobile; relies on the module-level `backup` object
    captured after the initial import.
    """
    if mobile:
        raise Exception("Reloading is not supported on mobile platforms.")

    import renpy.style
    import renpy.display

    # Clear all pending exceptions.  (sys.exc_clear: Python 2 only.)
    sys.exc_clear()

    # Reset the styles.
    renpy.style.reset()  # @UndefinedVariable

    # Shut down the cache thread.
    renpy.display.im.cache.quit()

    # Shut down the importer.
    renpy.loader.quit_importer()

    # Free memory.
    renpy.exports.free_memory()

    # GC renders.
    renpy.display.render.screen_render = None
    renpy.display.render.mark_sweep()

    # Get rid of the draw module and interface.
    renpy.display.draw.deinit()
    renpy.display.draw = None
    renpy.display.interface = None

    # Delete the store modules.  (py2 .keys() returns a list, so mutating
    # sys.modules inside the loop is safe here.)
    for i in sys.modules.keys():
        if i.startswith("store") or i == "renpy.store":
            m = sys.modules[i]
            if m is not None:
                # NOTE(review): __dict__.reset() is not a plain dict method;
                # presumably these modules use a revertable dict — confirm.
                m.__dict__.reset()
            del sys.modules[i]

    # Restore the state of all modules from backup.
    backup.restore()
    renpy.display.im.reset_module()

    post_import()

    # Re-initialize the importer.
    renpy.loader.init_importer()
################################################################################
# Fix things for code analysis
################################################################################
def setup_modulefinder(modulefinder):
    """
    Informs modulefinder about the location of modules in nonstandard places.
    """
    import _renpy

    libexec = os.path.dirname(_renpy.__file__)

    # Register each compiled sub-package directory that actually exists.
    for pkg in ["display", "gl", "angle", "text", "styledata"]:
        pkg_dir = os.path.join(libexec, "renpy", pkg)
        if os.path.exists(pkg_dir):
            modulefinder.AddPackagePath('renpy.' + pkg, pkg_dir)
def import_cython():
    """
    Never called, but necessary to ensure that modulefinder will properly
    grab the various cython modules.
    """
    # Core compiled modules.
    import renpy.arguments
    import renpy.display.accelerator
    import renpy.display.render
    # OpenGL renderer variants.
    import renpy.gl.gl
    import renpy.gl.gl1
    import renpy.gl.gldraw
    import renpy.gl.glenviron_fixed
    import renpy.gl.glenviron_limited
    import renpy.gl.glenviron_shader
    import renpy.gl.glrtt_copy
    import renpy.gl.glrtt_fbo
    import renpy.gl.gltexture
    # ANGLE (Direct3D-backed GL) renderer variants.
    import renpy.angle.gl
    import renpy.angle.gldraw
    import renpy.angle.glenviron_shader
    import renpy.angle.glrtt_copy
    import renpy.angle.glrtt_fbo
    import renpy.angle.gltexture
# Dead branch: never executed at runtime, but it lets static analysis and
# modulefinder see that renpy.defaultstore backs the 'store' module.
if False:
    import renpy.defaultstore as store
# ---------------------------------------------------------------------------
# (file boundary: the code below belongs to the Siconos mechanics IO module)
# ---------------------------------------------------------------------------
# Mechanics IO
import os
import sys
from math import cos, sin, asin, atan2
import numpy as np
import h5py
import pickle
import tempfile
from contextlib import contextmanager
# Siconos Mechanics imports
from siconos.mechanics.collision.tools import Volume
# Constants
# Map from joint relation class name to the number of (points, axes)
# that joint type requires; consumed by check_points_axes() below.
joint_points_axes = {
    'KneeJointR': (1, 0),
    'PivotJointR': (1, 1),
    'PrismaticJointR': (0, 1),
    'CylindricalJointR': (1, 1),
    'FixedJointR': (0, 0),
}
# Utility functions
def floatv(v):
    """Return *v* converted elementwise to a list of Python floats."""
    return list(map(float, v))
def arguments():
    """Returns tuple containing dictionary of calling function's
    named arguments and a list of calling function's unnamed
    positional arguments.
    """
    from inspect import getargvalues, stack
    # getargvalues() returns (args, varargs, keywords, locals); the [-3:]
    # slice keeps the *args name, the **kwargs name, and the locals dict.
    posname, kwname, args = getargvalues(stack()[1][0])[-3:]
    # Extract the caller's *args tuple (default [] when there is none).
    posargs = args.pop(posname, [])
    # Fold the caller's **kwargs dict into the named-arguments dict
    # (pop(None, []) yields [] when the caller has no **kwargs).
    args.update(args.pop(kwname, []))
    # NOTE(review): 'args' is the caller's full locals dict, so any locals
    # assigned before calling arguments() are included too — by design?
    return args, posargs
def check_points_axes(name, joint_class, points, axes):
    """Validate that *points* and *axes* have the shapes and counts
    required by *joint_class* (see joint_points_axes); raise ValueError
    with a descriptive message otherwise.
    """
    def _fail(value, idx):
        expected = joint_points_axes[joint_class][idx]
        word = ['point', 'points', 'axis', 'axes'][idx * 2 + 1 * (expected != 1)]
        raise ValueError('{} ({}) expects {} {} (got {})'
                         .format(joint_class, name, expected, word, value))

    def _count(value, idx):
        # A scalar or empty sequence counts as zero entries; anything else
        # must be an (n, 3) array of coordinates.
        shape = np.shape(value)
        if shape == (0,) or shape == ():
            return 0
        if len(shape) != 2 or shape[1] != 3:
            _fail(value, idx)
        return shape[0]

    for idx, value in ((0, points), (1, axes)):
        n = _count(value, idx)
        if joint_class in joint_points_axes and \
           joint_points_axes[joint_class][idx] != n:
            _fail(value, idx)
@contextmanager
def tmpfile(suffix='', prefix='siconos_io', contents=None,
            debug=False):
    """
    A context manager for a named temporary file.

    Yields an object r where r[0] (alias r.fid) is a text file object
    opened for writing and r[1] (alias r.name) is the file path.  The
    optional *contents* string is written and flushed before yielding.
    On exit the file object is closed and the file removed, unless
    *debug* is true (then the file is kept on disk for inspection).
    """
    (fd, tfilename) = tempfile.mkstemp(suffix=suffix, prefix=prefix)
    # Bug fix: mkstemp returns an already-open OS-level descriptor; close
    # it, since we re-open the file by name below (it used to leak).
    os.close(fd)
    fid = open(tfilename, 'w')
    if contents is not None:
        fid.write(contents)
        fid.flush()

    class TmpFile:
        def __init__(self, fid, name):
            self.fid = fid
            self.name = name

        def __getitem__(self, n):
            if n == 0:
                return self.fid
            elif n == 1:
                return self.name
            else:
                raise IndexError

    try:
        yield TmpFile(fid, tfilename)
    finally:
        # Bug fix: cleanup now also runs when the managed block raises.
        fid.close()
        if not debug:
            os.remove(tfilename)
def warn(msg):
    """Write *msg* to stderr, prefixed with the program name."""
    sys.stderr.write('%s: %s' % (sys.argv[0], msg))
def object_id(obj):
    """returns an unique object identifier (the object's hash value)"""
    ident = obj.__hash__()
    return ident
def group(h, name, must_exist=True):
    """Return the hdf5 group *name* under *h*, creating it when absent.

    With must_exist=False, a failure to create the group (e.g. the file
    is open read-only) yields None instead of raising.
    """
    try:
        return h[name]
    except KeyError:
        pass
    if must_exist:
        return h.create_group(name)
    try:
        return h.create_group(name)
    except ValueError:
        # could not create group (file is probably in read-only mode)
        return None
def data(h, name, nbcolumns, use_compression=False):
    """Return dataset *name* under *h*, creating an unlimited-length
    (0, nbcolumns) dataset when absent; gzip compression is enabled only
    when requested and nbcolumns is positive.
    """
    try:
        return h[name]
    except KeyError:
        pass
    compressed = use_compression and nbcolumns > 0
    if compressed:
        chunks, compression, opts = (4000, nbcolumns), 'gzip', 9
    else:
        chunks, compression, opts = None, None, None
    return h.create_dataset(name, (0, nbcolumns),
                            maxshape=(None, nbcolumns),
                            chunks=chunks,
                            compression=compression,
                            compression_opts=opts)
def add_line(dataset, line):
    """Append *line* as a new row at the end of *dataset* (axis 0 grows
    by one)."""
    new_len = dataset.shape[0] + 1
    dataset.resize(new_len, 0)
    dataset[new_len - 1, :] = line
#
# misc fixes
#
# fix ctr.'name' in old hdf5 files
#
def upgrade_io_format(filename):
    """Upgrade contactor records in an old hdf5 file: the attribute
    'name' is renamed to 'shape_name' in place.
    """
    with MechanicsHdf5(filename, mode='a') as io:
        for instance_name in io.instances():
            for contactor_instance_name in io.instances()[instance_name]:
                contactor = io.instances()[instance_name][
                    contactor_instance_name]
                if 'name' in contactor.attrs:
                    # Bug fix: the message previously printed a literal
                    # '{0}' because .format() was never called.
                    warn("""
contactor {0} attribute 'name': renamed in 'shape_name'
""".format(contactor_instance_name))
                    # Bug fix: read/delete the *attribute* 'name', matching
                    # the membership test above (was contactor['name'],
                    # which addresses dataset content, not attributes).
                    contactor.attrs['shape_name'] = contactor.attrs['name']
                    del contactor.attrs['name']
def str_of_file(filename):
    """Return the entire contents of *filename* as a string."""
    with open(filename, 'r') as f:
        contents = f.read()
    return str(contents)
def file_of_str(filename, string):
    """Write *string* into *filename*, creating missing parent
    directories first.

    :param filename: destination path.
    :param string: text content to write (file is truncated).
    """
    dirname = os.path.dirname(filename)
    # Guard: for a bare filename dirname is '' and makedirs('') would raise.
    if dirname and not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as exc:
            # Bug fix: the original compared against exc.errno.EEXIST,
            # which is an AttributeError since exc.errno is an int; use
            # the errno module constant (ignore a benign creation race).
            import errno
            if exc.errno != errno.EEXIST:
                raise
    with open(filename, "w") as f:
        f.write(string)
#
# fix orientation -> rotation ?
#
def quaternion_get(orientation):
    """
    Get quaternion from orientation.

    :param orientation: either an (axis, angle) pair — axis a 3-vector,
        angle a number in radians — or a 4-component quaternion
        [w, x, y, z], returned unchanged.
    :return: quaternion as [w, x, y, z].
    """
    if len(orientation) == 2:
        # axis + angle
        axis = orientation[0]
        assert len(axis) == 3
        # Generalization: accept any numeric angle (the original asserted
        # type(angle) is float, rejecting plain ints such as 0).
        angle = float(orientation[1])
        n = sin(angle / 2.) / np.linalg.norm(axis)
        ori = [cos(angle / 2.), axis[0] * n, axis[1] * n, axis[2] * n]
    else:
        assert(len(orientation) == 4)
        # a given quaternion
        ori = orientation
    return ori
def quaternion_multiply(q1, q0):
    """Return the Hamilton product q1 * q0 as a float64 numpy array
    [w, x, y, z]."""
    a, b, c, d = q1
    e, f, g, h = q0
    return np.array([a * e - b * f - c * g - d * h,
                     a * f + b * e + c * h - d * g,
                     a * g - b * h + c * e + d * f,
                     a * h + b * g - c * f + d * e], dtype=np.float64)
def phi(q0, q1, q2, q3):
    """
    Euler angle phi (roll) from quaternion components [w, x, y, z].
    """
    sinr_cosp = 2 * (q0 * q1 + q2 * q3)
    cosr_cosp = 1 - 2 * (q1 * q1 + q2 * q2)
    return atan2(sinr_cosp, cosr_cosp)
def theta(q0, q1, q2, q3):
    """
    Euler angle theta (pitch) from quaternion components [w, x, y, z].
    """
    sinp = 2 * (q0 * q2 - q3 * q1)
    return asin(sinp)
def psi(q0, q1, q2, q3):
    """
    Euler angle psi (yaw) from quaternion components [w, x, y, z].
    """
    siny_cosp = 2 * (q0 * q3 + q1 * q2)
    cosy_cosp = 1 - 2 * (q2 * q2 + q3 * q3)
    return atan2(siny_cosp, cosy_cosp)
# vectorized versions
# ufunc-style wrappers so the Euler-angle conversions above can be
# applied elementwise to arrays of quaternion components.
phiv = np.vectorize(phi)
thetav = np.vectorize(theta)
psiv = np.vectorize(psi)
#
# inertia
#
def compute_inertia_and_center_of_mass(shapes, io=None):
    """
    Compute inertia from a list of Shapes.

    Parameters
    ----------
    shapes : iterable
        shape descriptors; each needs .data (or a .shape_name resolvable
        through *io*), .translation, .orientation and optionally .mass
        or .parameters.density.
    io : MechanicsHdf5, optional
        used to resolve shape data by name when a shape has none.

    Returns
    -------
    mass
    center_of_mass
    inertia
    inertia_matrix
    """
    from OCC.GProp import GProp_GProps
    from OCC.BRepGProp import brepgprop_VolumeProperties
    from OCC.gp import gp_Ax1, gp_Dir
    from siconos.mechanics import occ
    # Accumulator for the (density-weighted) global mass properties.
    system = GProp_GProps()
    for shape in shapes:
        iprops = GProp_GProps()
        if shape.data is None:
            if io is not None:
                shape.data = io._shape.get(shape.shape_name, new_instance=True)
            else:
                # best effort: give up and signal failure with None
                warn('cannot get shape {0}'.format(shape.shape_name))
                return None
        iishape = shape.data
        ishape = occ.OccContactShape(iishape).data()
        # the shape relative displacement
        occ.occ_move(ishape, list(shape.translation) + list(shape.orientation))
        brepgprop_VolumeProperties(iishape, iprops)
        # Density priority: explicit shape.mass, then parameters.density,
        # then 1.
        density = None
        if hasattr(shape, 'mass') and shape.mass is not None:
            density = shape.mass / iprops.Mass()
        elif shape.parameters is not None and hasattr(shape.parameters, 'density'):
            density = shape.parameters.density
            #print('shape.parameters.density:', shape.parameters.density)
        else:
            density = 1.
        assert density is not None
        # print("shape", shape.shape_name)
        # print('density:', density)
        # print('iprops.Mass():', iprops.Mass())
        system.Add(iprops, density)
    mass = system.Mass()
    assert (system.Mass() > 0.)
    computed_com = system.CentreOfMass()
    # Full 3x3 inertia matrix (OCC matrices are 1-indexed).
    gp_mat = system.MatrixOfInertia()
    inertia_matrix = np.zeros((3, 3))
    for i in range(0, 3):
        for j in range(0, 3):
            inertia_matrix[i, j] = gp_mat.Value(i + 1, j + 1)
    # Moments of inertia about the axes through the center of mass.
    I1 = system.MomentOfInertia(
        gp_Ax1(computed_com, gp_Dir(1, 0, 0)))
    I2 = system.MomentOfInertia(
        gp_Ax1(computed_com, gp_Dir(0, 1, 0)))
    I3 = system.MomentOfInertia(
        gp_Ax1(computed_com, gp_Dir(0, 0, 1)))
    inertia = [I1, I2, I3]
    center_of_mass = np.array([computed_com.Coord(1),
                               computed_com.Coord(2),
                               computed_com.Coord(3)])
    return mass, center_of_mass, inertia, inertia_matrix
def occ_topo_list(shape):
    """ return the edges & faces from `shape`

    :param shape: a TopoDS_Shape
    :return: a list of edges and faces
    """
    from OCC.TopAbs import TopAbs_FACE
    from OCC.TopAbs import TopAbs_EDGE
    from OCC.TopExp import TopExp_Explorer
    from OCC.TopoDS import topods_Face, topods_Edge

    explorer = TopExp_Explorer()
    faces = []
    edges = []

    # walk all faces
    explorer.Init(shape, TopAbs_FACE)
    while explorer.More():
        faces.append(topods_Face(explorer.Current()))
        explorer.Next()

    # walk all edges
    explorer.Init(shape, TopAbs_EDGE)
    while explorer.More():
        edges.append(topods_Edge(explorer.Current()))
        explorer.Next()

    return faces, edges
def occ_load_file(filename):
    """
    load in pythonocc a igs or step file

    :param filename: a filename with extension (.stp/.step/.igs/.iges)
    :return: a topods_shape (compound of all root shapes in the file)
    """
    from OCC.STEPControl import STEPControl_Reader
    from OCC.IGESControl import IGESControl_Reader
    from OCC.BRep import BRep_Builder
    from OCC.TopoDS import TopoDS_Compound
    from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
    # pick the reader class from the file extension
    # (KeyError for unsupported extensions)
    reader_switch = {'stp': STEPControl_Reader,
                     'step': STEPControl_Reader,
                     'igs': IGESControl_Reader,
                     'iges': IGESControl_Reader}
    # the result compound; returned empty when reading fails
    builder = BRep_Builder()
    comp = TopoDS_Compound()
    builder.MakeCompound(comp)
    reader = reader_switch[os.path.splitext(filename)[1][1:].lower()]()
    status = reader.ReadFile(filename)
    if status == IFSelect_RetDone:  # check status
        failsonly = False
        # report load/transfer diagnostics on the OCC message channel
        reader.PrintCheckLoad(
            failsonly, IFSelect_ItemsByEntity)
        reader.PrintCheckTransfer(
            failsonly, IFSelect_ItemsByEntity)
        reader.TransferRoots()
        nbs = reader.NbShapes()
        # OCC shape indices are 1-based
        for i in range(1, nbs + 1):
            shape = reader.Shape(i)
            builder.Add(comp, shape)
    return comp
def topods_shape_reader(shape, deflection=0.001):
    """Mesh an OpenCascade shape into STL and return a vtkSTLReader
    holding the result.

    :param shape: a TopoDS_Shape
    :param deflection: meshing tolerance passed to BRepMesh
    :return: an updated vtk.vtkSTLReader
    """
    from OCC.StlAPI import StlAPI_Writer
    from OCC.BRepMesh import BRepMesh_IncrementalMesh
    import vtk

    stl_writer = StlAPI_Writer()

    with tmpfile(suffix='.stl') as tmpf:
        mesh = BRepMesh_IncrementalMesh(shape, deflection)
        mesh.Perform()
        assert mesh.IsDone()
        # binary STL output
        stl_writer.SetASCIIMode(False)
        stl_writer.Write(shape, tmpf[1])
        tmpf[0].flush()
        reader = vtk.vtkSTLReader()
        reader.SetFileName(tmpf[1])
        # Update() reads the file eagerly, before tmpfile removes it.
        reader.Update()
        return reader
def brep_reader(brep_string, indx):
    """Deserialize a BRep shape-set string, apply the location at *indx*,
    write the shape to STL and return a vtkSTLReader on the result.

    :param brep_string: BRepTools_ShapeSet serialized string
    :param indx: 1-based index into the shape-set's locations
    :return: an updated vtk.vtkSTLReader
    """
    from OCC.StlAPI import StlAPI_Writer
    from OCC.BRepTools import BRepTools_ShapeSet
    import vtk

    shape_set = BRepTools_ShapeSet()
    shape_set.ReadFromString(brep_string)
    # take the last shape of the set and place it at the requested location
    shape = shape_set.Shape(shape_set.NbShapes())
    location = shape_set.Locations().Location(indx)
    shape.Location(location)

    stl_writer = StlAPI_Writer()

    with tmpfile(suffix='.stl') as tmpf:
        stl_writer.Write(shape, tmpf[1])
        tmpf[0].flush()
        reader = vtk.vtkSTLReader()
        reader.SetFileName(tmpf[1])
        # Update() reads the file eagerly, before tmpfile removes it.
        reader.Update()
        return reader
class MechanicsHdf5(object):
    """a MechanicsHdf5 context manager, used to prepare a simulation description
    to be executed by MechanicsRunner.

    Parameters
    ----------
    io_filename: string, optional
        hdf5 file name, default = <caller>.hdf5, caller being the name
        without ext of the file that instanciates the Runner.
    mode: string, optional
        h5 mode (w, r, append), default = 'w'
    io_filename_backup: string, optional
        name of a backup (copy) file for hdf5 outputs.
        Backup every <output_frequency> step. Default = <caller>_last.hdf5
    use_compression: boolean, optional
        true to use compression for h5 file, default=False
    output_domains: boolean, optional
        if true, outputs info regarding contact point domains,
        default=False
    verbose: boolean, optional
        default=True
    """
    def __init__(self, io_filename=None, mode='w', io_filename_backup=None,
                 use_compression=False, output_domains=False, verbose=True):
        # Default file name: <calling script>.hdf5
        if io_filename is None:
            self._io_filename = '{0}.hdf5'.format(
                os.path.splitext(os.path.basename(sys.argv[0]))[0])
        else:
            self._io_filename = io_filename
        # Default backup name: <file>_last.hdf5
        if io_filename_backup is None:
            self._io_filename_backup = '{0}_last.hdf5'.format(
                os.path.splitext(self._io_filename)[0])
        else:
            self._io_filename_backup = io_filename_backup
        self._output_backup = False
        self._mode = mode
        # hdf5 groups/datasets; all populated by __enter__, None until then.
        self._static_data = None
        self._velocities_data = None
        self._dynamic_data = None
        self._cf_data = None
        self._domain_data = None
        self._solv_data = None
        self._log_data = None
        self._input = None
        self._nslaws_data = None
        # in-memory cache of nonsmooth laws
        self._nslaws = dict()
        self._out = None
        self._data = None
        self._ref = None
        self._permanent_interactions = None
        self._joints = None
        self._boundary_conditions = None
        self._plugins = None
        self._external_functions = None
        # running counters used to assign ids to shapes/objects
        self._number_of_shapes = 0
        self._number_of_permanent_interactions = 0
        self._number_of_dynamic_objects = 0
        self._number_of_static_objects = 0
        self._use_compression = use_compression
        self._should_output_domains = output_domains
        self._verbose = verbose
    def __enter__(self):
        """Reminder: this function will be called when a 'with'
        statement
        will be executed with the present class.
        Warning : it means that this class must be called inside
        a with statement to be properly initialized!
        """
        # -- Creates the Open the hdf5 object --
        self._out = h5py.File(self._io_filename, self._mode)
        # -- And read its content --
        # Important : since the mode might be write or read, most
        # of the attributes and fields
        # must have a default value to tackle the 'write' case.
        # Scene dimension, 3 by default.
        self._dimension = self._out.attrs.get('dimension', 3)
        self._data = group(self._out, 'data')
        self._ref = group(self._data, 'ref')
        # Optional groups: created on demand, may be None on read-only files.
        self._permanent_interactions = group(self._data,
                                             'permanent_interactions',
                                             must_exist=False)
        self._joints = group(self._data, 'joints', must_exist=False)
        self._plugins = group(self._data, 'plugins', must_exist=False)
        self._external_functions = group(self._data, 'external_functions',
                                         must_exist=False)
        try:
            self._boundary_conditions = group(self._data,
                                              'boundary_conditions',
                                              must_exist=(self._mode == 'w'))
        except Exception as e:
            print('Warning -  group(self._data, boundary_conditions ) : ', e)
        # Output datasets; the 'info' attributes document the columns and
        # are only (re)written when creating a fresh file.
        self._static_data = data(self._data, 'static', 9,
                                 use_compression=self._use_compression)
        self._velocities_data = data(self._data, 'velocities', 8,
                                     use_compression=self._use_compression)
        if self._mode == 'w':
            self._velocities_data.attrs['info'] = 'time, ds id ,'
            self._velocities_data.attrs['info'] += 'translational velocities ,'
            self._velocities_data.attrs['info'] += 'angular velocities'
        self._dynamic_data = data(self._data, 'dynamic', 9,
                                  use_compression=self._use_compression)
        if self._mode == 'w':
            self._dynamic_data.attrs['info'] = 'time, ds id , translation ,'
            self._dynamic_data.attrs['info'] += 'orientation'
        self._cf_data = data(self._data, 'cf', 26,
                             use_compression=self._use_compression)
        if self._mode == 'w':
            self._cf_data.attrs['info'] = 'time, mu, contact point A ,'
            self._cf_data.attrs['info'] += 'contact point B, contact normal, '
            self._cf_data.attrs['info'] += 'relative gap relative velocity,'
            self._cf_data.attrs['info'] += 'reaction impulse, interaction id,'
            self._cf_data.attrs['info'] += 'ds 1 number, ds 2 number '
        # Domain output is created when requested or already present.
        if self._should_output_domains or 'domain' in self._data:
            self._domain_data = data(self._data, 'domain', 3,
                                     use_compression=self._use_compression)
        self._solv_data = data(self._data, 'solv', 4,
                               use_compression=self._use_compression)
        try:
            self._log_data = group(self._data, 'log')
        except Exception as e:
            print('Warning -  group(self._data, log ) : ', e)
        self._input = group(self._data, 'input')
        self._nslaws_data = group(self._data, 'nslaws')
        return self
    def __exit__(self, type_, value, traceback):
        # Close the hdf5 file; exceptions are not suppressed (returns None).
        self._out.close()
def print_verbose(self, *args, **kwargs):
if self._verbose:
print('[io.mechanics]', *args, **kwargs)
# hdf5 structure
    def dimension(self):
        """
        dimension : get the dimension (2 or 3) of the scene
        """
        # Set in __enter__ from the file attribute 'dimension' (default 3).
        return self._dimension
    def shapes(self):
        """
        Shapes : parameterized primitives or user defined
        (convex set or meshes)
        """
        # hdf5 group '/data/ref', one dataset per shape definition.
        return self._ref
    def permanent_interactions(self):
        """
        Permanent interactions.
        """
        # hdf5 group '/data/permanent_interactions' (may be None read-only).
        return self._permanent_interactions
    def static_data(self):
        """
        Coordinates and orientations of static objects.
        """
        # 9-column dataset '/data/static', created in __enter__.
        return self._static_data
    def dynamic_data(self):
        """
        Coordinates and orientations of dynamic objects.
        """
        # 9-column dataset '/data/dynamic': time, ds id, translation, orientation.
        return self._dynamic_data
    def velocities_data(self):
        """
        Velocities of dynamic objects
        """
        # 8-column dataset '/data/velocities': time, ds id, linear + angular.
        return self._velocities_data
    def contact_forces_data(self):
        """
        Contact points information.
        """
        # 26-column dataset '/data/cf'; see its 'info' attribute for columns.
        return self._cf_data
    def domains_data(self):
        """
        Contact point domain information.
        """
        # 3-column dataset '/data/domain'; None unless domain output enabled.
        return self._domain_data
    def solver_data(self):
        """
        Solver output
        """
        # 4-column dataset '/data/solv'.
        return self._solv_data
    def log_data(self):
        """
        log output
        """
        # hdf5 group '/data/log'.
        return self._log_data
    def instances(self):
        """
        Scene objects.
        """
        # hdf5 group '/data/input': one subgroup per object added.
        return self._input
    def nonsmooth_laws(self):
        """
        Non smooth laws between group of contactors.
        """
        # hdf5 group '/data/nslaws'.
        return self._nslaws_data
    def joints(self):
        """
        Joints between dynamic objects or between an object and the scenery.
        """
        # hdf5 group '/data/joints' (may be None on read-only files).
        return self._joints
    def boundary_conditions(self):
        """
        Boundary conditions applied to dynamic objects
        """
        # hdf5 group '/data/boundary_conditions' (creation may have failed
        # on read-only files; see __enter__).
        return self._boundary_conditions
    def add_plugin_source(self, name, filename):
        """
        Add C source plugin

        The file contents are stored as a variable-length string dataset
        under '/data/plugins'; no-op if *name* already exists.
        """
        if name not in self._plugins:
            plugin_src = self._plugins.create_dataset(name, (1,),
                                                      dtype=h5py.new_vlen(str))
            plugin_src[:] = str_of_file(filename)
            # keep the original path for reference
            plugin_src.attrs['filename'] = filename
def add_external_function(self, name, body_name, function_name,
plugin_name, plugin_function_name):
if name not in self._external_functions:
ext_fun = group(self._external_functions, name)
ext_fun.attrs['body_name'] = body_name
ext_fun.attrs['function_name'] = function_name
ext_fun.attrs['plugin_name'] = plugin_name
ext_fun.attrs['plugin_function_name'] = plugin_function_name
def add_external_bc_function(self, name, body_name, bc_indices,
plugin_name, plugin_function_name):
if name not in self._external_functions:
ext_fun = group(self._external_functions, name)
ext_fun.attrs['body_name'] = body_name
ext_fun.attrs['plugin_name'] = plugin_name
ext_fun.attrs['plugin_function_name'] = plugin_function_name
ext_fun.attrs['bc_indices'] = bc_indices
    def add_mesh_from_string(self, name, shape_data, scale=None,
                             insideMargin=None, outsideMargin=None):
        """
        Add a mesh shape from a string.
        Accepted format : mesh encoded in VTK .vtp format

        No-op if *name* already exists in the shape reference group.
        """
        if name not in self._ref:
            shape = self._ref.create_dataset(name, (1,),
                                             dtype=h5py.new_vlen(str))
            shape[:] = shape_data
            # sequential id used to map shapes to objects
            shape.attrs['id'] = self._number_of_shapes
            shape.attrs['type'] = 'vtp'
            if scale is not None:
                shape.attrs['scale'] = scale
            if insideMargin is not None:
                shape.attrs['insideMargin'] = insideMargin
            if outsideMargin is not None:
                shape.attrs['outsideMargin'] = outsideMargin
            self._number_of_shapes += 1
    def add_mesh_from_file(self, name, filename, scale=None,
                           insideMargin=None, outsideMargin=None):
        """ Add a mesh shape from a file.
        Accepted format : .stl or mesh encoded in VTK .vtp format

        .stl input is converted to .vtp via vtk before storage.
        """
        import vtk

        # resolve relative paths against the calling script's directory
        if filename[0] != os.path.sep:
            filename = os.path.join(
                os.path.split(os.path.abspath(sys.argv[0]))[0],
                filename)
        if name not in self._ref:
            if os.path.splitext(filename)[-1][1:] == 'stl':
                reader = vtk.vtkSTLReader()
                reader.SetFileName(filename)
                reader.Update()

                if reader.GetErrorCode() != 0:
                    print('vtkSTLReader error', reader.GetErrorCode())
                    sys.exit(1)

                # round-trip through a temporary .vtp file to get the
                # XML PolyData representation
                with tmpfile() as tmpf:
                    writer = vtk.vtkXMLPolyDataWriter()
                    writer.SetInputData(reader.GetOutput())
                    writer.SetFileName(tmpf[1])
                    writer.Write()

                    shape_data = str_of_file(tmpf[1])

            else:
                assert os.path.splitext(filename)[-1][1:] == 'vtp'
                shape_data = str_of_file(filename)

            self.add_mesh_from_string(name, shape_data, scale=scale,
                                      insideMargin=insideMargin,
                                      outsideMargin=outsideMargin)
    def add_height_map(self, name, heightmap, rectangle,
                       insideMargin=None, outsideMargin=None):
        """
        Add a heightmap represented as a SiconosMatrix

        :param heightmap: 2d array of heights, at least 2x2
        :param rectangle: (length x, length y) extents of the base rectangle
        """
        assert(heightmap.shape[0] >= 2)
        assert(heightmap.shape[1] >= 2)
        if name not in self._ref:
            shape = self._ref.create_dataset(name, data=heightmap)
            shape.attrs['id'] = self._number_of_shapes
            shape.attrs['type'] = 'heightmap'

            # measurements of the heightfield, i.e. length of sides of
            # the rectangle where heightmap will be placed -- height
            # is represented by heightmap values
            assert(len(rectangle) == 2)
            shape.attrs['rect'] = rectangle  # tuple (length x, length y)

            if insideMargin is not None:
                shape.attrs['insideMargin'] = insideMargin
            if outsideMargin is not None:
                shape.attrs['outsideMargin'] = outsideMargin
            self._number_of_shapes += 1
    def add_brep_from_string(self, name, shape_data):
        """
        Add a brep contained in a string.

        *shape_data* is either the raw brep string, or a (string, index)
        pair as produced by pythonocc's __getstate__.
        """
        if name not in self._ref:
            shape = self._ref.create_dataset(name, (1,),
                                             dtype=h5py.new_vlen(str))
            if type(shape_data) == str:
                # raw str
                shape[:] = shape_data
            else:
                # __getstate__ as with pythonocc
                shape[:] = shape_data[0]
                # index into the shape-set locations (see brep_reader)
                shape.attrs['occ_indx'] = shape_data[1]

            shape.attrs['id'] = self._number_of_shapes
            shape.attrs['type'] = 'brep'

            self._number_of_shapes += 1
    def add_occ_shape(self, name, occ_shape):
        """
        Add an OpenCascade TopoDS_Shape.

        The shape is serialized through a temporary STEP file and stored
        as a variable-length string dataset.
        """
        if name not in self._ref:
            from OCC.STEPControl import STEPControl_Writer, STEPControl_AsIs

            # step format is used for the storage.
            step_writer = STEPControl_Writer()

            step_writer.Transfer(occ_shape, STEPControl_AsIs)

            shape_data = None

            with tmpfile() as tmpf:
                step_writer.Write(tmpf[1])
                tmpf[0].flush()
                shape_data = str_of_file(tmpf[1])

                shape = self._ref.create_dataset(name, (1,),
                                                 dtype=h5py.new_vlen(str))
                shape[:] = shape_data
                shape.attrs['id'] = self._number_of_shapes
                shape.attrs['type'] = 'step'
                self._number_of_shapes += 1
def add_shape_data_from_file(self, name, filename):
"""
Add shape data from a file.
"""
if name not in self._ref:
shape = self._ref.create_dataset(name, (1,),
dtype=h5py.new_vlen(str))
shape[:] = str_of_file(filename)
shape.attrs['id'] = self._number_of_shapes
try:
shape.attrs['type'] = os.path.splitext(filename)[1][1:]
except:
shape.attrs['type'] = 'unknown'
self._number_of_shapes += 1
    def add_interaction(self, name, body1_name, contactor1_name=None,
                        body2_name=None, contactor2_name=None,
                        distance_calculator='cadmbtb',
                        offset1=0.0, offset2=0.0):
        """
        Add permanent interactions between two objects contactors.

        Stored as a record in '/data/permanent_interactions' with the
        body/contactor names and distance calculator as attributes.
        """
        if name not in self.permanent_interactions():
            pinter = self.permanent_interactions().create_dataset(
                name, (1,), dtype=h5py.new_vlen(str))
            pinter.attrs['id'] = self._number_of_permanent_interactions
            pinter.attrs['type'] = 'permanent_interaction'
            pinter.attrs['body1_name'] = body1_name
            pinter.attrs['body2_name'] = body2_name
            # contactor names are optional; absent attrs mean "all contactors"
            # NOTE(review): semantics assumed from usage — confirm in runner.
            if contactor1_name is not None:
                pinter.attrs['contactor1_name'] = contactor1_name
            if contactor2_name is not None:
                pinter.attrs['contactor2_name'] = contactor2_name
            pinter.attrs['distance_calculator'] = distance_calculator
            pinter.attrs['offset1'] = offset1
            pinter.attrs['offset2'] = offset2

            self._number_of_permanent_interactions += 1
def add_convex_shape(self, name, points,
insideMargin=None, outsideMargin=None):
"""
Add a convex shape defined by a list of points.
"""
# infer the dimension of the problem
if np.shape(points)[1] == 2:
self._dimension = 2
else:
if self._dimension == 2:
raise ValueError('It is not yet possible to mix 2D and 3D primitives shapes')
self._dimension == 3
self._out.attrs['dimension'] = self._dimension
if name not in self._ref:
shape = self._ref.create_dataset(name,
(np.shape(points)[0],
np.shape(points)[1]))
if insideMargin is not None:
shape.attrs['insideMargin'] = insideMargin
if outsideMargin is not None:
shape.attrs['outsideMargin'] = outsideMargin
shape[:] = points[:]
shape.attrs['type'] = 'convex'
shape.attrs['id'] = self._number_of_shapes
self._number_of_shapes += 1
def add_primitive_shape(self, name, primitive, params,
insideMargin=None, outsideMargin=None):
"""
Add a primitive shape.
"""
# infer the dimension of the problem
if primitive == 'Disk' or primitive == 'Box2d':
self._dimension = 2
else:
if self._dimension == 2:
raise ValueError('It is not yet possible to mix 2D and 3D primitives shapes')
self._dimension == 3
self._out.attrs['dimension'] = self._dimension
if name not in self._ref:
shape = self._ref.create_dataset(name, (1, len(params)))
shape.attrs['id'] = self._number_of_shapes
shape.attrs['type'] = 'primitive'
shape.attrs['primitive'] = primitive
if insideMargin is not None:
shape.attrs['insideMargin'] = insideMargin
if outsideMargin is not None:
shape.attrs['outsideMargin'] = outsideMargin
shape[:] = params
self._number_of_shapes += 1
    def add_object(self, name, shapes,
                   translation,
                   orientation=None,
                   velocity= None,
                   use_volume_centroid_as_initial_translation=False,
                   mass=None, center_of_mass=[0, 0, 0], inertia=None,
                   time_of_birth=-1, time_of_death=-1,
                   allow_self_collide=False):
        """Add an object with associated shapes as a list of Volume or
        Contactor objects. Contact detection and processing is
        defined by the Contactor objects. The Volume objects are used for
        the computation of inertia and center of mass if not provided.

        The body-fixed frame is assumed to be the global inertial
        frame. This means that
        1. By default, the center of mass is located at the origin.
           The initial translation is applied from this point, so that x_g(0) = translation
        2. the orientation is identical to the inertial frame.
           The initial orientation is applied to the inertial frame to obtain
           the body-fixed frame.

        Each Contactor and Volume object may have a relative
        translation and a relative orientation expressed in the bodyframe
        coordinates.

        Parameters
        ----------
        name: string
            The name of the object.
        shapes: iterable
            The list of associated Contactor or Volume objects.
        translation: array_like of length 3 or 2 (dimension =2)
            Initial translation of the object (mandatory)
        orientation: array_like of length 3 (Euler Angles) or 4
            (unit quaternion), or 1 (dimension =2)
            Initial orientiation of the object. By default, identity
        velocity: array_like of length 6, or 3 (dimension =2)
            Initial velocity of the object. The default velocity is zero.
            dimension =3 :
                The components are those of the translation velocity along
                x, y and z axis and the rotation velocity around x, y and
                z axis.
            dimension =2 :
                The components are those of the translation velocity along
                x, y and the rotation velocity z axis.
        mass: float
            The mass of the object, if it is None the object is defined as
            a static object involved only in contact detection.
            The default value is None.
        center_of_mass: array_like of length 3
            The position of the center of mass expressed in the body frame
            coordinates.
        inertia: array_like of length 3 or 3x3 matrix.
            The principal moments of inertia (array of length 3) or
            a full 3x3 inertia matrix
        use_volume_centroid_as_initial_translation: boolean.
            if True and if a Volume is given is the list of shape,
            the position of
            the volume centroid is used as initial translation.
        """
        # NOTE(review): center_of_mass=[0, 0, 0] is a mutable default
        # argument; safe only as long as nothing mutates it.
        # print(arguments())
        # Fill in per-dimension defaults for orientation and velocity.
        if (self._dimension == 3):
            if orientation is None:
                orientation = [1, 0, 0, 0]
            if velocity is None:
                velocity = [0, 0, 0, 0, 0, 0]
            ori = quaternion_get(orientation)
            assert (len(translation) == 3)
            assert (len(ori) == 4)
        elif (self._dimension == 2):
            if orientation is None:
                orientation = [0.]
            if velocity is None:
                velocity = [0, 0, 0]
            assert (len(translation) == 2)
            ori = orientation

        is_center_of_mass_computed = False
        if name not in self._input:
            # Derive mass/inertia from Volume shapes when not provided
            # (occ backend only).
            if (inertia is None) or (mass is None):
                if any(map(lambda s: isinstance(s, Volume), shapes)):
                    # a computed inertia and center of mass
                    # occ only
                    volumes = filter(lambda s: isinstance(s, Volume),
                                     shapes)

                    computed_mass, com, computed_inertia, computed_inertia_matrix = compute_inertia_and_center_of_mass(volumes, self)
                    self.print_verbose('{0}: computed mass from Volume'.format(name))
                    self.print_verbose('{0}: computed center of mass:'.format(name),
                                       com[0],
                                       com[1],
                                       com[2])
                    self.print_verbose('{0}: computed mass:'.format(name),
                                       computed_mass)
                    self.print_verbose('{0}: computed inertia:'.format(name),
                                       computed_inertia[0], computed_inertia[1],
                                       computed_inertia[2])
                    self.print_verbose('{0}: computed inertia matrix:'.format(name),
                                       computed_inertia_matrix)
                    is_center_of_mass_computed = True
                    if mass is None:
                        mass = computed_mass
                    if inertia is None:
                        inertia = computed_inertia_matrix

            obj = group(self._input, name)

            # Optionally re-center the object on the computed centroid,
            # shifting each shape's relative translation accordingly.
            if use_volume_centroid_as_initial_translation and is_center_of_mass_computed:
                translation = com
                for s in shapes:
                    s.translation = s.translation - com

            # Negative times mean "not set"; only store explicit values.
            if time_of_birth >= 0:
                obj.attrs['time_of_birth'] = time_of_birth
            if time_of_death >= 0:
                obj.attrs['time_of_death'] = time_of_death

            # mass present => dynamic object; mass None => static object.
            if mass is not None:
                obj.attrs['mass'] = mass
                obj.attrs['type'] = 'dynamic'
                if np.isscalar(mass) and mass <= 0.:
                    self.print_verbose("The use of a mass equal to zero to define a static object is deprecated.")
                    self.print_verbose("Do not give the mass or set mass=None to define a static object")
            else:
                obj.attrs['type'] = 'static'
            obj.attrs['translation'] = translation
            obj.attrs['orientation'] = ori
            obj.attrs['velocity'] = velocity
            obj.attrs['center_of_mass'] = center_of_mass
            if inertia is not None:
                obj.attrs['inertia'] = inertia
            if allow_self_collide is not None:
                obj.attrs['allow_self_collide'] = allow_self_collide

            # One (empty) dataset per contactor, attributes describe the
            # shape reference and its relative placement.
            contactors = shapes

            for num, ctor in enumerate(contactors):

                if ctor.instance_name is not None:
                    # a specified name
                    instance_name = ctor.instance_name
                else:
                    # the default name for contactor
                    instance_name = '{0}-{1}'.format(ctor.shape_name, num)

                dat = data(obj, instance_name, 0,
                           use_compression=self._use_compression)

                dat.attrs['instance_name'] = instance_name
                dat.attrs['shape_name'] = ctor.shape_name
                if hasattr(ctor, 'group'):
                    dat.attrs['group'] = ctor.group

                if hasattr(ctor, 'parameters') and \
                   ctor.parameters is not None:
                    # we add np.void to manage writing string in hdf5 files see http://docs.h5py.org/en/latest/strings.html
                    dat.attrs['parameters'] = np.void(pickle.dumps(ctor.parameters))

                if hasattr(ctor, 'contact_type') and \
                   ctor.contact_type is not None:
                    dat.attrs['type'] = ctor.contact_type

                if hasattr(ctor, 'contact_index') and \
                   ctor.contact_index is not None:
                    dat.attrs['contact_index'] = ctor.contact_index

                dat.attrs['translation'] = ctor.translation
                dat.attrs['orientation'] = quaternion_get(ctor.orientation)

            # ids: negative for static objects, positive for dynamic ones.
            if mass is None or mass == 0:
                obj.attrs['id'] = -(self._number_of_static_objects + 1)
                self._number_of_static_objects += 1
            else:
                obj.attrs['id'] = (self._number_of_dynamic_objects + 1)
                self._number_of_dynamic_objects += 1
            return obj
def add_Newton_impact_rolling_friction_nsl(self, name, mu, mu_r, e=0,
collision_group1=0,
collision_group2=0):
"""
Add a nonsmooth law for contact between 2 groups.
Only NewtonImpactFrictionNSL are supported.
name is an user identifiant and must be unique,
mu is the coefficient of friction,
e is the coefficient of restitution on the contact normal,
gid1 and gid2 define the group identifiants.
"""
if name not in self._nslaws_data:
nslaw = self._nslaws_data.create_dataset(name, (0,))
nslaw.attrs['type'] = 'NewtonImpactRollingFrictionNSL'
nslaw.attrs['mu'] = mu
nslaw.attrs['mu_r'] = mu_r
nslaw.attrs['e'] = e
nslaw.attrs['gid1'] = collision_group1
nslaw.attrs['gid2'] = collision_group2
def add_Newton_impact_friction_nsl(self, name, mu, e=0, collision_group1=0,
collision_group2=0):
"""
Add a nonsmooth law for contact between 2 groups.
Only NewtonImpactFrictionNSL are supported.
name is an user identifiant and must be unique,
mu is the coefficient of friction,
e is the coefficient of restitution on the contact normal,
gid1 and gid2 define the group identifiants.
"""
if name not in self._nslaws_data:
nslaw = self._nslaws_data.create_dataset(name, (0,))
nslaw.attrs['type'] = 'NewtonImpactFrictionNSL'
nslaw.attrs['mu'] = mu
nslaw.attrs['e'] = e
nslaw.attrs['gid1'] = collision_group1
nslaw.attrs['gid2'] = collision_group2
# Note, default groups are -1 here, indicating not to add them to
# the nslaw lookup table for contacts, since 1D impacts are
# useless in this case. They are however useful for joint stops.
def add_Newton_impact_nsl(self, name, e=0, collision_group1=-1,
collision_group2=-1):
"""
Add a nonsmooth law for contact between 2 groups.
Only NewtonImpactNSL are supported.
name is a user identifier and must be unique,
e is the coefficient of restitution on the contact normal,
gid1 and gid2 define the group identifiers.
As opposed to add_Newton_impact_friction_nsl, the default groups are
-1, making the NSL unassociated with point contacts. It can
by used for joint stops however.
"""
if name not in self._nslaws_data:
nslaw = self._nslaws_data.create_dataset(name, (0,))
nslaw.attrs['type'] = 'NewtonImpactNSL'
nslaw.attrs['e'] = e
nslaw.attrs['gid1'] = collision_group1
nslaw.attrs['gid2'] = collision_group2
# Note, default groups are -1 here, indicating not to add them to
# the nslaw lookup table for contacts, since 1D impacts are
# useless in this case. They are however useful for joint friction.
    def add_relay_nsl(self, name, lb, ub, size=1, collision_group1=-1,
                      collision_group2=-1):
        """
        Add a RelayNSL nonsmooth law between 2 groups.

        name is a user identifier and must be unique,
        lb and ub are the lower and upper bounds of the relay,
        size is the dimension of the law (1 by default),
        collision_group1 and collision_group2 define the group identifiers.
        As for add_Newton_impact_nsl, the default groups are -1, making the
        NSL unassociated with point contacts. It can be used for joint
        friction however.
        """
        # Silently keep the first definition: a duplicate name is ignored.
        if name not in self._nslaws_data:
            nslaw = self._nslaws_data.create_dataset(name, (0,))
            nslaw.attrs['type'] = 'RelayNSL'
            nslaw.attrs['size'] = size
            nslaw.attrs['lb'] = lb
            nslaw.attrs['ub'] = ub
            nslaw.attrs['gid1'] = collision_group1
            nslaw.attrs['gid2'] = collision_group2
def add_joint(self, name, object1, object2=None,
points=[[0, 0, 0]], axes=[[0, 1, 0]],
joint_class='PivotJointR', absolute=None,
allow_self_collide=None, nslaws=None, stops=None,
friction=None, coupled=None,references=None):
"""
add a joint between two objects
"""
if name in self.joints():
raise ValueError('Joint {} already in simulation!'.format(name))
else:
joint = self.joints().create_dataset(name, (0,))
joint.attrs['object1'] = object1
if object2 is not None:
joint.attrs['object2'] = object2
joint.attrs['type'] = joint_class
check_points_axes(name, joint_class, points, axes)
if points is not None:
joint.attrs['points'] = points
if axes is not None:
joint.attrs['axes'] = axes
if absolute in [True, False]:
joint.attrs['absolute'] = absolute
if allow_self_collide in [True, False]:
joint.attrs['allow_self_collide'] = allow_self_collide
if nslaws is not None:
# either name of one nslaw, or a list of names same length as stops
joint.attrs['nslaws'] = np.array(nslaws, dtype='S')
if stops is not None:
joint.attrs['stops'] = stops # must be a table of [[axis,pos,dir]..]
if friction is not None:
# must be an NSL name (e.g. RelayNSL), or list of same
joint.attrs['friction'] = np.array(friction, dtype='S')
if coupled is not None:
# must be a list of tuples of two integers (DoF
# indexes) and a float (ratio)
for c in coupled:
assert(len(c) == 3)
joint.attrs['coupled'] = np.array(coupled)
if references is not None:
# must be a list of two joint names and one DS name
assert(len(references) == 2 or len(references)==3)
joint.attrs['references'] = np.array(references, dtype='S')
def add_boundary_condition(self, name, object1, indices=None,
bc_class='HarmonicBC',
v=None, a=None, b=None, omega=None, phi=None):
"""
add boundarycondition to the object object1
implementation only works for HarmonicBC for the moment
"""
if name not in self.boundary_conditions():
boundary_condition = self.boundary_conditions().create_dataset(
name, (0,))
boundary_condition.attrs['object1'] = object1
boundary_condition.attrs['indices'] = indices
boundary_condition.attrs['type'] = bc_class
if bc_class == 'HarmonicBC':
boundary_condition.attrs['a'] = a
boundary_condition.attrs['b'] = b
boundary_condition.attrs['omega'] = omega
boundary_condition.attrs['phi'] = phi
elif bc_class == 'BoundaryCondition':
boundary_condition.attrs['v'] = v
elif bc_class == 'FixedBC':
pass # nothing to do
else:
raise NotImplementedError
|
|
# -*- coding: utf-8 -*-
"""Methods for calculating what happens to the ice_column as external forcing is applied. That is, how does
weather affect an ice column. The inner workings of the ice column is part of the IceColumn class fount in ice.py."""
import math
import copy
import numpy as np
from icemodelling import parameterization as dp, constants as const
from experimental import energybalance as deb
from icemodelling import ice as ice
from utilities import makelogs as ml
__author__ = 'raek'
def calculate_ice_cover_air_temp(inn_column_inn, date, temp, dh_sno, cloud_cover=None, time_step=60*60*24):
    """Step an ice column forward in time using air temperature as forcing.

    :param inn_column_inn: [IceColumn] initial ice column for modelling (not mutated; a deep copy is used).
    :param date:           [list] dates to calculate over.
    :param temp:           [list] average air temperature per date.
    :param dh_sno:         [list] new snow over each period (day).
    :param cloud_cover:    [list or None] cloud cover per date; when given, the surface
                           temperature is derived from air temperature and clouds.
    :param time_step:      [int] fixed time step of 24 hrs given in seconds.
    :return:               [list of IceColumn] the initial column followed by one column per calculated date.
    """
    inn_column = copy.deepcopy(inn_column_inn)
    inn_column.update_water_line()
    inn_column.remove_metadata()
    inn_column.remove_time()

    ice_cover = [copy.deepcopy(inn_column)]

    if cloud_cover is None:
        cloud_cover = [None] * len(date)

    for i in range(len(date)):
        # Skip dates before the initial ice column. (Bug fix: the original
        # incremented the loop variable here, which is a no-op inside a
        # for-loop; `continue` expresses the intent.)
        if date[i] < inn_column.date:
            continue

        # A cloudless sky gives a lower surface temperature than the air temperature.
        if cloud_cover[i] is not None:
            temp_surf = dp.temperature_from_temperature_and_clouds(temp[i], cloud_cover[i])
        else:
            temp_surf = temp[i]

        out_column = get_ice_thickness_from_surface_temp(inn_column, time_step, dh_sno[i], temp_surf)
        ice_cover.append(out_column)
        inn_column = copy.deepcopy(out_column)

    return ice_cover
def calculate_ice_cover_eb(
        utm33_x, utm33_y, date, temp_atm, prec, prec_snow, cloud_cover, wind, rel_hum, pressure_atm, inn_column=None):
    """Step an ice column forward in time, driving it with a full energy balance.

    :param utm33_x:      location easting (UTM 33).
    :param utm33_y:      location northing (UTM 33).
    :param date:         [list] dates to calculate over.
    :param temp_atm:     [list] air temperature per date.
    :param prec:         [list] precipitation per date.
    :param prec_snow:    [list] snow precipitation per date.
    :param cloud_cover:  [list] cloud cover per date.
    :param wind:         [list] wind per date.
    :param rel_hum:      [list] relative humidity per date.
    :param pressure_atm: [list] atmospheric pressure per date.
    :param inn_column:   [IceColumn or None] initial column; an empty column at date[0] when None.
    :return:             (icecover, energy_balance) lists per calculated step.
    """
    if inn_column is None:
        inn_column = ice.IceColumn(date[0], [])

    icecover = []
    time_span_in_sec = 60*60*24  # fixed timestep of 24hrs given in seconds
    inn_column.remove_metadata()
    inn_column.remove_time()
    icecover.append(copy.deepcopy(inn_column))
    energy_balance = []

    # Albedo memory carried between time steps; start from black ice.
    age_factor_tau = 0.
    albedo_prim = const.alfa_black_ice

    for i in range(len(date)):
        print("{0}".format(date[i]))

        # Skip dates before the initial ice column. (Bug fix: the original
        # used `i = i + 1`, which has no effect on a for-loop variable.)
        if date[i] < inn_column.date:
            continue

        out_column, eb = get_ice_thickness_from_energy_balance(
            utm33_x=utm33_x, utm33_y=utm33_y, ice_column=inn_column, temp_atm=temp_atm[i],
            prec=prec[i], prec_snow=prec_snow[i], time_span_in_sec=time_span_in_sec,
            albedo_prim=albedo_prim, age_factor_tau=age_factor_tau, wind=wind[i], cloud_cover=cloud_cover[i],
            rel_hum=rel_hum[i], pressure_atm=pressure_atm[i])

        icecover.append(out_column)
        energy_balance.append(eb)
        inn_column = copy.deepcopy(out_column)

        # Carry albedo aging into the next step; reset it when no balance was computed.
        if eb.EB is None:
            age_factor_tau = 0.
            albedo_prim = const.alfa_black_ice
        else:
            age_factor_tau = eb.age_factor_tau
            albedo_prim = eb.albedo_prim

    return icecover, energy_balance
def get_ice_thickness_from_surface_temp(ic, time_step, dh_snow, temp, melt_energy=None):
    """Given surface temperature and new snow on an ice-column, ice evolution is estimated. In the simplest case
    the surface temp is estimated from air temperature. More advanced approaches calculate surface temperature
    by solving an energy balance equation.

    :param ic: Ice column at the beginning of the time step. Object containing the ice column with metadata
    :param dh_snow: New snow in period of time step. Given as float in SI units [m]
    :param temp: Average temperature in period of time step. Given in C as float.
    :param time_step: In seconds. 60*60*24 = 86400 is 24hrs
    :param melt_energy: (optional) Energy available for melting, used only when temp == 0., i.e. when the
        surface temperature comes from an energy balance calculation.
        NOTE(review): the unit conversion below suggests kJ per 24 h — confirm against the energy balance module.
    :return: Ice column at end of time step
    """
    dh_snow = float(dh_snow)
    # step the date forward one time step. We do it initially because the variable is also used and subtracted in the following calculations.
    ic.time_step_forward(time_step)
    # Add new snow on top of the column if we have ice and snow
    # and update the slush level/buoyancy given new snow
    if len(ic.column) != 0:
        if dh_snow != 0.:
            ic.add_layer_at_index(0, ice.IceLayer(dh_snow, 'new_snow'))
        ic.update_slush_level()
    # if surface or air temperature is FREEZING
    if temp < const.temp_f:
        # If no ice, freeze water to ice
        if len(ic.column) == 0:
            # The heat flux equation gives how much water will freeze. U_total for the equation is estimated.
            U_total = ice.add_layer_conductance_to_total(None, const.k_black_ice, 0, 10)
            dh = - temp * U_total * time_step / const.rho_water / const.L_fusion
            ic.add_layer_at_index(0, ice.IceLayer(dh, 'black_ice'))
            pass
        else:
            # Declaration of total conductance of layers above freezing layer
            U_total = None
            i = 0
            # time_step is consumed as freezing progresses down through the layers
            while time_step > 0 and i <= len(ic.column)-1:
                # If the layer is a solid, it only adds to the total insulation. Unless it is the last and water is frozen to ice.
                if (ic.column[i].get_enum()) > 9:
                    U_total = ice.add_layer_conductance_to_total(U_total, ic.column[i].conductivity, ic.column[i].height, ic.column[i].get_enum())
                    # If the layer is the last layer of solids and thus at the bottom, we get freezing at the bottom
                    if i == len(ic.column)-1:
                        # The heat flux equation gives how much water will freeze.
                        dh = - temp * U_total * time_step / const.rho_water / const.L_fusion
                        ic.add_layer_at_index(i+1, ice.IceLayer(dh, 'black_ice'))
                        time_step = 0
                # Else the layer is a slush layer above or in the ice column and it will freeze fully or partially.
                # Note, we do not freeze slush in the same time step it occurs.
                elif not ic.in_slush_event:
                    # If the total conductance is None, we are dealing with the top layer and a surface/thin ice conductance must be defined.
                    if U_total is None:
                        U_total = ice.add_layer_conductance_to_total(None, const.k_slush_ice, 0, 11)
                    # Only the water part in the slush freezes
                    dh = - temp * U_total * time_step / const.rho_water / const.L_fusion / (1 - const.part_ice_in_slush)
                    # If a layer totally freezes during the time period, the rest of the time will be used to freeze a layer further down.
                    if ic.column[i].height < dh:
                        ic.column[i].set_type('slush_ice')
                        # The heat flux equation sorted for time
                        time_step_used = ic.column[i].height * const.rho_water * const.L_fusion * (1 - const.part_ice_in_slush) / -temp / U_total
                        time_step = time_step - time_step_used
                        # Layer height increases when water in the layer freezes
                        ic.column[i].height += ic.column[i].height * (1 - const.part_ice_in_slush) * ((const.rho_water - const.rho_slush_ice) / const.rho_slush_ice)
                        # Update conductance
                        U_total = ice.add_layer_conductance_to_total(U_total, ic.column[i].conductivity, ic.column[i].height, ic.column[i].get_enum())
                    # Else all energy is used to freeze the layer only partially
                    else:
                        # The thickness that remains slush
                        ic.column[i].height -= dh
                        # dh has frozen to slush ice. Layer height increases when water in the layer freezes.
                        dh += dh * (1 - const.part_ice_in_slush) * ((const.rho_water - const.rho_slush_ice) / const.rho_slush_ice)
                        ic.add_layer_at_index(i, ice.IceLayer(dh, 'slush_ice'))
                        # Nothing more to freeze
                        time_step = 0
                # Slush event has happened and this is the first time step after the slush event. Do not create ice in the first time step.
                else:
                    # ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: No freezing event in the current time step due to slush event.", log_it=False, print_it=True)
                    ic.in_slush_event = False
                    # If we don't set time step to 0, layers further down will freeze.
                    time_step = 0
                # Go to next ice layer
                i += 1
    # if surface or air temperature is MELTING
    else:
        # In case surface temperatures are above 0C (when air temp is used to calculate ice evolution) there
        # should not be submitted a energy term from the energy balance calculations (melt_energy = None).
        if temp > 0.:
            # all melting is made by simple degree day model using different calibration constants for snow,
            # slush ice and black ice melting only effects the top layer (index = 0)
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    if ic.column[0].get_enum() >= 20: # snow
                        meltingcoeff = const.meltingcoeff_snow
                    elif ic.column[0].type == 'slush_ice':
                        meltingcoeff = const.meltingcoeff_slush_ice
                    elif ic.column[0].type == 'slush':
                        meltingcoeff = const.meltingcoeff_slush
                    elif ic.column[0].type == 'black_ice':
                        meltingcoeff = const.meltingcoeff_black_ice
                    else:
                        ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: Melting on unknown layer type: {0}. Using slush_ice coeff.".format(ic.column[0].type))
                        meltingcoeff = const.meltingcoeff_slush_ice
                    # degree day melting. I have separated the time factor from the melting coefficient.
                    dh = meltingcoeff * time_step * (temp - const.temp_f)
                    # if layer is thinner than total melting the layer is removed and the rest of melting occurs
                    # in the layer below for the remainder of time. melting (dh) and time are proportional in the degreeday equation
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used
                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0
        # In case surface temp is calculated from energy balance, surface temp is never above 0C, but if we have
        # melting and thus melt_energy is not None and temp == 0.
        elif melt_energy is not None:
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    # energy available to melt used with latent heat of fusion (delta_h = Q/L/rho)
                    L_ice = const.L_fusion/1000. # Joule to Kilo Joule
                    dh = melt_energy / L_ice / ic.column[0].density * time_step/24/60/60
                    # if layer is thinner than total melting the layer is removed and the rest of melting occurs
                    # in the layer below for the remainder of time. melting (dh) and time are proportional in the degreeday equation
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used
                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0
        else:
            ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: Need either energy or positive temperatures in model to melt snow and ice.")
    # Housekeeping after freezing/melting: tidy layers and recompute derived column properties.
    ic.merge_and_remove_excess_layers()
    ic.merge_snow_layers_and_compress(temp)
    ic.update_draft_thickness()
    ic.update_water_line()
    ic.update_column_temperatures(temp)
    ic.update_total_column_height()
    ic.set_surface_temperature(temp)
    return ic
def get_ice_thickness_from_energy_balance(
        utm33_x, utm33_y, ice_column, temp_atm, prec, prec_snow, time_span_in_sec,
        albedo_prim=None, age_factor_tau=None, cloud_cover=None, wind=None, rel_hum=None, pressure_atm=None):
    """Step an ice column one time step, using an energy balance to find the surface temperature.

    :param utm33_x: location easting (UTM 33).
    :param utm33_y: location northing (UTM 33).
    :param ice_column: [IceColumn] column at the beginning of the time step.
    :param temp_atm: air temperature.
    :param prec: precipitation.
    :param prec_snow: snow precipitation.
    :param time_span_in_sec: time step in seconds.
    :param albedo_prim: albedo carried over from the previous step.
    :param age_factor_tau: albedo aging factor carried over from the previous step.
    :param cloud_cover: cloud cover.
    :param wind: wind.
    :param rel_hum: relative humidity.
    :param pressure_atm: atmospheric pressure.
    :return: (out_column, energy_balance); out_column is None if the energy balance
        yields a surface temperature above 0C (which the model disallows).
    """
    # No ice? Don't do an energy balance; use air temperature as surface temperature.
    if len(ice_column.column) == 0:
        energy_balance = deb.EnergyBalanceElement(ice_column.date)
        energy_balance.add_no_energy_balance(is_ice_inn=False)
        out_column = get_ice_thickness_from_surface_temp(ice_column, time_span_in_sec, prec_snow, temp_atm)
        return out_column, energy_balance

    energy_balance = deb.temp_surface_from_eb(
        utm33_x, utm33_y, ice_column, temp_atm, prec, prec_snow, albedo_prim, time_span_in_sec,
        age_factor_tau=age_factor_tau,
        cloud_cover=cloud_cover, wind=wind, rel_hum=rel_hum, pressure_atm=pressure_atm)

    surface_temp = energy_balance.temp_surface
    out_column = None

    if surface_temp < 0.:
        out_column = get_ice_thickness_from_surface_temp(
            ice_column, time_span_in_sec, prec_snow, surface_temp)
    elif surface_temp == 0.:
        # At the melting point, the surplus energy (SM) drives melting.
        melt_energy = energy_balance.SM
        out_column = get_ice_thickness_from_surface_temp(
            ice_column, time_span_in_sec, prec_snow, surface_temp, melt_energy=melt_energy)
    else:
        # Bug fix: the original used a misspelled bare print ("doicethicckness");
        # log through the same channel as the rest of the module.
        ml.log_and_print(
            "[error] icethickness.py -> get_ice_thickness_from_energy_balance: "
            "Surface temp cannot be above 0C in this method.")

    return out_column, energy_balance
|
|
#!/usr/bin/python
import time
import sys
import logging
import signal
import getopt
from simulation import *
from statistics import *
from system import *
def usage(arg):
    """Print the command-line help for *arg* (the program name) and exit with status 2."""
    print arg, ": -h [--help] -l [--log] -m <mission_time> [--mission_time <mission_time>]"
    print "-i <num_iterations> [--iterations <num_iterations>] -r <raid_type> [--raid <raid_type>]"
    print "-n <num_raids> [--raid_num <num_raids>] -c <capacity_factor> [--capacity <capacity_factor>]"
    print "-F <disk_fail_dist> [--disk_fail_dist <disk_fail_dist>]"
    print "-R <disk_repair_dist> [--disk_repair_dist <disk_repair_dist>]"
    print "-L <disk_lse_dist> [--disk_lse_dist <disk_lse_dist>]"
    print "-S <disk_scrubbing_dist> [--disk_scrubbing_dist <disk_scrubbing_dist>]"
    print "-a <required_re> [--accuracy <required_re>]"
    # file system model;
    print "-t <trace> [--trace <trace_file>]"
    print "-f [--filelevel]"
    print "-d [--dedup]"
    print "-w [--weighted]"
    print ""
    print "Detail:"
    print "mission_time = simulation end time in hours, default is 87600"
    print ""
    print "num_iterations = number of simulation runs, default is 10000"
    print ""
    print "raid_type = the raid configuration , 14_2_mds by default"
    print ""
    print "num_raids = number of raids in the system, defaut is 1"
    print ""
    print "capacity_factor = the disk capacity factor, defaut is 1 (2*1024*1024*1024 sectors (1TB)),"
    print ""
    print "disk_fail_dist = \"(shape = 1.2, scale = 461386 by default)\" OR"
    print "                 \"(scale)\" OR"
    print "                 \"(shape, scale)\" OR"
    print "                 \"(shape, scale, location)\""
    print "disk_repair_dist = \"(shape = 2.0, scale = 12, location = 6 by default)\" OR"
    print "                 \"(scale)\" OR"
    print "                 \"(shape, scale)\" OR"
    print "                 \"(shape, scale, location)\""
    print "disk_scrubbing_dist = \"(shape = 3.0, scale = 168, location = 6 by default)\" OR"
    print "                 \"(scale)\" OR"
    print "                 \"(shape, scale)\" OR"
    print "                 \"(shape, scale, location)\""
    print "                 shape = shape parameter of a Weibull (1 for Exponential)"
    print "                 scale = scale parameter of a Weibull"
    print "                 location = location parameter of a Weibull"
    print "disk_lse_dist = \"(rate = 1.08/10000 by default)\""
    print ""
    print "required_re = the required relative error, disable by default"
    print ""
    print "Samples:"
    print arg, "-i 10000 -r \"mds_5_1\" -a 0.05"
    # usage always terminates the process
    sys.exit(2)
def get_parms():
logging.basicConfig(level = getattr(logging, "WARNING"))
# 87600 hours, for 10 years
mission_time = 87600
# more iterations, more accurate estimate
iterations = 10000L
# the data/parity configuration
# such as mds_7_1
raid_type = "mds_14_2"
# the number of raid
raid_num = 1
# the number of sectors in each disk
# 512 bytes for each sector
# So the default is 1TB
disk_capacity = 2*1024*1024*1024L
capacity_factor = 1.0
parms = "Elerath2014A"
disk_fail_parms = None
disk_repair_parms = None
disk_lse_parms = None
disk_scrubbing_parms = None
# This indicates the simulation will not end until reach a required relative error
force_re = False
required_re = 0.05
# file system trace
fs_trace = None
filelevel = False
dedup = False
weighted = False
# output all data loss events
output_events = None
try:
(opts, args) = getopt.getopt(sys.argv[1:], "hl:m:i:r:n:c:p:F:R:L:S:a:t:fdwo:", ["help", "log", "mission_time",
"iterations",
"raid", "raid_num",
"capacity",
"parameters",
"disk_fail_dist",
"disk_repair_dist",
"disk_lse_dist",
"disk_scrubbing_dist",
"accuracy",
"trace",
"filelevel"
"dedup",
"weighted",
"output",
])
except:
usage(sys.argv[0])
print "getopts excepted"
sys.exit(1)
for o, a in opts:
if o in ("-h", "--help"):
print usage(sys.argv[0])
sys.exit(0)
if o in ("-l", "--log"):
logger = logging.getLogger("sim")
logger.setLevel(getattr(logging, a.upper()))
if o in ("-F", "--disk_fail_dist"):
if len(eval(a)) == 1:
disk_fail_parms = (1, eval(a), 0)
elif len(eval(a)) == 2:
(shape, scale) = eval(a)
disk_fail_parms = (shape, scale, 0)
elif len(eval(a)) == 3:
(shape, scale, location) = eval(a)
disk_fail_parms = (shape, scale, location)
else:
bad_opt = o + " : " + a
break
elif o in ("-R", "--disk_repair_dist"):
if len(eval(a)) == 1:
disk_repair_parms = (1, eval(a), 0)
elif len(eval(a)) == 2:
(shape, scale) = eval(a)
disk_repair_parms = (shape, scale, 0)
elif len(eval(a)) == 3:
(shape, scale, location) = eval(a)
disk_repair_parms = (shape, scale, location)
else:
bad_opt = o + " : " + a
break
elif o in ("-L", "--disk_lse_dist"):
if len(eval(a)) == 1: # the lse rate
disk_lse_parms = eval(a)
else:
bad_opt = o + " : num args must be 1"
break
elif o in ("-m", "--mission_time"):
mission_time = float(a)
elif o in ("-i", "--iterations"):
iterations = long(a)
elif o in ("-r", "--raid"):
raid_type = a
elif o in ("-n", "--raid_num"):
raid_num = int(a)
elif o in ("-c", "--capacity"):
capacity_factor = float(a)
elif o in ("-p", "--parameters"):
parms = a
elif o in ("-a", "--accuracy"):
force_re = True
required_re = float(a)
elif o in ("-t", "--trace"):
fs_trace = a
elif o in ("-f", "--filelevel"):
filelevel = True
elif o in ("-d", "--dedup"):
dedup = True
elif o in ("-w", "--weighted"):
weighted = True
elif o in ("-o", "--output"):
output_events = a
# TO-DO: We should verify these numbers
# We assume larger disks will have longer repair and scrubbing time
disk_capacity *= capacity_factor
# The following parameters may change with disk capacity
# For failure, restore, and scrubbing, the parameters are (shape, scale, location)
if disk_fail_parms != None and disk_repair_parms != None and disk_lse_parms != None and disk_scrubbing_dist != None:
parms = None
if parms == "Elerath2009":
# data from [Elerath2009]
disk_fail_parms = (1.2, 461386.0, 0)
disk_repair_parms = (2.0, 12.0 * capacity_factor, 6.0 * capacity_factor)
disk_lse_parms = (1.08/10000)
disk_scrubbing_parms = (3, 168 * capacity_factor, 6 * capacity_factor)
elif parms == "Elerath2014A":
#data from [Elerath2014], SATA Disk A
disk_fail_parms = (1.13, 302016.0, 0)
disk_repair_parms = (1.65, 22.7 * capacity_factor, 0)
disk_lse_parms = (1.0/12325)
disk_scrubbing_parms = (1, 186 * capacity_factor, 0)
elif parms == "Elerath2014B":
#data from [Elerath2014], SATA Disk B
disk_fail_parms = (0.576, 4833522.0, 0)
disk_repair_parms = (1.15, 20.25 * capacity_factor, 0)
disk_lse_parms = (1.0/42857)
disk_scrubbing_parms = (0.97, 160 * capacity_factor, 0)
else:
if parms != None:
usage(sys.argv[0])
print "Invaid parms"
exit(2)
return (mission_time, iterations, raid_type, raid_num, disk_capacity,
disk_fail_parms, disk_repair_parms, disk_lse_parms, disk_scrubbing_parms, force_re, required_re,
fs_trace, filelevel, dedup, weighted, output_events)
def print_result(model, raid_failure_samples, lse_samples, systems_with_data_loss,
        systems_with_raid_failures, systems_with_lse, iterations, raid_type, raid_num, disk_capacity, df):
    """Print a human-readable report of the simulation statistics.

    model carries the filelevel/dedup/weighted flags that decide how the
    loss magnitudes are labelled; raid_failure_samples and lse_samples are
    sample accumulators exposing prob_/value_ mean, re, ci and dev fields;
    df is the data/footprint ratio applied to the raw capacity.
    """
    (type, d, p) = raid_type.split("_");
    data_fragments = int(d)
    # sectors (512 B each) -> TB, scaled by the number of raids and the D/F ratio
    total_capacity = data_fragments * disk_capacity * raid_num * 512/1024/1024/1024/1024 * df
    localtime = time.asctime(time.localtime(time.time()))
    print "**************************************"
    print "System (%s): %.2fTB data, D/F = %.4f, %d of %s RAID, %ld iterations" % (localtime, total_capacity, df, raid_num, raid_type, iterations)
    print "Filelevel =", model.filelevel, ", Dedup =", model.dedup, ", Weighted =", model.weighted
    print "Summary: %d of systems with data loss events (%d by raid failures, %d by lse)" % (systems_with_data_loss, systems_with_raid_failures, systems_with_lse)
    prob_result = (raid_failure_samples.prob_mean, 100*raid_failure_samples.prob_re, raid_failure_samples.prob_mean - raid_failure_samples.prob_ci,
                   raid_failure_samples.prob_mean + raid_failure_samples.prob_ci, raid_failure_samples.prob_dev)
    value_result = (raid_failure_samples.value_mean, 100*raid_failure_samples.value_re, raid_failure_samples.value_mean - raid_failure_samples.value_ci,
                    raid_failure_samples.value_mean + raid_failure_samples.value_ci, raid_failure_samples.value_dev)
    print "******** RAID Failure Part ***********"
    print "Probability of RAID Failures: %e +/- %f Percent , CI (%e,%e), StdDev: %e" % prob_result
    if model.filelevel == False:
        print "Fraction of Blocks/Chunks Lost in the Failed Disk: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
    elif model.weighted == False:
        print "Fraction of Files Lost: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
    else:
        print "Fraction of Files Lost Weighted by Bytes: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
    # Same layout for the latent-sector-error statistics.
    prob_result = (lse_samples.prob_mean, 100*lse_samples.prob_re, lse_samples.prob_mean - lse_samples.prob_ci,
                   lse_samples.prob_mean + lse_samples.prob_ci, lse_samples.prob_dev)
    value_result = (lse_samples.value_mean, 100*lse_samples.value_re, lse_samples.value_mean - lse_samples.value_ci,
                    lse_samples.value_mean + lse_samples.value_ci, lse_samples.value_dev)
    print "************* LSE Part ***************"
    print "Probability of LSEs: %e +/- %f Percent , CI (%e,%e), StdDev: %e" % prob_result
    # NOMDL normalizes the expected loss magnitude by usable capacity (per TB).
    NOMDL = value_result[0]/total_capacity
    if model.filelevel == False:
        if model.weighted == False:
            print "# of Blocks/Chunks Lost: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
            print "NOMDL (Normalized Magnitude of Data Loss): %e chunks per TB" % NOMDL
        else:
            print "Bytes of Blocks/Chunks Lost: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
            print "NOMDL (Normalized Magnitude of Data Loss): %e bytes per TB" % NOMDL
    else:
        if model.weighted == False:
            print "# of Corrupted Files: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
            print "NOMDL (Normalized Magnitude of Data Loss): %e files per TB" % NOMDL
        else:
            print "Size of Corrupted Files: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
            print "NOMDL (Normalized Magnitude of Data Loss): %e bytes per TB" % NOMDL
    print "**************************************"
def do_it():
    """Run one complete simulation and print the resulting statistics."""
    parms = get_parms()
    simulation = Simulation(*parms)
    (model, raid_failure_samples, lse_samples, systems_with_data_loss,
        systems_with_raid_failures, systems_with_lse, iterations, df) = simulation.simulate()
    # raid_type, raid_num and disk_capacity sit at fixed positions in the parms tuple
    (raid_type, raid_num, disk_capacity) = parms[2:5]
    print_result(model, raid_failure_samples, lse_samples, systems_with_data_loss,
                 systems_with_raid_failures, systems_with_lse, iterations, raid_type, raid_num, disk_capacity, df)
def sig_quit(sig, frame):
    """SIGINT handler: dump the statistics gathered so far, then exit(1)."""
    # backtrace to get the simulation object
    object = frame.f_locals.get("self", None)
    while not isinstance(object, Simulation):
        frame = frame.f_back
        object = frame.f_locals.get("self", None)
    print >>sys.stderr, "\nThe simulation is interrupted!"
    # finalize the partial samples at a 95% confidence level
    object.raid_failure_samples.calcResults("0.95")
    object.lse_samples.calcResults("0.95")
    # number of iterations actually completed before the interrupt
    iterations = object.iterations - object.more_iterations + object.cur_i
    print_result(object.system.dedup_model, object.raid_failure_samples, object.lse_samples, object.systems_with_data_loss,
                 object.systems_with_raid_failures, object.systems_with_lse,
                 iterations, object.raid_type, object.raid_num, object.disk_capacity, object.system.get_df())
    if object.output is not None:
        print >>object.output, "I=%d" % iterations
        object.output.close()
    sys.exit(1)
if __name__ == "__main__":
simulation = None
signal.signal(signal.SIGINT, sig_quit)
do_it()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from platform import platform
# import requests
import urllib
from google.appengine.api import urlfetch
from .access_methods import AccessMethodsMixin
from .compat import json, string_type
from .decorators import objectify
from .exceptions import (
IllegalHttpMethod,
InvalidWebhook
)
from . import __version__
from .utils import (
format_path,
construct_namespaced_dict,
get_webhook_from_request,
EVENTBRITE_API_URL
)
class requests():
    """Minimal shim that mimics the ``requests`` API on top of Google App
    Engine's ``urlfetch`` service.

    Bug fixes versus the original: the classmethods were missing their
    ``cls`` parameter (so ``path`` received the class object), ``get``,
    ``post`` and ``delete`` referenced an undefined ``self``, the HTTP
    ``method`` and ``params`` arguments were silently dropped, and the
    ``urlfetch`` module itself was called instead of ``urlfetch.Fetch``
    (the call style used elsewhere in this module).
    """

    @classmethod
    def _request(cls, path, headers=None, params=None, method="get"):
        # urlfetch has no params argument, so encode them into the URL.
        url = path
        if params:
            url = url + "?" + urllib.urlencode(params)
        resp = urlfetch.Fetch(url, headers=headers, method=method.upper())
        return RequestsShim(resp, url)

    @classmethod
    def get(cls, path, headers=None, params=None):
        return cls._request(path,
                            headers=headers, params=params)

    @classmethod
    def post(cls, path, headers=None, params=None):
        return cls._request(path,
                            headers=headers, params=params, method="POST")

    @classmethod
    def delete(cls, path, headers=None, params=None):
        return cls._request(path,
                            headers=headers, params=params, method="DELETE")
class RequestsShim():
    """Adapter exposing a urlfetch response through the attribute names of
    a ``requests.Response`` object."""

    def __init__(self, resp, url):
        # Map urlfetch's response fields onto the requests API.
        self.status_code = resp.status_code
        self.text = resp.content
        self.headers = resp.headers
        self.url = url
        # The fields below are fixed placeholders; urlfetch does not expose them.
        self.request = None
        self.reason = "ok"
        self.ok = True
        self.elapsed = 120

    def json(self):
        """Decode the response body as JSON."""
        return json.loads(self.text)
class Eventbrite(AccessMethodsMixin):
allowed_methods = ['post', 'get', 'delete']
content_type_specified = True
    def __init__(self, oauth_token, eventbrite_api_url=EVENTBRITE_API_URL):
        # OAuth token sent as a Bearer Authorization header on every request.
        self.oauth_token = oauth_token
        # Base URL of the Eventbrite API; overridable (e.g. for testing).
        self.eventbrite_api_url = eventbrite_api_url
@property
def headers(self):
headers = {
"content-type": "application/json",
"Authorization": "Bearer {0}".format(self.oauth_token),
"User-Agent": "eventbrite-python-sdk {version} ({system})".format(
version=__version__,
system="",
)
}
return headers
def api(self, method, path, data, expand=()):
method = method.strip().lower()
if method not in self.allowed_methods:
msg = "The '{0}' method is not accepted by the Eventbrite " \
"client.".format(method)
raise IllegalHttpMethod(msg)
method = getattr(self, method)
return method(path, data)
@objectify
def get(self, path, data=None, expand=()):
# Resolves the search result response problem
headers = self.headers
if 'content-type' in headers:
headers.pop('content-type')
# Get the function path
path = format_path(path, self.eventbrite_api_url)
if data is None:
data = {}
# Manage expansions
if data.get('expand'):
# Do nothing because expand is already passed in
pass
elif expand:
# Manage expansions
data['expand'] = ','.join(expand)
else:
# Anything else is None
data['expand'] = 'none'
# return requests.get(path, headers=headers, params=data or {})
url = path
if data:
url = url + "?" + urllib.urlencode(data)
resp = urlfetch.Fetch(url, headers=headers, method="GET", deadline=60)
return RequestsShim(resp, url)
@objectify
def post(self, path, data=None):
path = format_path(path, self.eventbrite_api_url)
json_data = json.dumps(data or {})
# return requests.post(path, headers=self.headers, data=json_data)
url = path
resp = urlfetch.Fetch(url, headers=self.headers, method="POST", payload=json_data, deadline=60)
return RequestsShim(resp, url)
@objectify
def delete(self, path, data=None):
path = format_path(path, self.eventbrite_api_url)
# return requests.delete(path, headers=self.headers, data=data or {})
url = path
if params:
url = url + "?" + urllib.urlencode(data)
resp = urlfetch.Fetch(url, headers=headers, method="DELETE", deadline=60)
return RequestsShim(resp, url)
############################
#
# Access methods
#
############################
def get_user(self, user_id=None):
"""
Returns a user for the specified user as user.
GET users/:id/
:param int user_id: (optional) The id assigned to a user
"""
if user_id:
return self.get('/users/{0}/'.format(user_id))
return self.get('/users/me/')
def get_user_orders(self, user_id=None, changed_since=None):
    """
    Fetch a paginated response of orders (key: 'orders') placed by the
    user, i.e. where the user was the person buying the tickets.

    GET users/:id/orders/

    :param int user_id: (optional) id of the user; falls back to the
        authenticated user ('me') when omitted.
    :param datetime changed_since: (optional) only return orders changed
        on or after the given time (ISO8601 combined date/time, UTC).
    """
    endpoint = ('/users/{0}/orders/'.format(user_id) if user_id
                else '/users/me/orders/')
    params = {}
    if changed_since:
        params['changed_since'] = changed_since
    return self.get(endpoint, data=params)
def get_event_attendees(self, event_id, status=None, changed_since=None):
    """
    Fetch a paginated response (key: 'attendees') listing an event's
    attendees.

    GET /events/:id/attendees/

    :param event_id: id of the event.
    :param status: (optional) attendee status filter.
        TODO - check the types of valid status.
    :param changed_since: (optional) only attendees changed on/after this.
    """
    params = {}
    if status:
        params['status'] = status
    if changed_since:
        params['changed_since'] = changed_since
    return self.get("/events/{0}/attendees/".format(event_id), data=params)
def get_event_attendee_by_id(self, event_id, attendee_id):
    """
    Fetch a single attendee of an event.

    GET /events/:id/attendees/:id/
    """
    endpoint = "/events/{0}/attendees/{1}/".format(event_id, attendee_id)
    return self.get(endpoint)
def get_event_ticket_classes(self, event_id):
    """
    Fetch a paginated response (key: 'ticket_classes') listing an event's
    ticket classes.

    GET /events/:id/ticket_classes/
    """
    endpoint = "/events/{0}/ticket_classes/".format(event_id)
    return self.get(endpoint)
def get_event_ticket_class_by_id(self, event_id, ticket_class_id):
    """
    Fetch a single ticket class of an event.

    GET /events/:id/ticket_classes/:id/
    """
    endpoint = "/events/{0}/ticket_classes/{1}/".format(
        event_id, ticket_class_id)
    return self.get(endpoint)
def get_event_discounts(self, event_id):
    """
    Fetch a paginated response (key: 'discounts') listing an event's
    discounts.

    GET /events/:id/discounts/
    """
    return self.get("/events/{0}/discounts/".format(event_id))
def post_event_discount(
        self, event_id,
        discount_code,
        discount_amount_off=None,
        discount_percent_off=None,
        discount_ticket_ids=None,
        discount_quantity_available=None,
        discount_start_date=None,
        discount_end_date=None):
        """
        Create a discount for an event.

        POST /events/:id/discounts/

        discount_code string required Code to activate discount
        discount_amount_off unknown optional Fixed reduction amount
        discount_percent_off string optional Percentage reduction
        discount_ticket_ids unknown optional IDs of tickets to limit discount to
        discount_quantity_available integer optional Number of discount uses
        discount_start_date datetime optional Allow use from this date
        discount_end_date datetime optional Allow use until this date
        TODO: Consider deprecating this method
        """
        # The payload is derived from locals(); presumably
        # construct_namespaced_dict keeps only names with the "discount"
        # prefix — TODO(review): confirm against its implementation.
        # Because locals() is captured here, do NOT rename the parameters
        # above and do NOT introduce local variables before this call.
        data = construct_namespaced_dict("discount", locals())
        return self.post("/events/{0}/discounts/".format(event_id), data=data)
def get_event_discount_by_id(self, event_id, discount_id):
    """
    Fetch a single discount attached to an event.

    GET /events/:id/discounts/:id/
    """
    endpoint = "/events/{0}/discounts/{1}/".format(event_id, discount_id)
    return self.get(endpoint)
def post_event(self, data):
    """Create an event from a payload dict. POST /events/"""
    return self.post("/events/", data=data)
def publish_event(self, event_id):
    """Publish an event. POST /events/:id/publish/"""
    return self.post("/events/%s/publish/" % event_id)
def unpublish_event(self, event_id):
    """Unpublish an event. POST /events/:id/unpublish/"""
    return self.post("/events/%s/unpublish/" % event_id)
def post_event_ticket_class(self, event_id, data):
    """Create a ticket class for an event. POST /events/:id/ticket_classes/"""
    endpoint = "/events/{0}/ticket_classes/".format(event_id)
    return self.post(endpoint, data=data)
def event_search(self, **data):
    """
    Search events; keyword arguments become query parameters.

    GET /events/search/ (resolves the search result response problem
    via the GET handler's header handling).
    """
    return self.get("/events/search/", data=data)
def webhook_to_object(self, webhook):
    """
    Converts JSON sent by an Eventbrite Webhook to the appropriate
    Eventbrite object.

    Accepts a JSON string, a dict, or a request-like object carrying the
    webhook payload; fetches and returns the object at its 'api_url'.

    :raises InvalidWebhook: if the payload has no 'api_url' key.
    # TODO - Add capability to handle Django request objects
    """
    if isinstance(webhook, string_type):
        # BUG FIX: parsing a JSON string into a dict requires json.loads;
        # the original called json.dumps (serialisation), so the payload
        # stayed a string and the 'api_url' lookup below could never work.
        webhook = json.loads(webhook)
    # if a flask.Request object, try to convert that to a webhook
    if not isinstance(webhook, dict):
        webhook = get_webhook_from_request(webhook)
    try:
        webhook['api_url']
    except KeyError:
        raise InvalidWebhook
    payload = self.get(webhook['api_url'])
    return payload
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
For detailed usages and examples, please refer to:
https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import warnings
import numpy as np
from six import integer_types
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import meta_graph as meta_graph_lib
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app # pylint: disable=unused-import
from tensorflow.python.saved_model import loader
from tensorflow.python.tools import saved_model_utils
# Set of ops to blacklist. Used by scan_meta_graph_def() / the `scan`
# command to flag graphs containing these ops (both touch the local
# filesystem when the graph runs).
_OP_BLACKLIST = set(['WriteFile', 'ReadFile'])
def _show_tag_sets(saved_model_dir):
  """Prints the tag-sets stored in SavedModel directory.

  Prints all the tag-sets for MetaGraphs stored in SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  all_tag_sets = reader.get_saved_model_tag_sets(saved_model_dir)
  print('The given SavedModel contains the following tag-sets:')
  for one_tag_set in sorted(all_tag_sets):
    print(', '.join(sorted(one_tag_set)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
  """Prints the keys for each SignatureDef in the SignatureDef map.

  Prints the list of SignatureDef keys from the SignatureDef map specified by
  the given tag-set and SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
        in string format, separated by ','. For tag-set contains multiple tags,
        all tags must be passed in.
  """
  sig_map = get_signature_def_map(saved_model_dir, tag_set)
  print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
        'following keys:')
  for key in sorted(sig_map):
    print('SignatureDef key: \"%s\"' % key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfo for all inputs of the SignatureDef.
Returns a dictionary that maps each input key to its TensorInfo for the given
signature_def_key in the meta_graph_def
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up SignatureDef key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps input tensor keys to TensorInfos.
"""
return meta_graph_def.signature_def[signature_def_key].inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfos for all outputs of the SignatureDef.
Returns a dictionary that maps each output key to its TensorInfo for the given
signature_def_key in the meta_graph_def.
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to
look up signature_def_key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps output tensor keys to TensorInfos.
"""
return meta_graph_def.signature_def[signature_def_key].outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):
  """Prints input and output TensorInfos.

  Prints the details of input and output TensorInfos for the SignatureDef
  mapped by the given signature_def_key.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated
        by ','. For tag-set contains multiple tags, all tags must be passed in.
    signature_def_key: A SignatureDef key string.
    indent: How far (in increments of 2 spaces) to indent each line of output.
  """
  meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                        tag_set)
  inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  prefix = '  ' * indent

  def in_print(s):
    print(prefix + s)

  in_print('The given SavedModel SignatureDef contains the following input(s):')
  for input_key in sorted(inputs_tensor_info):
    in_print('  inputs[\'%s\'] tensor_info:' % input_key)
    _print_tensor_info(inputs_tensor_info[input_key], indent + 1)
  in_print('The given SavedModel SignatureDef contains the following '
           'output(s):')
  for output_key in sorted(outputs_tensor_info):
    in_print('  outputs[\'%s\'] tensor_info:' % output_key)
    _print_tensor_info(outputs_tensor_info[output_key], indent + 1)
  in_print('Method name is: %s' %
           meta_graph_def.signature_def[signature_def_key].method_name)
def _print_tensor_info(tensor_info, indent=0):
  """Prints details of the given tensor_info.

  Args:
    tensor_info: TensorInfo object to be printed.
    indent: How far (in increments of 2 spaces) to indent each line of output.
  """
  prefix = '  ' * indent

  def in_print(s):
    print(prefix + s)

  # Invert the DataType enum map (value -> name) to render the dtype name.
  dtype_name = {value: key
                for (key, value) in types_pb2.DataType.items()}[
                    tensor_info.dtype]
  in_print('    dtype: ' + dtype_name)
  # Display shape as tuple.
  if tensor_info.tensor_shape.unknown_rank:
    shape = 'unknown_rank'
  else:
    shape = '(' + ', '.join(
        str(dim.size) for dim in tensor_info.tensor_shape.dim) + ')'
  in_print('    shape: ' + shape)
  in_print('    name: ' + tensor_info.name)
def _show_all(saved_model_dir):
  """Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.

  Prints all tag-set, SignatureDef and Inputs/Outputs information stored in
  SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  for tags in sorted(reader.get_saved_model_tag_sets(saved_model_dir)):
    print("\nMetaGraphDef with tag-set: '%s' "
          "contains the following SignatureDefs:" % ', '.join(tags))
    tag_set_str = ','.join(tags)
    sig_map = get_signature_def_map(saved_model_dir, tag_set_str)
    for sig_key in sorted(sig_map):
      print('\nsignature_def[\'' + sig_key + '\']:')
      _show_inputs_outputs(saved_model_dir, tag_set_str, sig_key, indent=1)
def get_meta_graph_def(saved_model_dir, tag_set):
  """DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.

  Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given
  tag-set and SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
        separated by ','. For tag-set contains multiple tags, all tags must be
        passed in.

  Raises:
    RuntimeError: An error when the given tag-set does not exist in the
        SavedModel.

  Returns:
    A MetaGraphDef corresponding to the tag-set.
  """
  # Thin wrapper kept only for backwards compatibility.
  result = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
  return result
def get_signature_def_map(saved_model_dir, tag_set):
  """Gets SignatureDef map from a MetaGraphDef in a SavedModel.

  Returns the SignatureDef map for the given tag-set in the SavedModel
  directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
        string format, separated by ','. For tag-set contains multiple tags,
        all tags must be passed in.

  Returns:
    A SignatureDef map that maps from string keys to SignatureDefs.
  """
  return saved_model_utils.get_meta_graph_def(
      saved_model_dir, tag_set).signature_def
def scan_meta_graph_def(meta_graph_def):
  """Scans meta_graph_def and reports if there are ops on blacklist.

  Print ops if they are on black list, or print success if no blacklisted ops
  found.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer.
  """
  ops_in_graph = set(
      meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
  offending_ops = _OP_BLACKLIST.intersection(ops_in_graph)
  if not offending_ops:
    print('MetaGraph with tag set %s does not contain blacklisted ops.' %
          meta_graph_def.meta_info_def.tags)
    return
  # TODO(yifeif): print more warnings
  print('MetaGraph with tag set %s contains the following blacklisted ops:' %
        meta_graph_def.meta_info_def.tags, offending_ops)
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
                                   input_tensor_key_feed_dict, outdir,
                                   overwrite_flag, worker=None, tf_debug=False):
  """Runs SavedModel and fetch all outputs.
  Runs the input dictionary through the MetaGraphDef within a SavedModel
  specified by the given tag_set and SignatureDef. Also save the outputs to file
  if outdir is not None.
  Args:
    saved_model_dir: Directory containing the SavedModel to execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
        string format, separated by ','. For tag-set contains multiple tags, all
        tags must be passed in.
    signature_def_key: A SignatureDef key string.
    input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.
    outdir: A directory to save the outputs to. If the directory doesn't exist,
        it will be created.
    overwrite_flag: A boolean flag to allow overwrite output file if file with
        the same name exists.
    worker: If provided, the session will be run on the worker. Valid worker
        specification is a bns or gRPC path.
    tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the
        intermediate Tensor values and runtime GraphDefs while running the
        SavedModel.
  Raises:
    ValueError: When any of the input tensor keys is not valid.
    RuntimeError: An error when output file already exists and overwrite is not
        enabled.
  """
  # Get a list of output tensor names.
  meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                        tag_set)
  # Re-create feed_dict based on input tensor name instead of key as session.run
  # uses tensor name.
  inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Check if input tensor keys are valid.
  for input_key_name in input_tensor_key_feed_dict.keys():
    if input_key_name not in inputs_tensor_info:
      raise ValueError(
          '"%s" is not a valid input key. Please choose from %s, or use '
          '--show option.' %
          (input_key_name, '"' + '", "'.join(inputs_tensor_info.keys()) + '"'))
  # Map each TensorInfo's tensor name to the caller-provided ndarray.
  inputs_feed_dict = {
      inputs_tensor_info[key].name: tensor
      for key, tensor in input_tensor_key_feed_dict.items()
  }
  # Get outputs
  outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Sort to preserve order because we need to go from value to key later.
  output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
  output_tensor_names_sorted = [
      outputs_tensor_info[tensor_key].name
      for tensor_key in output_tensor_keys_sorted
  ]
  with session.Session(worker, graph=ops_lib.Graph()) as sess:
    # Import the SavedModel's MetaGraphDef into this fresh graph/session.
    loader.load(sess, tag_set.split(','), saved_model_dir)
    if tf_debug:
      # Wrap the session so tfdbg intercepts the run() call below.
      sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)
    outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
    # outputs[i] corresponds to output_tensor_keys_sorted[i] (same order).
    for i, output in enumerate(outputs):
      output_tensor_key = output_tensor_keys_sorted[i]
      print('Result for output key %s:\n%s' % (output_tensor_key, output))
      # Only save if outdir is specified.
      if outdir:
        # Create directory if outdir does not exist
        if not os.path.isdir(outdir):
          os.makedirs(outdir)
        output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
        # If overwrite not enabled and file already exist, error out
        if not overwrite_flag and os.path.exists(output_full_path):
          raise RuntimeError(
              'Output file %s already exists. Add \"--overwrite\" to overwrite'
              ' the existing output files.' % output_full_path)
        np.save(output_full_path, output)
        print('Output %s is saved to %s' % (output_tensor_key,
                                            output_full_path))
def preprocess_inputs_arg_string(inputs_str):
  """Parses input arg into dictionary that maps input to file/variable tuple.

  Parses input string in the format of, for example,
  "input1=filename1[variable_name1],input2=filename2" into a
  dictionary looks like
  {'input_key1': (filename1, variable_name1),
   'input_key2': (file2, None)}
  , which maps input keys to a tuple of file name and variable name (None if
  empty).

  Args:
    inputs_str: A string that specified where to load inputs. Inputs are
        separated by semicolons.
        * For each input key:
            '<input_key>=<filename>' or
            '<input_key>=<filename>[<variable_name>]'
        * The optional 'variable_name' key will be set to None if not
          specified.

  Returns:
    A dictionary that maps input keys to a tuple of file name and variable
    name.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}
  for item in filter(bool, inputs_str.split(';')):  # skip empty strings
    # '<input_key>=<filename>[<variable_name>]'
    with_var = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', item)
    if with_var:
      input_dict[with_var.group(1)] = (with_var.group(2), with_var.group(3))
      continue
    # '<input_key>=<filename>'
    plain = re.match(r'([^=]+)=([^\[\]]+)$', item)
    if plain is None:
      raise RuntimeError(
          '--inputs "%s" format is incorrect. Please follow'
          '"<input_key>=<filename>", or'
          '"<input_key>=<filename>[<variable_name>]"' % item)
    input_dict[plain.group(1)] = (plain.group(2), None)
  return input_dict
def preprocess_input_exprs_arg_string(input_exprs_str):
  """Parses input arg into dictionary that maps input key to python expression.

  Parses input string in the format of 'input_key=<python expression>' into a
  dictionary that maps each input_key to its python expression.

  Args:
    input_exprs_str: A string that specifies python expression for input keys.
        Each input is separated by semicolon. For each input key:
        'input_key=<python expression>'

  Returns:
    A dictionary that maps input keys to their values.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}
  for input_raw in filter(bool, input_exprs_str.split(';')):
    # BUG FIX: the original tested `'=' not in input_exprs_str` (the whole
    # argument string), so a malformed item such as 'foo' slipped through
    # whenever any other item contained '=' and then crashed in the split
    # below with an unhelpful ValueError. Validate each item instead.
    if '=' not in input_raw:
      raise RuntimeError('--input_exprs "%s" format is incorrect. Please follow'
                         '"<input_key>=<python expression>"' % input_exprs_str)
    input_key, expr = input_raw.split('=', 1)
    # ast.literal_eval does not work with numpy expressions.
    # SECURITY NOTE: eval on a CLI-supplied expression is intentional here,
    # but never feed untrusted strings through this function.
    input_dict[input_key] = eval(expr)  # pylint: disable=eval-used
  return input_dict
def preprocess_input_examples_arg_string(input_examples_str):
  """Parses input into dict that maps input keys to lists of tf.Example.

  Parses input string in the format of 'input_key1=[{feature_name:
  feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary
  that maps each input_key to its list of serialized tf.Example.

  Args:
    input_examples_str: A string that specifies a list of dictionaries of
        feature_names and their feature_lists for each input.
        Each input is separated by semicolon. For each input key:
        'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]'
        items in feature_list can be the type of float, int, long or str.

  Returns:
    A dictionary that maps input keys to lists of serialized tf.Example.

  Raises:
    ValueError: An error when the given tf.Example is not a list.
  """
  # Reuse the expression parser, then serialize each dict to a tf.Example.
  input_dict = preprocess_input_exprs_arg_string(input_examples_str)
  for key, examples in input_dict.items():
    if not isinstance(examples, list):
      raise ValueError(
          'tf.Example input must be a list of dictionaries, but "%s" is %s' %
          (examples, type(examples)))
    input_dict[key] = [_create_example_string(ex) for ex in examples]
  return input_dict
def _create_example_string(example_dict):
  """Create a serialized tf.example from feature dictionary.

  Dispatches each feature list to float_list/bytes_list/int64_list based on
  the type of its FIRST element; all elements of a list are assumed to share
  that type.

  NOTE(review): an empty feature_list reaches feature_list[0] below and
  raises IndexError rather than a descriptive error — confirm whether empty
  feature lists should be supported before changing this.
  """
  example = example_pb2.Example()
  for feature_name, feature_list in example_dict.items():
    if not isinstance(feature_list, list):
      raise ValueError('feature value must be a list, but %s: "%s" is %s' %
                       (feature_name, feature_list, type(feature_list)))
    # Type dispatch on the first element only (see docstring).
    if isinstance(feature_list[0], float):
      example.features.feature[feature_name].float_list.value.extend(
          feature_list)
    elif isinstance(feature_list[0], str):
      example.features.feature[feature_name].bytes_list.value.extend(
          feature_list)
    elif isinstance(feature_list[0], integer_types):
      example.features.feature[feature_name].int64_list.value.extend(
          feature_list)
    else:
      raise ValueError(
          'Type %s for value %s is not supported for tf.train.Feature.' %
          (type(feature_list[0]), feature_list[0]))
  return example.SerializeToString()
def load_inputs_from_input_arg_string(inputs_str, input_exprs_str,
                                      input_examples_str):
  """Parses input arg strings and create inputs feed_dict.
  Parses '--inputs' string for inputs to be loaded from file, and parses
  '--input_exprs' string for inputs to be evaluated from python expression.
  '--input_examples' string for inputs to be created from tf.example feature
  dictionary list.
  Precedence on duplicate keys (with a warning): --input_examples overrides
  --input_exprs, which overrides --inputs.
  Args:
    inputs_str: A string that specified where to load inputs. Each input is
      separated by semicolon.
      * For each input key:
        '<input_key>=<filename>' or
        '<input_key>=<filename>[<variable_name>]'
      * The optional 'variable_name' key will be set to None if not specified.
      * File specified by 'filename' will be loaded using numpy.load. Inputs
          can be loaded from only .npy, .npz or pickle files.
      * The "[variable_name]" key is optional depending on the input file type
          as described in more details below.
      When loading from a npy file, which always contains a numpy ndarray, the
      content will be directly assigned to the specified input tensor. If a
      variable_name is specified, it will be ignored and a warning will be
      issued.
      When loading from a npz zip file, user can specify which variable within
      the zip file to load for the input tensor inside the square brackets. If
      nothing is specified, this function will check that only one file is
      included in the zip and load it for the specified input tensor.
      When loading from a pickle file, if no variable_name is specified in the
      square brackets, whatever that is inside the pickle file will be passed
      to the specified input tensor, else SavedModel CLI will assume a
      dictionary is stored in the pickle file and the value corresponding to
      the variable_name will be used.
    input_exprs_str: A string that specifies python expressions for inputs.
      * In the format of: '<input_key>=<python expression>'.
      * numpy module is available as np.
    input_examples_str: A string that specifies tf.Example with dictionary.
      * In the format of: '<input_key>=<[{feature:value list}]>'
  Returns:
    A dictionary that maps input tensor keys to numpy ndarrays.
  Raises:
    RuntimeError: An error when a key is specified, but the input file contains
      multiple numpy ndarrays, none of which matches the given key.
    RuntimeError: An error when no key is specified, but the input file contains
      more than one numpy ndarrays.
  """
  tensor_key_feed_dict = {}
  inputs = preprocess_inputs_arg_string(inputs_str)
  input_exprs = preprocess_input_exprs_arg_string(input_exprs_str)
  input_examples = preprocess_input_examples_arg_string(input_examples_str)
  for input_tensor_key, (filename, variable_name) in inputs.items():
    # np.load transparently handles .npy, .npz and pickled files.
    data = np.load(file_io.FileIO(filename, mode='rb'))
    # When a variable_name key is specified for the input file
    if variable_name:
      # if file contains a single ndarray, ignore the input name
      if isinstance(data, np.ndarray):
        warnings.warn(
            'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
            % (filename, variable_name))
        tensor_key_feed_dict[input_tensor_key] = data
      else:
        # npz archives and pickled dicts both support `in` / subscripting.
        if variable_name in data:
          tensor_key_feed_dict[input_tensor_key] = data[variable_name]
        else:
          raise RuntimeError(
              'Input file %s does not contain variable with name \"%s\".' %
              (filename, variable_name))
    # When no key is specified for the input file.
    else:
      # Check if npz file only contains a single numpy ndarray.
      if isinstance(data, np.lib.npyio.NpzFile):
        variable_name_list = data.files
        if len(variable_name_list) != 1:
          raise RuntimeError(
              'Input file %s contains more than one ndarrays. Please specify '
              'the name of ndarray to use.' % filename)
        tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
      else:
        tensor_key_feed_dict[input_tensor_key] = data
  # When input is a python expression:
  for input_tensor_key, py_expr_evaluated in input_exprs.items():
    if input_tensor_key in tensor_key_feed_dict:
      warnings.warn(
          'input_key %s has been specified with both --inputs and --input_exprs'
          ' options. Value in --input_exprs will be used.' % input_tensor_key)
    tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated
  # When input is a tf.Example:
  for input_tensor_key, example in input_examples.items():
    if input_tensor_key in tensor_key_feed_dict:
      warnings.warn(
          'input_key %s has been specified in multiple options. Value in '
          '--input_examples will be used.' % input_tensor_key)
    tensor_key_feed_dict[input_tensor_key] = example
  return tensor_key_feed_dict
def show(args):
  """Function triggered by show command.

  Args:
    args: A namespace parsed from command line.
  """
  if args.all:
    # --all: dump every tag-set, SignatureDef and tensor info.
    _show_all(args.dir)
  elif args.tag_set is None:
    # No tag-set given: just list the available tag-sets.
    _show_tag_sets(args.dir)
  elif args.signature_def is None:
    # Tag-set given but no SignatureDef key: list the keys.
    _show_signature_def_map_keys(args.dir, args.tag_set)
  else:
    # Fully specified: show input/output tensor info for that SignatureDef.
    _show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
  """Function triggered by run command.

  Args:
    args: A namespace parsed from command line.

  Raises:
    AttributeError: An error when neither --inputs nor --input_exprs is passed
        to run command.
  """
  if not (args.inputs or args.input_exprs or args.input_examples):
    raise AttributeError(
        'At least one of --inputs, --input_exprs or --input_examples must be '
        'required')
  feed_dict = load_inputs_from_input_arg_string(
      args.inputs, args.input_exprs, args.input_examples)
  run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,
                                 feed_dict, args.outdir, args.overwrite,
                                 worker=args.worker, tf_debug=args.tf_debug)
def scan(args):
  """Function triggered by scan command.

  Args:
    args: A namespace parsed from command line.
  """
  if args.tag_set:
    # Scan only the MetaGraph matching the requested tag-set.
    scan_meta_graph_def(
        saved_model_utils.get_meta_graph_def(args.dir, args.tag_set))
    return
  # No tag-set: scan every MetaGraph in the SavedModel.
  for mgd in reader.read_saved_model(args.dir).meta_graphs:
    scan_meta_graph_def(mgd)
def create_parser():
  """Creates a parser that parses the saved_model_cli command line arguments.

  Returns:
    An argparse.ArgumentParser with 'show', 'run' and 'scan' subcommands;
    each subcommand's `func` default is set to its handler (show/run/scan).
  """
  parser = argparse.ArgumentParser(
      description='saved_model_cli: Command-line interface for SavedModel')
  parser.add_argument('-v', '--version', action='version', version='0.1.0')
  subparsers = parser.add_subparsers(
      title='commands', description='valid commands', help='additional help')
  # show command
  show_msg = (
      'Usage examples:\n'
      'To show all tag-sets in a SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model\n\n'
      'To show all available SignatureDef keys in a '
      'MetaGraphDef specified by its tag-set:\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n\n'
      'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
      'passed in, separated by \';\':\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
      'To show all inputs and outputs TensorInfo for a specific'
      ' SignatureDef specified by the SignatureDef key in a'
      ' MetaGraph.\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve'
      ' --signature_def serving_default\n\n'
      'To show all available information in the SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model --all')
  parser_show = subparsers.add_parser(
      'show',
      description=show_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_show.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to inspect')
  parser_show.add_argument(
      '--all',
      action='store_true',
      help='if set, will output all information in given SavedModel')
  parser_show.add_argument(
      '--tag_set',
      type=str,
      default=None,
      help='tag-set of graph in SavedModel to show, separated by \',\'')
  parser_show.add_argument(
      '--signature_def',
      type=str,
      default=None,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to display input(s) and output(s) for')
  parser_show.set_defaults(func=show)
  # run command
  # NOTE(review): the usage example below says '$saved_model_cli show' but
  # demonstrates run's flags — it likely should read '$saved_model_cli run'.
  # The string is runtime help output, so it is left untouched here.
  run_msg = ('Usage example:\n'
             'To run input tensors from files through a MetaGraphDef and save'
             ' the output tensors to files:\n'
             '$saved_model_cli show --dir /tmp/saved_model --tag_set serve \\\n'
             '   --signature_def serving_default \\\n'
             '   --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '
             '\\\n'
             '   --input_exprs \'input3_key=np.ones(2)\' \\\n'
             '   --input_examples '
             '\'input4_key=[{"id":[26],"weights":[0.5, 0.5]}]\' \\\n'
             '   --outdir=/out\n\n'
             'For more information about input file format, please see:\n'
             'https://www.tensorflow.org/guide/saved_model_cli\n')
  parser_run = subparsers.add_parser(
      'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
  parser_run.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_run.add_argument(
      '--tag_set',
      type=str,
      required=True,
      help='tag-set of graph in SavedModel to load, separated by \',\'')
  parser_run.add_argument(
      '--signature_def',
      type=str,
      required=True,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to run')
  msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>,'
         ' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.'
         ' The file format can only be from .npy, .npz or pickle.')
  parser_run.add_argument('--inputs', type=str, default='', help=msg)
  msg = ('Specifying inputs by python expressions, in the format of'
         ' "<input_key>=\'<python expression>\'", separated by \';\'. '
         'numpy module is available as \'np\'. '
         'Will override duplicate input keys from --inputs option.')
  parser_run.add_argument('--input_exprs', type=str, default='', help=msg)
  msg = (
      'Specifying tf.Example inputs as list of dictionaries. For example: '
      '<input_key>=[{feature0:value_list,feature1:value_list}]. Use ";" to '
      'separate input keys. Will override duplicate input keys from --inputs '
      'and --input_exprs option.')
  parser_run.add_argument('--input_examples', type=str, default='', help=msg)
  parser_run.add_argument(
      '--outdir',
      type=str,
      default=None,
      help='if specified, output tensor(s) will be saved to given directory')
  parser_run.add_argument(
      '--overwrite',
      action='store_true',
      help='if set, output file will be overwritten if it already exists.')
  parser_run.add_argument(
      '--tf_debug',
      action='store_true',
      help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
      'intermediate Tensors and runtime GraphDefs while running the '
      'SavedModel.')
  parser_run.add_argument(
      '--worker',
      type=str,
      default=None,
      help='if specified, a Session will be run on the worker. '
      'Valid worker specification is a bns or gRPC path.')
  parser_run.set_defaults(func=run)
  # scan command
  scan_msg = ('Usage example:\n'
              'To scan for blacklisted ops in SavedModel:\n'
              '$saved_model_cli scan --dir /tmp/saved_model\n'
              'To scan a specific MetaGraph, pass in --tag_set\n')
  parser_scan = subparsers.add_parser(
      'scan',
      description=scan_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_scan.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_scan.add_argument(
      '--tag_set',
      type=str,
      help='tag-set of graph in SavedModel to scan, separated by \',\'')
  parser_scan.set_defaults(func=scan)
  return parser
def main():
    """Entry point: build the argument parser and dispatch to the subcommand handler."""
    parser = create_parser()
    args = parser.parse_args()
    handler = getattr(args, 'func', None)
    if handler is None:
        # No subcommand was supplied (argparse subcommands are optional on Python 3).
        parser.error('too few arguments')
    handler(args)


if __name__ == '__main__':
    sys.exit(main())
|
|
# ===============================================================================
# Copyright 2019 Jan Hendrickx and Gabriel Parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import yaml
import numpy as np
import gdal
from gdalconst import GDT_Float32
# ============= standard library imports ========================
def write_raster(array, geotransform, output_path, output_filename, dimensions, projection, flip_arr=False):
    """
    Write a 2-D array to disk as a single-band Float32 GeoTIFF.

    :param array: 2-D numpy array to write as the raster band
    :param geotransform: GDAL geotransform sequence (origin, pixel size, rotation)
    :param output_path: directory in which to write the raster
    :param output_filename: name of the output GeoTIFF file
    :param dimensions: (cols, rows) tuple giving the raster size
    :param projection: WKT projection string used to georeference the image
    :param flip_arr: if True, flip the array upside-down before writing
    :return: None
    """
    filename = os.path.join(output_path, output_filename)
    # Single pre-formatted argument keeps print identical on Python 2 and 3.
    print('writing to location {}'.format(filename))
    driver = gdal.GetDriverByName('GTiff')
    # Create(path, cols, rows, band count, data type); the default data type
    # would be GDT_Byte, so GDT_Float32 is passed explicitly.
    output_dataset = driver.Create(filename, dimensions[0], dimensions[1], 1, GDT_Float32)
    # we write TO the output band
    output_band = output_dataset.GetRasterBand(1)
    if flip_arr:
        # Row order may be south-up; flip so row 0 is the top of the image.
        array = np.flipud(array)
        print('shape of transpose {}'.format(array.shape))
    # No pixel offset: write starting at the top-left corner.
    output_band.WriteArray(array, 0, 0)
    # set the geotransform in order to georeference the image
    output_dataset.SetGeoTransform(geotransform)
    # set the projection
    output_dataset.SetProjection(projection)
    # Bug fix: flush and release the dataset so GDAL actually writes the file
    # to disk (GDAL only guarantees a complete file once the dataset closes).
    output_band.FlushCache()
    output_dataset.FlushCache()
    output_dataset = None
    print('done writing.')
def numpy_to_geotiff(array, geo_info, output_path, output_name):
    """
    Write `array` as a GeoTIFF using georeferencing pulled from `geo_info`.

    :param array: 2-D numpy array to rasterize
    :param geo_info: dict with 'geotransform', 'dimensions' and 'projection' keys
    :param output_path: directory for the output raster
    :param output_name: filename for the output raster
    :return: None
    """
    trans = geo_info['geotransform']
    dim = geo_info['dimensions']
    proj = geo_info['projection']
    # Fix: the originals were Python-2-only print statements (one even missing
    # the space after `print`); these work identically on Python 2 and 3.
    print('transform {}'.format(trans))
    print('dimensions {}'.format(dim))
    print('projections {}'.format(proj))
    write_raster(array, geotransform=trans, output_path=output_path, output_filename=output_name,
                 dimensions=dim, projection=proj)
def geotiff_output(taw_vals, rss_arrs, geo_info, namekey, outpath):
    """Write one GeoTIFF per (TAW value, array) pair, named '<namekey>_image_taw_<taw>.tif'."""
    for taw_val, grid in zip(taw_vals, rss_arrs):
        filename = '{}_image_taw_{}.tif'.format(namekey, taw_val)
        numpy_to_geotiff(grid, geo_info, outpath, filename)
def optimize_taw_disaggregate(rss_path, output_path, geo_info, big_arr=False, test_mode=False, hair_trigger=False):
    """
    Choose, per pixel, the smallest TAW beyond which increasing TAW no longer
    meaningfully reduces model error (RSS), and write the result as a GeoTIFF.

    Reads a YAML file mapping TAW values to RSS arrays, converts RSS to an
    average daily error, computes the error reduction of each TAW step, and
    selects the first TAW where the reduction drops below a threshold.
    Diagnostic rasters for every intermediate stage are also written out.

    :param rss_path: path to a YAML file with keys 'taw' (list of TAW values)
        and 'rss' (list of RSS arrays, one per TAW value)
    :param output_path: directory where diagnostic and result rasters go
    :param geo_info: dict with 'geotransform', 'dimensions' and 'projection'
        used to georeference outputs
    :param big_arr: unused in this function -- TODO confirm it is still needed
    :param test_mode: if True, run a scalar (non-spatial) version of the
        optimization against a hard-coded CSV for sanity checking
    :param hair_trigger: if the error reduction ever falls below the specified
        threshold we take the corresponding TAW. If False, we take the TAW
        beyond which every error reduction is below the specified threshold.
    :return: None (results are written to disk)
    """
    # NOTE(review): the test-mode branch thresholds at 0.01 while the spatial
    # branch below thresholds at 0.05 -- confirm this difference is intended.
    if test_mode:
        test_path = '/Users/dcadol/Desktop/academic_docs_II/JPL_Data/taw_calibration_disaggregated/grassland_test.csv'
        with open(test_path, 'r') as rfile:
            taw_vals = []
            rss_vals = []
            for line in rfile:
                taw_rss = line.split(',')
                taw = int(taw_rss[0])
                rss = float(taw_rss[1])
                taw_vals.append(taw)
                rss_vals.append(rss)
        # get the average daily rss in mm (11-year record, 365 days per year)
        rss_vals_avg_daily = [((rss / 11.0) / 365.0) for rss in rss_vals]
        print 'the rss avg daily error \n', rss_vals_avg_daily
        error_reduced_lst = []
        for i in range(len(rss_vals_avg_daily)):
            if i == 0:
                # placeholder; overwritten with the second value below
                error_reduced_lst.append('')
            elif i > 0:
                # calculate the error reduced by each taw step
                error_reduced = rss_vals_avg_daily[i] - rss_vals_avg_daily[i-1]
                error_reduced_lst.append(error_reduced)
        print 'the error reduced list \n', error_reduced_lst
        # set the first value of the list to the second value
        error_reduced_lst[0] = error_reduced_lst[1]
        print 'the error reduced list \n', error_reduced_lst
        # round the values to the 2nd decimal place
        error_reduced_lst= [round(i, 2) for i in error_reduced_lst]
        # collect indices whose error reduction is within the 0.01 threshold
        indx_lst = []
        for i, re in enumerate(error_reduced_lst):
            if abs(re) <= 0.01:
                indx_lst.append(i)
        print 'the index list\n', indx_lst
        # find runs of consecutive indices (sustained sub-threshold reduction)
        # NOTE(review): range(len(indx_lst)+1) overshoots by one; the final
        # iteration matches neither branch -- confirm the intended bounds.
        consecutives = []
        for i in range(len(indx_lst)+1):
            if i > 0 and i < (len(indx_lst)-1):
                print i
                if indx_lst[i + 1] == indx_lst[i] + 1:
                    consecutives.append(indx_lst[i])
            elif i == len(indx_lst)-1:
                if indx_lst[i] -1 == indx_lst[i-1]:
                    consecutives.append(indx_lst[i-1])
                    consecutives.append(indx_lst[i])
        print 'consecutives \n', consecutives
        # take the first index after which the reduced error is consistently less than or equal to 0.01
        target_index = consecutives[0]
        # taw at the target index is the optimum taw
        optimum_taw = taw_vals[target_index]
        print 'optimum taw', optimum_taw
    else:
        print 'running'
        # open rss dict from yml file
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input -- consider yaml.safe_load if the file
        # holds only plain data.
        with open(rss_path, 'r') as rfile:
            rss = yaml.load(rfile)
        print 'optimizing taw'
        # get taw, rss arrays out.
        taw_vals = rss['taw']
        rss_arrs = rss['rss']
        print 'len of rss arrs', len(rss_arrs)
        # get the average daily rss in mm for an 11 year time period todo - these outputs look strange
        rss_vals_avg_daily = [((rss / 11.0) / 365.0) for rss in rss_arrs]
        # output average daily rss as images for better visualization
        geotiff_output(taw_vals, rss_vals_avg_daily, geo_info, namekey='daily_rss', outpath=output_path)
        print 'the rss avg daily error \n', len(rss_vals_avg_daily)
        # per-pixel error reduction between successive TAW steps
        error_reduced_lst = []
        for i in range(len(rss_vals_avg_daily)):
            print 'i', i
            if i == 0:
                # placeholder; overwritten with the second entry below
                error_reduced_lst.append('')
            elif i > 0:
                # calculate the error reduced by each taw step todo - these should be positive if error is DECREASING
                error_reduced = rss_vals_avg_daily[i] - rss_vals_avg_daily[i - 1]
                error_reduced_lst.append(error_reduced)
        print 'the error reduced list \n', error_reduced_lst
        # set the first value of the list to the second value
        error_reduced_lst[0] = error_reduced_lst[1]
        print 'the error reduced list \n', error_reduced_lst
        # output ERROR_REDUCED as images
        geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced', outpath=output_path)
        # make all errors positive by taking the absolute value todo - what are the implications of taking the absolute value? It may mess up the algorithm
        error_reduced_lst = [np.absolute(i) for i in error_reduced_lst]
        # output ERROR_REDUCED as images
        geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced_positive', outpath=output_path)
        # round the values to the 2nd decimal place FOR AN ARRAY
        error_reduced_lst = [np.round(i, 2) for i in error_reduced_lst]
        # output ERROR_REDUCED as images
        geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced_positive_rounded', outpath=output_path)
        # prepare to store three dimensional arrays with dstack; depth 0 so
        # each dstack below appends one layer per TAW value
        value_shape = rss_arrs[0].shape
        three_d_shape = (value_shape[0], value_shape[1], 0)
        # for storing the boolean for the expression: rss value < threshold
        # todo - should this be np.zeros or is np.empty better?
        reduced_error_tab = np.empty(three_d_shape)
        # for storing the minimum taw
        taw_tab = np.empty(three_d_shape)
        smaller_than_list = []
        for taw, error_array in zip(taw_vals, error_reduced_lst):
            print 'checking rss for taw: {}'.format(taw)
            # make each taw into an array so we can index it
            taw_arr = np.full(error_array.shape, taw, dtype='float64')
            # get the boolean where error array is less than 0.05
            # NOTE(review): an earlier version thresholded at 0.01 -- confirm.
            smaller_than = error_array <= 0.05
            # we append the smaller_than array as an int for testing
            smaller_than_list.append(smaller_than.astype(int))
            # append the smaller than array to reduced error tab with dstack
            reduced_error_tab = np.dstack((reduced_error_tab, smaller_than))
            # append the taw array to a 3d array
            taw_tab = np.dstack((taw_tab, taw_arr))
        geotiff_output(taw_vals, smaller_than_list, geo_info, namekey='smaller_than', outpath=output_path)
        # 1) go through the 3d array of true false from start to finish, extract true/false as list along 3rd dimension
        # 2) go through that list and get the indices of the true values
        # 3) get the indices that are consecutive
        # 4) take the first of the consecutive indices and grab the corresponding TAW.
        # 5) put the TAW back in a 2d array where it belongs.
        # This will hold the optimized TAW (2d array)
        optimum_taw_disagg = np.zeros(rss_arrs[0].shape)
        # iterate through the 3d array
        cols, rows, vals = reduced_error_tab.shape
        for i in range(cols):
            for j in range(rows):
                true_indices = []
                taw_lst = []
                for k in range(vals):
                    taw = taw_tab[i, j, k]
                    taw_lst.append(taw)
                    if reduced_error_tab[i, j, k]:
                        true_indices.append(k)
                # based on optional setting take the taw value based on the first instance that the error reduction falls below the threshold
                if hair_trigger:
                    try:
                        target_index = true_indices[0]
                    except IndexError:
                        # otherwise go with the max TAW
                        target_index = -1
                else:
                    consecutives = []
                    # NOTE(review): this mirrors the consecutive-run scan in the
                    # test-mode branch above; the original author flagged it
                    # ("the problem is likely right here") -- verify the bounds.
                    for ti in range(len(true_indices) + 1):
                        if ti > 0 and ti < (len(true_indices) - 1):
                            if true_indices[ti + 1] == true_indices[ti] + 1:
                                consecutives.append(true_indices[ti])
                        elif ti == len(true_indices) - 1:
                            if true_indices[ti] - 1 == true_indices[ti - 1]:
                                consecutives.append(true_indices[ti - 1])
                                consecutives.append(true_indices[ti])
                    # take the first index after which the reduced error is consistently less than or equal to the threshold
                    try:
                        target_index = consecutives[0]
                    except IndexError:
                        # otherwise go with the maximum TAW
                        target_index = -1
                # taw at the target index is the optimum taw
                optimum_taw = taw_lst[target_index]
                # when we have the taw value put it back in the 2d array
                optimum_taw_disagg[i, j] = optimum_taw
        numpy_to_geotiff(optimum_taw_disagg, geo_info, output_path, output_name='optimized_taw_disagg.tif')
# --- module-level driver ---------------------------------------------------
# NOTE(review): this runs at import time (no __main__ guard) and uses
# hard-coded machine-specific paths.
test_path = '/Users/dcadol/Desktop/academic_docs_II/JPL_Data/taw_calibration_disaggregated/rss.yml'
geo_info_path = '/Volumes/Seagate_Expansion_Drive/taw_optimization_work_folder/geo_info_espanola.yml'
output_path = '/Volumes/Seagate_Expansion_Drive/taw_optimization_work_folder/disagg_test_folder'
# pull out the geo info
# NOTE(review): yaml.load without a Loader is deprecated/unsafe; safe_load is
# preferred if the file holds only plain data -- confirm its contents.
with open(geo_info_path, mode='r') as geofile:
    geo_dict = yaml.load(geofile)
optimize_taw_disaggregate(rss_path=test_path, output_path=output_path, geo_info=geo_dict, big_arr=False,
                          test_mode=False, hair_trigger=True)
|
|
from __future__ import unicode_literals
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2_deprecated, mock_rds_deprecated
from tests.helpers import disable_on_py3
@disable_on_py3()
@mock_rds_deprecated
def test_create_database():
    """Creating a DB instance returns its attributes, endpoint and groups."""
    rds = boto.rds.connect_to_region("us-west-2")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    # The mocked instance should come back immediately available.
    instance.status.should.equal('available')
    instance.id.should.equal("db-master-1")
    instance.allocated_storage.should.equal(10)
    instance.instance_class.should.equal("db.m1.small")
    instance.master_username.should.equal("root")
    expected_endpoint = ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)
    instance.endpoint.should.equal(expected_endpoint)
    instance.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds_deprecated
def test_get_databases():
    """Listing DB instances reflects creations and supports lookup by id."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
    for name in ("db-master-1", "db-master-2"):
        rds.create_dbinstance(name, 10, 'db.m1.small', 'root', 'hunter2')
    list(rds.get_all_dbinstances()).should.have.length_of(2)
    matches = rds.get_all_dbinstances("db-master-1")
    list(matches).should.have.length_of(1)
    matches[0].id.should.equal("db-master-1")
@mock_rds_deprecated
def test_describe_non_existant_database():
    """Describing an unknown DB instance raises a server error."""
    rds = boto.rds.connect_to_region("us-west-2")
    lookup = rds.get_all_dbinstances.when.called_with("not-a-db")
    lookup.should.throw(BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_delete_database():
    """Deleting a DB instance removes it from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
    rds.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    list(rds.get_all_dbinstances()).should.have.length_of(1)
    rds.delete_dbinstance("db-master-1")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_database():
    """Deleting an unknown DB instance raises a server error."""
    rds = boto.rds.connect_to_region("us-west-2")
    attempt = rds.delete_dbinstance.when.called_with("not-a-db")
    attempt.should.throw(BotoServerError)
@mock_rds_deprecated
def test_create_database_security_group():
    """A new DB security group echoes its name/description and has no IP ranges."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    group.name.should.equal('db_sg')
    group.description.should.equal("DB Security Group")
    list(group.ip_ranges).should.equal([])
@mock_rds_deprecated
def test_get_security_groups():
    """Security-group listing reflects creations and supports lookup by name."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
    for name in ('db_sg1', 'db_sg2'):
        rds.create_dbsecurity_group(name, 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(2)
    matches = rds.get_all_dbsecurity_groups("db_sg1")
    list(matches).should.have.length_of(1)
    matches[0].name.should.equal("db_sg1")
@mock_rds_deprecated
def test_get_non_existant_security_group():
    """Looking up an unknown security group raises a server error."""
    rds = boto.rds.connect_to_region("us-west-2")
    lookup = rds.get_all_dbsecurity_groups.when.called_with("not-a-sg")
    lookup.should.throw(BotoServerError)
@mock_rds_deprecated
def test_delete_database_security_group():
    """Deleting a security group removes it from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(1)
    rds.delete_dbsecurity_group("db_sg")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_security_group():
    """Deleting an unknown security group raises a server error."""
    rds = boto.rds.connect_to_region("us-west-2")
    attempt = rds.delete_dbsecurity_group.when.called_with("not-a-db")
    attempt.should.throw(BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_security_group_authorize():
    """Authorizing a CIDR adds it to the security group's IP ranges."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(group.ip_ranges).should.equal([])
    group.authorize(cidr_ip='10.3.2.45/32')
    # Re-fetch to confirm the authorization was persisted server-side.
    group = rds.get_all_dbsecurity_groups()[0]
    list(group.ip_ranges).should.have.length_of(1)
    group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@disable_on_py3()
@mock_rds_deprecated
def test_add_security_group_to_database():
    """Modifying an instance attaches the given security group."""
    rds = boto.rds.connect_to_region("us-west-2")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    instance.modify(security_groups=[group])
    # Re-fetch to confirm the modification was persisted server-side.
    instance = rds.get_all_dbinstances()[0]
    list(instance.security_groups).should.have.length_of(1)
    instance.security_groups[0].name.should.equal("db_sg")
@mock_ec2_deprecated
@mock_rds_deprecated
def test_add_database_subnet_group():
    """A DB subnet group records its name, description and subnet ids."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet_ids = [
        vpc_conn.create_subnet(vpc.id, cidr).id
        for cidr in ("10.1.0.0/24", "10.2.0.0/24")
    ]
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
    group.name.should.equal('db_subnet')
    group.description.should.equal("my db subnet")
    list(group.subnet_ids).should.equal(subnet_ids)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_describe_database_subnet_group():
    """Subnet-group listing supports lookup by name and errors on unknowns."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    for name in ("db_subnet1", "db_subnet2"):
        rds.create_db_subnet_group(name, "my db subnet", [subnet.id])
    list(rds.get_all_db_subnet_groups()).should.have.length_of(2)
    list(rds.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
    lookup = rds.get_all_db_subnet_groups.when.called_with("not-a-subnet")
    lookup.should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_delete_database_subnet_group():
    """Deleting a subnet group removes it; deleting it twice raises."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    list(rds.get_all_db_subnet_groups()).should.have.length_of(1)
    rds.delete_db_subnet_group("db_subnet1")
    list(rds.get_all_db_subnet_groups()).should.have.length_of(0)
    attempt = rds.delete_db_subnet_group.when.called_with("db_subnet1")
    attempt.should.throw(BotoServerError)
@disable_on_py3()
@mock_ec2_deprecated
@mock_rds_deprecated
def test_create_database_in_subnet_group():
    """An instance created with db_subnet_group_name reports that group."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    rds.create_dbinstance("db-master-1", 10, 'db.m1.small',
                          'root', 'hunter2', db_subnet_group_name="db_subnet1")
    instance = rds.get_all_dbinstances("db-master-1")[0]
    instance.subnet_group.name.should.equal("db_subnet1")
@disable_on_py3()
@mock_rds_deprecated
def test_create_database_replica():
    """Read replicas are linked to the primary and unlinked on deletion."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    replica = rds.create_dbinstance_read_replica(
        "replica", "db-master-1", "db.m1.small")
    replica.id.should.equal("replica")
    replica.instance_class.should.equal("db.m1.small")
    status_info = replica.status_infos[0]
    status_info.normal.should.equal(True)
    status_info.status_type.should.equal('read replication')
    status_info.status.should.equal('replicating')
    # The primary lists the replica until it is deleted.
    primary = rds.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    rds.delete_dbinstance("replica")
    primary = rds.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds_deprecated
def test_create_cross_region_database_replica():
    """A replica can be created in another region from the primary's ARN."""
    west_1 = boto.rds.connect_to_region("us-west-1")
    west_2 = boto.rds.connect_to_region("us-west-2")
    west_1.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
    west_2.create_dbinstance_read_replica(
        "replica",
        primary_arn,
        "db.m1.small",
    )
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    replica = west_2.get_all_dbinstances("replica")[0]
    replica.instance_class.should.equal("db.m1.small")
    # Deleting the replica unlinks it from the primary in the other region.
    west_2.delete_dbinstance("replica")
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds_deprecated
def test_connecting_to_us_east_1():
    """us-east-1 works despite boto's special-cased RDS endpoint URL.

    boto does not use us-east-1 in the URL for RDS, and that broke moto in
    the past:
    https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
    """
    rds = boto.rds.connect_to_region("us-east-1")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    instance.status.should.equal('available')
    instance.id.should.equal("db-master-1")
    instance.allocated_storage.should.equal(10)
    instance.instance_class.should.equal("db.m1.small")
    instance.master_username.should.equal("root")
    expected_endpoint = ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)
    instance.endpoint.should.equal(expected_endpoint)
    instance.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds_deprecated
def test_create_database_with_iops():
    """Provisioned-IOPS instances report io1 storage and the requested IOPS."""
    rds = boto.rds.connect_to_region("us-west-2")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
    instance.status.should.equal('available')
    instance.iops.should.equal(6000)
    # boto>2.36.0 may change the following property name to `storage_type`
    instance.StorageType.should.equal('io1')
|
|
#!/usr/bin/env python3
"""
Chord peer
==========
This module provides peer of a Chord distributed hash table.
"""
import random
import time
import socket
import socketserver
import threading
import logging
# Log everything at DEBUG with timestamps; peers are long-running processes.
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
                    level=logging.DEBUG)
# Successor-chain length. NOTE(review): appears unused in this module.
CHAIN = 3
# Number of finger-table ("chord") entries; key ring spans 2**CHORDS keys.
CHORDS = 30
MAX_KEY = 2**CHORDS
# Seconds between periodic finger-table refresh passes (see Peer.start).
CHORD_UPDATE_INTERVAL = 5
class Peer:
    """
    A single node of the Chord distributed hash table.

    Holds a finger table (`chords`) of shortcuts around the key ring, a
    successor `chain`, and the local `storage` dict for the slice of the key
    space this peer is responsible for.
    """
    def __init__(self, port=4321, key=None):
        # Choose a random ring position unless an explicit key was supplied.
        # NOTE(review): randint is inclusive of MAX_KEY, giving MAX_KEY + 1
        # possible keys -- confirm whether randrange(MAX_KEY) was intended.
        if key is None:
            self.key = random.randint(0, MAX_KEY)
        else:
            self.key = key
        logging.info('Peer key: %x' % self.key)
        # chords[i]: peer responsible for key + 2**i, or None if that is us.
        self.chords = [None] * CHORDS
        # chain[0] is our successor on the ring; [None] until we join a DHT.
        self.chain = [None]
        # Local key -> value store for the key slice we own.
        self.storage = {}
        self.port = port
    def connect(self, url):
        """
        Connects to the DHT using the given `url` (of any connected node).
        """
        logging.info('Connecting to: ' + url)
        # Peer currently responsible for our key; it becomes chain[0].
        old = self.find_re(self.key, connecting=url)
        logging.debug(old)
        # NOTE(review): the 'accept' request is sent to `url`, not to old[1];
        # confirm this is intended when `url` is not the responsible peer.
        self.chain = [old] + request(url, 'accept', self.key,
                                     bytes(str(self.port), 'ascii'))
        # Populate fingers only for keys outside the slice we now own.
        for i in range(CHORDS):
            key = (self.key + 2**i) % MAX_KEY
            if not inside(key, self.key, old[0]):
                self.chords[i] = self.find_re(key, connecting=url)
    def accept(self, key, url):
        """
        Accepts a peer to the DHT by:
        - putting him on the ring after itself
        - reassigning to him part of own key space
        """
        self.chain = [(key, url)] + self.chain
        # TODO: transfer him the stored keys
        # Fingers we previously covered ourselves may now belong to the
        # newly accepted peer.
        for i in range(CHORDS):
            key = (self.key + 2**i) % MAX_KEY
            if self.chords[i] is None and\
               not inside(key, self.key, self.chain[0][0]):
                self.chords[i] = self.chain[0]
    def start(self):
        """
        Starts Peer's operation.
        """
        # Handler instances are created per-connection; hand the class a
        # reference back to this peer so they can serve requests.
        Handler.peer = self
        logging.info('Listening on port %d' % self.port)
        server = Server(('0.0.0.0', self.port), Handler)
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
        logging.debug('Server thread started')
        # Main thread refreshes the finger table forever.
        while True:
            time.sleep(CHORD_UPDATE_INTERVAL)
            self._update_chords()
    def find(self, key):
        """
        Find a peer that is closer to the one responsible for the given `key`.
        Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
        """
        # Not joined yet, or the key falls inside our own slice: we own it.
        if self.chain[0] is None or inside(key, self.key, self.chain[0][0]):
            return None
        # Walk the finger table looking for the interval containing the key.
        # NOTE(review): assumes chords[i+1] is not None whenever chords[i]
        # isn't -- confirm this invariant holds while the table is filling.
        for i in range(CHORDS - 1):
            if self.chords[i] is None:
                continue  # I'm still responsible for this part
            if inside(key, self.chords[i][0], self.chords[i+1][0]):
                return self.chords[i]
        if self.chords[-1] is None:
            return self.chain[0]  # Another funny corner case
        else:
            return self.chords[-1]
    def find_re(self, key, connecting=None):
        """
        Find the peer that is responsible for the given `key`.
        Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
        """
        if connecting is not None:
            # Bootstrap case: start the search at the given node.
            closer = (None, connecting)
        else:
            closer = self.find(key)
        if closer is None:
            return None
        # Hop from peer to peer until one answers "me" (wrapped as Me).
        while not isinstance(closer, Me):
            closer = request(closer[1], 'find', key)
        return closer
    def get(self, key):
        """
        Return the value for the `key`, wherever it is stored.
        """
        responsible = self.find_re(key)
        logging.debug('Peer %s responsible for key %x' %
                      (responsible, key))
        if responsible is None:
            # We own the key; missing keys yield None rather than KeyError.
            return self.storage.get(key, None)
        else:
            return request(responsible[1], 'get', key)
    def put(self, key, value):
        """
        Store the `(key, value)` in the DHT.
        """
        responsible = self.find_re(key)
        logging.debug('Peer %s responsible for key %x' %
                      (responsible, key))
        if responsible is None:
            self.storage[key] = value
        else:
            request(responsible[1], 'put', key, value)
    def _update_chords(self):
        """Periodically re-resolve every finger-table entry."""
        logging.info('Storing %d values' % len(self.storage))
        logging.debug(self.chain)
        # Nothing to refresh until we have joined a ring.
        if self.chain[0] is None:
            return
        logging.debug('Updating chords')
        for i in range(CHORDS):
            key = (self.key + 2**i) % MAX_KEY
            if not inside(key, self.key, self.chain[0][0]):
                self.chords[i] = self.find_re(key)
        logging.debug("%d chords established" %
                      sum([1 for x in self.chords if x is not None]))
def inside(key, left, right):
    """Return True when `key` lies in the half-open ring interval [left, right).

    Keys are arranged on a ring, so the interval may wrap around
    (left > right); an empty interval (left == right) contains no keys.
    """
    if left == right:
        return False
    wraps = left > right
    if wraps:
        return key >= left or key < right
    return left <= key < right
def request(url, operation, key, value=None):
    """
    Send one protocol request to the peer at `url` and decode the reply.

    Wire format: "<operation> <hex key>\n", optionally followed by a
    length-prefixed value ("<length>\n<bytes>"). Replies start with one of
    the prefixes: value, none, peer, me, chain.

    :returns: bytes for 'value'; Address/None for 'peer'; Me for 'me';
        a list of Address/None for 'chain'.
    :raises KeyError: when the peer answers 'none' for the key.
    """
    logging.debug('Requesting from %s operation %s key %x value %s' %
                  (url, operation, key, value))
    sock = _connect(url)
    body = bytes("%s %x\n" % (operation, key), 'ascii')
    if value:
        # Length-prefixed payload (mirrors _read_value on the server side).
        body += bytes("%d\n" % len(value), 'ascii')
        body += value
    try:
        sock.sendall(body)
        inh = sock.makefile('rb')
        response = inh.readline()
        if response.startswith(b'value'):
            logging.debug(response)
            length = int(response.split()[1])
            return inh.read(length)
        elif response.startswith(b'none'):
            raise KeyError("Key %x not in DHT" % key)
        elif response.startswith(b'peer'):
            logging.debug('Raw response %s' % response)
            return _parse_peer(response)
        elif response.startswith(b'me'):
            # The answering peer is itself responsible; tag it with Me.
            key = int(response.split()[1], base=16)
            return Me([key, url])
        elif response.startswith(b'chain'):
            # Each remaining line describes one peer of the successor chain.
            chain = []
            for line in inh:
                chain.append(_parse_peer(line))
            return chain
    finally:
        sock.close()
    # Fallback for unrecognized reply prefixes: hand back the raw first line.
    return response
class Handler(socketserver.StreamRequestHandler):
    """Serves one client connection: reads a request line, writes one reply."""
    # Owning Peer instance; assigned by Peer.start() before serving begins.
    peer = None
    def handle(self):
        # Request line: "<operation> <hex key>".
        inh = self.rfile
        operation, key = inh.readline().split()
        key = int(key, base=16)
        logging.info("Request: %s %x" % (operation, key))
        response = b'unknown operation'
        if operation == b'find':
            peer = self.peer.find(key)
            if peer is None:
                # We are the responsible peer ourselves.
                response = bytes("me %x\n" % self.peer.key, 'ascii')
            else:
                response = _serialize_peer(peer)
        elif operation == b'accept':
            # Reply with our successor chain, then register the new peer
            # using the port it announced in the length-prefixed payload.
            response = b"chain\n"
            for peer in self.peer.chain:
                response += _serialize_peer(peer)
            port = int(_read_value(inh))
            self.peer.accept(key, _make_url(self.request, port))
        elif operation == b'get':
            value = self.peer.get(key)
            if value is None:
                response = b'none'
            else:
                # Length-prefixed value reply.
                response = bytes("value %d\n" % len(value), 'ascii')
                response += value
        elif operation == b'put':
            value = _read_value(inh)
            logging.debug("Value: %s" % value)
            self.peer.put(key, value)
            response = b'ok'
        elif operation == b'ping':
            # Liveness probe.
            response = b'pong'
        logging.debug("Response: %s\n" % response)
        self.request.sendall(response)
def _read_value(inh):
length = int(inh.readline())
return inh.read(length)
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each incoming connection in its own thread."""
    pass
class Address(tuple):  # Hate I can't define my own __init__
    """A (key, url) pair identifying a remote peer."""
    pass
class Me(Address):
    """Marker subclass: the address refers to the answering peer itself."""
    pass
def _parse_peer(line):
if line.startswith(b'peer'):
key, url = line.split()[1:]
return Address([int(key, base=16), url])
elif line.startswith(b'none'):
return None
else:
raise ValueError('Wrong response for peer %s' % line)
def _serialize_peer(peer):
if peer is None:
return b'none'
else:
return bytes("peer %x %s\n" % (peer[0], str(peer[1], 'ascii')),
'ascii')
def _make_url(socket, port=None):
#FIXME: this gives us the request socket, not the listening one
if port is None:
return bytes("%s:%d" % socket.getpeername(), 'ascii')
else:
return bytes("%s:%d" % (socket.getpeername()[0], port), 'ascii')
def _connect(url):
    """Open a TCP connection to `url` ('host[:port]'; port defaults to 4321)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if isinstance(url, bytes):
        url = str(url, 'ascii')
    if ':' not in str(url):
        host, port = url, 4321
    else:
        host, port_text = url.split(':')
        port = int(port_text)
    sock.connect((host, port))
    return sock
def main():
    """Command-line entry point: start a peer, optionally joining an existing DHT."""
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-key', help='hexadecimal key for this node')
    parser.add_argument('-url', help='url of an existing DHT peer')
    parser.add_argument('-port', help='listening TCP port',
                        type=int, default=4321)
    opts = parser.parse_args()
    node_key = int(opts.key, 16) if opts.key is not None else None
    peer = Peer(port=opts.port, key=node_key)
    if opts.url:
        peer.connect(opts.url)
    peer.start()


if __name__ == '__main__':
    main()
|
|
import bs4
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from unidecode import unidecode
import pandas as pd
import datetime
from dateutil.parser import parse
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
########### task 1 ##########
def scrape_data(start_date, from_place, to_place, city_name):
    """
    Scrape Google Flights Explore price-bar data for one destination city.

    Drives a Chrome browser: fills in the destination and origin boxes,
    rewrites the URL so results start at `start_date`, then hovers over each
    price bar for the matching city and records the tooltip text.

    :param start_date: date string substituted for the last 10 characters of
        the results URL -- assumes the URL ends in a date; TODO confirm format
    :param from_place: text typed into the origin box
    :param to_place: text typed into the destination box
    :param city_name: destination city to match (accent-insensitive,
        Python 2 byte string)
    :return: list of (date_text, price_text) tuples, one per hovered bar
    """
    driver = webdriver.Chrome()
    driver.get('https://www.google.com/flights/explore/')
    time.sleep(1.5)
    # input to_place (XPath is tied to the 2017-era page layout)
    to_input = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div[3]/div/div[4]/div/div')
    to_input.click()
    actions = ActionChains(driver)
    actions.send_keys(to_place)
    actions.send_keys(Keys.ENTER)
    actions.perform()
    time.sleep(0.5)
    # input from_place
    to_input = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div[3]/div/div[2]/div/div')
    to_input.click()
    actions = ActionChains(driver)
    actions.send_keys(from_place)
    actions.send_keys(Keys.ENTER)
    actions.perform()
    time.sleep(0.5)
    # input start_date by replacing the trailing date portion of the URL
    driver.get(driver.current_url[:-10]+start_date)
    time.sleep(0.5)
    # find the city_name
    data = []
    # Normalize the requested city: strip accents with unidecode, then
    # Title-Case each word so it matches the text rendered on the page.
    city_name0=unicode(city_name,'utf-8')
    city_name_unicode=unidecode(city_name0)
    city_name1=city_name_unicode.lower().split(' ')
    city_name2=''
    for i in range(len(city_name1)):
        city_name2=city_name2+city_name1[i][0].upper()+city_name1[i][1:]+' '
    city_name2=city_name2.strip()
    results = driver.find_elements_by_class_name('LJTSM3-v-d')
    for result in results:
        if city_name2 in result.text:
            bars = result.find_elements_by_class_name('LJTSM3-w-x')
            for bar in bars:
                # Hover to populate the tooltip, then read (date, price).
                ActionChains(driver).move_to_element(bar).perform()
                time.sleep(0.0001)
                data.append((result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[0].text,
                             result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[1].text))
        else:
            pass
        time.sleep(0.01)
    driver.quit()
    return data
########### task 2 ##########
def scrape_data_90(start_date, from_place, to_place, city_name):
    """Like scrape_data, but extends the scrape to a second page of bars.

    After collecting the first chart it clicks the forward control and
    scrapes again, then keeps the first 60 plus the last 30 records so
    the overlap between the two passes is dropped.
    Returns a list of (price_text, date_range_text) tuples.
    NOTE: Python 2 only (uses the `unicode` builtin); brittle XPaths.
    """
    driver = webdriver.Chrome()
    driver.get('https://www.google.com/flights/explore/')
    time.sleep(1.5)
    #input to_place
    to_input = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div[3]/div/div[4]/div/div')
    to_input.click()
    actions = ActionChains(driver)
    actions.send_keys(to_place)
    actions.send_keys(Keys.ENTER)
    actions.perform()
    time.sleep(0.5)
    #input from_place
    to_input = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div[3]/div/div[2]/div/div')
    to_input.click()
    actions = ActionChains(driver)
    actions.send_keys(from_place)
    actions.send_keys(Keys.ENTER)
    actions.perform()
    time.sleep(0.5)
    #input start_date
    # The last 10 characters of the results URL hold the start date.
    driver.get(driver.current_url[:-10]+start_date)
    time.sleep(0.5)
    #find the city_name
    data = []
    # Normalize the city name (strip accents, title-case each word).
    city_name0=unicode(city_name,'utf-8')
    city_name_unicode=unidecode(city_name0)
    city_name1=city_name_unicode.lower().split(' ')
    city_name2=''
    for i in range(len(city_name1)):
        city_name2=city_name2+city_name1[i][0].upper()+city_name1[i][1:]+' '
    city_name2=city_name2.strip()
    results = driver.find_elements_by_class_name('LJTSM3-v-d')
    for result in results:
        if city_name2 in result.text:
            bars = result.find_elements_by_class_name('LJTSM3-w-x')
            for bar in bars:
                # Hover each bar to populate its tooltip, then record it.
                ActionChains(driver).move_to_element(bar).perform()
                time.sleep(0.0001)
                data.append((result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[0].text,
                             result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[1].text))
        else:
            pass
    time.sleep(0.5)
    # Advance the chart to the next date window and scrape again.
    to_input = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div[4]/div/div[2]/div[1]/div/div[2]/div[2]/div/div[2]/div[5]/div')
    to_input.click()
    time.sleep(0.5)
    results = driver.find_elements_by_class_name('LJTSM3-v-d')
    for result in results:
        if city_name2 in result.text:
            bars = result.find_elements_by_class_name('LJTSM3-w-x')
            for bar in bars:
                ActionChains(driver).move_to_element(bar).perform()
                time.sleep(0.0001)
                data.append((result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[0].text,
                             result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[1].text))
        else:
            pass
    # Drop the overlap between the two passes: keep the first 60 records
    # and the last 30 (presumably yielding ~90 days -- verify scrape size).
    data=data[:60]+data[-30:]
    driver.quit()
    return data
########## task 3 part 1 ##########
def task_3_dbscan(flight_data):
    """Cluster (start date, price) points with DBSCAN and report
    low-price outliers.

    flight_data: list of (price_text, date_range_text) tuples as returned
    by scrape_data. Saves a scatter plot to 'task_3_dbscan.png'.
    Returns a message string when nothing qualifies, otherwise the
    DataFrame rows for the low-price outliers.
    NOTE: relies on Python 2 `zip` returning a re-iterable list and the
    builtin `reduce`; would need changes under Python 3.
    """
    # Each record becomes (price, days since the first start date,
    # trip length in days).
    clean_data = [(float(d[0].replace('$', '').replace(',', '')),
                   (parse(d[1].split('-')[0].strip()) - parse(flight_data[0][1].split('-')[0].strip())).days,
                   reduce(lambda x, y: y - x, [parse(x.strip()) for x in d[1].split('-')]).days) for d in flight_data]
    df = pd.DataFrame(clean_data, columns=['Price', 'Start_Date', 'Trip_Length'])
    # Standardize both axes so eps=.5 is meaningful for DBSCAN.
    X = StandardScaler().fit_transform(df[['Start_Date', 'Price']])
    db = DBSCAN(eps=.5, min_samples=3).fit(X)
    labels = db.labels_
    clusters = len(set(labels))
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    plt.subplots(figsize=(12, 8))
    for k, c in zip(unique_labels, colors):
        class_member_mask = (labels == k)
        xy = X[class_member_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=c,
                 markeredgecolor='k', markersize=14)
    plt.title("Total Clusters: {}".format(clusters), fontsize=14, y=1.01)
    df['dbscan_labels'] = db.labels_
    plt.savefig('task_3_dbscan.png')
    # DBSCAN labels noise points -1; treat those as outlier candidates.
    outliers = df[df['dbscan_labels'] == -1].copy()
    outliers_1 = zip(outliers.Start_Date, outliers.Price)
    clusters = df[df['dbscan_labels'] != -1].copy()
    clusters_1 = zip(clusters.Start_Date, clusters.Price, clusters.dbscan_labels)
    outliers_label = []
    # Assign each noise point to its nearest cluster using a weighted
    # squared distance (price divided by 100 so date dominates).
    for outlier in outliers_1:
        min_cluster_label = -1
        min_dist = 9999
        for cluster in clusters_1:
            dist = (float(outlier[0]) - float(cluster[0])) ** 2 + ((float(outlier[1]) - float(cluster[1])) / 100) ** 2
            # I think the weight of date in distance is more important than weight of price. Therefore, I did not use Euclidean distance.
            if dist < min_dist:
                min_dist = dist
                min_cluster_label = cluster[2]
        outliers_label.append(min_cluster_label)
    outliers_2 = zip(outliers.Start_Date, outliers.Price, outliers_label)
    # Per-cluster price statistics (std/mean/count).
    agg = df[df['dbscan_labels'] != -1].groupby('dbscan_labels')['Start_Date', 'Price'].agg(
        ['std', 'mean', 'count']).copy()
    outliers_3 = []
    # A point counts as a low-price outlier when it lies more than two
    # standard deviations below its nearest cluster's mean price
    # (floors: std >= 10, threshold >= $50).
    for outlier in outliers_2:
        mean = agg[agg.index == outlier[2]]['Price']['mean']
        std = max(float(agg[agg.index == outlier[2]]['Price']['std']), 10)
        line = max(float(mean - 2 * std), 50)
        if outlier[1] < line:
            outliers_3.append(outlier[0])
    if len(outliers_3) == 0:
        return 'There is no low price outlier.'
    else:
        # NOTE(review): indexes df by Start_Date values used as labels;
        # this assumes Start_Date coincides with the default RangeIndex
        # (i.e. one record per consecutive day) -- confirm upstream.
        return df.loc[outliers_3]
########## task 3 part 2 ##########
def task_3_IQR(flight_data):
    """Flag abnormally low fares with Tukey's IQR rule.

    flight_data: list of (price_text, date_range_text) tuples. Saves a
    box plot to 'task_3_iqr.png'. Returns the string 'No outliers' when
    no price falls below Q1 - 1.5*IQR, otherwise the matching rows.
    """
    base_start = parse(flight_data[0][1].split('-')[0].strip())
    rows = []
    for record in flight_data:
        price = float(record[0].replace('$', '').replace(',', ''))
        endpoints = [parse(part.strip()) for part in record[1].split('-')]
        start_offset = (endpoints[0] - base_start).days
        trip_length = reduce(lambda x, y: y - x, endpoints).days
        rows.append((price, start_offset, trip_length))
    df = pd.DataFrame(rows, columns=['Price', 'Start_Date', 'Trip_Length'])
    plt.boxplot(df['Price'])
    plt.savefig('task_3_iqr.png')
    stats = df.Price.describe()
    iqr = stats['75%'] - stats['25%']
    low_line = stats['25%'] - 1.5 * iqr
    result = df[df['Price'] < low_line]
    return 'No outliers' if len(result) == 0 else result
########## task 4 ##########
def task_4_dbscan(flight_data):
    """Find the cheapest run of five consecutive start dates whose price
    spread is at most $20, using DBSCAN clusters as candidate windows.

    flight_data: list of (price_text, date_range_text) tuples.
    Returns 'No required value' when no window qualifies, otherwise the
    DataFrame rows of the winning window.
    """
    # (price, days since first start date, trip length in days)
    clean_data = [(float(d[0].replace('$', '').replace(',', '')),
                   (parse(d[1].split('-')[0].strip()) - parse(flight_data[0][1].split('-')[0].strip())).days,
                   reduce(lambda x, y: y - x, [parse(x.strip()) for x in d[1].split('-')]).days) for d in flight_data]
    df = pd.DataFrame(clean_data, columns=['Price', 'Start_Date', 'Trip_Length'])
    # Stretch the date axis by 20x so the circular eps acts like an
    # ellipse: roughly 1 day in time by ~$20 in price.
    X = df[['Start_Date', 'Price']].values * np.array([20, 1])
    radius = np.sqrt(np.square(20.00) + np.square(20.00))
    db = DBSCAN(eps=radius, min_samples=3).fit(X)
    df['dbscan_labels'] = db.labels_
    clusters = df.dbscan_labels.unique()
    clusters_5 = []
    # Slide a 5-wide window across every non-noise cluster that has at
    # least 5 members; each window is a candidate date run.
    for cluster in clusters:
        if cluster != -1 and len(df[df['dbscan_labels'] == cluster]) > 4:
            for i in range(len(df[df['dbscan_labels'] == cluster]) - 4):
                clusters_5.append(df[df['dbscan_labels'] == cluster]['Start_Date'].values[i:i + 5])
    mean_min = 9999
    cluster_mean_min = []
    for cluster_5 in clusters_5:
        # NOTE(review): df.loc[cluster_5] uses Start_Date values as index
        # labels; this assumes Start_Date coincides with the default
        # RangeIndex (one record per consecutive day) -- confirm.
        df_5 = df.loc[cluster_5][['Start_Date', 'Price']]
        cluster_max = df_5['Price'].max()
        cluster_min = df_5['Price'].min()
        cluster_mean = df_5['Price'].mean()
        # Keep the cheapest window whose max-min spread is within $20.
        if cluster_max - cluster_min <= 20 and cluster_mean < mean_min:
            mean_min = cluster_mean
            cluster_mean_min = cluster_5
        else:
            pass
    if len(cluster_mean_min) == 0:
        return 'No required value'
    else:
        return df.loc[cluster_mean_min]
|
|
#!/usr/bin/env python
"""
Aerostat Registrar Unittests
"""
import os
import StringIO
import sys
import unittest
import mox
from aerostat import aerostat
from aerostat import registrar
class RegistrarTest(mox.MoxTestBase):
    """Tests for the aerostat registrar module.

    Python 2 / mox-style tests: collaborators (the builtin open(), the
    mongodb handle) are replaced with recorded mocks, ReplayAll() arms
    them, and MoxTestBase verifies every expectation on teardown.
    """
    def test_get_types_masterful(self):
        """test get_types function for masterful service_types."""
        fake_host_file = StringIO.StringIO('mongodb masterful')
        fake_results = ['mongodb', 'masterful']
        # Stub the builtin open so get_types reads our fake file.
        self.mox.StubOutWithMock(sys.modules['__builtin__'], 'open')
        sys.modules['__builtin__'].open('/etc/aerostat_info', 'r').AndReturn(
            fake_host_file)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertEqual(fake_registrar.get_types(), fake_results)
    def test_get_types_iterative(self):
        """test get_types function for iterative service_types."""
        fake_host_file = StringIO.StringIO('web')
        fake_results = ['web']
        self.mox.StubOutWithMock(sys.modules['__builtin__'], 'open')
        sys.modules['__builtin__'].open('/etc/aerostat_info', 'r').AndReturn(
            fake_host_file)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertEqual(fake_registrar.get_types(), fake_results)
    def test_get_types_aliases(self):
        """test get_types for when aliases are supplied."""
        fake_host_file = StringIO.StringIO('web iterative web-master web-slave')
        fake_results = ['web', 'iterative', 'web-master', 'web-slave']
        self.mox.StubOutWithMock(sys.modules['__builtin__'], 'open')
        sys.modules['__builtin__'].open('/etc/aerostat_info', 'r').AndReturn(
            fake_host_file)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertEqual(fake_registrar.get_types(), fake_results)
    def test_get_smallest_gap(self):
        """test get_smallest_gap function."""
        # cassandra-2 exists, so the smallest free slot is cassandra-1.
        expected_hostname = 'cassandra-1'
        fake_service = 'cassandra'
        fake_results = [
            {u'instance_id': u'',
             u'ip': u'10.212.127.1',
             u'_id': '4bd60012bcd9590caa000002',
             u'hostname': u'cassandra-2',
             u'server_type': u'cassandra'},
            {u'instance_id': u'',
             u'ip': u'10.212.127.34',
             u'_id': '4bd60012bcd9590caa000001',
             u'hostname': u'cassandra-1',
             u'server_type': u'cassandra'}]
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.find(
            {'instance_id': '', 'service': fake_service}).AndReturn(
            fake_results)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        test_hostname = fake_registrar.get_smallest_gap(fake_db, fake_service)
        self.assertEqual(test_hostname, expected_hostname)
    def test_hostname_instance_exists(self):
        """Test negative and postitive cases of inst/host combo existing."""
        test_hostname1 = 'mongodb-master'
        # First result has a non-empty instance_id (exists), the second
        # has an empty one (does not exist).
        fake_results1 = {
            u'instance_id': u'i-tester',
            u'ip': u'10.212.127.1',
            u'_id': '4bd60012bcd9590caa000000',
            u'hostname': u'mongodb-master',
            u'server_type': u'mongodb',
            u'aliases': ['mongodb-masta']}
        fake_results2 = {
            u'instance_id': u'',
            u'ip': u'10.212.127.1',
            u'_id': '4bd60012bcd9590caa000000',
            u'hostname': u'mongodb-master',
            u'server_type': u'mongodb',
            u'aliases': ['mongodb-masta']}
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.find_one = self.mox.CreateMockAnything()
        fake_db.servers.find_one({'hostname': test_hostname1}).AndReturn(
            fake_results1)
        fake_db.servers.find_one({'hostname': test_hostname1}).AndReturn(
            fake_results2)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertTrue(fake_registrar.hostname_instance_exists(
            fake_db, test_hostname1))
        self.assertFalse(fake_registrar.hostname_instance_exists(
            fake_db, test_hostname1))
    def test_alias_exists(self):
        """Test postitive and negative cases for aliases existing."""
        expected_good_output = ['cassandra-giga']
        test_aliases1 = ['cassandra-test']
        test_aliases2 = ['cassandra-mega', 'cassandra-giga']
        fake_results1 = []
        fake_results2 = [
            {u'instance_id': u'',
             u'ip': u'10.212.127.1',
             u'_id': '4bd60012bcd9590caa000000',
             u'hostname': u'cassandra-0',
             u'server_type': u'cassandra',
             u'aliases': ['cassandra-giga']}]
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.find({'aliases': {'$in' : test_aliases1}}).AndReturn(
            fake_results1)
        fake_db.servers.find({'aliases': {'$in': test_aliases2}}).AndReturn(
            fake_results2)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertFalse(fake_registrar.alias_exists(fake_db, test_aliases1))
        self.assertEqual(fake_registrar.alias_exists(
            fake_db, test_aliases2), expected_good_output)
    def test_change_hostname(self):
        """test change_hostname function."""
        fake_inst = 'fake_inst'
        fake_host = 'fake_host'
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.update(
            {'instance_id': fake_inst},
            {'$set':
                {'hostname': fake_host}}).AndReturn(None)
        fake_db.servers.update(
            {'hostname': fake_host},
            {'$set':
                {'hostname': fake_host}}).AndReturn(None)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        # Exactly one of inst/host must be given; both or neither fails.
        self.assertTrue(fake_registrar.change_hostname(
            fake_db, fake_host, inst=fake_inst))
        self.assertTrue(fake_registrar.change_hostname(
            fake_db, fake_host, host=fake_host))
        self.assertFalse(fake_registrar.change_hostname(
            fake_db, fake_host))
        self.assertFalse(fake_registrar.change_hostname(
            fake_db, fake_host, host=fake_host, inst=fake_inst))
    def test_change_master(self):
        """Test change_master function."""
        fake_service = 'testing'
        fake_service_type = 'masterful'
        test_inst1 = 'test-instance1'
        test_inst2 = 'test-instance2'
        master_inst = test_inst2
        master_hostname = 'testing-master'
        slave_hostname = 'tesing-slave-1'
        fake_db = self.mox.CreateMockAnything()
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(aerostat, 'check_master')
        aerostat.check_master(
            fake_db, fake_service, test_inst1).AndReturn(False)
        aerostat.check_master(
            fake_db, fake_service, test_inst2).AndReturn(True)
        self.mox.StubOutWithMock(aerostat, 'get_master')
        aerostat.get_master(fake_db, fake_service).AndReturn(
            master_inst)
        self.mox.StubOutWithMock(fake_registrar, 'change_hostname')
        fake_registrar.change_hostname(fake_db, '', inst=master_inst)
        fake_registrar.change_hostname(fake_db, '', inst=test_inst1)
        fake_registrar.change_hostname(fake_db, master_hostname,
                                       inst=test_inst1)
        fake_registrar.change_hostname(fake_db, slave_hostname,
                                       inst=master_inst)
        self.mox.StubOutWithMock(fake_registrar, 'pick_name')
        # This should only be called when replacing the master.
        fake_registrar.pick_name(fake_db, fake_service, fake_service_type,
                                 test_inst2).AndReturn(slave_hostname)
        self.mox.ReplayAll()
        self.assertTrue(fake_registrar.change_master(
            fake_db, fake_service, fake_service_type, test_inst1))
        self.assertFalse(fake_registrar.change_master(
            fake_db, fake_service, fake_service_type, test_inst2))
    def test_pick_name(self):
        """Test pick_name function under normal parameters."""
        expected_hostname = 'mongodb-slave-1'
        fake_service = 'mongodb'
        fake_service_type = 'masterful'
        fake_instance_id = 'i-test'
        fake_row = {
            'hostname': 'mongodb-master',
            'ip': '12.123.234.3',
            'service': 'mongodb',
            'service_type': 'masterful',
            'instance_id': 'i-d23lk3kjl'}
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        # I'm cheating here by using a list instead of an iterable obj.
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(aerostat, 'hostname_exists')
        self.mox.StubOutWithMock(fake_registrar, 'hostname_instance_exists')
        self.mox.StubOutWithMock(fake_registrar, 'check_dup')
        self.mox.StubOutWithMock(fake_registrar, 'get_smallest_gap')
        aerostat.hostname_exists(
            fake_db, 'mongodb-master').AndReturn(True)
        fake_registrar.hostname_instance_exists(
            fake_db, 'mongodb-master').AndReturn(True)
        fake_registrar.check_dup(fake_db, fake_instance_id).AndReturn(False)
        fake_registrar.get_smallest_gap(fake_db, fake_service).AndReturn('mongodb-slave-1')
        # I'm cheating here by using a list instead of an iterable obj.
        fake_db.servers.find(
            {'service': fake_service}).AndReturn([fake_row])
        self.mox.ReplayAll()
        test_hostname = fake_registrar.pick_name(fake_db, fake_service,
                                                 fake_service_type, fake_instance_id)
        self.assertEqual(test_hostname, expected_hostname)
    def test_pick_name_duplicate_inst(self):
        """test pick_name function when there is a duplicate."""
        expected_hostname = None
        fake_service = 'mongodb'
        fake_service_type = 'masterful'
        fake_instance_id = 'i-test'
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(aerostat, 'get_hostname')
        aerostat.get_hostname(fake_db, fake_instance_id).AndReturn(
            'mongodb-master')
        self.mox.StubOutWithMock(fake_registrar, 'check_dup')
        fake_registrar.check_dup(fake_db, fake_instance_id).AndReturn(True)
        self.mox.ReplayAll()
        test_hostname = fake_registrar.pick_name(
            fake_db, fake_service,
            fake_service_type, fake_instance_id)
        self.assertEqual(test_hostname, expected_hostname)
    def test_pick_name_duplicate_inst_no_host(self):
        """test pick_name when there is a duplicate, but no hostname."""
        # This tests the case where a master name swap is underway.
        # the <service>-master hostname is removed from the former instance.
        # then added to the new master. Meanwhile, the old master needs
        # to have a name picked. So we ignore the fact that its instance_id
        # is already in the database as long as it has no hostname.
        # Replace fallen master.
        expected_hostname1 = 'mongodb-master'
        # You were master, replace gap in slave names.
        expected_hostname2 = 'mongodb-slave-1'
        fake_service = 'mongodb'
        fake_service_type = 'masterful'
        fake_instance_id1 = 'i-test'
        fake_instance_id2 = 'i-test2'
        # Missing Master
        fake_row1 = {'hostname': '', 'ip': '12.123.234.3',
                     'service': 'mongodb', 'service_type': 'masterful',
                     'instance_id': 'i-test'}
        # Missing Slave
        fake_row2 = [
            fake_row1,
            {'hostname': 'mongodb-master', 'ip': '12.1.1.2',
             'service': 'mongodb', 'service_type': 'masterful',
             'instance_id': 'i-test2'}]
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.find(
            {'service': fake_service}).AndReturn([fake_row1])
        fake_db.servers.find(
            {'service': fake_service}).AndReturn(fake_row2)
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(aerostat, 'get_hostname')
        aerostat.get_hostname(fake_db, fake_instance_id1).AndReturn(
            '')
        aerostat.get_hostname(fake_db, fake_instance_id2).AndReturn(
            '')
        self.mox.StubOutWithMock(fake_registrar, 'check_dup')
        fake_registrar.check_dup(fake_db, fake_instance_id1).AndReturn(True)
        fake_registrar.check_dup(fake_db, fake_instance_id2).AndReturn(True)
        # Called for Slave.
        self.mox.StubOutWithMock(aerostat, 'hostname_exists')
        aerostat.hostname_exists(
            fake_db, 'mongodb-master').AndReturn(True)
        self.mox.StubOutWithMock(fake_registrar, 'hostname_instance_exists')
        fake_registrar.hostname_instance_exists(
            fake_db, 'mongodb-master').AndReturn(True)
        self.mox.StubOutWithMock(fake_registrar, 'get_smallest_gap')
        fake_registrar.get_smallest_gap(fake_db, fake_service).AndReturn(expected_hostname2)
        self.mox.ReplayAll()
        test_hostname1 = fake_registrar.pick_name(
            fake_db, fake_service,
            fake_service_type, fake_instance_id1)
        test_hostname2 = fake_registrar.pick_name(
            fake_db, fake_service,
            fake_service_type, fake_instance_id2)
        self.assertEqual(test_hostname1, expected_hostname1)
        self.assertEqual(test_hostname2, expected_hostname2)
    def test_reset_conflict_aliases(self):
        """test resetting conflict aliases on mongodb."""
        fake_conflicts = ['test']
        fake_row = {
            'hostname': 'mongodb-slave-1',
            'ip': '12.123.234.5',
            'service': 'mongodb',
            'service_type': 'masterful',
            'instance_id': 'i-23426',
            'aliases': ['test', 'not_test']}
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.find({'aliases' : { '$in' : ['test']}}).AndReturn(
            [fake_row])
        # The conflicting alias 'test' is stripped; 'not_test' survives.
        fake_db.servers.update({'instance_id': 'i-23426'},
                               {'$set': {'aliases': ['not_test']}})
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertTrue(
            fake_registrar.reset_conflict_aliases(fake_db, fake_conflicts))
    def test_register_name(self):
        """Test registration of new hostnames."""
        fake_hostname = 'mongodb-slave-1'
        fake_ip = '12.123.234.5'
        fake_instance_id = 'i-23426'
        fake_service = 'mongodb'
        fake_service_type = 'masterful'
        fake_aliases = []
        fake_row = {
            'hostname': 'mongodb-slave-1',
            'ip': '12.123.234.5',
            'service': 'mongodb',
            'service_type': 'masterful',
            'instance_id': 'i-23426',
            'aliases': []}
        fake_hostname_exists = False
        fake_db = self.mox.CreateMockAnything()
        fake_db.servers = self.mox.CreateMockAnything()
        fake_db.servers.insert(fake_row).AndReturn(None)
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(aerostat, 'hostname_exists')
        aerostat.hostname_exists(fake_db, fake_hostname).AndReturn(
            fake_hostname_exists)
        self.mox.ReplayAll()
        test_value = fake_registrar.register_name(
            fake_db, fake_hostname, fake_ip,
            fake_instance_id, fake_service, fake_service_type, fake_aliases)
        self.assertTrue(test_value)
    def test_set_sys_hostname(self):
        """test set_sys_hostname."""
        self.mox.StubOutWithMock(os, 'system')
        os.system('/bin/hostname mongodb-slave-1').AndReturn(0)
        self.mox.StubOutWithMock(os, 'remove')
        os.remove('/etc/hostname').AndReturn(0)
        fake_hostname_file = StringIO.StringIO()
        self.mox.StubOutWithMock(sys.modules['__builtin__'], 'open')
        sys.modules['__builtin__'].open('/etc/hostname', 'w').AndReturn(
            fake_hostname_file)
        self.mox.ReplayAll()
        fake_registrar = registrar.Registrar()
        self.assertTrue(fake_registrar.set_sys_hostname('mongodb-slave-1'))
    def test_parse_service_info(self):
        """test parse_service_info function."""
        # (service, service_type, aliases-or-None) for the three shapes
        # an /etc/aerostat_info line may take.
        expected_first = ('web', 'iterative', None)
        expected_second = ('web', 'masterful', None)
        expected_third = ('web', 'masterful', ['webby-prime', 'webby-mega'])
        fake_registrar = registrar.Registrar()
        self.mox.StubOutWithMock(fake_registrar, 'get_types')
        fake_types_simple = ['web']
        fake_types_adv = ['web', 'masterful']
        fake_types_complex = ['web', 'masterful', 'webby-prime', 'webby-mega']
        fake_registrar.get_types().AndReturn(fake_types_simple)
        fake_registrar.get_types().AndReturn(fake_types_adv)
        fake_registrar.get_types().AndReturn(fake_types_complex)
        self.mox.ReplayAll()
        self.assertEqual(
            fake_registrar.parse_service_info(), expected_first)
        self.assertEqual(
            fake_registrar.parse_service_info(), expected_second)
        self.assertEqual(
            fake_registrar.parse_service_info(), expected_third)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to schedulers.
"""
import novaclient
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from eventlet import greenpool
FLAGS = flags.FLAGS
flags.DEFINE_bool('enable_zone_routing',
False,
'When True, routing to child zones will occur.')
LOG = logging.getLogger('nova.scheduler.api')
def _call_scheduler(method, context, params=None):
    """Invoke *method* on the scheduler service over RPC.

    :param params: Optional dictionary of arguments to be passed to the
        scheduler worker
    :retval: Result returned by scheduler worker
    """
    payload = {'method': method, 'args': params or {}}
    return rpc.call(context, FLAGS.scheduler_topic, payload)
def get_zone_list(context):
    """Return the list of zones associated with this zone.

    Falls back to the local database when the scheduler reports none.
    """
    zones = _call_scheduler('get_zone_list', context)
    for zone in zones:
        # Un-escape JSON-escaped forward slashes in the stored URL.
        zone['api_url'] = zone['api_url'].replace('\\/', '/')
    return zones if zones else db.zone_get_all(context)
def zone_get(context, zone_id):
    """Fetch one zone record by id (thin passthrough to the db layer)."""
    return db.zone_get(context, zone_id)
def zone_delete(context, zone_id):
    """Delete the zone record with the given id."""
    return db.zone_delete(context, zone_id)
def zone_create(context, data):
    """Create a new zone record from *data*."""
    return db.zone_create(context, data)
def zone_update(context, zone_id, data):
    """Apply *data* to an existing zone record."""
    return db.zone_update(context, zone_id, data)
def get_zone_capabilities(context):
    """Return a dict of key, value capabilities for this zone."""
    return _call_scheduler('get_zone_capabilities', context=context)


def select(context, specs=None):
    """Ask the scheduler for a list of candidate hosts matching *specs*."""
    return _call_scheduler('select', context=context,
                           params={"specs": specs})


def update_service_capabilities(context, service_name, host, capabilities):
    """Fan out a capability update to every scheduler service."""
    message = {'method': 'update_service_capabilities',
               'args': {'service_name': service_name, 'host': host,
                        'capabilities': capabilities}}
    return rpc.fanout_cast(context, 'scheduler', message)
def _wrap_method(function, self):
"""Wrap method to supply self."""
def _wrap(*args, **kwargs):
return function(self, *args, **kwargs)
return _wrap
def _process(func, zone):
    """Worker stub for the green-thread pool: hand *func* an
    authenticated nova client together with the zone record."""
    client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
    client.authenticate()
    return func(client, zone)
def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
    """Invoke *method* on every child zone in parallel.

    :param errors_to_ignore: exception type or list of types that are
        swallowed (the zone's result becomes None).
    :returns: a list of (zone.id, call_result) tuples; zones that fail
        authentication are skipped, and unignored exceptions are
        reported as an ("ERROR", message) tuple.
    """
    if not isinstance(errors_to_ignore, (list, tuple)):
        # This will also handle the default None
        errors_to_ignore = [errors_to_ignore]

    def _make_error_trap(zone_method):
        # BUGFIX: bind zone_method per zone. The green threads spawned
        # below may not run until waitall(), so a closure over the loop
        # variable would see only the LAST zone's method (late binding).
        def _error_trap(*args, **kwargs):
            try:
                return zone_method(*args, **kwargs)
            except Exception as e:
                if type(e) in errors_to_ignore:
                    return None
                # TODO (dabo) - want to be able to re-raise here.
                # Returning a string now; raising was causing issues.
                # raise e
                return "ERROR", "%s" % e
        return _error_trap

    pool = greenpool.GreenPool()
    results = []
    for zone in db.zone_get_all(context):
        try:
            nova = novaclient.OpenStack(zone.username, zone.password,
                                        zone.api_url)
            nova.authenticate()
        except novaclient.exceptions.BadRequest as e:
            url = zone.api_url
            LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
                     % locals())
            #TODO (dabo) - add logic for failure counts per zone,
            # with escalation after a given number of failures.
            continue
        res = pool.spawn(_make_error_trap(getattr(nova.zones, method)),
                         *args, **kwargs)
        results.append((zone, res))
    pool.waitall()
    return [(zone.id, res.wait()) for zone, res in results]
def child_zone_helper(zone_list, func):
    """Fire off a command to each zone in the list.

    The return is [novaclient return objects] from each child zone.
    For example, if you are calling server.pause(), the list will
    be whatever the response from server.pause() is. One entry
    per child zone called.
    """
    pool = greenpool.GreenPool()
    return list(pool.imap(_wrap_method(_process, func), zone_list))
def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
    """Use novaclient to issue command to a single child zone.
    One of these will be run in parallel for each child zone."""
    manager = getattr(nova, collection)
    result = None
    try:
        try:
            # Numeric ids are fetched directly; a non-numeric id raises
            # ValueError from int() and falls back to a lookup by name.
            result = manager.get(int(item_id))
        except ValueError, e:
            result = manager.find(name=item_id)
    except novaclient.NotFound:
        url = zone.api_url
        LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
                    locals()))
        return None
    # For anything other than a plain lookup, invoke the named method on
    # the fetched resource (e.g. result.pause()).
    if method_name.lower() not in ['get', 'find']:
        result = getattr(result, method_name)()
    return result
def wrap_novaclient_function(f, collection, method_name, item_id):
    """Partially apply collection/method_name/item_id, leaving a
    two-argument callable that child_zone_helper can invoke as
    f(nova, zone)."""
    def _partial(nova, zone):
        return f(nova, zone, collection, method_name, item_id)
    return _partial
class RedirectResult(exception.Error):
    """Used to let the HTTP API know that these results are pre-cooked
    and they can be returned to the caller directly."""
    def __init__(self, results):
        # Pre-cooked child-zone responses the API layer should return
        # verbatim (see redirect_handler).
        self.results = results
        super(RedirectResult, self).__init__(
            message=_("Uncaught Zone redirection exception"))
class reroute_compute(object):
    """Decorator used to indicate that the method should
    delegate the call the child zones if the db query
    can't find anything."""
    def __init__(self, method_name):
        # Name of the novaclient method to run in each child zone when
        # the local lookup fails.
        self.method_name = method_name
    def __call__(self, f):
        def wrapped_f(*args, **kwargs):
            collection, context, item_id = \
                self.get_collection_context_and_id(args, kwargs)
            try:
                # Call the original function ...
                return f(*args, **kwargs)
            except exception.InstanceNotFound, e:
                LOG.debug(_("Instance %(item_id)s not found "
                            "locally: '%(e)s'" % locals()))
                if not FLAGS.enable_zone_routing:
                    raise
                zones = db.zone_get_all(context)
                if not zones:
                    raise
                # Ask the children to provide an answer ...
                LOG.debug(_("Asking child zones ..."))
                result = self._call_child_zones(zones,
                    wrap_novaclient_function(_issue_novaclient_command,
                        collection, self.method_name, item_id))
                # Scrub the results and raise another exception
                # so the API layers can bail out gracefully ...
                raise RedirectResult(self.unmarshall_result(result))
        return wrapped_f
    def _call_child_zones(self, zones, function):
        """Ask the child zones to perform this operation.
        Broken out for testing."""
        return child_zone_helper(zones, function)
    def get_collection_context_and_id(self, args, kwargs):
        """Returns a tuple of (novaclient collection name, security
        context and resource id. Derived class should override this."""
        context = kwargs.get('context', None)
        instance_id = kwargs.get('instance_id', None)
        # NOTE(review): these guards look off by one -- args[1] is read
        # behind a len(args) > 0 check and args[2] behind len(args) > 1,
        # which IndexErrors when exactly one/two positionals are passed.
        # Presumably args[0] is self of the decorated method; confirm
        # with callers before changing.
        if len(args) > 0 and not context:
            context = args[1]
        if len(args) > 1 and not instance_id:
            instance_id = args[2]
        return ("servers", context, instance_id)
    def unmarshall_result(self, zone_responses):
        """Result is a list of responses from each child zone.
        Each decorator derivation is responsible to turning this
        into a format expected by the calling method. For
        example, this one is expected to return a single Server
        dict {'server':{k:v}}. Others may return a list of them, like
        {'servers':[{k,v}]}"""
        reduced_response = []
        for zone_response in zone_responses:
            if not zone_response:
                continue
            server = zone_response.__dict__
            # Strip private attributes and the client's manager backref
            # before handing the dict to the API layer.
            for k in server.keys():
                if k[0] == '_' or k == 'manager':
                    del server[k]
            reduced_response.append(dict(server=server))
        if reduced_response:
            return reduced_response[0]  # first for now.
        return {}
def redirect_handler(f):
    """Decorator that converts a RedirectResult raised by *f* into a
    normal return value (the pre-cooked child-zone results)."""
    def new_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except RedirectResult, e:
            return e.results
    return new_f
|
|
##Built-In Libraries##
import time
import csv
import string
##Third-Party Libraries##
import numpy as np
from PIL import Image,ImageTk
import wolframalpha
##Other TP Files##
import mnist_training as mnist #See File for Citations
import unpackData as unpack #See File for Citations
class Network(object):
    """Fully-connected feed-forward neural network trained with
    mini-batch gradient descent (MBGD) and backpropagation.

    Layer sizes are given at construction; weights and biases are held
    per layer as numpy arrays (weights[k] maps layer k to layer k+1).
    """
    def __init__(self, other):
        #other is a list containing the number of neurons per layer.
        #A net with three inputs, a two-node hidden layer, and one
        #output would be represented as [3,2,1]
        self.numLayers = len(other)
        self.netSize = other
        #learning rate is the step size for gradient descent.
        #The ideal learning rate varies by net.
        self.learningRate = 0.3
        self.count = 0  #count iterations for auto_stop
        #2D list of random, normally distributed values which represent
        #the initial bias values for each node; one (n,1) column per
        #non-input layer. The /32 keeps the start values small.
        self.biases = [np.random.randn(i, 1)/32
                       for i in self.netSize[1:]]
        #3D list of weights associated with each neuron:
        #weights[k] has shape (layer k+1 size, layer k size).
        self.weights = [np.random.randn(b, a)/32
                        for (a, b) in zip(self.netSize[:-1], self.netSize[1:])]
    def save(self, title):
        """Persist weights and biases to .txt files with np.save.

        BUGFIX: the original opened both files and never closed them,
        leaking handles and risking unflushed data; 'with' guarantees
        they are closed.
        """
        with open("WeightsMNIST"+str(title)+".txt", "wb+") as f:
            np.save(f, self.weights)
        with open("BiasesMNIST"+str(title)+".txt", "wb+") as g:
            np.save(g, self.biases)
        print ("Saved!")
    def feedforward(self, a):
        """Propagate input column vector *a* through every layer.

        a is the input matrix of size (n,1) where n is the number of
        neurons in the first layer; returns the output layer activation.
        """
        for index in range(len(self.netSize)-1):
            #np.dot performs matrix multiplication in 2D and regular dot
            #product in 1D assuming the dimensions are correct
            nextLayer = np.dot(self.weights[index], a)
            #nextLayer is the weighted sum of all the previous inputs
            #arranged as a vector based on how many neurons are in the
            #next row; sigmoid adds the bias and squashes elementwise,
            #preserving the shape.
            a = sigmoid(nextLayer+self.biases[index])
        return a
    def MBGD(self, trainX, trainY, batchsize=1, test_x=None, test_y=None):
        """Run one epoch of mini-batch gradient descent.

        When test data is supplied, accuracy is measured before and
        after the epoch; if it did not improve, count is bumped and the
        learning rate halved (simple annealing / auto-stop heuristic).
        """
        if test_x is not None:  #check if test data is provided
            before = testAccuracy(self, test_x, test_y)
        combinedData = list(zip(trainX, trainY))  #combine x,y for shuffling
        np.random.shuffle(combinedData)  #randomly shuffle it
        shuffled = list(zip(*combinedData))  #unzip/separate it
        for batch in range(len(trainX)//batchsize):  #goes through batches
            start = batch*batchsize  #start of batch interval
            end = start+batchsize  #end of batch interval (end-start=batchsize)
            #gradient accumulators, zero vectors with the same shapes as
            #self.weights / self.biases
            updateW = [np.zeros(w.shape) for w in self.weights]
            updateB = [np.zeros(b.shape) for b in self.biases]
            for index in range(start, end):  #loop through individual batch
                x = shuffled[0][index]
                y = shuffled[1][index]
                #gradients of the cost function with the same shapes as
                #the parameters, from the backpropagation algorithm
                gradB, gradW = self.backprop(x, y)
                updateW = [uw+gw for uw, gw in zip(updateW, gradW)]
                updateB = [ub+gb for ub, gb in zip(updateB, gradB)]
            for index in range(len(self.weights)):  #update Weights
                update = (self.learningRate)*updateW[index]/batchsize
                self.weights[index] = self.weights[index]-update
            for index in range(len(self.biases)):  #update Biases
                self.biases[index] = (self.biases[index] -
                    (self.learningRate*updateB[index]/batchsize))
        if test_x is not None:  #check if test data is provided
            after = testAccuracy(self, test_x, test_y)
            if after-before <= 0:  #if accuracy has gone down
                self.count += 1
                self.learningRate /= 2  #lower the learning rate
    ###NOT MY CODE#### (backprop) #taken from Neural Networks and Deep Learning
    #book online by Michael Nielsen
    def backprop(self, x, y):
        """Return (nabla_b, nabla_w): per-layer cost gradients for one
        (x, y) training pair."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward, remembering every weighted input and activation
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass: output-layer error first ...
        delta = costDerivative(activations[-1], y) * \
            sigmoidPrime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # ... then propagate it back through the hidden layers.
        for l in range(2, self.numLayers):
            z = zs[-l]
            sp = sigmoidPrime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)
    ###^^^^^^Not My Code^^^^^^### (backprop written by Michael Nielsen)
    def setNet(self):
        """Overwrite parameters with small fixed values (debug helper
        for a [2,3,1]-shaped net)."""
        self.weights = [np.array([[1,2],[3,2],[1,2]]),np.array([[3,2,1]])]
        self.biases = [np.array([[1],[1],[1]]),np.array([[1]])]
###Math Functions###
def cost(actual,ideal):
    """Half mean squared error between network output and target.

    Both arguments are (n,1) column vectors (nested sequences).  Returns
    sum((a - i)^2) / (2n); the factor of 2 makes the derivative simply
    (actual - ideal), as computed by costDerivative.
    """
    n = len(actual)  # WLOG actual; actual and ideal have equal length
    return sum((a[0] - i[0]) ** 2 for a, i in zip(actual, ideal)) / (2 * n)
def costDerivative(actual,ideal):
    """Gradient of the halved MSE cost w.r.t. the output: actual - ideal."""
    difference = actual - ideal
    return difference
from scipy.special import expit #expit is a built-in sigmoid function with high
#floating point arithmetic accuracy
def sigmoid(z):
    """Elementwise logistic sigmoid 1/(1+exp(-z)), via scipy's expit."""
    return expit(z)
def sigmoidPrime(z):
    """Derivative of the sigmoid: s*(1-s).

    Computes sigmoid(z) once instead of twice as the original did.
    """
    s = sigmoid(z)
    return s * (1 - s)
###TESTING FUNCTIONS###
def testAccuracy(net,trainX,trainY):
n = len(trainY)
seen = []
count = 0
for index in range(len(trainX)):
output = np.argmax(net.feedforward(np.reshape(trainX[index],(1024,1)))) #np.reshape for non-mnist
#output = np.argmax(net.feedforward(trainX[index]))
expected = np.argmax(trainY[index])
#print ("Output:",output,"expected:",expected)
count += (output==expected)
if output==expected:
if expected not in seen:
seen.append(expected)
return count/n
def trainNet(netSize,trainX,trainY,epochs,testx=1,testy=1):
    """Create a Network with the given layer sizes and train it.

    testx/testy default to the int 1 as a "no separate test set"
    sentinel, hence the isinstance(testx, int) checks below.
    """
    np.seterr(all="raise") # surface numpy numeric errors immediately
    net=Network(netSize)
    for i in range(epochs):
        if net.count==10:
            # NOTE(review): fires only while count is exactly 10; it just
            # saves and prints, training continues -- confirm intent
            net.save(net.count-10)
            print ("accuracy has gone down and up 10 times")
        a=time.time()
        currentAccuracy = testAccuracy(net,trainX,trainY)
        if not isinstance(testx,int):
            otherAccuracy = testAccuracy(net,testx,testy)
        print("Accuracy before run", i, ":", currentAccuracy)
        if not isinstance(testx,int):
            print("Accuracy of Test Data:",otherAccuracy)
        # NOTE(review): the training set is passed as MBGD's test data
        # even when testx/testy were supplied -- confirm this is intended
        net.MBGD(trainX,trainY,100,trainX,trainY)
        print ("Time after run",i,":",(time.time()-a))
    return net
def testSmallDataset(): #tests on small batch (1500) saved to txt file
    """Train on the small saved dataset.

    Loads the data file once (the original called loadTestingData twice,
    reading the file from disk each time).
    """
    netsize = [1024,500,369]
    raw_x, trainy = loadTestingData()
    trainx = 1 - (raw_x / 255)
    net = trainNet(netsize,trainx,trainy,100)
    #print (net.feedforward(np.reshape(trainx[5],(1024,1))))
    print ("Done!")
def testReal():
    """Train a fresh HASY net for 10 epochs, save it, report accuracy."""
    train_x = TRAIN_X()
    train_y = TRAIN_Y()
    test_x = TEST_X()
    test_y = TEST_Y()
    layers = [1024,200,94]
    net = trainNet(layers, train_x, train_y, 10, test_x, test_y)
    final = testAccuracy(net, test_x, test_y)
    net.save("hasy")
    return ("Accuracy With Test Data: " + str(final))
def testExistingHasy():
    """Continue training the saved HASY net for 5 epochs and report."""
    train_x, train_y = TRAIN_X(), TRAIN_Y()
    test_x, test_y = TEST_X(), TEST_Y()
    net = createNet()
    for epoch in range(5):
        print ("accuracy before run", str(epoch), str(testAccuracy(net, test_x, test_y)))
        net.MBGD(train_x, train_y, batchsize=100, test_x=test_x, test_y=test_y)
    final = testAccuracy(net, test_x, test_y)
    net.save("hasy")
    print ("Accuracy with Test Data: " + str(final))
def trainExistingMnist():
    """Resume training the saved MNIST net for 5 epochs, then save it.

    Loads the MNIST wrapper once; the original called
    mnist.load_data_wrapper() four times, re-reading the dataset each time.
    """
    net = createMNIST()
    data = mnist.load_data_wrapper()
    train_x = scaletestData(data[0])
    train_y = data[1]
    test_x = scaletestData(data[2])
    test_y = data[3]
    for i in range(5):
        net.MBGD(train_x,train_y,batchsize=100,test_x=test_x,test_y=test_y)
        print ("accuracy after run",i,testAccuracy(net,test_x,test_y))
    net.save('JUSTDIDTHISTONIGHT')
    print ("saved and done")
#for HASY
def loadWeightsAndBiases():
    """Load the saved HASY network parameters (weights, biases) from disk."""
    return (list(np.load("WeightsMNISThasy.txt")),
            list(np.load("BiasesMNISThasy.txt")))
#for HASY
def createNet():
    """Build a 1024-200-94 HASY network from the saved parameter files.

    Calls loadWeightsAndBiases once; the original called it twice,
    reading both files from disk twice.
    """
    net = Network([1024,200,94])
    weights, biases = loadWeightsAndBiases()
    net.weights = weights
    net.biases = biases
    return net
#for MNIST
def loadMnist():
    """Load the saved MNIST network parameters (weights, biases) from disk."""
    return (list(np.load("WeightsMNISTJUSTDIDTHISTONIGHT.txt")),
            list(np.load("BiasesMNISTJUSTDIDTHISTONIGHT.txt")))
#for MNIST
def createMNIST():
    """Build a 784-100-10 MNIST network from the saved parameter files.

    Calls loadMnist once; the original called it twice, reading both
    files from disk twice.
    """
    net = Network([784,100,10])
    weights, biases = loadMnist()
    net.weights = weights
    net.biases = biases
    return net
def savePic(datax,datay,index):
    """Print the label index of sample `index`.

    NOTE(review): despite the name, this never saves an image; the pixel
    array was fetched into an unused local in the original, which is
    removed here.  `datax` is kept in the signature for compatibility.
    """
    label = np.argmax(datay[index])
    print ("index:", label)
    print ("done!")
def mnistTest(epochs):
    """Train a fresh 784-100-10 MNIST net for `epochs` epochs and save it.

    Loads the MNIST wrapper once; the original called
    mnist.load_data_wrapper() four times, re-reading the dataset each time.
    """
    netsize = [784, 100, 10]
    data = mnist.load_data_wrapper()
    train_x = scaletestData(data[0])
    train_y = data[1]
    test_x = scaletestData(data[2])
    test_y = data[3]
    net = trainNet(netsize,train_x,train_y,epochs,test_x,test_y)
    net.save('JUSTDIDTHISTONIGHT')
    print ("saved and done!")
###CREATE AND LOAD TESTING DATA###
def TRAIN_X():
    """Scaled HASY training images.

    The leftover debug print of the first sample is removed (TEST_X, the
    parallel accessor, never printed).
    """
    return scaletestData(unpack.loadData()[0][0])
#Accessors for the full HASY dataset (re-loaded via unpack on every call)
def TRAIN_Y():
    return unpack.loadData()[0][1]
def TEST_X():
    return scaletestData(unpack.loadData()[1][0])
def TEST_Y():
    return unpack.loadData()[1][1]
#Accessors for the 90/10 split built from the saved test set
def TRN_X():
    return setUpTestData()[0]
def TRN_Y():
    return setUpTestData()[1]
def TST_X():
    return setUpTestData()[2]
def TST_Y():
    return setUpTestData()[3]
def saveData():
    """Write the debugging arrays to text files.

    NOTE(review): DEBUG_X and DEBUG_Y are module globals that are not
    defined in this section of the file -- confirm they exist before use.
    """
    np.savetxt("debuggingX",DEBUG_X)
    np.savetxt("debuggingY",DEBUG_Y)
    print ("saved files")
def setUpTestData():
    """Split the saved test set into a 90/10 train/held-out split.

    Every 10th sample (i % 10 == 0) is held out; the rest become the
    training portion.  Returns (train_x, train_y, test_x, test_y).
    """
    xs = TEST_X()
    ys = TEST_Y()
    trn_x, trn_y, tst_x, tst_y = [], [], [], []
    for i in range(len(xs)):
        bucket_x, bucket_y = (tst_x, tst_y) if i % 10 == 0 else (trn_x, trn_y)
        bucket_x.append(xs[i])
        bucket_y.append(ys[i])
    return trn_x, trn_y, tst_x, tst_y
def loadTestingData():
    """Load the small debugging dataset saved by saveData()."""
    return np.loadtxt("debuggingX"), np.loadtxt("debuggingY")
def scaletestData(x):
    """Binarize and invert images in place: nonzero pixels -> 0, zeros -> 1.

    Returns the same (mutated) container.  Raises if the result is mostly
    ones, which would mean the inversion assumption is wrong.
    """
    flipped_on, flipped_off = 0, 0  # pixels set to 1 / set to 0
    for img in range(len(x)):
        for px in range(len(x[img])):
            if x[img][px] != 0:
                x[img][px] = 0
                flipped_off += 1
            else:
                x[img][px] = 1
                flipped_on += 1
    if flipped_on > flipped_off:
        raise Exception("Check your scaling, most of the image is 1's")
    return x
###IMPORT AND IDENTIFY IMAGES###
def latexCommand(index):
    """Return the LaTeX command stored on row index+1 of the symbols CSV.

    Returns None (after a diagnostic print) when the row is missing.
    The file is opened with `with` so the handle is closed even on the
    early return; the original leaked it.
    """
    line = index + 1
    with open("C:/Users/Joe/Documents/S17/15-112/Term Project/HASYv2/symbols3.csv",'r') as file:
        reader = csv.reader(file)
        for i, row in enumerate(reader):
            if i == line:
                return row[1]
    # replaced an unhelpful (and profane) diagnostic with a clear one
    print ("latexCommand: no row for symbol index", index)
    return None
def formatURL(input,data):
    """URL-encode a query for the Wolfram Alpha simple API.

    Literal '+' becomes %2B, spaces become '+', a '?' (%3F) is appended,
    then the desired image width derived from the window geometry.
    """
    query = str(input).strip().replace("+", "%2B").replace(" ", "+")
    width = data.width // 2 - data.margin
    return query + "%3F" + "&width=" + str(width)
#Wolfrom Alpha API Key obtained from wolframalpha.com
#Used the Wolfram Alpha module which can be pip installed via 'wolframalpha'
import urllib
def wolframAlpha(input,data):
    """Query the Wolfram Alpha simple API and save the result image.

    On success saves wolframTemp.gif and returns 1; on any failure
    returns None.  NOTE(review): the API key is hard-coded both in appID
    (unused) and inside the URL string below.
    """
    appID = "4YUQ4H-EUKJ63VXG2"
    url = 'http://api.wolframalpha.com/v1/simple?appid=4YUQ4H-EUKJ63VXG2&i='
    query = formatURL(input,data)
    url+=query
    #Following syntax loosely taken from
    # "http://stackoverflow.com/questions/40911170/ \n
    # python-how-to-read-an-image-from-a-url
    try:
        image = Image.open(urllib.request.urlopen(url))
        image.save("wolframTemp.gif","gif")
        return 1 #to differentiate between returning None
    except: # NOTE(review): bare except swallows everything, not just network errors
        print ("Connect to the internet or enter a valid query")
        return None
###GRAPHICS AND GUI###
# mouseEventsDemo.py
# TAKEN AND MODIFIED FROM 15-112 WEBSITE #
from tkinter import (Tk,ALL,PhotoImage,Canvas,simpledialog,messagebox,
Frame,Label,Entry,NW,CENTER)
###BUTTONS AND WINDOWS###
class Window1(simpledialog.Dialog): #taken from the 15-112 website
    """Modal dialog asking the user for the correct symbol name."""
    def body(self, master):
        # build the widgets; returning the Entry gives it initial focus
        self.modalResult = None
        Label(master, text="Correct Symbol \n \
            (press OK without entering anything \n \
            if the digit is already correct):").grid(row=0)
        self.e1 = Entry(master)
        self.e1.grid(row=0, column=1)
        return self.e1 # initial focus
    def apply(self):
        # called on OK: stash the typed text for the caller
        first = self.e1.get()
        self.modalResult = (first)
def showDialog(data): #taken from the 15-112 website
    """Show the correction dialog modally; return the entered text."""
    dialog = Window1(data.root)
    return dialog.modalResult
class Button(object):
    """A rectangular canvas button centred at (x0, y0).

    `x`/`y` are the full width and height; `dims` holds the bounding box
    as (left, top, right, bottom).
    """
    def __init__(self,x0,y0,x,y,fgcolor,bgcolor,data,text,textcolor="black"):
        self.x = x0
        self.y = y0
        half_w, half_h = x // 2, y // 2
        self.dims = (x0 - half_w, y0 - half_h, x0 + half_w, y0 + half_h)
        self.normalColor = fgcolor
        self.clickedColor = bgcolor
        self.textColor = textcolor
        self.text = text
        self.data = data
    def drawNormal(self,canvas):
        """Draw the button in its idle colour with the styled label."""
        canvas.create_rectangle(self.dims, fill=self.normalColor)
        canvas.create_text(self.x, self.y, text=self.text, fill=self.textColor,
                           font=("Bradley Hand ITC",14,"bold"), justify=CENTER)
    def drawClicked(self,canvas):
        """Draw the button in its pressed colour (plain label)."""
        canvas.create_rectangle(self.dims, fill=self.clickedColor)
        canvas.create_text(self.x, self.y, text=self.text, fill=self.textColor)
    def inBoundaries(self,x,y):
        """Return True when (x, y) lies inside the bounding box (inclusive)."""
        left, top, right, bottom = self.dims
        return left <= x <= right and top <= y <= bottom
    def drawImage(self,canvas):
        """Draw the eraser/chalk toggle image at the button centre."""
        canvas.create_image(self.x, self.y, image=self.data.eraserImage)
###MODEL###
def startButton(data):
    """Leave the splash screen and enter drawing mode."""
    data.splash, data.draw, data.erase = False, True, False
def classifyButton(data):
    """Classify the drawn symbols, then rebuild the printable string."""
    classifySymbols(data)
    convertClassification(data)
def clearButton(data):
    """Discard all drawn strokes and any classification results."""
    data.symbol, data.classification, data.characters = set(), [], []
def correctButton(data):
    """Let the user correct each classified symbol and learn from it.

    For every detected symbol a modal dialog is shown; an empty answer
    keeps the current classification, otherwise the answer replaces it
    and the matching network is trained on the corrected sample.
    """
    train_x,train_y=[],[]
    findBoundaries(data)
    for (symbol,i) in zip(data.characters,list(range(len(data.characters)))):
        (left, top, right, bottom) = symbol
        mnist = resizeImagetoSquare(left, top, right, bottom, data)[0]
        hasy = resizeImagetoSquare(left, top, right, bottom, data)[1]
        classification = data.classification[i]
        data.highlight = classification # shown on screen next to the dialog
        try:
            correct = str(showDialog(data))
            data.highlight = ""
            # NOTE(review): `correct == "" or None` parses as
            # (correct == "") or None, so the None arm is dead; confirm
            # whether the literal string "None" should also be skipped
            if correct == "" or None:
                continue
            data.classification[i] = correct
            onlineLearning(data,hasy,mnist,correct)
        except: # dialog cancelled or learning failed: move to next symbol
            continue
    convertClassification(data)
def solveButton(data):
    """Send the classified expression to Wolfram Alpha and show the result.

    The original queried the API twice (once as a probe, once "for
    real"); a single call both saves the result image and signals
    success, so its return value is reused here.
    """
    if wolframAlpha(data.printable,data) == None:
        return None
    showWolframAlpha(data)
    data.solved = True
    data.draw = False
def backButton(data):
    """Return to the splash screen, discarding all drawing state."""
    data.splash = True
    data.about = False
    data.draw = False
    data.erase = False
    data.symbol = set()
    data.classification = []
    data.characters = []
def aboutButton(data):
    """Switch from the splash screen to the about screen."""
    data.about, data.splash = True, False
def solveBackButton(data):
    """Leave the solved-result screen and return to a fresh drawing screen."""
    clearButton(data)
    data.solved, data.draw = False, True
def eraseButton(data):
    """Toggle eraser mode and swap the toolbar icon.

    The icon shows the tool you would switch *to*: the eraser while
    drawing, the chalk while erasing.
    """
    data.erase = not(data.erase)
    if data.erase == False:
        data.eraserImage = PhotoImage(file="eraser.gif")
    else:
        data.eraserImage = PhotoImage(file="chalk.gif")
def make2dList(rows,cols): #makes 2d list- similar to 15-112 website
    """Return a rows x cols grid of zeros as a list of independent lists."""
    return [[0] * cols for _ in range(rows)]
def findBoundaries(data):
    """Find bounding boxes (left, top, right, bottom) for drawn symbols.

    Rasterizes data.symbol into a 2D bitmap, records the overall top and
    bottom inked rows, then scans columns left-to-right; an empty column
    to the right of ink closes off a symbol.  New boxes are appended to
    data.characters, which is also returned.
    """
    #positions of every symbol drawn
    rows = data.height//data.squaresize
    cols = data.width//data.squaresize
    data.image = make2dList(rows,cols)
    imageList = data.image
    #create 2D list of 1's and 0's modeling the entire image
    for position in data.symbol:
        row,col = position[1],position[0]
        imageList[row][col] = 1
    left,right,top,bottom = -1,-1,-1,-1
    intermediate = -2 #helps determine where the left edge is in the middle
    #of the page
    # find the top-most and bottom-most inked rows
    for row in range(rows):
        for col in range(cols):
            if imageList[row][col]==1:
                if top==-1:
                    top = row #saves highest row
                bottom = row #saves lowest row
    # find left-most boundary (lowest x-val):
    for col in range(len(imageList[0])):
        count = 0
        for row in range(rows):
            if imageList[row][col]==1:
                count+=1
        #saves the right-most column in which there is a black pixel
        right = col
        if left==-1 or left==intermediate:
            #saves the left-most column in which there is a black pixel
            left = col
        if count==0 and right>left:
            boundaries = (left, top, right + 1, bottom + 1)
            if boundaries not in data.characters:
                data.characters.append(boundaries)
            intermediate = left
    return data.characters
def resizeImagetoSquare(left,top,right,bottom,data):
    """Crop the drawn symbol and scale it for both networks.

    Returns (mnist_column, hasy_column): a (784,1) vector with the glyph
    resized to 20x20 and centred in a 28x28 frame, and a (1024,1) vector
    of the glyph resized to 32x32.
    """
    # copy the bounding box out of the full-screen bitmap
    image = make2dList((bottom-top),(right-left))
    for i in range(left,right):
        for j in range(top,bottom):
            image[j-top][i-left] = data.image[j][i]
    # Remove fully-blank rows.  The original removed rows from `image`
    # while iterating the same list, which skips the row after each
    # removal and so leaves some blank rows behind.
    image = [row for row in image if 1 in row]
    #convert to image
    image = Image.fromarray(np.array(image))
    ##For MNIST##
    imageCenter = image.resize((20,20))
    mnistImage = np.zeros((28,28))
    mnistImage[4:24,4:24] = np.array(imageCenter)
    ##For HASY##
    hasyimage = np.array(image.resize((32,32)))
    return np.reshape(mnistImage,(784,1)),np.reshape(hasyimage,(1024,1))
def findSquare(x,y,data):
    """Mark the 2x2 block of grid squares under pixel (x, y) as drawn.

    data.symbol is a set of (col, row) grid coordinates; set.add/update
    is a no-op for existing members, so the original's membership checks
    were redundant.
    """
    col = x // data.squaresize
    row = y // data.squaresize
    data.symbol.update({(col, row), (col + 1, row),
                        (col, row + 1), (col + 1, row + 1)})
def findErase(x,y,data):
    """Unmark the 2x2 block of grid squares under pixel (x, y).

    set.discard is a no-op for missing members, replacing the original's
    check-then-remove pairs.
    """
    col = x // data.squaresize
    row = y // data.squaresize
    for square in ((col, row), (col + 1, row),
                   (col, row + 1), (col + 1, row + 1)):
        data.symbol.discard(square)
def init(data):
    """Set up all application state: mode flags, networks, images, buttons."""
    #Misc.
    data.margin = 20
    data.printable=""
    data.buttonColor = "grey"
    data.splash = True
    data.about = False
    data.solved = False
    data.draw = False
    data.erase = False
    data.symbol = set() # set of (col, row) grid squares that are inked
    data.size = 110 # grid resolution: squares per screen height
    data.squaresize = data.height // data.size
    data.image = []
    data.classification = []
    data.highlight = ""
    data.net = createNet() # HASY symbol network
    data.net2 = createMNIST() # MNIST digit network
    data.characters = [] # list containing tuples of the (left,top,right,bottom)
    #Images
    data.pic = PhotoImage(file="background.gif") #taken from www.123rf.com
    data.drawpic= PhotoImage(file="blackboard.gif") #taken from www.123RF.com
    data.eraserImage = PhotoImage(file="eraser.gif") #taken from www.123rf.com
    data.aboutpic = PhotoImage(file="about_screen.gif") #created on my own
    #Buttons
    data.startButton = Button(data.width//3,3*data.height//4,90,50,
                              data.buttonColor,"white",data,"Start")
    data.classifyButton = Button(data.width//3-5,5*data.height//6,90,50,
                                 data.buttonColor,"white",data,"Classify")
    data.clearButton = Button(5+2*data.width//3,5*data.height//6,90,50,
                              data.buttonColor,"white",data,"Clear")
    data.correctButton = Button(data.width//3-5,5*data.height//6,90,50,
                                data.buttonColor,"white",data,"Correct It")
    data.solveButton = Button(data.width//2,5*data.height//6,140,50,
                              data.buttonColor,"white",data,"Send To \n Wolfram Alpha!")
    data.backButton = Button(45,20,90,40,data.buttonColor,"white",data,"Back")
    data.aboutButton = Button(2*data.width//3,3*data.height//4,90,50,
                              data.buttonColor,"white",data,"About")
    data.solveBackButton = Button(45,20,90,40,data.buttonColor,'white',
                                  data,"Back")
    data.eraseButton = Button(data.width-45,20,90,40,data.buttonColor,"white",
                              data,"shouln't ever be displayed")
def classifySymbols(data):
    """Classify every drawn symbol with both the HASY and MNIST nets.

    Each bounding box is rendered once for both networks (the original
    rendered it twice).  The network with the stronger activation wins,
    with a 5% handicap applied against the MNIST digit net and a few
    HASY output classes zeroed out entirely.
    """
    findBoundaries(data)
    for (left, top, right, bottom) in data.characters:
        mnist_vec, hasy_vec = resizeImagetoSquare(left, top, right, bottom, data)
        hasy_out = data.net.feedforward(hasy_vec)
        mnist_out = data.net2.feedforward(mnist_vec)
        # excluded HASY classes -- TODO confirm why these indices are banned
        for i in [0, 78, 79, 80, 87]:
            hasy_out[i][0] = 0
        if np.amax(hasy_out) >= np.amax(mnist_out) * 1.05:
            line = np.argmax(hasy_out)
            data.classification.append(str(latexCommand(line)))
        else:
            data.classification.append(str(np.argmax(mnist_out)))
def onlineLearning(data,hasyarray,mnistarray,inp):
    """Train the matching network on one user-corrected sample and save it.

    Digits train the MNIST net; anything else trains the HASY net with
    the class index looked up by symbol name.
    NOTE(review): '' also satisfies `in string.digits` -- confirm empty
    input cannot reach here.  net.netSize[2] is read as the output-layer
    size -- confirm the Network class exposes it that way.
    """
    inputy = str(inp)
    if inputy in string.digits:
        net = data.net2
        array = mnistarray
        index = int(inp)
        title = 'JUSTDIDTHISTONIGHT'
    else:
        net = data.net
        array = hasyarray
        index = findIndex(inp)
        title = 'hasy'
    testY = make2dList(net.netSize[2],1) # one-hot target column
    testY[int(index)][0] = 1
    net.MBGD([array],[testY])
    net.save(title)
def findIndex(input):
    """Return the class index for a symbol name, or None if unknown.

    The lookup is case-insensitive; the returned value is i-1
    (presumably skipping the CSV header row -- TODO confirm).  The file
    is opened with `with` so the handle is closed even on the early
    return; the original leaked it.
    """
    target = str(input).lower()
    with open(
        "C:/Users/Joe/Documents/S17/15-112/Term Project/HASYv2/symbols3.csv", 'r') as file:
        for i, row in enumerate(csv.reader(file)):
            if target == str(row[1]).lower():
                return i - 1
    print ("Symbol Name Not Recognized")
    return None
def convertClassification(data):
    """Join the classified symbols into the printable expression string.

    Inserts '^' between a letter and a following digit (exponents) and
    removes the space between adjacent digits (multi-digit numbers).
    NOTE(review): both passes index against the string's original length
    while the string may shrink; the bare excepts paper over the
    resulting IndexErrors -- confirm before refactoring.
    """
    classification = ""
    for i in data.classification:
        classification+=i+' '
    exclusions = ["f",'m','s','l','+','-','*','/'] # letters that are operators/functions
    for index in range(1,len(classification)-1):
        try:
            if ((classification[index - 1] in string.ascii_letters) and \
                (classification[index + 1] in string.digits) and \
                (classification[index-1] not in exclusions) and \
                classification[index] == " "):
                classification=classification[:index]+"^"+classification[index+1:]
        except:
            continue
    for index in range(1,len(classification)-1):
        try:
            if ((classification[index-1] in string.digits) and \
                (classification[index+1] in string.digits) and \
                classification[index]==" "):
                classification = classification[:index]+classification[index+1:]
        except:
            continue
    data.printable=classification
def showWolframAlpha(data):
    """Split the saved Wolfram result image into two window-height parts.

    Saves them as temp1.gif/temp2.gif and loads them as PhotoImages for
    drawSolvedScreen to display side by side.
    """
    im = Image.open("wolframtemp.gif")
    (width,height) = im.size
    h = data.height-data.margin
    im1 = im.crop(box=(0,0,width,h))
    im1.save("temp1.gif")
    try:
        im2 = im.crop(box=(0,h,width,height))
    except: # NOTE(review): unclear when crop raises here -- confirm intent
        im2 = im.crop(box=(0,h,width,2*h))
    im2.save("temp2.gif")
    data.im1 = PhotoImage(file="temp1.gif")
    data.im2 = PhotoImage(file="temp2.gif")
###CONTROLLER###
def leftReleased(event, data):
    """Record where the left mouse button was released."""
    setEventInfo(event, data, "leftReleased")
    data.leftPosn = (event.x, event.y)
def setEventInfo(event, data, eventName):
    """Store a human-readable description of the event in data.info."""
    modifiers = ""
    if event.state & 0x0004:  # ctrl key held
        modifiers += "ctrl-"
    if event.state & 0x0001:  # shift key held
        modifiers += "shift-"
    data.info = modifiers + eventName + " at " + str((event.x, event.y))
def mouseMotion(event,data):
    """Track the pointer position while no button is held."""
    setEventInfo(event, data, "mouseMotion")
    data.motionPosn = (event.x, event.y)
def leftPressed(event, data):
    """Route a left-click to the button belonging to the current screen."""
    setEventInfo(event, data, "leftPressed")
    data.leftPosn = (event.x, event.y)
    if data.splash==True: # splash screen
        if data.startButton.inBoundaries(event.x,event.y):
            startButton(data)
        if data.aboutButton.inBoundaries(event.x,event.y):
            aboutButton(data)
    elif data.about==True: # about screen
        if data.backButton.inBoundaries(event.x, event.y):
            backButton(data)
    elif data.draw==True: # drawing screen
        if data.classification != []: #Already Classified
            if data.correctButton.inBoundaries(event.x, event.y):
                correctButton(data)
            elif data.solveButton.inBoundaries(event.x,event.y):
                solveButton(data)
            elif data.backButton.inBoundaries(event.x, event.y):
                backButton(data)
            elif data.clearButton.inBoundaries(event.x, event.y):
                clearButton(data)
        else: #still drawing
            if data.classifyButton.inBoundaries(event.x,event.y):
                classifyButton(data)
            elif data.backButton.inBoundaries(event.x, event.y):
                backButton(data)
            elif data.clearButton.inBoundaries(event.x, event.y):
                clearButton(data)
            elif data.eraseButton.inBoundaries(event.x,event.y):
                eraseButton(data)
    elif data.solved==True: # results screen
        if data.solveBackButton.inBoundaries(event.x,event.y):
            solveBackButton(data)
def leftMoved(event, data):
    """Drag handler: paint or erase grid squares while drawing.

    NOTE(review): when erase is on, the elif runs regardless of
    data.splash -- confirm erasing should be possible off the draw screen.
    """
    setEventInfo(event, data, "leftMoved")
    data.leftPosn = (event.x, event.y)
    if data.splash==False and data.erase==False:
        findSquare(event.x, event.y, data)
    elif data.erase==True:
        findErase(event.x,event.y,data)
def timerFired(data): pass # no timer-driven state in this app
def keyPressed(event, data): pass # keyboard input is unused
###VIEW###
def createGrid(canvas,data):
    """Paint every inked grid square in data.symbol as a white rectangle.

    The original also read data.width/data.height into locals that were
    never used; that dead code is removed.
    """
    size = data.squaresize
    for (col, row) in data.symbol:
        x0, y0 = col * size, row * size
        canvas.create_rectangle(x0, y0, x0 + size, y0 + size,
                                fill="white", outline="white")
def drawSplashScreen(canvas,data):
    """Draw the splash screen or the about screen, per the mode flags."""
    if data.splash == True:
        #background image
        canvas.create_image(data.width//2,data.height//2, image=data.pic)
        #above code (background image) taken from 15-112 website
        data.startButton.drawNormal(canvas)
        data.aboutButton.drawNormal(canvas)
    elif data.about==True:
        canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
        im = data.aboutpic
        canvas.create_image(data.width//2,data.height//2+15,image=im)
def drawClassifyScreen(canvas,data):
    """Draw the drawing screen: results view once classified, else canvas."""
    if data.draw==True: #Draw Screen
        if data.classification != []: #already classified
            canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
            canvas.create_text(data.width//2,data.height//2,text=str(data.printable),
                font=("Bradley Hand ITC", 20,"bold"),fill="white")
            data.correctButton.drawNormal(canvas)
            data.solveButton.drawNormal(canvas)
            if data.highlight != "": # symbol currently being corrected
                text = "Classified Symbol: "+str(data.highlight)
                canvas.create_text(data.width//2,2*data.height//3,
                    text=text,font=("Bradley Hand ITC", 20,"bold"),fill="white")
        else:
            canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
            createGrid(canvas,data)
            drawSquareBoundaries(canvas,data)
            canvas.create_text(data.width//2,data.height//10,text="Write Math",
                        font=("Bradley Hand ITC",20,"bold"),fill="white")
            data.classifyButton.drawNormal(canvas)
            data.eraseButton.drawImage(canvas)
        data.clearButton.drawNormal(canvas)
        data.backButton.drawNormal(canvas)
    elif data.splash==False: #About Screen
        data.backButton.drawNormal(canvas)
def drawSolvedScreen(canvas,data):
    """Show the two halves of the Wolfram Alpha result side by side."""
    if data.solved == True:
        im1 = data.im1
        im2 = data.im2
        canvas.create_rectangle(0,0,data.width,data.height,fill="white")
        canvas.create_image(20,20,anchor=NW,image=im1)
        canvas.create_image(data.width//2,20,anchor=NW,image=im2)
        data.solveBackButton.drawNormal(canvas)
def drawSquareBoundaries(canvas,data):
    """Outline each detected symbol's bounding box on the drawing grid."""
    scale = data.squaresize
    for (x0, y0, x1, y1) in data.characters:
        canvas.create_rectangle(x0 * scale, y0 * scale, x1 * scale, y1 * scale,
                                fill="", outline="black")
def redrawAll(canvas, data):
    """Repaint the window; each helper checks its own mode flag."""
    drawSplashScreen(canvas,data)
    drawClassifyScreen(canvas,data)
    drawSolvedScreen(canvas,data)
####################################
# use the run function as-is
####################################
def run(width=800, height=400):
    """Create the Tk window, wire up events, and start the app (15-112 framework)."""
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()
    # Note changes #1:
    def mouseWrapper(mouseFn, event, canvas, data):
        mouseFn(event, data)
        #redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 20 # milliseconds
    root = Tk()
    data.root = root
    init(data)
    # create the root and the canvas
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.grid()
    # set up events
    # Note changes #2:
    root.bind("<Button-1>", lambda event:
                            mouseWrapper(leftPressed, event, canvas, data))
    #root.bind("<Button-3>", lambda event:
    #mouseWrapper(rightPressed, event, canvas, data))
    canvas.bind("<Motion>", lambda event:
                            mouseWrapper(mouseMotion, event, canvas, data))
    canvas.bind("<B1-Motion>", lambda event:
                            mouseWrapper(leftMoved, event, canvas, data))
    #canvas.bind("<B3-Motion>", lambda event:
    #mouseWrapper(rightMoved, event, canvas, data))
    root.bind("<B1-ButtonRelease>", lambda event:
                            mouseWrapper(leftReleased, event, canvas, data))
    #root.bind("<B3-ButtonRelease>", lambda event:
    #mouseWrapper(rightReleased, event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
#Overview of Citations#
#Backpropagation Algorithm-- within my Network Class there is a method called
#backprop(x,y) which was taken entirely and without modification from Michael
#Nielsen's book Neural Networks and Deep Learning. This can be found online at
#http://neuralnetworksanddeeplearning.com/chap1.html
#Several of my functions for loading data call external python files called
#'mnist_training' and 'unpackData'. These files are a mixture of my own code
#and code written by others. See the files for more detailed citations
#I use the wolframAlpha module which is a nice way of accessing the Wolfram API
#more information about the module and API can be found at www.wolframalpha.com
#My pop-up dialog class was taken with light modifications from the 15-112 website
#under miscellaneous tkinter demos
#The entirety of my graphics is built off of events_example0.py from the 15-112 website
#the run function is modified from mouse-pressed examples posted on the 15-112 website
#any media (pictures) used in this project are clearly cited in a comment
#to the right of where they are first called. (generally in init)
#In a couple locations I load or save files using code taken and modified from
#stackoverflow.com. The exact URL's of these can be found next to the usage.
#All above citations can be found next to their location in my code#
|
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 592 $
# $Date: 2009-02-04 16:24:59 -0700 (Wed, 04 Feb 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/network/Network.py $
#
#The Network class is defined in here
#
import socket
import struct
import array
import time
import sys
import StringIO
try:
import numpy
numpy_int_type = numpy.dtype('int32').newbyteorder('>')
numpy_float_type = numpy.dtype('float64').newbyteorder('>')
numpy_char_type = 'S1'#numpy.dtype('uint8').newbyteorder('>')
except:
pass
from rlglue.types import Action
from rlglue.types import Observation
from rlglue.types import Reward_observation_terminal
from rlglue.types import RL_Abstract_Type
# BEGIN: change made by: Akshay Narayan (05-01-2015:1904)
from rlglue.types import Reward
# END: change made by: Akshay Narayan (05-01-2015:1904)
# RL-Glue needs to know what type of object is trying to connect.
kExperimentConnection = 1
kAgentConnection = 2
kEnvironmentConnection = 3
kAgentInit = 4 # agent_* start by sending one of these values
kAgentStart = 5 # to the client to let it know what type of
kAgentStep = 6 # event to respond to
kAgentEnd = 7
kAgentCleanup = 8
kAgentMessage = 10
kEnvInit = 11
kEnvStart = 12
kEnvStep = 13
kEnvCleanup = 14
kEnvMessage = 19
kRLInit = 20
kRLStart = 21
kRLStep = 22
kRLCleanup = 23
kRLReturn = 24
kRLNumSteps = 25
kRLNumEpisodes = 26
kRLEpisode = 27
kRLAgentMessage = 33
kRLEnvMessage = 34
kRLTerm = 35
kLocalHost = "127.0.0.1"
kDefaultPort = 4096
kRetryTimeout = 2
kDefaultBufferSize = 4096
kIntSize = 4
kDoubleSize = 8
kCharSize = 1
kUnknownMessage = "Unknown Message: %s\n"
class Network:
    """Buffered big-endian TCP codec for the RL-Glue protocol (Python 2).

    Ints are 4 bytes, doubles 8, chars 1, all in network byte order.  If
    the optional numpy import at module scope succeeded, abstract types
    are decoded into numpy arrays; otherwise into plain lists.
    """
    def __init__(self):
        self.sock = None
        self.recvBuffer = StringIO.StringIO('')
        self.sendBuffer = StringIO.StringIO('')
        # Pick the decoder implementation once, based on whether numpy
        # was importable when this module loaded.
        if 'numpy' in globals():
            self.getAbstractType = self.getAbstractType_numpy
        else:
            self.getAbstractType = self.getAbstractType_list
    def connect(self, host=kLocalHost, port=kDefaultPort, retryTimeout=kRetryTimeout):
        """Connect to rl_glue, retrying every retryTimeout seconds until it succeeds."""
        while self.sock is None:
            try:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                self.sock.connect((host, port))
            except socket.error:
                # BUG FIX: the original `except (socket.error, msg):`
                # evaluated the unbound name `msg`, raising NameError
                # instead of retrying (broken edit of the Python 2 form
                # `except socket.error, msg:`).
                self.sock = None
                time.sleep(retryTimeout)
            else:
                break
    def close(self):
        """Close the underlying socket."""
        self.sock.close()
    def send(self):
        """Flush the whole send buffer onto the socket."""
        self.sock.sendall(self.sendBuffer.getvalue())
    def recv(self,size):
        """Read exactly `size` bytes into recvBuffer and rewind it.

        NOTE(review): if the peer closes, sock.recv returns '' and this
        loops forever -- confirm callers rely on the connection staying up.
        """
        s = ''
        while len(s) < size:
            s += self.sock.recv(size - len(s))
        self.recvBuffer.write(s)
        self.recvBuffer.seek(0)
        return len(s)
    def clearSendBuffer(self):
        """Replace the send buffer with a fresh empty one."""
        self.sendBuffer.close()
        self.sendBuffer = StringIO.StringIO()
    def clearRecvBuffer(self):
        """Replace the receive buffer with a fresh empty one."""
        self.recvBuffer.close()
        self.recvBuffer = StringIO.StringIO()
    def flipSendBuffer(self):
        """Kept for API parity with other codecs: just clears the buffer."""
        self.clearSendBuffer()
    def flipRecvBuffer(self):
        """Kept for API parity with other codecs: just clears the buffer."""
        self.clearRecvBuffer()
    def getInt(self):
        """Read one big-endian 4-byte signed int from recvBuffer."""
        s = self.recvBuffer.read(kIntSize)
        return struct.unpack("!i",s)[0]
    def getDouble(self):
        """Read one big-endian 8-byte double from recvBuffer."""
        s = self.recvBuffer.read(kDoubleSize)
        return struct.unpack("!d",s)[0]
    def getString(self):
        """Read a length-prefixed string (length 0 yields '', not None)."""
        length = self.getInt()
        return self.recvBuffer.read(length)
    def getAbstractType_list(self):
        """Decode an RL_Abstract_Type into plain Python lists."""
        numInts = self.getInt()
        numDoubles = self.getInt()
        numChars = self.getInt()
        returnStruct = RL_Abstract_Type()
        if numInts > 0:
            s = self.recvBuffer.read(numInts*kIntSize)
            returnStruct.intArray = list(struct.unpack("!%di" % (numInts),s))
        if numDoubles > 0:
            s = self.recvBuffer.read(numDoubles*kDoubleSize)
            returnStruct.doubleArray = list(struct.unpack("!%dd" % (numDoubles),s))
        if numChars > 0:
            s = self.recvBuffer.read(numChars*kCharSize)
            returnStruct.charArray = list(struct.unpack("!%dc" % (numChars),s))
        return returnStruct
    def getAbstractType_numpy(self):
        """Decode an RL_Abstract_Type into numpy arrays."""
        numInts = self.getInt()
        numDoubles = self.getInt()
        numChars = self.getInt()
        returnStruct = RL_Abstract_Type()
        if numInts > 0:
            s = self.recvBuffer.read(numInts*kIntSize)
            assert kIntSize == 4
            returnStruct.intArray = numpy.frombuffer(s, dtype=numpy_int_type,count=numInts)
        if numDoubles > 0:
            s = self.recvBuffer.read(numDoubles*kDoubleSize)
            returnStruct.doubleArray = numpy.frombuffer(s, count=numDoubles, dtype=numpy_float_type)
        if numChars > 0:
            s = self.recvBuffer.read(numChars*kCharSize)
            returnStruct.charArray = numpy.frombuffer(s, count=numChars, dtype=numpy_char_type)
        return returnStruct
    def getObservation(self):
        return Observation.fromAbstractType(self.getAbstractType())
    def getAction(self):
        return Action.fromAbstractType(self.getAbstractType())
    # BEGIN: change made by: Akshay Narayan (05-01-2015:2222)
    def getReward(self):
        return Reward.fromAbstractType(self.getAbstractType())
    # END: change made by: Akshay Narayan (05-01-2015:2222)
    def putInt(self,value):
        """Append one big-endian 4-byte signed int to the send buffer."""
        self.sendBuffer.write(struct.pack("!i",value))
    def putDouble(self,value):
        """Append one big-endian 8-byte double to the send buffer."""
        self.sendBuffer.write(struct.pack("!d",value))
    def putString(self,value):
        """Append a length-prefixed string; None is sent as empty."""
        if value is None:
            value = ''
        self.putInt(len(value))
        self.sendBuffer.write(value)
    def putObservation(self,obs):
        self.putAbstractType(obs)
    def putAction(self,action):
        self.putAbstractType(action)
    # BEGIN: change made by: Akshay Narayan (05-01-2015:2224)
    def putReward(self, reward):
        self.putAbstractType(reward)
    # END: change made by: Akshay Narayan (05-01-2015:2224)
    def putAbstractType(self, theItem):
        """Write the three array counts, then each packed array."""
        self.putInt(len(theItem.intArray))
        self.putInt(len(theItem.doubleArray))
        self.putInt(len(theItem.charArray))
        if len(theItem.intArray) > 0:
            self.sendBuffer.write(struct.pack("!%di" % (len(theItem.intArray)),*(theItem.intArray)))
        if len(theItem.doubleArray) > 0:
            self.sendBuffer.write(struct.pack("!%dd" % (len(theItem.doubleArray)),*(theItem.doubleArray)))
        if len(theItem.charArray) > 0:
            self.sendBuffer.write(struct.pack("!%dc" % (len(theItem.charArray)),*(theItem.charArray)))
    def putRewardObservation(self,rewardObservation):
        """Write terminal flag, reward (as an abstract type), then observation."""
        self.putInt(rewardObservation.terminal)
        # BEGIN: change made by: Akshay Narayan (05-01-2015:2226)
        #self.putDouble(rewardObservation.r);
        self.putReward(rewardObservation.r)
        # END: change made by: Akshay Narayan (05-01-2015:2226)
        self.putObservation(rewardObservation.o)
    def sizeOfAbstractType(self, theItem):
        """Serialized size in bytes: three count ints plus the packed arrays."""
        size = kIntSize * 3
        intSize = 0
        doubleSize = 0
        charSize = 0
        if theItem is not None:
            if theItem.intArray is not None:
                intSize = kIntSize * len(theItem.intArray)
            if theItem.doubleArray is not None:
                doubleSize = kDoubleSize * len(theItem.doubleArray)
            if theItem.charArray is not None:
                charSize = kCharSize * len(theItem.charArray)
        return size + intSize + doubleSize + charSize
    def sizeOfAction(self,action):
        return self.sizeOfAbstractType(action)
    def sizeOfObservation(self,observation):
        return self.sizeOfAbstractType(observation)
    # BEGIN: change made by: Akshay Narayan (05-01-2015:2229)
    def sizeOfReward(self, reward):
        return self.sizeOfAbstractType(reward)
    # END: change made by: Akshay Narayan (05-01-2015:2229)
    def sizeOfRewardObservation(self,reward_observation):
        """Size of a reward-observation: flag + reward + observation."""
        # BEGIN: change made by: Akshay Narayan (05-01-2015:2231)
        # return kIntSize + kDoubleSize + self.sizeOfObservation(reward_observation.o)
        return kIntSize + self.sizeOfReward(reward_observation.r) + self.sizeOfObservation(reward_observation.o)
        # END: change made by: Akshay Narayan (05-01-2015:2231)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
import six
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder import volume
LOG = logging.getLogger(__name__)
def authorize(context, action_name):
    """Enforce the volume_actions policy rule for the given action name."""
    extensions.extension_authorizer(
        'volume', 'volume_actions:%s' % action_name)(context)
class VolumeToImageSerializer(xmlutil.TemplateBuilder):
    """Build the XML template for os-volume_upload_image responses."""

    def construct(self):
        """Return the master template with all response attributes declared."""
        root = xmlutil.TemplateElement('os-volume_upload_image',
                                       selector='os-volume_upload_image')
        # Declare each serialized attribute; order matches the API docs.
        for attribute in ('id', 'updated_at', 'status', 'display_description',
                          'size', 'volume_type', 'image_id',
                          'container_format', 'disk_format', 'image_name'):
            root.set(attribute)
        return xmlutil.MasterTemplate(root, 1)
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted requests."""

    # Attributes copied verbatim from the action element when present.
    _ATTRIBUTES = ("force", "image_name", "container_format", "disk_format")

    def default(self, string):
        """Parse the XML body into {'body': {<action_name>: <attrs>}}."""
        dom = utils.safe_minidom_parse_string(string)
        action_node = dom.childNodes[0]
        action_data = dict(
            (attr, action_node.getAttribute(attr))
            for attr in self._ATTRIBUTES
            if action_node.hasAttribute(attr))
        # Only the exact string 'True' is coerced to a boolean; any other
        # value is passed through unchanged (matches historical behavior).
        if action_data.get('force') == 'True':
            action_data['force'] = True
        return {'body': {action_node.tagName: action_data}}
class VolumeActionsController(wsgi.Controller):
    """Controller implementing volume actions (attach/detach, reserve,
    connection management, image upload, extend, retype, flags).

    Each handler resolves the target volume, validates the request body and
    delegates the actual work to the volume API.
    """

    def __init__(self, *args, **kwargs):
        super(VolumeActionsController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    def _get_volume(self, context, id):
        """Return the volume or translate VolumeNotFound into HTTP 404.

        Factored out of the handlers below, which all shared this
        try/except verbatim.
        """
        try:
            return self.volume_api.get(context, id)
        except exception.VolumeNotFound as error:
            raise webob.exc.HTTPNotFound(explanation=error.msg)

    @wsgi.action('os-attach')
    def _attach(self, req, id, body):
        """Add attachment metadata."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        # instance uuid is an option now
        instance_uuid = None
        if 'instance_uuid' in body['os-attach']:
            instance_uuid = body['os-attach']['instance_uuid']
        host_name = None
        # Keep API backward compatibility
        if 'host_name' in body['os-attach']:
            host_name = body['os-attach']['host_name']
        mountpoint = body['os-attach']['mountpoint']
        mode = body['os-attach'].get('mode', 'rw')
        # Exactly one of instance_uuid / host_name must be supplied.
        if instance_uuid and host_name:
            msg = _("Invalid request to attach volume to an "
                    "instance %(instance_uuid)s and a "
                    "host %(host_name)s simultaneously") % {
                'instance_uuid': instance_uuid,
                'host_name': host_name,
            }
            raise webob.exc.HTTPBadRequest(explanation=msg)
        elif instance_uuid is None and host_name is None:
            msg = _("Invalid request to attach volume to an invalid target")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if mode not in ('rw', 'ro'):
            msg = _("Invalid request to attach volume with an invalid mode. "
                    "Attaching mode should be 'rw' or 'ro'")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        self.volume_api.attach(context, volume,
                               instance_uuid, host_name, mountpoint, mode)
        return webob.Response(status_int=202)

    @wsgi.action('os-detach')
    def _detach(self, req, id, body):
        """Clear attachment metadata."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        attachment_id = None
        if body['os-detach']:
            attachment_id = body['os-detach'].get('attachment_id', None)
        self.volume_api.detach(context, volume, attachment_id)
        return webob.Response(status_int=202)

    @wsgi.action('os-reserve')
    def _reserve(self, req, id, body):
        """Mark volume as reserved."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        self.volume_api.reserve_volume(context, volume)
        return webob.Response(status_int=202)

    @wsgi.action('os-unreserve')
    def _unreserve(self, req, id, body):
        """Unmark volume as reserved."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        self.volume_api.unreserve_volume(context, volume)
        return webob.Response(status_int=202)

    @wsgi.action('os-begin_detaching')
    def _begin_detaching(self, req, id, body):
        """Update volume status to 'detaching'."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        self.volume_api.begin_detaching(context, volume)
        return webob.Response(status_int=202)

    @wsgi.action('os-roll_detaching')
    def _roll_detaching(self, req, id, body):
        """Roll back volume status to 'in-use'."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        self.volume_api.roll_detaching(context, volume)
        return webob.Response(status_int=202)

    @wsgi.action('os-initialize_connection')
    def _initialize_connection(self, req, id, body):
        """Initialize volume attachment."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        try:
            connector = body['os-initialize_connection']['connector']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Must specify 'connector'"))
        try:
            info = self.volume_api.initialize_connection(context,
                                                         volume,
                                                         connector)
        except exception.InvalidInput as err:
            raise webob.exc.HTTPBadRequest(
                explanation=err)
        except exception.VolumeBackendAPIException:
            msg = _("Unable to fetch connection information from backend.")
            raise webob.exc.HTTPInternalServerError(explanation=msg)
        return {'connection_info': info}

    @wsgi.action('os-terminate_connection')
    def _terminate_connection(self, req, id, body):
        """Terminate volume attachment."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        try:
            connector = body['os-terminate_connection']['connector']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Must specify 'connector'"))
        try:
            self.volume_api.terminate_connection(context, volume, connector)
        except exception.VolumeBackendAPIException:
            msg = _("Unable to terminate volume connection from backend.")
            raise webob.exc.HTTPInternalServerError(explanation=msg)
        return webob.Response(status_int=202)

    @wsgi.response(202)
    @wsgi.action('os-volume_upload_image')
    @wsgi.serializers(xml=VolumeToImageSerializer)
    @wsgi.deserializers(xml=VolumeToImageDeserializer)
    def _volume_upload_image(self, req, id, body):
        """Uploads the specified volume to image service."""
        context = req.environ['cinder.context']
        params = body['os-volume_upload_image']
        if not params.get("image_name"):
            msg = _("No image_name was specified in request.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        force = params.get('force', False)
        if isinstance(force, basestring):
            try:
                force = strutils.bool_from_string(force, strict=False)
            except ValueError:
                msg = _("Bad value for 'force' parameter.")
                raise webob.exc.HTTPBadRequest(explanation=msg)
        elif not isinstance(force, bool):
            msg = _("'force' is not string or bool.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        volume = self._get_volume(context, id)
        authorize(context, "upload_image")
        # Container/disk formats default to the most generic combination.
        image_metadata = {"container_format": params.get("container_format",
                                                         "bare"),
                          "disk_format": params.get("disk_format", "raw"),
                          "name": params["image_name"]}
        try:
            response = self.volume_api.copy_volume_to_image(context,
                                                            volume,
                                                            image_metadata,
                                                            force)
        except exception.InvalidVolume as error:
            raise webob.exc.HTTPBadRequest(explanation=error.msg)
        except ValueError as error:
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(error))
        except messaging.RemoteError as error:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
                                                 'err_msg': error.value}
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except Exception as error:
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(error))
        return {'os-volume_upload_image': response}

    @wsgi.action('os-extend')
    def _extend(self, req, id, body):
        """Extend size of volume."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        # Single conversion; the original converted twice.
        try:
            size = int(body['os-extend']['new_size'])
        except (KeyError, ValueError, TypeError):
            msg = _("New volume size must be specified as an integer.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        self.volume_api.extend(context, volume, size)
        return webob.Response(status_int=202)

    @wsgi.action('os-update_readonly_flag')
    def _volume_readonly_update(self, req, id, body):
        """Update volume readonly flag."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        try:
            readonly_flag = body['os-update_readonly_flag']['readonly']
        except KeyError:
            msg = _("Must specify readonly in request.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if isinstance(readonly_flag, basestring):
            try:
                readonly_flag = strutils.bool_from_string(readonly_flag,
                                                          strict=True)
            except ValueError:
                msg = _("Bad value for 'readonly'")
                raise webob.exc.HTTPBadRequest(explanation=msg)
        elif not isinstance(readonly_flag, bool):
            msg = _("'readonly' not string or bool")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        self.volume_api.update_readonly_flag(context, volume, readonly_flag)
        return webob.Response(status_int=202)

    @wsgi.action('os-retype')
    def _retype(self, req, id, body):
        """Change type of existing volume."""
        context = req.environ['cinder.context']
        # NOTE(review): unlike the other actions this historically does not
        # translate VolumeNotFound into a 404; preserved for compatibility.
        volume = self.volume_api.get(context, id)
        try:
            new_type = body['os-retype']['new_type']
        except KeyError:
            msg = _("New volume type must be specified.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        policy = body['os-retype'].get('migration_policy')
        self.volume_api.retype(context, volume, new_type, policy)
        return webob.Response(status_int=202)

    @wsgi.action('os-set_bootable')
    def _set_bootable(self, req, id, body):
        """Update bootable status of a volume."""
        context = req.environ['cinder.context']
        volume = self._get_volume(context, id)
        try:
            bootable = body['os-set_bootable']['bootable']
        except KeyError:
            msg = _("Must specify bootable in request.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if isinstance(bootable, basestring):
            try:
                bootable = strutils.bool_from_string(bootable,
                                                     strict=True)
            except ValueError:
                msg = _("Bad value for 'bootable'")
                raise webob.exc.HTTPBadRequest(explanation=msg)
        elif not isinstance(bootable, bool):
            msg = _("'bootable' not string or bool")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        update_dict = {'bootable': bootable}
        self.volume_api.update(context, volume, update_dict)
        return webob.Response(status_int=200)
class Volume_actions(extensions.ExtensionDescriptor):
    """Enable volume actions."""

    name = "VolumeActions"
    alias = "os-volume-actions"
    namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1"
    updated = "2012-05-31T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach VolumeActionsController to the 'volumes' resource."""
        controller = VolumeActionsController()
        return [extensions.ControllerExtension(self, 'volumes', controller)]
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
    """
    For `Zephir language <http://zephir-lang.com/>`_ source code.
    Zephir is a compiled high level language aimed
    to the creation of C-extensions for PHP.
    .. versionadded:: 2.0
    """

    name = 'Zephir'
    aliases = ['zephir']
    filenames = ['*.zep']

    # NOTE(review): these two lists are not referenced by the token rules
    # below — presumably kept for future use; confirm before removing.
    zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
    zephir_type = ['bit', 'bits', 'string']

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered wherever a '/' could start a regex literal rather than
        # a division operator; pops back out after one match.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Operators push 'slashstartsregex' because a regex may follow.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
             r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
             r'empty)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
             r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
             r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
             r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][\w\\]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.
    Additional options accepted:
    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.
        To get a list of allowed modules have a look into the
        `_php_builtins` module:
        .. sourcecode:: pycon
            >>> from pygments.lexers._php_builtins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]
        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]', '*.inc']
    mimetypes = ['text/x-php']

    # Note that a backslash is included in the following two patterns
    # PHP uses a backslash as a namespace separator
    _ident_char = r'[\\\w]|[^\x00-\x7f]'
    _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
    _ident_end = r'(?:' + _ident_char + ')*'
    _ident_inner = _ident_begin + _ident_end

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # Outside of '<?php ... ?>' everything is passed through as Other.
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # Heredoc/nowdoc: <<<LABEL ... LABEL;
            (r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\s*\2;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)(' + _ident_inner + ')',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/@-]+', Operator),
            (r'\?', Operator),  # don't add to the charclass above!
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)(' + _ident_inner + ')',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait|yield|'
             r'finally)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
            (r'\$+' + _ident_inner, Name.Variable),
            # Bare identifiers; get_tokens_unprocessed() may promote these
            # to Name.Builtin when they match a known builtin function.
            (_ident_inner, Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
            (r'\d+e[+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0x[a-f0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'0b[01]+', Number.Bin),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (_ident_inner, Name.Class, '#pop')
        ],
        'functionname': [
            (_ident_inner, Name.Function, '#pop')
        ],
        # Inside a double-quoted string: handle escapes and interpolation.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
            (r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        """Read highlighting options and build the set of builtin names."""
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)
        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')
        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._php_builtins import MODULES
            for key, value in iteritems(MODULES):
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Yield tokens, promoting known builtin names to Name.Builtin."""
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # Heuristic score: a '<?' opener (but not '<?xml') suggests PHP.
        rv = 0.0
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        return rv
|
|
import os,sys
import re
import utils as cu
import libDetection as ldet
import regionSelection as rs
import numpy as np
#Functions
def cumsum(a):
    """Return the inclusive running cumulative sum of `a` as a list of floats.

    The original implementation read ``b[i-1]`` with ``i == 0``, silently
    relying on ``b[-1]`` being an untouched trailing zero; this version uses
    an explicit accumulator instead.
    """
    result = []
    total = 0.0
    for value in a:
        total += value
        result.append(total)
    return result
def intersect(x, y):
    """Intersection of two [x1, y1, x2, y2] boxes (may be degenerate)."""
    return [max(x[0], y[0]), max(x[1], y[1]), min(x[2], y[2]), min(x[3], y[3])]

def area(x):
    """Inclusive pixel area of a [x1, y1, x2, y2] box."""
    return (x[2] - x[0] + 1) * (x[3] - x[1] + 1)
# Windows inside a bounding box
def inside(box, gt):
    """Score boxes that lie mostly inside `gt`.

    Returns the overlap when `box` covers >= 90% of `gt` and the IoU is in
    (0.01, 1.0]; otherwise 0.0.
    """
    ov = ldet.overlap(gt, box)
    iou = ldet.IoU(box, gt)
    return ov if (ov >= 0.9 and 0.01 <= iou <= 1.0) else 0.0
def loadGroundTruthAnnotations(indexData):
    """Group ground-truth boxes by image name.

    Each index row is [path, x1, y1, x2, y2]; the image key is the basename
    without its .jpg extension. Each stored record is the four float
    coordinates plus a trailing False "already matched" flag.

    The original used a bare ``except:`` around the append to detect missing
    keys, which would also swallow unrelated errors; ``setdefault`` expresses
    the intent directly.
    """
    groundTruth = dict()
    for gt in indexData:
        imgName = re.sub(r'(.+/){0,1}(.+).jpg', r'\2', gt[0])
        data = list(map(float, gt[1:])) + [False]
        groundTruth.setdefault(imgName, []).append(data)
    return groundTruth
def loadDetections(detectionsData):
    """Parse detection rows [path, score, x1, y1, x2, y2] into records.

    Image paths are reduced to the basename without .jpg; results are
    sorted by descending score and truncated to the top 10000.
    (Python 2: `map` returns a list here.)
    """
    detections = list()
    for d in detectionsData:
        if d[0].endswith('.jpg'):
            d[0] = re.sub(r'(.+/){0,1}(.+).jpg',r'\2',d[0])
        data = [d[0], float(d[1])] + map(float,d[2:])
        detections.append(data)
    # Sort Detections by decreasing confidence
    detections.sort(key=lambda x:x[1], reverse=True)
    detections = detections[0:10000]
    print 'Detections:',len(detections)
    return detections
def evaluateDetections(groundTruth,detections,minOverlap,outFile=None,overlapMeasure=ldet.IoU,allowDuplicates=False,supOverlap=1.0):
    """Greedily match score-sorted detections to ground-truth boxes.

    A detection is a true positive when its best overlap with an unmatched
    ground-truth box on the same image falls in [minOverlap, supOverlap];
    otherwise it is a false positive. Matching mutates the trailing flag of
    each ground-truth record unless allowDuplicates is set. When outFile is
    given, a per-detection .log and a .missed file of unmatched boxes are
    written. Returns {'log': rows, 'fp': flags, 'tp': flags}.
    """
    print 'Minimum overlap:',minOverlap
    if outFile != None:
        log = open(outFile+'.log','w')
        paste = lambda x,y:str(x)+" "+str(y)
        mix = lambda det: reduce(paste, map(int,det[2:]))
        logW = lambda det,maxOverlap,label: log.write(det[0]+' '+mix(det)+' '+str(maxOverlap)+' '+label+'\n')
    else:
        logW = lambda x,y,z: x  # no-op logger when no output file requested
    logData = []
    # Assign detections to ground truth objects
    tp = [0 for x in range(0,len(detections))]
    fp = [0 for x in range(0,len(detections))]
    for i in range(0,len(detections)):
        det = detections[i]
        maxOverlap = -1
        index = -1
        label = "0"
        if det[0] in groundTruth.keys():
            # Find the best-overlapping ground-truth box on this image.
            for j in range(0,len(groundTruth[det[0]])):
                bbox = groundTruth[det[0]][j]
                #overlap = ldet.IoU(bbox[0:4],det[2:6])
                #overlap = ldet.overlap(det[2:6],bbox[0:4])
                overlap = overlapMeasure(det[2:6],bbox[0:4])
                if overlap > maxOverlap:
                    maxOverlap = overlap
                    index = j
            if maxOverlap >= minOverlap and maxOverlap <= supOverlap:
                if not groundTruth[det[0]][index][4]: # Has not been assigned
                    tp[i] = 1 # True Positive
                    if not allowDuplicates:
                        groundTruth[det[0]][index][4] = True
                    label = "1"
                else:
                    fp[i] = 1 # False Positive
            else:
                fp[i] = 1 # False Positive
        else:
            fp[i] = 1 # False Positive
        logW(det,maxOverlap,label)
        logData.append( [det[0]]+map(int,det[2:])+[maxOverlap,label] )
    if outFile != None:
        # Record every ground-truth box that no detection claimed.
        missedF = open(outFile+'.missed','w')
        for k in groundTruth.keys():
            for det in groundTruth[k]:
                if not det[4]:
                    missedF.write(k+' {:} {:} {:} {:}\n'.format(det[0],det[1],det[2],det[3]))
        missedF.close()
        log.close()
    return {'log':logData,'fp':fp,'tp':tp}
def computePrecisionRecall(numPositives,tp,fp,outFile):
    """Write the precision/recall curve to outFile and print AP metrics.

    tp/fp are per-detection 0/1 flags in score order. Computes average
    precision with both the PASCAL VOC 2012 (envelope) and 2007 (11-point)
    conventions; the 2007 value is appended as a final '0 <AP>' line.
    """
    # Compute Precision/Recall
    numTP = sum(tp)
    numFP = sum(fp)
    print "True Positives:",numTP,"False Positives:",numFP
    tp = cumsum(tp)
    fp = cumsum(fp)
    # Guard against more matched detections than annotated positives
    # (possible when duplicates are allowed).
    if numTP > numPositives:
        totalPositives = numTP
    else:
        totalPositives = numPositives
    recall = map(lambda x:x/float(totalPositives), tp)
    precision = [tp[i]/(tp[i]+fp[i]) for i in range(0,len(tp))]
    output = open(outFile,"w")
    for i in range(0,len(recall)):
        output.write(str(recall[i])+" "+str(precision[i])+"\n")
    '''
    PASCAL VOC 2012 devkit
    mrec=[0 ; rec ; 1];
    mpre=[0 ; prec ; 0];
    for i=numel(mpre)-1:-1:1
            mpre(i)=max(mpre(i),mpre(i+1));
    end
    i=find(mrec(2:end)~=mrec(1:end-1))+1;
    ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
    '''
    mrec = [0.0] + recall + [1.0]
    mpre = [0.0] + precision + [0.0]
    # Make precision monotonically non-increasing (upper envelope).
    for i in range(len(mpre)-2, -1,-1):
        mpre[i] = max(mpre[i],mpre[i+1])
    idx = [i+1 for i in range(0,len(mrec)-1) if mrec[i+1] != mrec[i]]
    AP2012 = 0.0
    for i in idx:
        AP2012 +=( mrec[i]-mrec[i-1])*mpre[i]
    '''
    PASCAL VOC 2007 devkit
    ap=0;
    for t=0:0.1:1
        p=max(prec(rec>=t));
        if isempty(p)
            p=0;
        end
        ap=ap+p/11;
    end
    '''
    AP2007 = 0.0
    for t in range(0,11,1):
        idx = [i for i in range(0,len(recall)) if recall[i] >= float(t)/10.0]
        if len(idx) != 0:
            p = max( [precision[i] for i in idx] )
        else:
            p = 0.0
        AP2007 += p/11
    print 'AP2012:',AP2012
    print 'AP2007:',AP2007
    output.write('0 '+str(AP2007))
    output.close()
def computePrecAt(tp,K):
    """Print precision@k for each cutoff k in K (tp is 0/1, score-ordered)."""
    import numpy as np
    print 'Prec@K',
    for k in K:
        print '(',str(k),':',np.sum(tp[0:k])/float(k),')',
    print ''
def bigOverlap(box, gt):
    """1.0 when `box` loosely covers `gt` (overlap > 0.5) yet IoU < 0.5,
    i.e. the detection is much larger than the object; else 0.0."""
    loose = ldet.overlap(box, gt) > 0.5
    tight = ldet.IoU(box, gt) < 0.5
    return 1.0 if (loose and tight) else 0.0
# Main Program
if __name__ == "__main__":
    params = cu.loadParams("overlap groundTruth detections output")
    indexData = [x.split() for x in open(params['groundTruth'])]
    detectionsData = [x.split() for x in open(params['detections'])]
    overlapLimit = 1.0
    # The 'overlap' parameter is a mode prefix followed by a threshold:
    # big/tight/inside use custom measures; OV/IN switch measure only;
    # a bare number means plain IoU.
    if params['overlap'].startswith('big'):
        minOverlap = float(params['overlap'].replace('big',''))
        overlapMeasure = lambda x,y: np.exp( -( (1.0-ldet.overlap(x,y))**2 + (0.25-ldet.IoU(x,y))**2 ) )
        #overlapMeasure = bigOverlap
    elif params['overlap'].startswith('tight'):
        minOverlap = float(params['overlap'].replace('tight',''))
        overlapMeasure = ldet.IoU
    elif params['overlap'].startswith('inside'):
        minOverlap = float(params['overlap'].replace('inside',''))
        overlapMeasure = lambda x,y: np.exp( -( (1.0-ldet.overlap(y,x))**2 + (0.25-ldet.IoU(x,y))**2 ) )
    elif params['overlap'].startswith('OV'):
        overlapMeasure = ldet.overlap
        minOverlap = float(params['overlap'].replace('OV',''))
    elif params['overlap'].startswith('IN'):
        overlapMeasure = inside
        minOverlap = float(params['overlap'].replace('IN',''))
    else:
        overlapMeasure = ldet.IoU
        minOverlap = float(params['overlap'])
    groundTruth = loadGroundTruthAnnotations(indexData)
    numPositives = len(indexData)
    print 'Annotated images:',len(groundTruth)
    print 'Ground Truth Bounding Boxes:',len(indexData)
    detections = loadDetections(detectionsData)
    results = evaluateDetections(groundTruth,detections,minOverlap,params['output'],overlapMeasure,allowDuplicates=True,supOverlap=overlapLimit)
    computePrecisionRecall(numPositives,results['tp'],results['fp'],params['output'])
    computePrecAt(results['tp'],[20,50,100,200,300,400,500])
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page, create_title
from cms.apphook_pool import apphook_pool
from cms.appresolver import (applications_page_check, clear_app_resolvers,
get_app_patterns)
from cms.test_utils.testcases import CMSTestCase, SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.tests.menu_utils import DumbPageLanguageUrl
from cms.utils.i18n import force_language
from django.contrib.auth.models import User
from django.core.urlresolvers import clear_url_caches, reverse
import sys
from cms.models.pagemodel import Page
APP_NAME = 'SampleApp'
NS_APP_NAME = 'NamespacedApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
def setUp(self):
clear_app_resolvers()
clear_url_caches()
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
self.reload_urls()
def tearDown(self):
clear_app_resolvers()
clear_url_caches()
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
self.reload_urls()
def reload_urls(self):
from django.conf import settings
url_modules = [
'cms.urls',
# TODO: Add here intermediary modules which may
# include() the 'cms.urls' if it isn't included
# directly in the root urlconf.
# '...',
'cms.test_utils.project.second_cms_urls_for_apphook_tests',
settings.ROOT_URLCONF,
]
clear_app_resolvers()
clear_url_caches()
for module in url_modules:
if module in sys.modules:
del sys.modules[module]
    def create_base_structure(self, apphook, title_langs, reverse_id=None):
        """Create home -> child -> apphooked grandchild pages in en/de.

        Returns the published grandchild's title object for a single
        language, or a list of title objects when title_langs is a list.
        """
        apphook_pool.clear()
        superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True)
        create_title('de', page.get_title(), page)
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, published=True, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, published=True, parent=child_page, apphook=apphook,
                                       reverse_id=reverse_id)
        create_title("de", child_child_page.get_title(), child_child_page, apphook=apphook)
        child_child_page.publish()
        # publisher_public is set to draft on publish, issue with onetoone reverse
        child_child_page = self.reload(child_child_page)
        if isinstance(title_langs, basestring):
            titles = child_child_page.publisher_public.get_title_obj(title_langs)
        else:
            titles = [child_child_page.publisher_public.get_title_obj(l) for l in title_langs]
        self.reload_urls()
        return titles
def test_explicit_apphooks(self):
"""
Test explicit apphook loading with the CMS_APPHOOKS setting.
"""
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
with SettingsOverride(CMS_APPHOOKS=apphooks):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
apphook_pool.clear()
def test_implicit_apphooks(self):
"""
Test implicit apphook loading with INSTALLED_APPS + cms_app.py
"""
apps = ['cms.test_utils.project.sampleapp']
with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 3)
self.assertIn(NS_APP_NAME, app_names)
self.assertIn(APP_NAME, app_names)
apphook_pool.clear()
    def test_apphook_on_root(self):
        """An apphooked root page renders the app; a sibling page does not."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            blank_page = create_page("not-apphooked-page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, apphook="", slug='blankapp')
            english_title = page.title_set.all()[0]
            self.assertEquals(english_title.language, 'en')
            create_title("de", "aphooked-page-de", page, apphook="SampleApp")
            self.assertTrue(page.publish())
            self.assertTrue(blank_page.publish())
            with force_language("en"):
                response = self.client.get(self.get_pages_root())
                self.assertTemplateUsed(response, 'sampleapp/home.html')
                # The non-apphooked sibling must fall back to the page template.
                response = self.client.get('/en/blankapp/')
                self.assertTemplateUsed(response, 'nav_playground.html')
            apphook_pool.clear()
    def test_apphook_on_root_reverse(self):
        """reverse() for an apphooked root page must not produce '//' urls."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            create_title("de", "aphooked-page-de", page, apphook="SampleApp")
            self.assertTrue(page.publish())
            self.reload_urls()
            self.assertFalse(reverse('sample-settings').startswith('//'))
            apphook_pool.clear()
def test_get_page_for_apphook(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
with force_language("en"):
path = reverse('sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, en_title.title)
with force_language("de"):
path = reverse('sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'de'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash and language prefix
self.assertEquals(attached_to_page.pk, de_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, de_title.title)
apphook_pool.clear()
def test_get_root_page_for_apphook_with_instance_namespace(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
self.reload_urls()
with force_language("en"):
path = reverse('namespaced_app_ns:sample-root')
path_instance = reverse('instance_ns:sample-root')
self.assertEquals(path, path_instance)
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
apphook_pool.clear()
def test_get_child_page_for_apphook_with_instance_namespace(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
with force_language("en"):
path = reverse('namespaced_app_ns:sample-settings')
path_instance1 = reverse('instance_ns:sample-settings')
path_instance2 = reverse('namespaced_app_ns:sample-settings', current_app='instance_ns')
self.assertEquals(path, path_instance1)
self.assertEquals(path, path_instance2)
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page_id)
apphook_pool.clear()
def test_get_sub_page_for_apphook_with_implicit_current_app(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
en_title = self.create_base_structure(NS_APP_NAME, 'en')
with force_language("en"):
path = reverse('namespaced_app_ns:current-app')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/app.html')
self.assertContains(response, 'namespaced_app_ns')
self.assertContains(response, path)
apphook_pool.clear()
def test_get_sub_page_for_apphook_with_explicit_current_app(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
with force_language("en"):
path = reverse('namespaced_app_ns:current-app')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/app.html')
self.assertContains(response, 'instance_ns')
self.assertContains(response, path)
apphook_pool.clear()
def test_include_urlconf(self):
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
self.create_base_structure(APP_NAME, 'en')
path = reverse('extra_second')
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
path = reverse('extra_first')
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test urlconf")
with force_language("de"):
path = reverse('extra_first')
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test urlconf")
with force_language("de"):
path = reverse('extra_second')
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
apphook_pool.clear()
def test_apphook_breaking_under_home_with_new_path_caching(self):
with SettingsOverride(CMS_PERMISSION=False):
home = create_page("home", "nav_playground.html", "en", published=True)
child = create_page("child", "nav_playground.html", "en", published=True, parent=home)
# not-home is what breaks stuff, because it contains the slug of the home page
not_home = create_page("not-home", "nav_playground.html", "en", published=True, parent=child)
create_page("subchild", "nav_playground.html", "en", published=True, parent=not_home, apphook='SampleApp')
with force_language("en"):
self.reload_urls()
urlpatterns = get_app_patterns()
resolver = urlpatterns[0]
url = resolver.reverse('sample-root')
self.assertEqual(url, 'child/not-home/subchild/')
def test_apphook_urlpattern_order(self):
# this one includes the actual cms.urls, so it can be tested if
# they are loaded in the correct order (the cms page pattern must be last)
# (the other testcases replicate the inclusion code and thus don't test this)
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
self.create_base_structure(APP_NAME, 'en')
path = reverse('extra_second')
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
def test_apphooks_receive_url_params(self):
# make sure that urlparams actually reach the apphook views
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
self.create_base_structure(APP_NAME, 'en')
path = reverse('sample-params', kwargs=dict(my_params='is-my-param-really-in-the-context-QUESTIONMARK'))
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, 'my_params: is-my-param-really-in-the-context-QUESTIONMARK')
def test_multiple_apphooks(self):
# test for #1538
with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.third_urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
home_page = create_page("home", "nav_playground.html", "en", created_by=superuser, published=True,)
apphook1_page = create_page("apphook1-page", "nav_playground.html", "en",
created_by=superuser, published=True, apphook="SampleApp")
apphook2_page = create_page("apphook2-page", "nav_playground.html", "en",
created_by=superuser, published=True, apphook="SampleApp2")
reverse('sample-root')
reverse('sample2-root')
apphook_pool.clear()
class ApphooksPageLanguageUrlTestCase(SettingsOverrideTestCase):
    """Checks the page_language_url template tag against an apphooked page."""
    settings_overrides = {'ROOT_URLCONF': 'cms.test_utils.project.second_urls_for_apphook_tests'}
    def setUp(self):
        # Start from a clean URL state: drop cached app resolvers/URLs and
        # force the apphook urls module to be re-imported.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
    def tearDown(self):
        # Mirror setUp so later tests do not inherit this test's URL caches.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
    def test_page_language_url_for_apphook(self):
        # Build home -> child_page -> child_child_page, the deepest page
        # carrying the SampleApp apphook, with 'en' and 'de' titles.
        apphook_pool.clear()
        superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser)
        create_title('de', page.get_title(), page)
        page.publish()
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish()
        child_child_page = create_page("child_child_page", "nav_playground.html",
            "en", created_by=superuser, parent=child_page, apphook='SampleApp')
        create_title("de", '%s_de' % child_child_page.get_title(), child_child_page, apphook='SampleApp')
        child_child_page.publish()
        # publisher_public is set to draft on publish, issue with onetoone reverse
        child_child_page = self.reload(child_child_page)
        with force_language("en"):
            path = reverse('extra_first')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            request.current_page = child_child_page
            fake_context = {'request': request}
            tag = DumbPageLanguageUrl()
            # The tag must keep the apphook sub-path ('extra_1/') while
            # swapping the language prefix and localized slugs.
            output = tag.get_context(fake_context, 'en')
            url = output['content']
            self.assertEqual(url, '/en/child_page/child_child_page/extra_1/')
            output = tag.get_context(fake_context, 'de')
            url = output['content']
            # look the extra "_de"
            self.assertEqual(url, '/de/child_page/child_child_page_de/extra_1/')
            # 'fr' has no title created above; the asserted URL shows the
            # 'en' slugs are reused for it.
            output = tag.get_context(fake_context, 'fr')
            url = output['content']
            self.assertEqual(url, '/fr/child_page/child_child_page/extra_1/')
        apphook_pool.clear()
|
|
import logging
import numpy as np
import scipy.stats
import scipy
from unittest import TestCase
import gspn.distributions as distributions
logger=logging.getLogger("test_distributions")
def fractional_error(a, b):
    """Return |a-b|/|a|: the error of b relative to the reference value a."""
    return np.abs((a-b)/a)
def check_fractional_error(a, b, tolerance, message):
    """Return True when b is within `tolerance` fractional error of a.

    Otherwise log an error naming the compared quantity (`message`) and
    return False.
    """
    if fractional_error(a, b)>tolerance:
        # BUG FIX: .format() previously bound only to the second string of a
        # "+"-concatenation, so the "{0}" placeholder was emitted verbatim
        # and `message` never appeared in the log line. Format the whole
        # (implicitly concatenated) string instead.
        logger.error("Fractional error of {0} too large. Expected "
                     "{1} but found {2}".format(message, a, b))
        return False
    return True
class TestExponential(TestCase):
    """Statistical checks for distributions.ExponentialDistribution."""
    def test_average(self):
        """
        This tests the theoretical mean and standard deviation.
        """
        lam=0.5
        te=0.3
        rng=np.random.RandomState()
        ed=distributions.ExponentialDistribution(lam, te)
        cnt=10000
        res=np.zeros(cnt)
        for i in range(cnt):
            res[i]=ed.sample(te, rng)
        # Estimator of lambda for samples shifted by the enabling time te.
        lambda_estimator=1/(np.average(res)-te)
        logger.debug("Exponential estimator {0} lambda {1}".format(
            lambda_estimator, lam))
        # Two-sided bound on the estimator using a 1.96/sqrt(cnt) margin
        # (normal-approximation 95% interval — TODO confirm derivation).
        too_low=lam < lambda_estimator*(1-1.96/np.sqrt(cnt))
        self.assertTrue(not too_low)
        too_high=lam > lambda_estimator*(1+1.96/np.sqrt(cnt))
        self.assertTrue(not too_high)
        # Variance of the shifted samples should be lambda^-2.
        variance=np.var(res-te)
        check_fractional_error(variance, np.power(lam, -2), 0.01, "variance")
    def test_integrals(self):
        """
        Are the hazard integral and its inverse really inverses?
        """
        tol=0.001
        ed=distributions.ExponentialDistribution(0.5, 3)
        for x in np.linspace(3, 7, num=5):
            # implicit_hazard_integral should invert hazard_integral.
            xa=ed.hazard_integral(3, x)
            self.assertTrue(tol>abs(x-ed.implicit_hazard_integral(xa, 3)))
    def test_samples(self):
        """
        Sample from the scipy distribution and from ours. Compare.
        """
        cnt=10000
        rng=np.random.RandomState()
        samples=np.zeros(cnt)
        lam=2.0
        te=0.7
        now=1.1
        exp_dist=distributions.ExponentialDistribution(lam, te)
        for i in range(cnt):
            samples[i]=exp_dist.sample(now, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        # Reference: scipy exponential shifted to start at `now`.
        system=scipy.stats.expon.rvs(scale=1./lam, loc=now, size=cnt)
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Exponential test_samples ks {0}".format(ks_fit))
        # 1.63 is presumably a KS-statistic critical value — confirm.
        self.assertTrue(ks_fit<1.63)
    def test_anderson_samples(self):
        """
        Sample from the scipy distribution and from ours using Anderson's
        method.
        """
        cnt=10000
        rng=np.random.RandomState()
        lam=2.0
        te=0.7
        now=1.1
        exp_dist=distributions.ExponentialDistribution(lam, te)
        samples=distributions.anderson_sample_tester(exp_dist, now, cnt, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        system=scipy.stats.expon.rvs(scale=1./lam, loc=now, size=cnt)
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Exponential test_samples ks {0}".format(ks_fit))
        self.assertTrue(ks_fit<1.63)
class TestWeibull(TestCase):
def test_samples(self):
have_a_test=False
self.assertTrue(have_a_test)
class TestGamma(TestCase):
    """Statistical checks for distributions.GammaDistribution."""
    def test_theoretical(self):
        """
        Test theoretical Gamma: sample mean, variance, and skew.
        """
        alpha=1.34
        beta=0.18
        theta=1.0/beta
        te=0.0
        now=0.0
        tol=0.0001
        rng=np.random.RandomState()
        ed=distributions.GammaDistribution(alpha, beta, te)
        cnt=10000
        res=np.zeros(cnt)
        for i in range(cnt):
            res[i]=ed.sample(now, rng)
        avg=np.average(res)
        # The calculation of the average varies a lot, but that makes
        # sense for a Gamma. What's a better statistic of the samples?
        logger.debug("Gamma avg {0} theory {1}".format(avg, alpha*theta))
        self.assertTrue(0.01>abs(avg-alpha*theta))
        variance=np.var(res)
        # BUG FIX: the log statement referenced the undefined name "var"
        # (NameError); it must use the "variance" local computed above.
        logger.debug("Gamma variance {0} theory {1}".format(
            variance, alpha*theta**2))
        # BUG FIX: unittest.TestCase has no assert_true(); use assertTrue.
        self.assertTrue(tol>abs(variance-alpha*theta**2))
        skew=scipy.stats.skew(res)
        logger.debug("Gamma skew {0} theory {1}".format(
            skew, 2/np.sqrt(alpha)))
        self.assertTrue(tol>abs(skew-2/np.sqrt(alpha)))
    def test_integrals(self):
        """
        Are the Gamma hazard integral and its inverse really inverses?
        """
        # (Unused locals theta and rng from the original were removed.)
        alpha=1.34
        beta=0.18
        te=0.2
        now=1.1
        tol=0.0001
        ed=distributions.GammaDistribution(alpha, beta, te)
        for x in np.linspace(now, 2*now, num=5):
            xa=ed.hazard_integral(now, x)
            self.assertTrue(tol>abs(x-ed.implicit_hazard_integral(xa, now)))
    def test_samples(self):
        """
        Sample from the Gamma scipy distribution and from ours. Compare.
        """
        cnt=10000
        rng=np.random.RandomState()
        samples=np.zeros(cnt)
        alpha=1.34
        beta=0.18
        te=0.2
        now=1.1
        exp_dist=distributions.GammaDistribution(alpha, beta, te)
        for i in range(cnt):
            samples[i]=exp_dist.sample(now, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        # Reference samples: redraw from scipy's Gamma until the value
        # exceeds `now`, matching our conditional samples.
        system=np.zeros(cnt)
        for i in range(cnt):
            v=now-1
            while v<now:
                v=scipy.stats.gamma.rvs(a=alpha, scale=1.0/beta, loc=0, size=1)
            system[i]=v
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Gamma test_samples ks {0}".format(ks_fit))
        self.assertTrue(ks_fit<1.63)
    def test_anderson_samples(self):
        """
        Sample from the Gamma scipy distribution and from ours using Anderson's
        method.
        """
        cnt=10000
        rng=np.random.RandomState()
        alpha=1.34
        beta=0.18
        te=0.2
        now=1.1
        exp_dist=distributions.GammaDistribution(alpha, beta, te)
        samples=distributions.anderson_sample_tester(exp_dist, now, cnt, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        system=np.zeros(cnt)
        for i in range(cnt):
            v=now-1
            while v<now:
                v=scipy.stats.gamma.rvs(a=alpha, scale=1.0/beta, loc=0, size=1)
            system[i]=v
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Gamma test_samples ks {0}".format(ks_fit))
        self.assertTrue(ks_fit<1.63)
class TestUniform(TestCase):
    """Statistical checks for distributions.UniformDistribution."""
    def test_theoretical(self):
        """
        Test theoretical Uniform.
        """
        a=1.0
        b=2.0
        te=0.5
        now=2.3
        tol=0.0001
        rng=np.random.RandomState()
        ed=distributions.UniformDistribution(a, b, te)
        cnt=10000
        res=np.zeros(cnt)
        for i in range(cnt):
            res[i]=ed.sample(now, rng)
        avg=np.average(res)
        # Samples are conditioned on being past `now`, so the expected
        # mean is the midpoint of [now, te+b].
        logger.debug("Uniform avg {0} theory {1}".format(avg, 0.5*(b+te+now)))
        self.assertTrue(0.01>abs(avg-0.5*(b+te+now)))
    def test_integrals(self):
        """
        Are the Uniform hazard integral and its inverse really inverses?
        """
        a=1.0
        b=2.0
        te=0.5
        now=2.3
        tol=0.0001
        rng=np.random.RandomState()
        ed=distributions.UniformDistribution(a, b, te)
        # Stay strictly below the upper support bound te+b, where the
        # hazard diverges.
        for t in np.linspace(now, te+b-0.00001, num=5):
            xa=ed.hazard_integral(now, t)
            t_ish=ed.implicit_hazard_integral(xa, now)
            logger.debug("Uniform integrals {0} ish {1} xa {2}".format(
                t, t_ish, xa))
            self.assertTrue(tol>abs(t-t_ish))
    def test_samples(self):
        """
        Sample from the Uniform scipy distribution and from ours. Compare.
        """
        cnt=10000
        rng=np.random.RandomState()
        samples=np.zeros(cnt)
        a=1.0
        b=2.0
        te=0.5
        now=2.3
        exp_dist=distributions.UniformDistribution(a, b, te)
        for i in range(cnt):
            samples[i]=exp_dist.sample(now, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        # Reference: uniform on [now, te+b], i.e. the support remaining
        # after conditioning on `now`.
        system=scipy.stats.uniform.rvs(loc=now, scale=b+te-now, size=cnt)
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Uniform test_samples ks {0}".format(ks_fit))
        self.assertTrue(ks_fit<1.63)
    def test_anderson_samples(self):
        """
        Sample from the Uniform scipy distribution and from ours using Anderson's
        method.
        """
        cnt=10000
        rng=np.random.RandomState()
        a=1.0
        b=2.0
        te=0.5
        now=2.3
        exp_dist=distributions.UniformDistribution(a, b, te)
        samples=distributions.anderson_sample_tester(exp_dist, now, cnt, rng)
        emp_dist=distributions.EmpiricalDistribution(samples)
        system=scipy.stats.uniform.rvs(loc=now, scale=b+te-now, size=cnt)
        system_dist=distributions.EmpiricalDistribution(system)
        ks_fit=emp_dist.compare_empirical(system_dist)
        logger.debug("Uniform test_samples ks {0}".format(ks_fit))
        self.assertTrue(ks_fit<1.63)
|
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for performance_encoder_decoder."""
import math
from absl.testing import absltest
from note_seq import performance_encoder_decoder
from note_seq import performance_lib
from note_seq.performance_encoder_decoder import ModuloPerformanceEventSequenceEncoderDecoder
from note_seq.performance_encoder_decoder import NotePerformanceEventSequenceEncoderDecoder
from note_seq.performance_encoder_decoder import PerformanceModuloEncoding
from note_seq.performance_lib import PerformanceEvent
# Short aliases so the expected-value tables below stay readable.
cos, sin, pi = math.cos, math.sin, math.pi
class PerformanceOneHotEncodingTest(absltest.TestCase):
  """Index round-trip and step-count tests for PerformanceOneHotEncoding."""
  def setUp(self):
    self.enc = performance_encoder_decoder.PerformanceOneHotEncoding(
        num_velocity_bins=16)
  def testEncodeDecode(self):
    # Each pair maps a PerformanceEvent to its expected one-hot index.
    # Per the values below: NOTE_ON occupies 0-127, NOTE_OFF 128-255,
    # TIME_SHIFT 256-355, VELOCITY 356-371.
    expected_pairs = [
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_ON, event_value=60), 60),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_ON, event_value=0), 0),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_ON, event_value=127), 127),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_OFF, event_value=72), 200),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_OFF, event_value=0), 128),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_OFF, event_value=127), 255),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=10), 265),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=1), 256),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=100), 355),
        (PerformanceEvent(
            event_type=PerformanceEvent.VELOCITY, event_value=5), 360),
        (PerformanceEvent(
            event_type=PerformanceEvent.VELOCITY, event_value=1), 356),
        (PerformanceEvent(
            event_type=PerformanceEvent.VELOCITY, event_value=16), 371)
    ]
    # Check encode and decode in both directions for every pair.
    for expected_event, expected_index in expected_pairs:
      index = self.enc.encode_event(expected_event)
      self.assertEqual(expected_index, index)
      event = self.enc.decode_event(expected_index)
      self.assertEqual(expected_event, event)
  def testEventToNumSteps(self):
    # Only TIME_SHIFT events consume steps; their value is the step count.
    self.assertEqual(0, self.enc.event_to_num_steps(
        PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=60)))
    self.assertEqual(0, self.enc.event_to_num_steps(
        PerformanceEvent(event_type=PerformanceEvent.NOTE_OFF, event_value=67)))
    self.assertEqual(0, self.enc.event_to_num_steps(
        PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=10)))
    self.assertEqual(1, self.enc.event_to_num_steps(
        PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=1)))
    self.assertEqual(45, self.enc.event_to_num_steps(
        PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=45)))
    self.assertEqual(100, self.enc.event_to_num_steps(
        PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=100)))
class PerformanceModuloEncodingTest(absltest.TestCase):
  """Test class for PerformanceModuloEncoding."""
  def setUp(self):
    self._num_velocity_bins = 16
    self._max_shift_steps = performance_lib.DEFAULT_MAX_SHIFT_STEPS
    self.enc = PerformanceModuloEncoding(
        num_velocity_bins=self._num_velocity_bins,
        max_shift_steps=self._max_shift_steps)
    # Input width: NOTE_ON + NOTE_OFF pitch slots plus one velocity and
    # one time-shift slot.
    self._expected_input_size = (
        2 * performance_encoder_decoder.MODULO_PITCH_ENCODER_WIDTH +
        performance_encoder_decoder.MODULO_VELOCITY_ENCODER_WIDTH +
        performance_encoder_decoder.MODULO_TIME_SHIFT_ENCODER_WIDTH)
    self._expected_num_classes = (self._num_velocity_bins +
                                  self._max_shift_steps +
                                  (performance_lib.MAX_MIDI_PITCH -
                                   performance_lib.MIN_MIDI_PITCH + 1) * 2)
  def testInputSize(self):
    self.assertEqual(self._expected_input_size, self.enc.input_size)
  def testEmbedPitchClass(self):
    # The following are true only for semitone_steps = 1.
    # Pitch classes 0-11 map onto the unit circle in pi/6 increments.
    expected_pairs = [
        (0, (cos(0.0), sin(0.0))),
        (1, (cos(pi / 6.0), sin(pi / 6.0))),
        (2, (cos(pi / 3.0), sin(pi / 3.0))),
        (3, (cos(pi / 2.0), sin(pi / 2.0))),
        (4, (cos(2.0 * pi / 3.0), sin(2.0 * pi / 3.0))),
        (5, (cos(5.0 * pi / 6.0), sin(5.0 * pi / 6.0))),
        (6, (cos(pi), sin(pi))),
        (7, (cos(7.0 * pi / 6.0), sin(7.0 * pi / 6.0))),
        (8, (cos(4.0 * pi / 3.0), sin(4.0 * pi / 3.0))),
        (9, (cos(3.0 * pi / 2.0), sin(3.0 * pi / 2.0))),
        (10, (cos(5.0 * pi / 3.0), sin(5.0 * pi / 3.0))),
        (11, (cos(11.0 * pi / 6.0), sin(11.0 * pi / 6.0)))]
    for note, expected_embedding in expected_pairs:
      actual_embedding = self.enc.embed_pitch_class(note)
      self.assertEqual(actual_embedding[0], expected_embedding[0])
      self.assertEqual(actual_embedding[1], expected_embedding[1])
  def testEmbedNote(self):
    # The following are true only for semitone_steps = 1.
    # Notes sweep half the circle per 72 semitones (angle pi*note/72).
    base = 72.0
    expected_pairs = [
        (0, (cos(0.0), sin(0.0))),
        (13, (cos(pi * 13.0 / base), sin(pi * 13.0 / base))),
        (26, (cos(pi * 26.0 / base), sin(pi * 26.0 / base))),
        (39, (cos(pi * 39.0 / base), sin(pi * 39.0 / base))),
        (52, (cos(pi * 52.0 / base), sin(pi * 52.0 / base))),
        (65, (cos(pi * 65.0 / base), sin(pi * 65.0 / base))),
        (78, (cos(pi * 78.0 / base), sin(pi * 78.0 / base))),
        (91, (cos(pi * 91.0 / base), sin(pi * 91.0 / base))),
        (104, (cos(pi * 104.0 / base), sin(pi * 104.0 / base))),
        (117, (cos(pi * 117.0 / base), sin(pi * 117.0 / base))),
        (130, (cos(pi * 130.0 / base), sin(pi * 130.0 / base))),
        (143, (cos(pi * 143.0 / base), sin(pi * 143.0 / base)))]
    for note, expected_embedding in expected_pairs:
      actual_embedding = self.enc.embed_note(note)
      self.assertEqual(actual_embedding[0], expected_embedding[0])
      self.assertEqual(actual_embedding[1], expected_embedding[1])
  def testEmbedTimeShift(self):
    # The following are true only for semitone_steps = 1.
    # Time shifts wrap the full circle once per max_shift_steps.
    base = self._max_shift_steps  # 100
    expected_pairs = [
        (0, (cos(0.0), sin(0.0))),
        (2, (cos(2.0 * pi * 2.0 / base), sin(2.0 * pi * 2.0 / base))),
        (5, (cos(2.0 * pi * 5.0 / base), sin(2.0 * pi * 5.0 / base))),
        (13, (cos(2.0 * pi * 13.0 / base), sin(2.0 * pi * 13.0 / base))),
        (20, (cos(2.0 * pi * 20.0 / base), sin(2.0 * pi * 20.0 / base))),
        (45, (cos(2.0 * pi * 45.0 / base), sin(2.0 * pi * 45.0 / base))),
        (70, (cos(2.0 * pi * 70.0 / base), sin(2.0 * pi * 70.0 / base))),
        (99, (cos(2.0 * pi * 99.0 / base), sin(2.0 * pi * 99.0 / base)))]
    for time_shift, expected_embedding in expected_pairs:
      actual_embedding = self.enc.embed_time_shift(time_shift)
      self.assertEqual(actual_embedding[0], expected_embedding[0])
      self.assertEqual(actual_embedding[1], expected_embedding[1])
  def testEmbedVelocity(self):
    # The following are true only for semitone_steps = 1.
    # Velocities wrap the full circle once per num_velocity_bins.
    base = self._num_velocity_bins  # 16
    expected_pairs = [
        (0, (cos(0.0), sin(0.0))),
        (2, (cos(2.0 * pi * 2.0 / base), sin(2.0 * pi * 2.0 / base))),
        (5, (cos(2.0 * pi * 5.0 / base), sin(2.0 * pi * 5.0 / base))),
        (7, (cos(2.0 * pi * 7.0 / base), sin(2.0 * pi * 7.0 / base))),
        (10, (cos(2.0 * pi * 10.0 / base), sin(2.0 * pi * 10.0 / base))),
        (13, (cos(2.0 * pi * 13.0 / base), sin(2.0 * pi * 13.0 / base))),
        (15, (cos(2.0 * pi * 15.0 / base), sin(2.0 * pi * 15.0 / base)))]
    for velocity, expected_embedding in expected_pairs:
      actual_embedding = self.enc.embed_velocity(velocity)
      self.assertEqual(actual_embedding[0], expected_embedding[0])
      self.assertEqual(actual_embedding[1], expected_embedding[1])
  def testEncodeModuloEvent(self):
    # Expected tuples use input-vector offsets: NOTE_ON at 0, NOTE_OFF at
    # 5, TIME_SHIFT at 10, VELOCITY at 13; TIME_SHIFT and VELOCITY values
    # are encoded zero-based (value - 1).
    expected_pairs = [
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=60),
         (0, PerformanceEvent.NOTE_ON, 60)),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=0),
         (0, PerformanceEvent.NOTE_ON, 0)),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=127),
         (0, PerformanceEvent.NOTE_ON, 127)),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_OFF, event_value=72),
         (5, PerformanceEvent.NOTE_OFF, 72)),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_OFF, event_value=0),
         (5, PerformanceEvent.NOTE_OFF, 0)),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_OFF, event_value=127),
         (5, PerformanceEvent.NOTE_OFF, 127)),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=10),
         (10, PerformanceEvent.TIME_SHIFT, 9)),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=1),
         (10, PerformanceEvent.TIME_SHIFT, 0)),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=100),
         (10, PerformanceEvent.TIME_SHIFT, 99)),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=5),
         (13, PerformanceEvent.VELOCITY, 4)),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=1),
         (13, PerformanceEvent.VELOCITY, 0)),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=16),
         (13, PerformanceEvent.VELOCITY, 15)),
    ]
    # expected_encoded_modulo_event is of the following form:
    # (offset, event_type, value) — NOTE: the previous comment claimed a
    # five-element (offset, encoder_width, event_type, value, bins) form,
    # which does not match the three-element tuples above.
    for event, expected_encoded_modulo_event in expected_pairs:
      actual_encoded_modulo_event = self.enc.encode_modulo_event(event)
      self.assertEqual(actual_encoded_modulo_event,
                       expected_encoded_modulo_event)
class ModuloPerformanceEventSequenceEncoderTest(absltest.TestCase):
  """Test class for ModuloPerformanceEventSequenceEncoder.
  ModuloPerformanceEventSequenceEncoderDecoder is tightly coupled with the
  PerformanceModuloEncoding, and PerformanceOneHotEncoding classes. As a result,
  in the test set up, the test object is initialized with one of each objects
  and tested accordingly. Since this class only modifies the input encoding
  of performance events, and otherwise its treatment of labels is the same as
  OneHotEventSequenceEncoderDecoder, the events_to_labels(), and
  class_index_to_event() methods of the class are not tested.
  """
  def setUp(self):
    self._num_velocity_bins = 32
    self._max_shift_steps = 100
    self.enc = ModuloPerformanceEventSequenceEncoderDecoder(
        num_velocity_bins=self._num_velocity_bins,
        max_shift_steps=self._max_shift_steps)
    # Input width: NOTE_ON + NOTE_OFF pitch slots plus one velocity and
    # one time-shift slot.
    self._expected_input_size = (
        2 * performance_encoder_decoder.MODULO_PITCH_ENCODER_WIDTH +
        performance_encoder_decoder.MODULO_VELOCITY_ENCODER_WIDTH +
        performance_encoder_decoder.MODULO_TIME_SHIFT_ENCODER_WIDTH)
    self._expected_num_classes = (self._num_velocity_bins +
                                  self._max_shift_steps +
                                  2 * (performance_lib.MAX_MIDI_PITCH -
                                       performance_lib.MIN_MIDI_PITCH + 1))
  def testInputSize(self):
    self.assertEqual(self._expected_input_size, self.enc.input_size)
  def testNumClasses(self):
    self.assertEqual(self._expected_num_classes, self.enc.num_classes)
  def testDefaultEventLabel(self):
    # Per the expression below, the default label is the last label before
    # the velocity classes.
    label = self._expected_num_classes - self._num_velocity_bins - 1
    self.assertEqual(label, self.enc.default_event_label)
  def testEventsToInput(self):
    num_shift_bins = self._max_shift_steps
    num_velocity_bins = self._num_velocity_bins
    # Angle increments used in the expected 16-float inputs below; the
    # vector layout per the rows is [NOTE_ON flag, slow cos/sin, fast
    # cos/sin, NOTE_OFF flag, slow cos/sin, fast cos/sin, TIME_SHIFT flag,
    # cos/sin, VELOCITY flag, cos/sin].
    slow_base = 2.0 * pi / 144.0
    fast_base = 2.0 * pi / 12.0
    shift_base = 2.0 * pi / num_shift_bins
    velocity_base = 2.0 * pi / num_velocity_bins
    expected_pairs = [
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=60),
         [1.0, cos(60.0 * slow_base), sin(60.0 * slow_base),
          cos(60.0 * fast_base), sin(60.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=0),
         [1.0, cos(0.0 * slow_base), sin(0.0 * slow_base),
          cos(0.0 * fast_base), sin(0.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_ON, event_value=127),
         [1.0, cos(127.0 * slow_base), sin(127.0 * slow_base),
          cos(127.0 * fast_base), sin(127.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_OFF, event_value=72),
         [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
          cos(72.0 * slow_base), sin(72.0 * slow_base),
          cos(72.0 * fast_base), sin(72.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(event_type=PerformanceEvent.NOTE_OFF, event_value=0),
         [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
          cos(0.0 * slow_base), sin(0.0 * slow_base),
          cos(0.0 * fast_base), sin(0.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(
            event_type=PerformanceEvent.NOTE_OFF, event_value=127),
         [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
          cos(127.0 * slow_base), sin(127.0 * slow_base),
          cos(127.0 * fast_base), sin(127.0 * fast_base),
          0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=10),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          1.0, cos(9.0 * shift_base), sin(9.0 * shift_base),
          0.0, 0.0, 0.0]),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=1),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          1.0, cos(0.0 * shift_base), sin(0.0 * shift_base),
          0.0, 0.0, 0.0]),
        (PerformanceEvent(
            event_type=PerformanceEvent.TIME_SHIFT, event_value=100),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          1.0, cos(99.0 * shift_base), sin(99.0 * shift_base),
          0.0, 0.0, 0.0]),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=5),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          0.0, 0.0, 0.0,
          1.0, cos(4.0 * velocity_base), sin(4.0 * velocity_base)]),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=1),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          0.0, 0.0, 0.0,
          1.0, cos(0.0 * velocity_base), sin(0.0 * velocity_base)]),
        (PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=16),
         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
          0.0, 0.0, 0.0,
          1.0, cos(15.0 * velocity_base), sin(15.0 * velocity_base)]),
    ]
    # Feed events one at a time and compare each encoded input elementwise
    # (assertAlmostEqual because the values are trig floats).
    events = []
    position = 0
    for event, expected_encoded_modulo_event in expected_pairs:
      events += [event]
      actual_encoded_modulo_event = self.enc.events_to_input(events, position)
      position += 1
      for i in range(self._expected_input_size):
        self.assertAlmostEqual(expected_encoded_modulo_event[i],
                               actual_encoded_modulo_event[i])
class NotePerformanceEventSequenceEncoderDecoderTest(absltest.TestCase):
  """Round-trip tests for NotePerformanceEventSequenceEncoderDecoder."""
  def setUp(self):
    self.enc = NotePerformanceEventSequenceEncoderDecoder(
        num_velocity_bins=16, max_shift_steps=99, max_duration_steps=500)
    # 99 shift steps pack into 10 segments, 500 duration steps into 20.
    self.assertEqual(10, self.enc.shift_steps_segments)
    self.assertEqual(20, self.enc.duration_steps_segments)
  def testEncodeDecode(self):
    pe = PerformanceEvent
    # Each entry is one "note": (time shift, note-on, velocity, duration).
    performance = [
        (pe(pe.TIME_SHIFT, 0), pe(pe.NOTE_ON, 60),
         pe(pe.VELOCITY, 13), pe(pe.DURATION, 401)),
        (pe(pe.TIME_SHIFT, 55), pe(pe.NOTE_ON, 64),
         pe(pe.VELOCITY, 13), pe(pe.DURATION, 310)),
        (pe(pe.TIME_SHIFT, 99), pe(pe.NOTE_ON, 67),
         pe(pe.VELOCITY, 16), pe(pe.DURATION, 100)),
        (pe(pe.TIME_SHIFT, 0), pe(pe.NOTE_ON, 67),
         pe(pe.VELOCITY, 16), pe(pe.DURATION, 1)),
        (pe(pe.TIME_SHIFT, 0), pe(pe.NOTE_ON, 67),
         pe(pe.VELOCITY, 16), pe(pe.DURATION, 500)),
    ]
    labels = [self.enc.events_to_label(performance, i)
              for i in range(len(performance))]
    # Labels appear to be 6-tuples of (shift segment, shift offset, pitch,
    # velocity bin, duration segment, duration offset) — confirm against
    # NotePerformanceEventSequenceEncoderDecoder.
    expected_labels = [
        (0, 0, 60, 12, 16, 0),
        (5, 5, 64, 12, 12, 9),
        (9, 9, 67, 15, 3, 24),
        (0, 0, 67, 15, 0, 0),
        (0, 0, 67, 15, 19, 24),
    ]
    self.assertEqual(expected_labels, labels)
    inputs = [self.enc.events_to_input(performance, i)
              for i in range(len(performance))]
    for input_ in inputs:
      # One-hot per label component: exactly 6 nonzero input entries.
      self.assertEqual(6, input_.nonzero()[0].shape[0])
    # Decoding every label must reproduce the original performance.
    decoded_performance = [self.enc.class_index_to_event(label, None)
                           for label in labels]
    self.assertEqual(performance, decoded_performance)
if __name__ == '__main__':
  # Run the test suite when executed as a script.
  absltest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
"""The Python interface to the Relay reference interpreter."""
from __future__ import absolute_import
import numpy as np
import tvm._ffi
from tvm.runtime import container, Object
from . import _backend
from .. import _make, analysis
from ... import nd
from ..expr import Tuple, RefCreate, Call, Constant, GlobalVar, const
from ..function import Function
from ..scope_builder import ScopeBuilder
@tvm._ffi.register_object("relay.ConstructorValue")
class ConstructorValue(Object):
    """Runtime value of an ADT constructor application.

    Wraps the C++ `relay.ConstructorValue` node: `tag` identifies the
    constructor, `fields` holds the argument values.
    """

    def __init__(self, tag, fields, constructor):
        # Build the underlying FFI object in place.
        self.__init_handle_by_constructor__(
            _make.ConstructorValue, tag, fields, constructor)
@tvm._ffi.register_object("relay.RefValue")
class RefValue(Object):
    """Runtime value of a Relay reference cell holding `value`."""

    def __init__(self, value):
        # Build the underlying FFI object in place.
        self.__init_handle_by_constructor__(_make.RefValue, value)
def _arg_to_ast(mod, arg):
    """Reflect a Python argument value into a Relay expression.

    Parameters
    ----------
    mod : tvm.IRModule
        Module used to resolve ADT constructor tags.

    arg : object
        NDArray, ADT, tuple, RefValue, ConstructorValue, numpy array,
        Constant, or a plain scalar.

    Returns
    -------
    expr : relay.Expr
        The Relay AST fragment representing ``arg``.
    """
    if isinstance(arg, nd.NDArray):
        # Constants are normalized to CPU storage.
        return Constant(arg.copyto(nd.cpu(0)))
    # ADTs and plain Python tuples were previously two identical branches;
    # both are reflected as Relay tuples.
    if isinstance(arg, (container.ADT, tuple)):
        return Tuple([_arg_to_ast(mod, field) for field in arg])
    if isinstance(arg, RefValue):
        return RefCreate(_arg_to_ast(mod, arg.value))
    if isinstance(arg, ConstructorValue):
        return Call(mod.get_constructor(arg.tag),
                    [_arg_to_ast(mod, field) for field in arg.fields])
    if isinstance(arg, np.ndarray):
        return Constant(nd.array(arg))
    if isinstance(arg, Constant):
        return arg
    # Fall back to wrapping the value as a scalar constant.
    return const(arg)
class Executor(object):
    """An abstract interface for executing Relay programs."""

    def _convert_args(self, expr, args, kwargs):
        """
        Convert the combination of arguments and keyword arguments
        into a sequence of arguments that may be passed to
        a Relay evaluator.

        We first provide all positional arguments, and then attempt
        to fill in the remaining arguments using the keyword arguments. We
        map the keyword arguments to the corresponding parameters, if there
        is an ambiguity between positional and keyword arguments this
        procedure will raise an error.

        Parameters
        ----------
        expr: relay.Expr
            The expression to evaluate

        args: List[tvm.nd.NDArray]
            The arguments to pass to the evaluator.

        kwargs: Dict[str, tvm.NDArrray]
            The keyword arguments to pass to the evaluator.

        Returns:
            args: List[tvm.nd.NDArray]
                The new arguments with all keyword arguments placed in the
                correct slot.
        """
        assert expr is not None

        # Fast path: nothing to merge.
        if not kwargs:
            return args

        if kwargs and not isinstance(expr, Function):
            raise Exception(
                "can only supply keyword parameters for a "
                "relay.Function, found {0}".format(expr)
            )

        params = expr.params
        param_names = [p.name_hint for p in params]
        num_of_args = len(args)

        cargs = list(args)[:]
        for i, name in enumerate(param_names):
            if i < num_of_args:
                # BUG FIX: use membership, not kwargs.get(name) truthiness, so
                # a falsy keyword value (0, None, "") still triggers the
                # duplicate-argument error instead of being silently dropped.
                if name in kwargs:
                    raise Exception(
                        "duplicate argument supplied in "
                        "both positional args (at position: {0}), "
                        "and keyword argument (with name: {1})".format(i, name)
                    )
            else:
                cargs.append(kwargs[name])

        if len(cargs) != len(params):
            # BUG FIX: the original message swapped the two counts; expected
            # is the parameter count, provided is the collected-argument count.
            raise Exception(
                "insufficient arguments, expected "
                "{0}, provided {1}".format(len(params), len(cargs))
            )

        return tuple(cargs)

    def _make_executor(self, expr=None):
        """
        Construct a Python function that implements the evaluation
        of expression.

        Parameters
        ----------
        expr: Optional[relay.Expr]
            The Relay expression to execute.

        Returns
        -------
        executor: function,
            A Python function which implements the behavior of `expr`.
        """
        raise NotImplementedError()

    def evaluate(self, expr=None, binds=None):
        """
        Evaluate a Relay expression on the executor.

        Parameters
        ----------
        expr: Optional[tvm.relay.Expr]
            The expression to evaluate.

        binds: Optional[Map[tvm.relay.Var, tvm.relay.Expr]]
            Additional binding of free variable.

        Returns
        -------
        val : Union[function, Object]
            The evaluation result.
        """
        if binds:
            # Wrap expr in let-bindings for each extra free variable.
            scope_builder = ScopeBuilder()
            for key, value in binds.items():
                scope_builder.let(key, _arg_to_ast(self.mod, value))
            scope_builder.ret(expr)
            expr = scope_builder.get()

        if not expr:
            return self._make_executor()

        if isinstance(expr, Function):
            assert not analysis.free_vars(expr)

        if isinstance(expr, (Function, GlobalVar)):
            return self._make_executor(expr)

        # normal expression evaluated by running a function.
        # TODO(mbs): This should really be type rather than syntax driven.
        func = Function([], expr)
        return self._make_executor(func)()
class Interpreter(Executor):
    """
    Simple interpreter interface.

    Parameters
    ----------
    mod : tvm.IRModule
        The module to support the execution.

    device : Device
        The runtime device to run the code on.

    target : tvm.Target
        The target option to build the function.

    CAUTION: Despite the API the module is prepared upon each call to evaluate
    rather than once in create_executor.
    That is:

    .. code-block:: python

        executor = relay.create_executor(kind="debug", mod=module)
        a = executor.evaluate(expr)(args1)
        b = executor.evaluate(expr)(args2)

    will prepare all the bindings in module twice. For efficiency, try to
    hoist calls to evaluate as high as possible, preferably immediately after
    create_executor:

    .. code-block:: python

        func = relay.create_executor(kind="debug", mod=module).evaluate(expr)
        a = func(args1)
        b = func(args2)
    """

    def __init__(self, mod, device, target):
        self.mod = mod
        self.device = device
        self.target = target

    def _make_executor(self, expr=None):
        """Pre-evaluate ``expr`` and return a closure applying it to args."""
        if expr is None or isinstance(expr, GlobalVar):
            assert self.mod is not None
        if expr is None:
            # A missing expr denotes 'main' in the given module.
            expr = self.mod.get_global_var("main")

        # Evaluate expr to a packed function we can efficiently re-apply
        # to Relay arguments.
        func = _backend.EvalFunction(self.mod, expr, self.device, self.target)

        def _apply_args(*args, **kwargs):
            if isinstance(expr, GlobalVar):
                # When expanding args, look inside the actual global
                # definition so kwargs can be matched.
                args = self._convert_args(self.mod[expr.name_hint], args, kwargs)
            else:
                args = self._convert_args(expr, args, kwargs)
            # Reflect python arguments up into Relay.
            relay_args = []
            for arg in args:
                relay_args.append(_arg_to_ast(self.mod, arg))
            # Apply func to Relay args
            return func(relay_args)

        return _apply_args
|
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
This plotting function is for plotting when the surface-flagging method
is used.
The main difference is that the inner product plot is not produced.
"""
from clawpack.geoclaw import topotools
import pylab
import glob
from numpy import loadtxt
# --------------------------
def setplot(plotdata):
    # --------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
    Output: a modified version of plotdata.
    """

    from clawpack.visclaw import colormaps, geoplot

    plotdata.clearfigures()  # clear any old figures, axes, items data
    plotdata.format = 'binary'    # 'ascii', 'binary', 'netcdf'

    # Read sea_level from the GeoClaw run's data file so the surface color
    # limits below can be centered on it.
    try:
        tsudata = open(plotdata.outdir+'/geoclaw.data').readlines()
        for line in tsudata:
            if 'sea_level' in line:
                sea_level = float(line.split()[0])
                print "sea_level = ",sea_level
    except:
        # NOTE(review): bare except also hides unrelated failures; if the
        # file opens but contains no 'sea_level' line, sea_level stays unset
        # and a NameError occurs below -- presumably never happens in
        # practice, but worth confirming.
        print "Could not read sea_level, setting to 0."
        sea_level = 0.

    clim_ocean = 0.3
    clim_CC = 0.5

    cmax_ocean = clim_ocean + sea_level
    cmin_ocean = -clim_ocean + sea_level
    # Crescent City limits -- computed but not referenced by any plot item
    # visible below.
    cmax_CC = clim_CC + sea_level
    cmin_CC = -clim_CC + sea_level

    # To plot gauge locations on pcolor or contour plot, use this as
    # an afteraxis function:
    def addgauges(current_data):
        from clawpack.visclaw import gaugetools
        gaugetools.plot_gauge_locations(current_data.plotdata, \
            gaugenos='all', format_string='ko', add_labels=True)

    def timeformat(t):
        # Convert seconds to an h:mm:ss string.
        from numpy import mod
        hours = int(t/3600.)
        tmin = mod(t,3600.)
        min = int(tmin/60.)  # NOTE(review): shadows the builtin min()
        sec = int(mod(tmin,60.))
        timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
        return timestr

    def title_hours(current_data):
        from pylab import title
        t = current_data.t
        timestr = timeformat(t)
        title('%s after earthquake' % timestr)

    def plotcc(current_data):
        # Mark Crescent City on the map.
        from pylab import plot,text
        plot([235.8162], [41.745616],'wo')
        text(235.8,41.9,'Cr.City',color='w',fontsize=10)

    #-----------------------------------------
    # Figure for big area
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Solution', figno=0)
    plotfigure.kwargs = {'figsize': (8,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Solution'
    plotaxes.scaled = True

    def aa(current_data):
        # After-axes hook: overlay Crescent City and gauges, then fix tick
        # formatting and the aspect ratio appropriate for this latitude.
        from pylab import ticklabel_format,xticks,gca,cos,pi,yticks,title
        plotcc(current_data)
        title(' ')
        addgauges(current_data)
        ticklabel_format(format='plain',useOffset=False)
        xticks([180, 200, 220, 240], rotation=20, fontsize = 28)
        yticks(fontsize = 28)
        a = gca()
        a.set_aspect(1./cos(41.75*pi/180.))

    plotaxes.afteraxes = aa

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = geoplot.surface_or_depth
    # Blue-white-red map centered on zero surface displacement.
    my_cmap = colormaps.make_colormap({-1.0: [0.0,0.0,1.0], \
                                       -0.5: [0.5,0.5,1.0], \
                                        0.0: [1.0,1.0,1.0], \
                                        0.5: [1.0,0.5,0.5], \
                                        1.0: [1.0,0.0,0.0]})
    plotitem.imshow_cmap = my_cmap
    plotitem.imshow_cmin = cmin_ocean
    plotitem.imshow_cmax = cmax_ocean
    plotitem.add_colorbar = False
    plotitem.colorbar_shrink = 0.7
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = geoplot.land
    plotitem.imshow_cmap = geoplot.land_colors
    plotitem.imshow_cmin = 0.0
    plotitem.imshow_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.xlimits = [180,240]
    plotaxes.ylimits = [10,62]

    # Add contour lines of bathymetry:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    from numpy import arange, linspace
    plotitem.contour_levels = linspace(-6000,0,7)
    plotitem.amr_contour_colors = ['g']  # color on each level
    plotitem.kwargs = {'linestyles':'solid'}
    plotitem.amr_contour_show = [0,0,1,0]  # show contours only on finest level
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    # Add contour lines of topography:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    from numpy import arange, linspace
    plotitem.contour_levels = arange(0., 11., 1.)
    plotitem.amr_contour_colors = ['g']  # color on each level
    plotitem.kwargs = {'linestyles':'solid'}
    plotitem.amr_contour_show = [0,0,0,1]  # show contours only on finest level
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    #-----------------------------------------
    # Figure for levels
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Grid patches', figno=10)
    plotfigure.kwargs = {'figsize': (8,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Grid patches'
    plotaxes.scaled = True

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
    plotitem.amr_patch_bgcolor = [[1,1,1], [.7,.7,1], [1,0.4,0.4]]
    plotitem.amr_patchedges_color = ['k','b','r']
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0,1,1,0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.afteraxes = aa
    plotaxes.xlimits = [180,240]
    plotaxes.ylimits = [10,62]

    #-----------------------------------------
    # Figures for gauges
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='gauge plot', figno=300, \
                    type='each_gauge')

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.ylimits = [-2,2]
    plotaxes.title = 'Surface'
    plotaxes.xlimits = [15000, 35000]

    # Plot surface as blue curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3
    plotitem.plotstyle = 'b-'

    def fix_gauge(current_data):
        # NOTE(review): defined but never attached to plotaxes (e.g. as an
        # afteraxes hook), so it currently has no effect -- confirm intent.
        from pylab import plot, legend, xticks, floor, yticks,xlabel,savefig
        t = current_data.t
        gaugeno = current_data.gaugeno
        n = int(floor(t.max()/1800.) + 2)
        xticks([1800*i for i in range(n)],[str(i/2.) for i in range(n)],\
               fontsize=15)
        yticks(fontsize=15)
        xlabel("Hours")

        def add_legend(current_data):
            # NOTE(review): never called; also uses axis(), which is not
            # imported here and would raise NameError if invoked.
            try:
                legend(['GeoClaw with Adjoint Method','GeoClaw with Surface Elevation'],'lower right')
            except: pass
            axis((0,t.max(),-0.3,0.3))

    #-----------------------------------------
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via clawpack.visclaw.frametools.printframes:
    plotdata.printfigs = True                 # print figures
    plotdata.print_format = 'png'             # file format
    plotdata.print_framenos = 'all'           # list of frames to print
    plotdata.print_fignos = 'all'             # list of figures to print
    plotdata.print_gaugenos = 'all'           # list of gauges to print
    plotdata.html = True                      # create html files of plots?
    plotdata.html_homelink = '../README.html' # pointer for top of index
    plotdata.latex = True                     # create latex file of plots?
    plotdata.latex_figsperline = 2            # layout of plots
    plotdata.latex_framesperline = 1          # layout of plots
    plotdata.latex_makepdf = False            # also run pdflatex?
    plotdata.html_movie = 'JSAnimation'       # new style, or "4.x" for old style

    return plotdata
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import logging
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import ValidStudent
from models.models import Profile
from models.models import StudentAnswersEntity
from tools import verify
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from google.appengine.ext import db
# questions per module - training 2 - 12 modules
# last is postcourse
# training
#MODULE_QUESTIONS = [4,10,7,5,5,5,5,7,5,5,5,11,7]
# recertification
MODULE_QUESTIONS = [2,4,5,4,3,7]
# mandatory modules 1 to 8 - needed?
#MANDATORY_MODULES = 8
# number of question modules
#MAX_MODULES = 6
MAX_MODULES = len(MODULE_QUESTIONS)-1
def calc_total_score(student):
    """Computes the student's overall weighted score across all modules.

    Each module score is weighted by its number of questions
    (MODULE_QUESTIONS); the weighted mean is rounded to the nearest integer.

    Args:
        student: the Student entity whose scores are aggregated.

    Returns:
        The rounded overall score, or -1 while any mandatory module (per the
        student's profile) still has no recorded score.
    """
    mn = MODULE_QUESTIONS

    overall_score = -1
    ms = []
    for i in range(1, MAX_MODULES + 1):
        course = 'a' + str(i) + 'course'
        ms.append(utils.get_score(student, course))

    # The student's profile says which modules are mandatory for them.
    valid = ValidStudent.get_valid(student.key().name())
    prof = Profile.get_by_key_name(valid.profile)
    # SECURITY: eval() of a datastore string executes arbitrary code if that
    # value is ever attacker-controlled; prefer json or ast.literal_eval.
    auth = eval(prof.auth)

    # complete = every mandatory module has a recorded score.
    complete = True
    i = 0
    for score in ms[:MAX_MODULES]:
        if auth[i]:
            complete = complete and (score is not None)
        i += 1

    # Compute the overall score only once the mandatory modules are done.
    if complete:
        part_score = 0
        tq = 0
        for i in range(MAX_MODULES):
            if ms[i] is not None:
                part_score += mn[i] * ms[i]
                tq += mn[i]
        # BUG FIX: under Python 2, part_score/tq is integer (floor) division,
        # so the +0.5 rounding adjustment never had any effect. Divide in
        # floating point first so the result is rounded to nearest.
        overall_score = int((float(part_score) / tq) + 0.5)

    return overall_score
def store_score(course, student, assessment_name, assessment_type, score):
    """Stores a student's score on a particular assessment.

    Args:
        course: the course containing the assessment.
        student: the student whose data is stored.
        assessment_name: the name of the assessment being scored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.

    Returns:
        the result of the assessment, if appropriate.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always saved,
    # but scores are only saved if they're higher than the previous attempt.
    # This can lead to unexpected analytics behavior. Resolve this.
    existing_score = course.get_score(student, assessment_name)
    # remember to cast to int for comparison
    # logging.error('assessment name : %s exist score : %s score %s ',assessment_name,existing_score, score)
    if assessment_name != 'postcourse':
        # Keep only the best score for regular modules.
        if (existing_score is None) or (score > int(existing_score)):
            utils.set_score(student, assessment_name, score)

    # special handling for computing final score:
    if assessment_name == 'postcourse':
        # midcourse_score = utils.get_score(student, 'midcourse')
        # if midcourse_score is None:
        #     midcourse_score = 0
        # else:
        #     midcourse_score = int(midcourse_score)

        # Best post-course score seen so far.
        if existing_score is None:
            postcourse_score = score
        else:
            postcourse_score = int(existing_score)
            if score > postcourse_score:
                postcourse_score = score

        # Calculate overall score based on a formula
        overall_score = calc_total_score(student)
        # logging.error('overall_score : %s ', overall_score)
        # if utils.get_score(student, 'postcourse') == 0 and (overall_score > -1) :
        #    utils.set_score(student, 'postcourse', overall_score)
        #    utils.set_score(student, 'overall_score', overall_score)

        # TODO(pgbovine): this changing of assessment_type is ugly ...
        # Map the numeric overall score onto a result bucket; the returned
        # name doubles as the template result code.
        if overall_score == 100:
            assessment_name = 'postcourse_100'
        else:
            if overall_score >= 90:
                assessment_name = 'postcourse_pass'
            else:
                if overall_score > 0:
                    assessment_name = 'postcourse_fail'
                else:
                    assessment_name = 'not_complete'
        # utils.set_score(student, 'overall_score', overall_score)

        # store the overall_score of the first run of training in post_course
        # post_s= utils.get_score(student, 'postcourse')
        # logging.error('postcourse : %s ', utils.get_score(student, 'postcourse'))
        if utils.get_score(student, 'postcourse') == None and (overall_score > -1):
            utils.set_score(student, 'postcourse', overall_score)
            utils.set_score(student, 'overall_score', overall_score)

        # Refresh the stored overall score whenever one already exists.
        over_s = utils.get_score(student, 'overall_score')
        if over_s <> None:
            overall_score = calc_total_score(student)
            utils.set_score(student, 'overall_score', overall_score)

    return assessment_name
class AnswerHandler(BaseHandler):
    """Handler for saving assessment answers."""

    # Find student entity and save answers
    @db.transactional(xg=True)
    def update_assessment_transaction(
            self, email, assessment_name, assessment_type, new_answers, score):
        """Stores answer and updates user scores.

        Args:
            email: the student's email address.
            assessment_name: the name of the assessment.
            assessment_type: the type of the assessment (as stated in unit.csv).
            new_answers: the latest set of answers supplied by the student.
            score: the numerical assessment score.

        Returns:
            the student instance.
        """
        student = Student.get_enrolled_student_by_email(email)
        course = self.get_course()

        # It may be that old Student entities don't have user_id set; fix it.
        if not student.user_id:
            student.user_id = self.get_user().user_id()

        answers = StudentAnswersEntity.get_by_key_name(student.user_id)
        if not answers:
            answers = StudentAnswersEntity(key_name=student.user_id)
        answers.updated_on = datetime.datetime.now()

        utils.set_answer(answers, assessment_name, new_answers)

        store_score(course, student, assessment_name, assessment_type, score)

        student.put()
        answers.put()

        # Also record the event, which is useful for tracking multiple
        # submissions and history.
        models.EventEntity.record(
            'submit-assessment', self.get_user(), transforms.dumps({
                'type': 'assessment-%s' % assessment_name,
                'values': new_answers, 'location': 'AnswerHandler'}))

        return student

    def post(self):
        """Handles POST requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
            return

        course = self.get_course()
        assessment_type = self.request.get('assessment_type')
        assessment_name = self.request.get('assessment_name')
        if not assessment_type:
            self.error(404)
            logging.error('No assessment type supplied.')
            return

        # NOTE(review): the unit is looked up by assessment_type, which is
        # therefore expected to carry the unit id -- confirm against callers.
        unit = course.find_unit_by_id(assessment_type)
        if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
            self.error(404)
            logging.error('No assessment named %s exists.', assessment_type)
            return

        self.template_value['navbar'] = {'course': True}
        self.template_value['assessment'] = assessment_name
        # self.template_value['assessment'] = self.request.get('assessment_name')
        self.template_value['assessment_name'] = unit.title
        self.template_value['is_last_assessment'] = (
            course.is_last_assessment(unit))

        # Convert answers from JSON to dict.
        answers = self.request.get('answers')
        answers = transforms.loads(answers) if answers else []

        grader = unit.workflow.get_grader()

        # Scores are not recorded for human-reviewed assignments.
        score = 0
        if grader == courses.AUTO_GRADER:
            score = int(round(float(self.request.get('score'))))

        # Record assessment transaction.
        student = self.update_assessment_transaction(
            student.key().name(), assessment_name, assessment_type, answers,
            score)

        if grader == courses.HUMAN_GRADER:
            rp = course.get_reviews_processor()

            # Guard against duplicate submissions of a human-graded assessment.
            previously_submitted = rp.does_submission_exist(
                unit.unit_id, student.get_key())

            if not previously_submitted:
                # Check that the submission due date has not passed.
                time_now = datetime.datetime.now()
                submission_due_date = unit.workflow.get_submission_due_date()
                if time_now > submission_due_date:
                    self.template_value['time_now'] = time_now.strftime(
                        HUMAN_READABLE_DATETIME_FORMAT)
                    self.template_value['submission_due_date'] = (
                        submission_due_date.strftime(
                            HUMAN_READABLE_DATETIME_FORMAT))
                    self.template_value['error_code'] = (
                        'assignment_deadline_exceeded')
                    self.render('error.html')
                    return

                submission_key = student_work.Submission.write(
                    unit.unit_id, student.get_key(), answers)
                rp.start_review_process_for(
                    unit.unit_id, submission_key, student.get_key())
                # Record completion event in progress tracker.
                course.get_progress_tracker().put_assessment_completed(
                    student, assessment_type)

            self.template_value['previously_submitted'] = previously_submitted

            matcher = unit.workflow.get_matcher()
            self.template_value['matcher'] = matcher
            if matcher == review.PEER_MATCHER:
                self.template_value['review_dashboard_url'] = (
                    'reviewdashboard?unit=%s' % unit.unit_id
                )

            self.render('reviewed_assessment_confirmation.html')
            return
        else:
            # Record completion event in progress tracker.
            course.get_progress_tracker().put_assessment_completed(
                student, assessment_type)

            # Save the submission in the datastore, overwriting the earlier
            # version if it exists.
            submission_key = student_work.Submission.write(
                unit.unit_id, student.get_key(), answers)

            self.template_value['result'] = course.get_overall_result(student)
            self.template_value['score'] = score
            self.template_value['overall_score'] = course.get_overall_score(
                student)
            self.render('test_confirmation.html')
|
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import checkxmlstyle
import helpers
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))))
from PRESUBMIT_test_mocks import MockFile, MockInputApi, MockOutputApi
class IncludedFilesTest(unittest.TestCase):
  """Tests for checkxmlstyle.IncludedFiles path filtering."""

  def testFileIncluded(self):
    # XML files under a java res directory are picked up by the filter.
    included_paths = [
        'chrome/java/res_test/test.xml',
        'ui/test/java/res/test.xml',
        'content/java/res_test/test.xml',
        'components/test/java/res_test/test.xml',
    ]
    api = MockInputApi()
    api.files = [MockFile(path, []) for path in included_paths]
    self.assertEqual(4, len(list(checkxmlstyle.IncludedFiles(api))))

  def testFileExcluded(self):
    # Paths outside a java res directory are filtered out.
    excluded_paths = [
        'chrome/res_test/test.xml',
        'ui/test/test.xml',
        'content/java/res.xml',
        'components/java/test.xml',
        'test/java/res/test.xml',
    ]
    api = MockInputApi()
    api.files = [MockFile(path, []) for path in excluded_paths]
    self.assertEqual(0, len(list(checkxmlstyle.IncludedFiles(api))))
class ColorFormatTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckColorFormat."""

  def _check_format(self, path, lines):
    # Run the color-format check over a single mock file.
    api = MockInputApi()
    api.files = [MockFile(path, lines)]
    return checkxmlstyle._CheckColorFormat(api, MockOutputApi())

  def _assert_single_error_on_line_3(self, errors):
    # All failure cases below flag exactly one error on line 3 of the file.
    self.assertEqual(1, len(errors))
    self.assertEqual(1, len(errors[0].items))
    self.assertEqual(' chrome/java/res_test/test.xml:3',
                     errors[0].items[0].splitlines()[0])

  def testColorFormatIgnoredFile(self):
    # Non-XML files are skipped entirely, bad colors and all.
    errors = self._check_format(
        'chrome/java/res_test/test.java',
        ['<color name="color1">#61000000</color>',
         '<color name="color2">#FFFFFF</color>',
         '<color name="color3">#CCC</color>'])
    self.assertEqual(0, len(errors))

  def testColorFormatTooShort(self):
    errors = self._check_format(
        'chrome/java/res_test/test.xml',
        ['<color name="color1">#61000000</color>',
         '<color name="color2">#FFFFFF</color>',
         '<color name="color3">#CCC</color>'])
    self._assert_single_error_on_line_3(errors)

  def testColorInvalidAlphaValue(self):
    errors = self._check_format(
        'chrome/java/res_test/test.xml',
        ['<color name="color1">#61000000</color>',
         '<color name="color2">#FEFFFFFF</color>',
         '<color name="color3">#FFCCCCCC</color>'])
    self._assert_single_error_on_line_3(errors)

  def testColorFormatLowerCase(self):
    errors = self._check_format(
        'chrome/java/res_test/test.xml',
        ['<color name="color1">#61000000</color>',
         '<color name="color2">#EFFFFFFF</color>',
         '<color name="color3">#CcCcCC</color>'])
    self._assert_single_error_on_line_3(errors)
class ColorReferencesTest(unittest.TestCase):
  """Tests for the color reference presubmit checks."""

  def _check_references(self, files):
    # Run _CheckColorReferences over the given mock files.
    api = MockInputApi()
    api.files = files
    return checkxmlstyle._CheckColorReferences(api, MockOutputApi())

  def testVectorDrawbleIgnored(self):
    # Hard-coded fill colors inside <vector> drawables only produce a warning.
    result = self._check_references([MockFile(
        'chrome/java/res_test/test.xml',
        ['<vector',
         'tools:targetApi="21"',
         'android:fillColor="#CCCCCC">',
         '</vector>'])])
    self.assertEqual(1, len(result))
    self.assertEqual('warning', result[0].type)

  def testInvalidReference(self):
    # A literal hex color in a widget attribute is an error.
    errors = self._check_references([MockFile(
        'chrome/java/res_test/test.xml',
        ['<TextView',
         'android:textColor="#FFFFFF" />'])])
    self.assertEqual(1, len(errors))
    self.assertEqual(1, len(errors[0].items))
    self.assertEqual(' chrome/java/res_test/test.xml:2',
                     errors[0].items[0].splitlines()[0])

  def testValidReference(self):
    # @color/ references are fine.
    errors = self._check_references([MockFile(
        'chrome/java/res_test/test.xml',
        ['<TextView',
         'android:textColor="@color/color1" />'])])
    self.assertEqual(0, len(errors))

  def testValidReferenceInColorResources(self):
    # Literal colors are allowed inside the palette file itself.
    errors = self._check_references([MockFile(
        helpers.COLOR_PALETTE_RELATIVE_PATH,
        ['<color name="color1">#61000000</color>'])])
    self.assertEqual(0, len(errors))

  def testReferenceInSemanticColors(self):
    # Only @color/hello is undefined, so exactly one error is expected.
    api = MockInputApi()
    api.files = [
        MockFile(helpers.COLOR_PALETTE_PATH,
                 ['<resources><color name="a">#f0f0f0</color></resources>']),
        MockFile('ui/android/java/res/values/semantic_colors_non_adaptive.xml',
                 ['<color name="b">@color/hello<color>',
                  '<color name="c">@color/a<color>']),
        MockFile('ui/android/java/res/values/semantic_colors_adaptive.xml',
                 ['<color name="c">@color/a<color>']),
    ]
    errors = checkxmlstyle._CheckSemanticColorsReferences(api, MockOutputApi())
    self.assertEqual(1, len(errors))

  def testReferenceInColorPalette(self):
    # Direct palette references from colors.xml produce a single warning.
    api = MockInputApi()
    api.files = [
        MockFile(helpers.COLOR_PALETTE_PATH,
                 ['<resources><color name="foo">#f0f0f0</color></resources>']),
        MockFile('ui/android/java/res/values/semantic_colors_adaptive.xml',
                 ['<color name="b">@color/foo<color>']),
        MockFile('ui/android/java/res/values/colors.xml',
                 ['<color name="c">@color/b</color>',
                  '<color name="d">@color/b</color>',
                  '<color name="e">@color/foo</color>']),
    ]
    warnings = checkxmlstyle._CheckColorPaletteReferences(api, MockOutputApi())
    self.assertEqual(1, len(warnings))
class DuplicateColorsTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckDuplicateColors."""

  def _check_duplicates(self, path, lines):
    api = MockInputApi()
    api.files = [MockFile(path, lines)]
    return checkxmlstyle._CheckDuplicateColors(api, MockOutputApi())

  def testFailure(self):
    # Two names bound to the same value in the palette flags both lines.
    errors = self._check_duplicates(
        helpers.COLOR_PALETTE_RELATIVE_PATH,
        ['<color name="color1">#61000000</color>',
         '<color name="color2">#61000000</color>'])
    self.assertEqual(1, len(errors))
    self.assertEqual(2, len(errors[0].items))
    for index in (0, 1):
      self.assertEqual(
          ' %s:%d' % (helpers.COLOR_PALETTE_RELATIVE_PATH, index + 1),
          errors[0].items[index].splitlines()[0])

  def testSucess(self):
    # Files other than the palette are not checked. (Method name typo is
    # preserved: unittest discovers tests by name.)
    errors = self._check_duplicates(
        'chrome/java/res_test/colors.xml',
        ['<color name="color1">#61000000</color>',
         '<color name="color1">#FFFFFF</color>'])
    self.assertEqual(0, len(errors))
class XmlNamespacePrefixesTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckXmlNamespacePrefixes."""

  def _check_prefix(self, declaration):
    # Run the namespace-prefix check over one single-line mock file.
    api = MockInputApi()
    api.files = [MockFile('chrome/java/res_test/file.xml', [declaration])]
    return checkxmlstyle._CheckXmlNamespacePrefixes(api, MockOutputApi())

  def testFailure(self):
    # A res-auto namespace bound to a non-standard prefix is an error.
    errors = self._check_prefix(
        'xmlns:chrome="http://schemas.android.com/apk/res-auto"')
    self.assertEqual(1, len(errors))
    self.assertEqual(1, len(errors[0].items))
    self.assertEqual(' chrome/java/res_test/file.xml:1',
                     errors[0].items[0].splitlines()[0])

  def testSucess(self):
    # The canonical "app" prefix passes. (Method name typo preserved:
    # unittest discovers tests by name.)
    errors = self._check_prefix(
        'xmlns:app="http://schemas.android.com/apk/res-auto"')
    self.assertEqual(0, len(errors))
class TextAppearanceTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckTextAppearance."""

  def testFailure_Style(self):
    # A style that is not named "TextAppearance.*" must not set text
    # appearance attributes; each offending item is reported separately.
    style_lines = [
        '<resource>',
        '<style name="TestTextAppearance">',
        '<item name="android:textColor">@color/default_text_color_link</item>',
        '<item name="android:textSize">14sp</item>',
        '<item name="android:textStyle">bold</item>',
        '<item name="android:fontFamily">some-font</item>',
        '<item name="android:textAllCaps">true</item>',
        '</style>',
        '</resource>']
    input_api = MockInputApi()
    input_api.files = [MockFile('chrome/java/res_test/test.xml', style_lines)]
    errors = checkxmlstyle._CheckTextAppearance(input_api, MockOutputApi())
    self.assertEqual(1, len(errors))
    self.assertEqual(5, len(errors[0].items))
    flagged_attributes = ('android:textColor', 'android:textSize',
                          'android:textStyle', 'android:fontFamily',
                          'android:textAllCaps')
    for index, attribute in enumerate(flagged_attributes):
      self.assertEqual(
          ' chrome/java/res_test/test.xml:2 contains attribute %s' % attribute,
          errors[0].items[index].splitlines()[0])

  def testSuccess_Style(self):
    # Text attributes are fine inside a "TextAppearance.*" style, and
    # non-text attributes are fine in any style.
    style_lines = [
        '<resource>',
        '<style name="TextAppearance.Test">',
        '<item name="android:textColor">@color/default_text_color_link</item>',
        '<item name="android:textSize">14sp</item>',
        '<item name="android:textStyle">bold</item>',
        '<item name="android:fontFamily">some-font</item>',
        '<item name="android:textAllCaps">true</item>',
        '</style>',
        '<style name="TestStyle">',
        '<item name="android:background">some_background</item>',
        '</style>',
        '</resource>']
    input_api = MockInputApi()
    input_api.files = [MockFile('chrome/java/res_test/test.xml', style_lines)]
    errors = checkxmlstyle._CheckTextAppearance(input_api, MockOutputApi())
    self.assertEqual(0, len(errors))

  def testFailure_Widget(self):
    # Text attributes set directly on widgets (top-level or nested) are
    # flagged, whether the widget is the root element or a subcomponent.
    top_level_widget = [
        '<TextView',
        'xmlns:android="http://schemas.android.com/apk/res/android"',
        'android:layout_width="match_parent"',
        'android:layout_height="@dimen/snippets_article_header_height"',
        'android:textColor="@color/snippets_list_header_text_color"',
        'android:textSize="14sp" />']
    nested_widget = [
        '<RelativeLayout',
        'xmlns:android="http://schemas.android.com/apk/res/android"',
        'android:layout_width="match_parent"',
        'android:layout_height="wrap_content">',
        '<View',
        'android:textColor="@color/error_text_color"',
        'android:textSize="@dimen/text_size_medium"',
        'android:textAllCaps="true"',
        'android:background="@drawable/infobar_shadow_top"',
        'android:visibility="gone" />',
        '</RelativeLayout>']
    input_api = MockInputApi()
    input_api.files = [
        MockFile('chrome/java/res_test/test1.xml', top_level_widget),
        MockFile('chrome/java/res_test/test2.xml', nested_widget)]
    errors = checkxmlstyle._CheckTextAppearance(input_api, MockOutputApi())
    self.assertEqual(1, len(errors))
    self.assertEqual(5, len(errors[0].items))
    expected = (
        ('test1.xml', 5, 'android:textColor'),
        ('test1.xml', 6, 'android:textSize'),
        ('test2.xml', 6, 'android:textColor'),
        ('test2.xml', 7, 'android:textSize'),
        ('test2.xml', 8, 'android:textAllCaps'),
    )
    for index, (file_name, line, attribute) in enumerate(expected):
      self.assertEqual(
          ' chrome/java/res_test/%s:%d contains attribute %s' % (
              file_name, line, attribute),
          errors[0].items[index].splitlines()[0])

  def testSuccess_Widget(self):
    # Widgets without any text appearance attributes pass the check.
    widget_lines = [
        '<RelativeLayout',
        'xmlns:android="http://schemas.android.com/apk/res/android"',
        'android:layout_width="match_parent"',
        'android:layout_height="wrap_content">',
        '<View',
        'android:background="@drawable/infobar_shadow_top"',
        'android:visibility="gone" />',
        '</RelativeLayout>']
    input_api = MockInputApi()
    input_api.files = [MockFile('chrome/java/res_test/test.xml', widget_lines)]
    errors = checkxmlstyle._CheckTextAppearance(input_api, MockOutputApi())
    self.assertEqual(0, len(errors))
class NewTextAppearanceTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckNewTextAppearance."""

  def _CheckStyle(self, style_name):
    # Runs the check over a minimal resource file declaring `style_name`.
    resource_lines = [
        '<resource>',
        '<style name="%s">' % style_name,
        '<item name="android:textColor">@color/default_text_color_link</item>',
        '<item name="android:textSize">14sp</item>',
        '</style>',
        '</resource>']
    input_api = MockInputApi()
    input_api.files = [
        MockFile('chrome/java/res_test/test.xml', resource_lines)]
    return checkxmlstyle._CheckNewTextAppearance(input_api, MockOutputApi())

  def testFailure(self):
    # Adding a brand-new "TextAppearance.*" style is flagged.
    errors = self._CheckStyle('TextAppearance.Test')
    self.assertEqual(1, len(errors))
    self.assertEqual(1, len(errors[0].items))
    self.assertEqual(
        ' chrome/java/res_test/test.xml:2',
        errors[0].items[0].splitlines()[0])

  def testSuccess(self):
    # A style whose name merely contains "TextAppearance" is not flagged.
    errors = self._CheckStyle('TextAppearanceTest')
    self.assertEqual(0, len(errors))
class UnfavoredLayoutAttributesTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckLineSpacingAttribute."""

  def testLineSpacingAttributesUsage(self):
    # android:lineSpacingExtra / lineSpacingMultiplier on a plain TextView are
    # flagged; TextViewWithLeading with app:leading is the approved pattern.
    layout_path = 'ui/android/java/res/layout/new_textview.xml'
    changed_lines = [
        '<TextView android:id="@+id/test"',
        ' android:lineSpacingExtra="42dp"',
        ' android:lineSpacingMultiplier="42dp"',
        '/>',
        '<TextViewWithLeading android:id="@+id/test2"',
        ' app:leading="42dp"',
        '/>'
    ]
    input_api = MockInputApi()
    input_api.files = [MockFile(layout_path, changed_lines)]
    result = checkxmlstyle._CheckLineSpacingAttribute(
        input_api, MockOutputApi())
    self.assertEqual(1, len(result))
    self.assertEqual(2, len(result[0].items))
    for index, line_number in enumerate((2, 3)):
      self.assertEqual(' %s:%d' % (layout_path, line_number),
                       result[0].items[index].splitlines()[0])
class UnfavoredWidgetsTest(unittest.TestCase):
  """Tests for checkxmlstyle._CheckButtonCompatWidgetUsage."""

  def testButtonCompatUsage(self):
    # Plain <Button> and AppCompatButton are flagged; ButtonCompat is the
    # approved replacement and passes.
    layout_path = 'ui/android/java/res/layout/dropdown_item.xml'
    changed_lines = [
        '<Button',
        ' android:text="@string/hello"',
        ' android:text="@color/modern_blue_600"',
        '/>',
        '',
        '<android.support.v7.widget.AppCompatButton',
        ' android:text="@string/welcome"',
        ' android:color="@color/modern_purple_300"',
        '/>',
        '<org.chromium.ui.widget.ButtonCompat',
        ' android:id="@+id/action_button"',
        '/>'
    ]
    input_api = MockInputApi()
    input_api.files = [MockFile(layout_path, changed_lines)]
    result = checkxmlstyle._CheckButtonCompatWidgetUsage(
        input_api, MockOutputApi())
    self.assertEqual(1, len(result))
    self.assertEqual(2, len(result[0].items))
    for index, line_number in enumerate((1, 6)):
      self.assertEqual(' %s:%d' % (layout_path, line_number),
                       result[0].items[index].splitlines()[0])
# Allow running this test suite directly (e.g. `python checkxmlstyle_test.py`).
if __name__ == '__main__':
  unittest.main()
|
|
''' Contains the Tombot layer, which handles the messages. '''
import os
import sys
import logging
import time
import sqlite3
import threading
from apscheduler.schedulers import SchedulerNotRunningError
from yowsup.layers.interface \
import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers \
import YowLayerEvent
from yowsup.layers.network \
import YowNetworkLayer
from yowsup.layers.protocol_messages.protocolentities \
import TextMessageProtocolEntity
from yowsup.layers.protocol_receipts.protocolentities \
import OutgoingReceiptProtocolEntity
from yowsup.layers.protocol_presence.protocolentities \
import AvailablePresenceProtocolEntity, UnavailablePresenceProtocolEntity
from . import plugins
from .helper_functions import unknown_command
import tombot.registry as registry
import tombot.rpc as rpc
class TomBotLayer(YowInterfaceLayer):
    ''' The tombot layer, a chatbot for WhatsApp. '''
    # pylint: disable=too-many-instance-attributes

    # Cookie emoji as UTF-8 escape bytes (kept as-is for yowsup's interface).
    koekje = '\xf0\x9f\x8d\xaa'
    # Uppercased first words that address the bot; required in group chats.
    triggers = [
        'TOMBOT', 'TOMBOT,',
        'BOT', 'BOT,',
        'VRIEZIRI', 'VRIEZIRI,',
        'VICTOR', 'VICTOR,',
        'VIKTOR', 'VIKTOR,',
        'MINION', 'MINION,',
    ]

    def __init__(self, config, scheduler):
        ''' Set up database, RPC server, scheduler and plugins.

        Args:
            config: mapping with a ['Yowsup']['database'] entry pointing at
                the sqlite database file.
            scheduler: an APScheduler scheduler instance, started here.
        '''
        # BUG FIX: was `super(self.__class__, self)`, which recurses forever
        # as soon as this class is subclassed; name the class explicitly.
        super(TomBotLayer, self).__init__()
        self.connected = False
        self.config = config
        self.scheduler = scheduler
        logging.info('Current working directory: %s', os.getcwd())
        try:
            logging.info('Database location: %s',
                         config['Yowsup']['database'])
            self.conn = sqlite3.connect(config['Yowsup']['database'],
                                        detect_types=sqlite3.PARSE_DECLTYPES,
                                        check_same_thread=False)
            self.conn.text_factory = str
            self.cursor = self.conn.cursor()
        except KeyError:
            # Best-effort: the bot continues without a database; attribute
            # access on self.conn/self.cursor will fail later if used.
            logging.critical('Database could not be loaded!')

        # Group list holder, filled in by onIq when a groups IQ arrives.
        self.known_groups = []

        # Start RPC listener in a daemon thread so it dies with the process.
        host = 'localhost'
        port = 10666
        self.rpcserver = rpc.ThreadedTCPServer(
            (host, port), rpc.ThreadedTCPRequestHandler, self)
        server_thread = threading.Thread(target=self.rpcserver.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # Start the passed scheduler
        self.scheduler.start()

        # Command dispatch table: uppercased command word -> handler function.
        self.functions = {}
        plugins.load_plugins()
        self.functions.update(registry.COMMAND_DICT)

        # Execute startup hooks
        registry.fire_event(registry.BOT_START, self)

    @ProtocolEntityCallback('iq')
    def onIq(self, entity):
        ''' Handles incoming IQ messages, currently inactive. '''
        # pylint: disable=invalid-name
        if hasattr(entity, 'groupsList'):
            logging.info('Discovered groups:')
            logging.info(entity.groupsList)
            self.known_groups = entity.groupsList

    def onEvent(self, layerEvent):
        ''' Handles disconnection events and reconnects if we timed out.'''
        # pylint: disable=invalid-name
        logging.debug('Event %s received', layerEvent.getName())
        if layerEvent.getName() == YowNetworkLayer.EVENT_STATE_DISCONNECTED:
            reason = layerEvent.getArg('reason')
            logging.warning(_('Connection lost: {}').format(reason))
            registry.fire_event(registry.BOT_DISCONNECTED, self)
            if reason == 'Connection Closed':
                # Transient drop: wait briefly, then ask the stack to reconnect.
                time.sleep(.5)
                logging.warning(_('Reconnecting'))
                self.getStack().broadcastEvent(
                    YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
                self.connected = False
                return True
            else:
                logging.error('Fatal disconnect: %s', reason)
                if self.connected and reason != 'Requested':
                    self.stop()
                return False
        elif layerEvent.getName() == YowNetworkLayer.EVENT_STATE_CONNECTED:
            logging.info('Connection established.')
            self.connected = True
            self.set_online()
            registry.fire_event(registry.BOT_CONNECTED, self)
            return False

    @ProtocolEntityCallback('message')
    def onMessage(self, message):
        ''' Handles incoming messages and responds to them if needed. '''
        # pylint: disable=invalid-name
        logging.debug('Message %s from %s received, content: %s',
                      message.getId(), message.getFrom(), message.getBody())
        # Mark the message as read before reacting to it.
        receipt = OutgoingReceiptProtocolEntity(
            message.getId(), message.getFrom(),
            'read', message.getParticipant())
        self.toLower(receipt)
        time.sleep(0.2)
        self.react(message)
        registry.fire_event(registry.BOT_MSG_RECEIVE, self, message)

    @ProtocolEntityCallback('receipt')
    def onReceipt(self, entity):
        ''' Sends acknowledgements for read receipts. '''
        # pylint: disable=invalid-name
        logging.debug('Acking receipt')
        self.toLower(entity.ack())

    def toLower(self, entity):
        ''' Intercept entities if not connected and warn user. '''
        if not self.connected:
            logging.warning('Not connected, dropping entity!')
            return
        # BUG FIX: was `super(self.__class__, self)` — see __init__.
        super(TomBotLayer, self).toLower(entity)

    def react(self, message):
        ''' Generates a response to a message using a response function and sends it. '''
        content = message.getBody()
        text = content.upper().split()
        isgroup = False
        if message.participant:  # A trigger is required in groups
            isgroup = True
            # NOTE(review): an empty message body would raise IndexError here,
            # outside the try below — presumably yowsup never delivers empty
            # text messages; confirm before relying on it.
            if text[0] not in self.triggers:
                return
            text.remove(text[0])
        try:
            response = self.functions[text[0]](self, message)
        except IndexError:
            # No command word at all: ignore silently.
            return
        except KeyError:
            if isgroup or content.startswith('@'):
                return  # no 'unknown command!' spam
            response = unknown_command(message)
            logging.debug('Failed command %s', text[0])
        except UnicodeDecodeError as ex:
            response = 'UnicodeDecodeError, see logs.'
            logging.error(ex)
        if response:
            reply_message = TextMessageProtocolEntity(
                response, to=message.getFrom())
            self.toLower(reply_message)

    def stop(self, restart=False):
        ''' Shut down the bot.

        Exits the process with status 3 when restart is requested (so a
        wrapper script can relaunch), 0 otherwise.
        '''
        logging.info('Shutting down via stop method.')
        # Execute shutdown hooks
        registry.fire_event(registry.BOT_SHUTDOWN, self)
        self.set_offline()
        try:
            self.scheduler.shutdown()
        except SchedulerNotRunningError:
            pass
        self.rpcserver.shutdown()
        self.rpcserver.server_close()
        if self.connected:
            self.broadcastEvent(
                YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECT))
        if restart:
            sys.exit(3)
        sys.exit(0)

    # Helper functions
    def set_online(self, *_):
        ''' Set presence as available '''
        logging.debug('Setting presence online.')
        entity = AvailablePresenceProtocolEntity()
        self.toLower(entity)

    def set_offline(self, *_):
        ''' Set presence as unavailable '''
        logging.debug('Setting presence offline.')
        entity = UnavailablePresenceProtocolEntity()
        self.toLower(entity)
# This module is a library; refuse direct execution with a helpful hint.
if __name__ == '__main__':
    sys.exit("This script should be run via run.py and/or the tombot-run command.")
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import device_util
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import nest
# Graph-collection key used by TPUStrategy.initialize() to memoize the
# tpu.initialize_system() op, making initialization idempotent per graph.
_TPU_INITIALIZE_SYSTEM_COLLECTION = "TPU_STRATEGY_INITIALIZE"
def get_tpu_system_metadata(tpu_cluster_resolver):
  """Retrieves TPU system metadata given a TPUClusterResolver."""
  cluster_spec = tpu_cluster_resolver.cluster_spec()
  cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
  # pylint: disable=protected-access
  return tpu_system_metadata_lib._query_tpu_system_metadata(
      tpu_cluster_resolver.master(),
      cluster_def=cluster_def,
      query_topology=False)
# TODO(jhseu): Deduplicate with MirroredStrategy?
def _create_tpu_mirrored_variable(devices, real_mirrored_creator, *args,
                                  **kwargs):  # pylint: disable=g-missing-docstring
  # Creates one underlying variable per device via `real_mirrored_creator`
  # (which returns a device -> variable dict) and wraps them all in a single
  # TPUMirroredVariable, registered in the requested graph collections.
  # Figure out what collections this variable should be added to.
  # We'll add the TPUMirroredVariable to those collections instead.
  collections = kwargs.pop("collections", None)
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  # Member variables themselves go in no collections; only the wrapper does.
  kwargs["collections"] = []

  # TODO(jhseu): Should we have different behavior for different
  # synchronization settings?

  # Get aggregation value
  # TODO(jhseu): Support aggregation in a tower context.
  aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
  if aggregation not in [
      vs.VariableAggregation.NONE,
      vs.VariableAggregation.SUM,
      vs.VariableAggregation.MEAN,
      vs.VariableAggregation.ONLY_FIRST_TOWER,
  ]:
    raise ValueError("Invalid variable aggregation mode: {} for variable: {}"
                     .format(aggregation, kwargs["name"]))

  # Ignore user-specified caching device, not needed for mirrored variables.
  kwargs.pop("caching_device", None)

  # TODO(josh11b,apassos): It would be better if variable initialization
  # was never recorded on the tape instead of having to do this manually
  # here.
  with tape.stop_recording():
    index = real_mirrored_creator(devices, *args, **kwargs)
    # The variable on devices[0] serves as the primary replica.
    result = values.TPUMirroredVariable(index, index[devices[0]], aggregation)

  if not context.executing_eagerly():
    g = ops.get_default_graph()
    # If "trainable" is True, next_creator() will add the member variables
    # to the TRAINABLE_VARIABLES collection, so we manually remove
    # them and replace with the MirroredVariable. We can't set
    # "trainable" to False for next_creator() since that causes functions
    # like implicit_gradients to skip those variables.
    if kwargs.get("trainable", True):
      collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
      l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
      for v in index.values():
        l.remove(v)
    g.add_to_collections(collections, result)
  return result
# TODO(jhseu): Stop inheriting from OneDeviceStrategy.
class TPUStrategy(one_device_strategy.OneDeviceStrategy):
  """Experimental TPU distribution strategy implementation."""

  def __init__(self, tpu_cluster_resolver, steps_per_run, num_cores=None):
    """Initializes the TPUStrategy object.

    Args:
      tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver,
          which provides information about the TPU cluster.
      steps_per_run: Number of steps to run on device before returning to the
          host. Note that this can have side-effects on performance, hooks,
          metrics, summaries etc.
          This parameter is only used when Distribution Strategy is used with
          estimator or keras.
      num_cores: Number of cores to use on the TPU. If None specified, then
          auto-detect the cores and topology of the TPU system.
    """
    # TODO(sourabhbajaj): OneDeviceStrategy should be initialized with the
    # master node fetched from the cluster resolver.
    super(TPUStrategy, self).__init__("/device:CPU:0")
    self._tpu_cluster_resolver = tpu_cluster_resolver
    self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
    # TODO(sourabhbajaj): Change this from num_cores to metadata_override
    self._num_cores_override = num_cores

    # TODO(jhseu): Switch to DeviceAssignment to support pods and model
    # parallelism.
    # Map each TPU device name to its index in the metadata device list.
    device_map = {d.name: i for i, d in enumerate(self._tpu_metadata.devices)
                  if "device:TPU:" in d.name}
    self._device_index = values.PerDevice(device_map)
    self._tpu_devices = sorted(device_map.keys())
    # Only create variables for the number of towers we're running.
    self._tpu_devices = self._tpu_devices[:self.num_towers]

    # TODO(sourabhbajaj): Remove this once performance of running one step
    # at a time is comparable to multiple steps.
    self.steps_per_run = steps_per_run
    self._require_static_shapes = True

  def _get_enqueue_op_per_host(self, host_id, iterator, input_shapes,
                               iterations):
    """Create an enqueue op for a single host identified using host_id.

    The while_loop op returned will run `iterations` times and in each run
    enqueue batches for each shard.

    Args:
      host_id: integer, id of the host to run the enqueue ops on.
      iterator: `tf.data` iterator to read the input data.
      input_shapes: shape of inputs to be enqueue on the queue. This is same as
        the value of `nest.flatten(iterator.output_shapes)`.
      iterations: integer, number of iterations to be run; determines the
        number of batches to be enqueued.

    Returns:
      while_loop_op running `iterations` times; in each run we enqueue a batch
      on the infeed queue from the host with id `host_id` for each device shard.
    """
    host = self.get_host_cpu_device(host_id)

    def _infeed_enqueue_ops_fn():
      """Enqueue ops for one iteration."""
      control_deps = []
      sharded_inputs = []
      enqueue_ops = []

      with ops.device(host):
        for _ in range(self.num_towers_per_host):
          # Use control dependencies to ensure a deterministic ordering.
          with ops.control_dependencies(control_deps):
            inputs = nest.flatten(iterator.get_next())
            control_deps.extend(inputs)
            sharded_inputs.append(inputs)

      for core_id, shard_input in enumerate(sharded_inputs):
        enqueue_ops.append(
            tpu_ops.infeed_enqueue_tuple(
                inputs=shard_input,
                shapes=input_shapes,
                device_ordinal=core_id))
      return enqueue_ops

    def enqueue_ops_loop_body(i):
      """Callable for the loop body of the while_loop instantiated below."""
      with ops.control_dependencies(_infeed_enqueue_ops_fn()):
        return i + 1

    with ops.device(host):
      enqueue_op_per_host = control_flow_ops.while_loop(
          lambda i: i < iterations,
          enqueue_ops_loop_body,
          [constant_op.constant(0)],
          parallel_iterations=1)

    return enqueue_op_per_host

  def distribute_dataset(self, dataset_fn):
    """Wraps `dataset_fn`'s result for use with this strategy."""
    # TODO(priyag): Perhaps distribute across cores here.
    return self._call_dataset_fn(dataset_fn)

  # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
  # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
  # a mechanism to infer the outputs of `fn`. Pending b/110550782.
  def _run_steps_on_dataset(self, fn, iterator, iterations,
                            initial_loop_values=None):
    """Runs `fn` for `iterations` steps, feeding data via TPU infeed."""
    shapes = nest.flatten(iterator.output_shapes)
    if any([not s.is_fully_defined() for s in shapes]):
      raise ValueError(
          'TPU currently requires fully defined shapes. Either use '
          'set_shape() on the input tensors or use '
          'dataset.batch(..., drop_remainder=True).')
    types = nest.flatten(iterator.output_types)

    # One enqueue while_loop per host; each feeds every shard on that host.
    enqueue_ops = [
        self._get_enqueue_op_per_host(host_id, iterator, shapes, iterations)
        for host_id in range(self.num_hosts)]

    def dequeue_fn():
      dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
      return nest.pack_sequence_as(iterator.output_shapes, dequeued)

    # Wrap `fn` for repeat.
    if initial_loop_values is None:
      initial_loop_values = {}
    initial_loop_values = nest.flatten(initial_loop_values)
    ctx = values.MultiStepContext()
    def run_fn(*args, **kwargs):
      """Single step on the TPU device."""
      del args, kwargs
      fn_inputs = dequeue_fn()
      if not isinstance(fn_inputs, tuple):
        fn_inputs = (fn_inputs,)
      fn_result = fn(ctx, *fn_inputs)
      flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
      if flat_last_step_outputs:
        with ops.control_dependencies([fn_result]):
          return [array_ops.identity(f) for f in flat_last_step_outputs]
      else:
        return fn_result

    # TODO(sourabhbajaj): The input to while loop should be based on the output
    # type of the step_fn
    def iterate_on_tpu():
      return training_loop.repeat(iterations, run_fn, initial_loop_values)

    # We capture the control_flow_context at this point, before we run `fn`
    # inside a while_loop and TPU replicate context. This is useful in cases
    # where we might need to exit these contexts and get back to the outer
    # context to do some things, for e.g. create an op which should be
    # evaluated only once at the end of the loop on the host. One such usage
    # is in creating metrics' value op.
    self._outer_control_flow_context = (
        ops.get_default_graph()._get_control_flow_context())  # pylint: disable=protected-access

    replicate_inputs = [[]] * self.num_towers
    replicate_outputs = tpu.replicate(iterate_on_tpu, replicate_inputs)
    del self._outer_control_flow_context
    ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops)

    # Filter out any ops from the outputs, typically this would be the case
    # when there were no tensor outputs.
    last_step_tensor_outputs = [x for x in replicate_outputs
                                if not isinstance(x, ops.Operation)]

    # Outputs are currently of the structure (grouped by device)
    # [[output0_device0, output1_device0, output2_device0],
    #  [output0_device1, output1_device1, output2_device1]]
    # Convert this to the following structure instead: (grouped by output)
    # [[output0_device0, output0_device1],
    #  [output1_device0, output1_device1],
    #  [output2_device0, output2_device1]]
    last_step_tensor_outputs = [list(x) for x in zip(*last_step_tensor_outputs)]

    # Convert replicate_outputs to the original dict structure of
    # last_step_outputs.
    last_step_tensor_outputs_dict = nest.pack_sequence_as(
        ctx.last_step_outputs, last_step_tensor_outputs)

    for (name, aggregation) in ctx._last_step_outputs_aggregations.items():  # pylint: disable=protected-access
      output = last_step_tensor_outputs_dict[name]
      # For outputs that have already been aggregated, take the first value
      # from the list as each value should be the same. Else return the full
      # list of values.
      # TODO(josh11b): If aggregation is NONE, we should return a PerDevice value.
      if aggregation is not variables_lib.VariableAggregation.NONE:
        # TODO(priyag): Should this return the element or a list with 1 element
        last_step_tensor_outputs_dict[name] = output[0]
    ctx._set_last_step_outputs(last_step_tensor_outputs_dict)  # pylint: disable=protected-access
    return ctx

  def _call_for_each_tower(self, fn, *args, **kwargs):
    """Calls `fn` once inside a (single-device) tower context."""
    # TODO(jhseu): Consider making it so call_for_each_tower implies that we're
    # in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
    kwargs.pop('run_concurrently', None)
    with one_device_strategy._OneDeviceTowerContext(self):  # pylint: disable=protected-access
      return fn(*args, **kwargs)

  def initialize(self):
    """Returns ops that initialize the TPU system (memoized per graph)."""
    if context.executing_eagerly():
      # TODO(priyag): Add appopriate call here when eager is supported for TPUs.
      raise NotImplementedError('Eager mode not supported in TPUStrategy.')
    else:
      # TODO(jhseu): We need this hack because DistributionStrategies must be
      # pickleable for copy.deepcopy(). Remove when initialize_system goes away.
      graph = ops.get_default_graph()
      tpu_init = graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION)
      if tpu_init:
        return tpu_init
      graph.add_to_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION,
                              tpu.initialize_system())
      return graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION)

  def finalize(self):
    """Returns ops that shut down the TPU system."""
    if context.executing_eagerly():
      # TODO(priyag): Add appopriate call here when eager is supported for TPUs.
      raise NotImplementedError('Eager mode not supported in TPUStrategy.')
    else:
      return [tpu.shutdown_system()]

  def _get_devices_from(self, colocate_with=None):
    # TODO(jhseu): Change this when we support model parallelism.
    return self._tpu_devices

  def _create_variable(self, next_creator, *args, **kwargs):
    """Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
    colocate_with = kwargs.pop("colocate_with", None)
    devices = self._get_devices_from(colocate_with)

    def _real_mirrored_creator(devices, *args, **kwargs):  # pylint: disable=g-missing-docstring
      index = {}
      for i, d in enumerate(devices):
        with ops.device(d):
          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = index[devices[0]].name.split(":")[0]
            # We append a / to variable names created on towers with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
            # Initialize replicas with the same value:
            if context.executing_eagerly():
              kwargs["initial_value"] = array_ops.identity(
                  index[devices[0]].value())
            else:
              def initial_value_fn(device=d):
                with ops.device(device):
                  return array_ops.identity(index[devices[0]].initial_value)
              kwargs["initial_value"] = initial_value_fn
          with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
            v = next_creator(*args, **kwargs)
          assert not isinstance(v, values.TPUMirroredVariable)
          index[d] = v
      return index

    return _create_tpu_mirrored_variable(devices, _real_mirrored_creator, *args,
                                         **kwargs)

  def _reduce(self, aggregation, value, destinations):
    """Reduces `value` across towers (cross-replica sum or host-side add)."""
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if aggregation == vs.VariableAggregation.MEAN:
        # TODO(jhseu): Revisit once we support model-parallelism.
        value *= (1. / self.num_towers)
      elif aggregation != vs.VariableAggregation.SUM:
        raise NotImplementedError(
            "Currently only support sum & mean in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    # Validate that the destination is same as the host device
    # Note we don't do this when in replicate context as the reduction is
    # performed on the TPU device itself.
    devices = cross_tower_ops_lib.get_devices_from(destinations)
    if len(devices) == 1:
      assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
          self.get_host_cpu_device(0))
    else:
      raise ValueError('Multiple devices are not supported for TPUStrategy')

    if aggregation == vs.VariableAggregation.ONLY_FIRST_TOWER:
      return value[0]
    output = math_ops.add_n(value)
    if aggregation == vs.VariableAggregation.MEAN:
      return output * (1. / len(value))
    return output

  def _update(self, var, options, fn, *args, **kwargs):
    """Applies `fn` to `var`, either in-replica or per mirrored device."""
    assert isinstance(var, values.TPUMirroredVariable)
    should_group = options.pop("grouped")
    assert not options  # Validate that we are processing all of the options.

    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if should_group:
        return fn(var, *args, **kwargs)
      else:
        return [fn(var, *args, **kwargs)]

    # Otherwise, we revert to MirroredStrategy behavior and update each variable
    # directly.
    updates = {}
    for d, v in var._index.items():  # pylint: disable=protected-access
      name = "update_%d" % self._device_index.get(d)
      with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
        # If args and kwargs are not mirrored, the value is returned as is.
        updates[d] = fn(v,
                        *values.select_device_mirrored(d, args),
                        **values.select_device_mirrored(d, kwargs))
    return values.update_regroup(self, updates, should_group)

  # TODO(josh11b): Need to implement _update_non_slot()!

  def read_var(self, var):
    """Reads the (mirrored) value of `var`."""
    assert isinstance(var, values.TPUMirroredVariable)
    return var.read_value()

  def _unwrap(self, val):
    """Unwraps a distributed value into a list of per-device values."""
    if isinstance(val, values.DistributedValues):
      # Return in a deterministic order.
      return [val.get(device=d) for d in sorted(val.devices)]
    elif isinstance(val, list):
      # TODO(josh11b): We need to remove this case; per device values should
      # be represented using a PerDevice wrapper instead of a list with
      # one entry per device.
      return val
    return [val]

  @property
  def num_towers(self):
    # User override wins over the auto-detected core count.
    return self._num_cores_override or self._tpu_metadata.num_cores

  @property
  def num_hosts(self):
    return self._tpu_metadata.num_hosts

  @property
  def num_towers_per_host(self):
    return self._tpu_metadata.num_of_cores_per_host

  @property
  def num_replicas_in_sync(self):
    return self.num_towers

  @property
  def between_graph(self):
    return False

  @property
  def should_init(self):
    return True

  @property
  def should_checkpoint(self):
    return True

  @property
  def should_save_summary(self):
    return True

  @property
  def worker_devices(self):
    return self._tpu_devices

  @property
  def parameter_devices(self):
    return self._tpu_devices

  def get_host_cpu_device(self, host_id):
    """Returns the CPU device string for the host with id `host_id`."""
    if self._tpu_cluster_resolver.get_master() in ('', 'local'):
      return '/replica:0/task:0/device:CPU:0'
    job_name = self._tpu_cluster_resolver.get_job_name() or 'tpu_worker'
    return '/job:%s/task:%d/device:CPU:0' % (job_name, host_id)

  def configure(self,
                session_config=None,
                cluster_spec=None,
                task_type=None,
                task_id=None):
    """Configures `session_config` with this strategy's cluster spec."""
    del cluster_spec, task_type, task_id
    if session_config:
      session_config.isolate_session_state = True
      cluster_spec = self._tpu_cluster_resolver.cluster_spec()
      if cluster_spec:
        session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
|
|
#!/usr/bin/env python
"""This file contains code to generate ZIP/TAR archives."""
import enum
import io
import os
from typing import Dict, Iterable, Iterator
import zipfile
from grr_response_core.lib import utils
from grr_response_core.lib.util import collection
from grr_response_core.lib.util.compat import yaml
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server import flow_base
from grr_response_server.flows.general import export as flow_export
from grr_response_server.gui.api_plugins import client as api_client
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import objects as rdf_objects
def _ClientPathToString(client_path, prefix=""):
  """Returns a path-like string of client_path with optional prefix."""
  components = (prefix, client_path.client_id, client_path.vfs_path)
  return os.path.join(*components)
class ArchiveFormat(enum.Enum):
  """Supported output formats for generated archives."""
  ZIP = 1
  TAR_GZ = 2
# TODO(user): this is a general purpose class that is designed to export
# files archives for any flow or hunt. I'd expect this class to be phased out
# as soon as flow-specific implementations mapping-based implementations
# are done for all the flows (see FlowArchiveGenerator below).
class CollectionArchiveGenerator(object):
  """Class that generates downloaded files archive from a collection.

  The generator streams the archive: `Generate()` yields binary chunks that
  the caller writes out, so the whole archive never has to fit in memory.
  """

  ZIP = ArchiveFormat.ZIP
  TAR_GZ = ArchiveFormat.TAR_GZ

  # Prepended to the MANIFEST when some referenced files had no stored blobs.
  FILES_SKIPPED_WARNING = (
      "# NOTE: Some files were skipped because they were referenced in the \n"
      "# collection but were not downloaded by GRR, so there were no data \n"
      "# blobs in the data store to archive.\n").encode("utf-8")

  # Number of collection items fetched/streamed per batch in Generate().
  BATCH_SIZE = 1000

  def __init__(self,
               archive_format=ZIP,
               prefix=None,
               description=None,
               predicate=None,
               client_id=None):
    """CollectionArchiveGenerator constructor.

    Args:
      archive_format: May be ArchiveCollectionGenerator.ZIP or
        ArchiveCollectionGenerator.TAR_GZ. Defaults to ZIP.
      prefix: Name of the folder inside the archive that will contain all the
        generated data.
      description: String describing archive's contents. It will be included
        into the auto-generated MANIFEST file. Defaults to 'Files archive
        collection'.
      predicate: If not None, only the files matching the predicate will be
        archived, all others will be skipped. The predicate receives a
        db.ClientPath as input.
      client_id: The client_id to use when exporting a flow results collection.

    Raises:
      ValueError: if prefix is None or the archive format is unknown.
    """
    super().__init__()
    if archive_format == self.ZIP:
      self.archive_generator = utils.StreamingZipGenerator(
          compression=zipfile.ZIP_DEFLATED)
    elif archive_format == self.TAR_GZ:
      self.archive_generator = utils.StreamingTarGenerator()
    else:
      raise ValueError("Unknown archive format: %s" % archive_format)

    if not prefix:
      raise ValueError("Prefix can't be None.")
    self.prefix = prefix

    self.description = description or "Files archive collection"

    # Bookkeeping sets of client paths; they feed the generated MANIFEST.
    self.archived_files = set()
    self.ignored_files = set()
    self.failed_files = set()
    self.processed_files = set()

    # Default predicate accepts every file.
    self.predicate = predicate or (lambda _: True)
    self.client_id = client_id

  @property
  def output_size(self):
    """Number of bytes of archive data emitted so far."""
    return self.archive_generator.output_size

  @property
  def total_files(self):
    """Number of collection files processed so far (archived or not)."""
    return len(self.processed_files)

  def _GenerateDescription(self):
    """Generates description into a MANIFEST file in the archive."""
    manifest = {
        "description": self.description,
        "processed_files": len(self.processed_files),
        "archived_files": len(self.archived_files),
        "ignored_files": len(self.ignored_files),
        "failed_files": len(self.failed_files)
    }
    if self.ignored_files:
      manifest["ignored_files_list"] = [
          _ClientPathToString(cp, prefix="aff4:") for cp in self.ignored_files
      ]
    if self.failed_files:
      manifest["failed_files_list"] = [
          _ClientPathToString(cp, prefix="aff4:") for cp in self.failed_files
      ]

    manifest_fd = io.BytesIO()
    # Some processed files never got archived (no stored blobs); warn readers.
    if self.total_files != len(self.archived_files):
      manifest_fd.write(self.FILES_SKIPPED_WARNING)
    manifest_fd.write(yaml.Dump(manifest).encode("utf-8"))

    manifest_fd.seek(0)
    st = os.stat_result(
        (0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0, 0))

    for chunk in self.archive_generator.WriteFromFD(
        manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
      yield chunk

  def _GenerateClientInfo(self, client_id, client_fd):
    """Yields chunks of a client_info.yaml entry for the given client."""
    summary_dict = client_fd.ToPrimitiveDict(stringify_leaf_fields=True)
    summary = yaml.Dump(summary_dict).encode("utf-8")

    client_info_path = os.path.join(self.prefix, client_id, "client_info.yaml")
    st = os.stat_result((0o644, 0, 0, 0, 0, 0, len(summary), 0, 0, 0))
    yield self.archive_generator.WriteFileHeader(client_info_path, st=st)
    yield self.archive_generator.WriteFileChunk(summary)
    yield self.archive_generator.WriteFileFooter()

  def Generate(self, items):
    """Generates archive from a given collection.

    Iterates the collection and generates an archive by yielding contents
    of every referenced file.

    Args:
      items: Iterable of rdf_client_fs.StatEntry objects

    Yields:
      Binary chunks comprising the generated archive.
    """
    client_ids = set()
    for item_batch in collection.Batch(items, self.BATCH_SIZE):

      client_paths = set()
      for item in item_batch:
        try:
          client_path = flow_export.CollectionItemToClientPath(
              item, self.client_id)
        except flow_export.ItemNotExportableError:
          continue

        if not self.predicate(client_path):
          # Ignored files still count as processed for the MANIFEST totals.
          self.ignored_files.add(client_path)
          self.processed_files.add(client_path)
          continue

        client_ids.add(client_path.client_id)
        client_paths.add(client_path)

      for chunk in file_store.StreamFilesChunks(client_paths):
        self.processed_files.add(chunk.client_path)
        for output in self._WriteFileChunk(chunk=chunk):
          yield output

      # Paths requested but neither ignored nor archived (e.g. no stored
      # chunks) are still marked processed so the MANIFEST can report them.
      self.processed_files |= client_paths - (
          self.ignored_files | self.archived_files)

    if client_ids:
      # Append one client_info.yaml per client referenced in the collection.
      client_infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)
      for client_id, client_info in client_infos.items():
        client = api_client.ApiClient().InitFromClientInfo(client_info)
        for chunk in self._GenerateClientInfo(client_id, client):
          yield chunk

    for chunk in self._GenerateDescription():
      yield chunk

    yield self.archive_generator.Close()

  def _WriteFileChunk(self, chunk):
    """Yields binary chunks, respecting archive file headers and footers.

    Args:
      chunk: the StreamedFileChunk to be written

    Yields:
      Binary chunks of the archive stream.
    """
    if chunk.chunk_index == 0:
      # Make sure size of the original file is passed. It's required
      # when output_writer is StreamingTarWriter.
      st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
      target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
      yield self.archive_generator.WriteFileHeader(target_path, st=st)
    yield self.archive_generator.WriteFileChunk(chunk.data)
    if chunk.chunk_index == chunk.total_chunks - 1:
      yield self.archive_generator.WriteFileFooter()
      self.archived_files.add(chunk.client_path)
class FlowArchiveGenerator:
  """Archive generator for new-style flows that provide custom file mappings.

  Unlike CollectionArchiveGenerator, the archive layout is driven by
  ClientPathArchiveMapping objects supplied by the flow itself.
  """

  # Number of mappings fetched/streamed per batch in Generate().
  BATCH_SIZE = 1000

  def __init__(self, flow: rdf_flow_objects.Flow,
               archive_format: ArchiveFormat):
    """Initializes the generator for a single flow.

    Args:
      flow: The flow whose results are being archived.
      archive_format: The ArchiveFormat to produce.

    Raises:
      ValueError: if the archive format is unknown.
    """
    self.flow = flow
    self.archive_format = archive_format
    if archive_format == ArchiveFormat.ZIP:
      self.archive_generator = utils.StreamingZipGenerator(
          compression=zipfile.ZIP_DEFLATED)
      extension = "zip"
    elif archive_format == ArchiveFormat.TAR_GZ:
      self.archive_generator = utils.StreamingTarGenerator()
      extension = "tar.gz"
    else:
      raise ValueError(f"Unknown archive format: {archive_format}")
    # E.g. "C_1234_F_ABCD_SomeFlow"; also used as the top-level folder name.
    self.prefix = "{}_{}_{}".format(
        flow.client_id.replace(".", "_"), flow.flow_id, flow.flow_class_name)
    self.filename = f"{self.prefix}.{extension}"
    self.num_archived_files = 0

  def _GenerateDescription(self, processed_files: Dict[str, str],
                           missing_files: Iterable[str]) -> Iterable[bytes]:
    """Generates a MANIFEST file in the archive."""
    manifest = {
        "processed_files": processed_files,
        "missing_files": missing_files,
        "client_id": self.flow.client_id,
        "flow_id": self.flow.flow_id,
    }

    manifest_fd = io.BytesIO()
    manifest_fd.write(yaml.Dump(manifest).encode("utf-8"))

    manifest_fd.seek(0)
    st = os.stat_result(
        (0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0, 0))

    for chunk in self.archive_generator.WriteFromFD(
        manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
      yield chunk

  def _WriteFileChunk(self, chunk: file_store.StreamedFileChunk,
                      archive_paths_by_id: Dict[rdf_objects.PathID, str]):
    """Yields binary chunks, respecting archive file headers and footers.

    Args:
      chunk: the StreamedFileChunk to be written
      archive_paths_by_id: maps the chunk's path id to its in-archive path.
    """
    if chunk.chunk_index == 0:
      # Make sure size of the original file is passed. It's required
      # when output_writer is StreamingTarWriter.
      st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
      # NOTE(review): if the chunk's path_id is absent from the mapping,
      # .get() returns None and os.path.join below would raise TypeError —
      # presumably StreamFilesChunks only yields chunks for requested paths;
      # confirm.
      archive_path = (archive_paths_by_id or {}).get(chunk.client_path.path_id)
      target_path = os.path.join(self.prefix, archive_path)
      yield self.archive_generator.WriteFileHeader(target_path, st=st)
    yield self.archive_generator.WriteFileChunk(chunk.data)
    if chunk.chunk_index == chunk.total_chunks - 1:
      self.num_archived_files += 1
      yield self.archive_generator.WriteFileFooter()

  def Generate(
      self, mappings: Iterator[flow_base.ClientPathArchiveMapping]
  ) -> Iterator[bytes]:
    """Generates archive from a given set of client path mappings.

    Iterates the mappings and generates an archive by yielding contents
    of every referenced file.

    Args:
      mappings: A set of mappings defining the archive structure.

    Yields:
      Chunks of bytes of the generated archive.
    """
    # vfs_path -> archive_path for everything actually streamed.
    processed_files = {}
    missing_files = set()
    for mappings_batch in collection.Batch(mappings, self.BATCH_SIZE):
      archive_paths_by_id = {}
      for mapping in mappings_batch:
        archive_paths_by_id[mapping.client_path.path_id] = mapping.archive_path

      processed_in_batch = set()
      for chunk in file_store.StreamFilesChunks(
          [m.client_path for m in mappings_batch]):
        processed_in_batch.add(chunk.client_path.path_id)
        processed_files[chunk.client_path.vfs_path] = archive_paths_by_id[
            chunk.client_path.path_id]
        for output in self._WriteFileChunk(chunk, archive_paths_by_id):
          yield output

      # Mappings for which no chunk arrived are reported as missing.
      for mapping in mappings_batch:
        if mapping.client_path.path_id in processed_in_batch:
          continue
        missing_files.add(mapping.client_path.vfs_path)

    for chunk in self._GenerateDescription(processed_files, missing_files):
      yield chunk

    yield self.archive_generator.Close()

  @property
  def output_size(self):
    """Number of bytes of archive data emitted so far."""
    return self.archive_generator.output_size
|
|
"""This package contains the "front end" classes and functions
for Beaker caching.
Included are the :class:`.Cache` and :class:`.CacheManager` classes,
as well as the function decorators :func:`.region_decorate`,
:func:`.region_invalidate`.
"""
import warnings
import beaker.container as container
import beaker.util as util
from beaker.crypto.util import sha1
from beaker.exceptions import BeakerException, InvalidCacheBackendError
from beaker.synchronization import _threading
import beaker.ext.memcached as memcached
import beaker.ext.database as database
import beaker.ext.sqla as sqla
import beaker.ext.google as google
# Initialize the cache region dict
cache_regions = {}
"""Dictionary of 'region' arguments.
A "region" is a string name that refers to a series of cache
configuration arguments. An application may have multiple
"regions" - one which stores things in a memory cache, one
which writes data to files, etc.
The dictionary stores string key names mapped to dictionaries
of configuration arguments. Example::
from beaker.cache import cache_regions
cache_regions.update({
'short_term':{
'expire':'60',
'type':'memory'
},
'long_term':{
'expire':'1800',
'type':'dbm',
'data_dir':'/tmp',
}
})
"""
cache_managers = {}
class _backends(object):
initialized = False
def __init__(self, clsmap):
self._clsmap = clsmap
self._mutex = _threading.Lock()
def __getitem__(self, key):
try:
return self._clsmap[key]
except KeyError, e:
if not self.initialized:
self._mutex.acquire()
try:
if not self.initialized:
self._init()
self.initialized = True
return self._clsmap[key]
finally:
self._mutex.release()
raise e
def _init(self):
try:
import pkg_resources
# Load up the additional entry point defined backends
for entry_point in pkg_resources.iter_entry_points('beaker.backends'):
try:
namespace_manager = entry_point.load()
name = entry_point.name
if name in self._clsmap:
raise BeakerException("NamespaceManager name conflict,'%s' "
"already loaded" % name)
self._clsmap[name] = namespace_manager
except (InvalidCacheBackendError, SyntaxError):
# Ignore invalid backends
pass
except:
import sys
from pkg_resources import DistributionNotFound
# Warn when there's a problem loading a NamespaceManager
if not isinstance(sys.exc_info()[1], DistributionNotFound):
import traceback
from StringIO import StringIO
tb = StringIO()
traceback.print_exc(file=tb)
warnings.warn(
"Unable to load NamespaceManager "
"entry point: '%s': %s" % (
entry_point,
tb.getvalue()),
RuntimeWarning, 2)
except ImportError:
pass
# Initialize the basic available backends
# NOTE(review): the 'ext:*' entries reference modules imported at the top of
# this file; presumably those imports degrade gracefully when the optional
# third-party libraries are missing — confirm against beaker.ext.* sources.
clsmap = _backends({
          'memory':container.MemoryNamespaceManager,
          'dbm':container.DBMNamespaceManager,
          'file':container.FileNamespaceManager,
          'ext:memcached':memcached.MemcachedNamespaceManager,
          'ext:database':database.DatabaseNamespaceManager,
          'ext:sqla': sqla.SqlaNamespaceManager,
          'ext:google': google.GoogleNamespaceManager,
          })
def cache_region(region, *args):
    """Decorate a function such that its return result is cached,
    using a "region" to indicate the cache arguments.

    Example::

        from beaker.cache import cache_regions, cache_region

        # configure regions
        cache_regions.update({
            'short_term':{
                'expire':'60',
                'type':'memory'
            }
        })

        @cache_region('short_term', 'load_things')
        def load(search_term, limit, offset):
            '''Load from a database given a search term, limit, offset.'''
            return database.query(search_term)[offset:offset + limit]

    The decorator can also be used with object methods.  The ``self``
    argument is not part of the cache key.  This is based on the
    actual string name ``self`` being in the first argument
    position (new in 1.6)::

        class MyThing(object):
            @cache_region('short_term', 'load_things')
            def load(self, search_term, limit, offset):
                '''Load from a database given a search term, limit, offset.'''
                return database.query(search_term)[offset:offset + limit]

    Classmethods work as well - use ``cls`` as the name of the class argument,
    and place the decorator around the function underneath ``@classmethod``
    (new in 1.6)::

        class MyThing(object):
            @classmethod
            @cache_region('short_term', 'load_things')
            def load(cls, search_term, limit, offset):
                '''Load from a database given a search term, limit, offset.'''
                return database.query(search_term)[offset:offset + limit]

    :param region: String name of the region corresponding to the desired
      caching arguments, established in :attr:`.cache_regions`.

    :param \*args: Optional ``str()``-compatible arguments which will uniquely
      identify the key used by this decorated function, in addition
      to the positional arguments passed to the function itself at call time.
      This is recommended as it is needed to distinguish between any two functions
      or methods that have the same name (regardless of parent class or not).

    .. note::

        The function being decorated must only be called with
        positional arguments, and the arguments must support
        being stringified with ``str()``.  The concatenation
        of the ``str()`` version of each argument, combined
        with that of the ``*args`` sent to the decorator,
        forms the unique cache key.

    .. note::

        When a method on a class is decorated, the ``self`` or ``cls``
        argument in the first position is
        not included in the "key" used for caching.   New in 1.6.

    """
    # Region lookup is deferred to call time inside _cache_decorate, so the
    # region does not need to be configured when the decorator is applied.
    return _cache_decorate(args, None, None, region)
def region_invalidate(namespace, region, *args):
    """Invalidate a cache region corresponding to a function
    decorated with :func:`.cache_region`.

    :param namespace: The namespace of the cache to invalidate.  This is typically
      a reference to the original function (as returned by the :func:`.cache_region`
      decorator), where the :func:`.cache_region` decorator applies a "memo" to
      the function in order to locate the string name of the namespace.

    :param region: String name of the region used with the decorator.  This can be
      ``None`` in the usual case that the decorated function itself is passed,
      not the string name of the namespace.

    :param args: Stringifyable arguments that are used to locate the correct
      key.  This consists of the ``*args`` sent to the :func:`.cache_region`
      decorator itself, plus the ``*args`` sent to the function itself
      at runtime.

    Example::

        from beaker.cache import cache_regions, cache_region, region_invalidate

        # configure regions
        cache_regions.update({
            'short_term':{
                'expire':'60',
                'type':'memory'
            }
        })

        @cache_region('short_term', 'load_data')
        def load(search_term, limit, offset):
            '''Load from a database given a search term, limit, offset.'''
            return database.query(search_term)[offset:offset + limit]

        def invalidate_search(search_term, limit, offset):
            '''Invalidate the cached storage for a given search term, limit, offset.'''
            region_invalidate(load, 'short_term', 'load_data', search_term, limit, offset)

    Note that when a method on a class is decorated, the first argument ``cls``
    or ``self`` is not included in the cache key.  This means you don't send
    it to :func:`.region_invalidate`::

        class MyThing(object):
            @cache_region('short_term', 'some_data')
            def load(self, search_term, limit, offset):
                '''Load from a database given a search term, limit, offset.'''
                return database.query(search_term)[offset:offset + limit]

            def invalidate_search(self, search_term, limit, offset):
                '''Invalidate the cached storage for a given search term, limit, offset.'''
                region_invalidate(self.load, 'short_term', 'some_data', search_term, limit, offset)

    """
    if callable(namespace):
        if not region:
            # NOTE(review): a callable that was not decorated by cache_region
            # lacks _arg_region/_arg_namespace and raises AttributeError here
            # rather than BeakerException — confirm this is intended.
            region = namespace._arg_region
        namespace = namespace._arg_namespace

    if not region:
        raise BeakerException("Region or callable function "
                              "namespace is required")
    else:
        # Resolve the region name to its configuration dictionary.
        region = cache_regions[region]

    cache = Cache._get_cache(namespace, region)
    _cache_decorator_invalidate(cache, region['key_length'], args)
class Cache(object):
    """Front-end to the containment API implementing a data cache.

    :param namespace: the namespace of this Cache
    :param type: type of cache to use
    :param expire: seconds to keep cached data
    :param expiretime: seconds to keep cached data (legacy support)
    :param starttime: time when the cache was created

    """
    def __init__(self, namespace, type='memory', expiretime=None,
                 starttime=None, expire=None, **nsargs):
        try:
            cls = clsmap[type]
            # NOTE(review): a backend entry may be an InvalidCacheBackendError
            # instance — presumably recorded when an optional backend failed
            # to import, so it is raised only on first use; confirm.
            if isinstance(cls, InvalidCacheBackendError):
                raise cls
        except KeyError:
            raise TypeError("Unknown cache implementation %r" % type)

        self.namespace_name = namespace
        self.namespace = cls(namespace, **nsargs)
        # 'expire' is the modern spelling; 'expiretime' is legacy support.
        self.expiretime = expiretime or expire
        self.starttime = starttime
        self.nsargs = nsargs

    @classmethod
    def _get_cache(cls, namespace, kw):
        """Returns a shared Cache for (namespace, config), creating it once."""
        key = namespace + str(kw)
        try:
            return cache_managers[key]
        except KeyError:
            cache_managers[key] = cache = cls(namespace, **kw)
            return cache

    def put(self, key, value, **kw):
        """Store `value` in the cache under `key`."""
        self._get_value(key, **kw).set_value(value)
    set_value = put

    def get(self, key, **kw):
        """Retrieve a cached value from the container"""
        return self._get_value(key, **kw).get_value()
    get_value = get

    def remove_value(self, key, **kw):
        """Remove the value stored under `key`, if any."""
        mycontainer = self._get_value(key, **kw)
        mycontainer.clear_value()
    remove = remove_value

    def _get_value(self, key, **kw):
        """Returns the container.Value wrapper for `key`."""
        # Python 2 idiom: normalize unicode keys to byte strings.
        if isinstance(key, unicode):
            key = key.encode('ascii', 'backslashreplace')

        # A 'type' keyword selects the deprecated per-call configuration path.
        if 'type' in kw:
            return self._legacy_get_value(key, **kw)

        kw.setdefault('expiretime', self.expiretime)
        kw.setdefault('starttime', self.starttime)

        return container.Value(key, self.namespace, **kw)

    @util.deprecated("Specifying a "
            "'type' and other namespace configuration with cache.get()/put()/etc. "
            "is deprecated. Specify 'type' and other namespace configuration to "
            "cache_manager.get_cache() and/or the Cache constructor instead.")
    def _legacy_get_value(self, key, type, **kw):
        # Build a sibling Cache of the requested type and delegate to it.
        expiretime = kw.pop('expiretime', self.expiretime)
        starttime = kw.pop('starttime', None)
        createfunc = kw.pop('createfunc', None)
        kwargs = self.nsargs.copy()
        kwargs.update(kw)
        c = Cache(self.namespace.namespace, type=type, **kwargs)
        return c._get_value(key, expiretime=expiretime, createfunc=createfunc,
                            starttime=starttime)

    def clear(self):
        """Clear all the values from the namespace"""
        self.namespace.remove()

    # dict interface
    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, key):
        return self._get_value(key).has_current_value()

    def has_key(self, key):
        return key in self

    def __delitem__(self, key):
        self.remove_value(key)

    def __setitem__(self, key, value):
        self.put(key, value)
class CacheManager(object):
    """Manages Cache objects and cache regions for an application."""

    def __init__(self, **kwargs):
        """Initialize a CacheManager object with a set of options

        Options should be parsed with the
        :func:`~beaker.util.parse_cache_config_options` function to
        ensure only valid options are used.

        """
        self.kwargs = kwargs
        self.regions = kwargs.pop('cache_regions', {})

        # Add these regions to the module global
        cache_regions.update(self.regions)

    def get_cache(self, name, **kwargs):
        """Returns a Cache for `name`, merging per-call kwargs over defaults."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        return Cache._get_cache(name, kw)

    def get_cache_region(self, name, region):
        """Returns a Cache for `name` configured from the named region."""
        if region not in self.regions:
            raise BeakerException('Cache region not configured: %s' % region)
        kw = self.regions[region]
        return Cache._get_cache(name, kw)

    def region(self, region, *args):
        """Decorate a function to cache itself using a cache region

        The region decorator requires arguments if there are more than
        two of the same named function, in the same module. This is
        because the namespace used for the functions cache is based on
        the functions name and the module.

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)

            def populate_things():

                @cache.region('short_term', 'some_data')
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                return load('rabbits', 20, 0)

        .. note::

            The function being decorated must only be called with
            positional arguments.

        """
        return cache_region(region, *args)

    def region_invalidate(self, namespace, region, *args):
        """Invalidate a cache region namespace or decorated function

        This function only invalidates cache spaces created with the
        cache_region decorator.

        :param namespace: Either the namespace of the result to invalidate, or the
           cached function

        :param region: The region the function was cached to. If the function was
            cached to a single region then this argument can be None

        :param args: Arguments that were used to differentiate the cached
            function as well as the arguments passed to the decorated
            function

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)

            def populate_things(invalidate=False):

                @cache.region('short_term', 'some_data')
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                # If the results should be invalidated first
                if invalidate:
                    cache.region_invalidate(load, None, 'some_data',
                                            'rabbits', 20, 0)
                return load('rabbits', 20, 0)

        """
        return region_invalidate(namespace, region, *args)

    def cache(self, *args, **kwargs):
        """Decorate a function to cache itself with supplied parameters

        :param args: Used to make the key unique for this function, as in region()
            above.

        :param kwargs: Parameters to be passed to get_cache(), will override defaults

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)

            def populate_things():

                @cache.cache('mycache', expire=15)
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                return load('rabbits', 20, 0)

        .. note::

            The function being decorated must only be called with
            positional arguments.

        """
        return _cache_decorate(args, self, kwargs, None)

    def invalidate(self, func, *args, **kwargs):
        """Invalidate a cache decorated function

        This function only invalidates cache spaces created with the
        cache decorator.

        :param func: Decorated function to invalidate

        :param args: Used to make the key unique for this function, as in region()
            above.

        :param kwargs: Parameters that were passed for use by get_cache(), note that
            this is only required if a ``type`` was specified for the
            function

        Example::

            # Assuming a cache object is available like:
            cache = CacheManager(dict_of_config_options)

            def populate_things(invalidate=False):

                @cache.cache('mycache', type="file", expire=15)
                def load(search_term, limit, offset):
                    return load_the_data(search_term, limit, offset)

                # If the results should be invalidated first
                if invalidate:
                    cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file")
                return load('rabbits', 20, 0)

        """
        namespace = func._arg_namespace

        # NOTE(review): 'key_length' (when present) is forwarded into
        # get_cache() before being popped below — confirm that is intended.
        cache = self.get_cache(namespace, **kwargs)
        if hasattr(func, '_arg_region'):
            key_length = cache_regions[func._arg_region]['key_length']
        else:
            key_length = kwargs.pop('key_length', 250)
        _cache_decorator_invalidate(cache, key_length, args)
def _cache_decorate(deco_args, manager, kwargs, region):
    """Return a caching function decorator.

    Shared implementation behind :func:`cache_region` (``region`` given,
    ``manager``/``kwargs`` None) and :meth:`CacheManager.cache` (``manager``
    and ``kwargs`` given, ``region`` None).
    """
    # Single-element list used as a mutable cell: Python 2 has no 'nonlocal',
    # so the inner closure rebinds cache[0] to memoize the Cache instance.
    cache = [None]

    def decorate(func):
        namespace = util.func_namespace(func)
        # If the first argument is literally named 'self'/'cls', it is
        # excluded from the cache key below.
        skip_self = util.has_self_arg(func)

        def cached(*args):
            if not cache[0]:
                if region is not None:
                    if region not in cache_regions:
                        raise BeakerException(
                            'Cache region not configured: %s' % region)
                    reg = cache_regions[region]
                    # Disabled regions bypass caching entirely.
                    if not reg.get('enabled', True):
                        return func(*args)
                    cache[0] = Cache._get_cache(namespace, reg)
                elif manager:
                    cache[0] = manager.get_cache(namespace, **kwargs)
                else:
                    raise Exception("'manager + kwargs' or 'region' "
                                    "argument is required")

            # Build the key from the decorator args plus the call args,
            # falling back to unicode() for non-ASCII data (Python 2).
            if skip_self:
                try:
                    cache_key = " ".join(map(str, deco_args + args[1:]))
                except UnicodeEncodeError:
                    cache_key = " ".join(map(unicode, deco_args + args[1:]))
            else:
                try:
                    cache_key = " ".join(map(str, deco_args + args))
                except UnicodeEncodeError:
                    cache_key = " ".join(map(unicode, deco_args + args))
            if region:
                key_length = cache_regions[region]['key_length']
            else:
                # NOTE(review): this pops from the shared kwargs dict captured
                # by the closure, so after the first call the configured
                # key_length is gone and the default 250 is used — confirm
                # whether that is intended.
                key_length = kwargs.pop('key_length', 250)
            # Over-long keys are replaced by their SHA1 digest to respect
            # backend key-length limits.
            if len(cache_key) + len(namespace) > key_length:
                cache_key = sha1(cache_key).hexdigest()

            def go():
                return func(*args)

            return cache[0].get_value(cache_key, createfunc=go)
        # Memo attributes used by region_invalidate()/CacheManager.invalidate().
        cached._arg_namespace = namespace
        if region is not None:
            cached._arg_region = region
        return cached
    return decorate
def _cache_decorator_invalidate(cache, key_length, args):
    """Invalidate a cache key based on function arguments."""
    # Rebuild the key exactly as _cache_decorate does (unicode() is the
    # Python 2 fallback for non-ASCII data), including the SHA1 shortening.
    try:
        cache_key = " ".join(map(str, args))
    except UnicodeEncodeError:
        cache_key = " ".join(map(unicode, args))
    if len(cache_key) + len(cache.namespace_name) > key_length:
        cache_key = sha1(cache_key).hexdigest()
    cache.remove_value(cache_key)
|
|
# -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.template.base import TemplateDoesNotExist
from django.test import RequestFactory, TestCase, override_settings
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.views.debug import CallableSettingWrapper, ExceptionReporter
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
class CallableSettingWrapperTests(TestCase):
    """Unit tests for CallableSettingWrapper."""

    def test_repr(self):
        class FakeCallable(object):
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        wrapped_repr = repr(CallableSettingWrapper(FakeCallable()))
        self.assertEqual(wrapped_repr, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(TestCase):
    """Tests for the technical (DEBUG=True) 400/403/404/500 views."""

    def test_files(self):
        # Uploaded file names appear in the debug page, their content doesn't.
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)

        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error.',
                }),
            ],
        },
    }])
    def test_403_template(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        # A plain unresolved URL must not show the "Raised by:" section.
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_view_exceptions(self):
        # Each except_args variant must propagate as BrokenException.
        for n in range(len(except_args)):
            self.assertRaises(BrokenException, self.client.get,
                reverse('view_exception', args=(n,)))

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(re.search(b'[^c0-9]', id_repr),
                             "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)

    def test_template_exceptions(self):
        for n in range(len(except_args)):
            try:
                self.client.get(reverse('template_exception', args=(n,)))
            except Exception:
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
                    "Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
                    raising_loc)

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        # Use a real temp directory so the postmortem reports a concrete path.
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            self.assertContains(response, "%s (File does not exist)" % template_path, status_code=500, count=1)

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default urlconf template is shown instead
        of the technical 404 page, if the user has not altered their
        url conf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.

        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
@override_settings(
    DEBUG=True,
    ROOT_URLCONF="view_tests.urls",
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(TestCase):
    """Debug views keep working when a non-Django template backend is used."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        resp = self.client.get('/raises400/')
        self.assertContains(resp, '<div class="context" id="', status_code=400)

    def test_403(self):
        resp = self.client.get('/raises403/')
        self.assertContains(resp, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        resp = self.client.get('/raises404/')
        self.assertEqual(resp.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        target = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        resp = self.client.get(target)
        self.assertContains(resp, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(TestCase):
    """
    Tests for the HTML traceback pages produced by
    django.views.debug.ExceptionReporter.
    """

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        # The exception message is HTML-escaped in the output, so the
        # apostrophe must be asserted as '&#39;'.
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        # HTML-escaped apostrophe, as above.
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)
        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        # HTML-escaped apostrophe in the rendered message.
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        # HTML-escaped apostrophe in the rendered message.
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)
class PlainTextReportTests(TestCase):
    """Tests for the plain-text reports built by ExceptionReporter."""

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        for expected_fragment in ('ValueError at /test_view/',
                                  "Can't find my keys",
                                  'Request Method:',
                                  'Request URL:',
                                  'Exception Type:',
                                  'Exception Value:',
                                  'Traceback:',
                                  'Request information:'):
            self.assertIn(expected_fragment, text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        for expected_fragment in ('ValueError', "Can't find my keys"):
            self.assertIn(expected_fragment, text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        for expected_fragment in ('Exception Type:',
                                  'Exception Value:',
                                  'Traceback:',
                                  'Request data not supplied'):
            self.assertIn(expected_fragment, text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        reporter = ExceptionReporter(self.rf.get('/test_view/'), None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        reporter = ExceptionReporter(self.rf.get('/test_view/'), None,
                                     "I'm a little teapot", None)
        reporter.get_traceback_text()

    def test_message_only(self):
        # A report with only a message must still render without errors.
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()
class ExceptionReportTestMixin(object):
    """
    Assertion helpers shared by the ExceptionReporterFilterTests and
    AjaxResponseExceptionReporterFilter tests below.
    """

    # POST payload used by every helper; 'sausage-*' and 'bacon-*' are the
    # parameters the sensitive views mark as sensitive.
    breakfast_data = {
        'sausage-key': 'sausage-value',
        'baked-beans-key': 'baked-beans-value',
        'hash-brown-key': 'hash-brown-value',
        'bacon-key': 'bacon-value',
    }

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        req = self.rf.post('/some_url/', self.breakfast_data)
        resp = view(req)
        if check_for_vars:
            # All variables are shown.
            for fragment in ('cooked_eggs', 'scrambled', 'sauce', 'worcestershire'):
                self.assertContains(resp, fragment, status_code=500)
        if check_for_POST_params:
            # All POST parameters are shown.
            for name, value in self.breakfast_data.items():
                self.assertContains(resp, name, status_code=500)
                self.assertContains(resp, value, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        req = self.rf.post('/some_url/', self.breakfast_data)
        resp = view(req)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(resp, 'cooked_eggs', status_code=500)
            self.assertContains(resp, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(resp, 'sauce', status_code=500)
            self.assertNotContains(resp, 'worcestershire', status_code=500)
        if check_for_POST_params:
            # All POST parameters' names are shown.
            for name in self.breakfast_data:
                self.assertContains(resp, name, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(resp, 'baked-beans-value', status_code=500)
            self.assertContains(resp, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(resp, 'sausage-value', status_code=500)
            self.assertNotContains(resp, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        req = self.rf.post('/some_url/', self.breakfast_data)
        resp = view(req)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(resp, 'cooked_eggs', status_code=500)
            self.assertNotContains(resp, 'scrambled', status_code=500)
            self.assertContains(resp, 'sauce', status_code=500)
            self.assertNotContains(resp, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for name, value in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(resp, name, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(resp, value, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            req = self.rf.post('/some_url/', self.breakfast_data)
            view(req)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            for fragment in ('cooked_eggs', 'scrambled', 'sauce', 'worcestershire'):
                self.assertNotIn(fragment, body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            for fragment in ('cooked_eggs', 'scrambled', 'sauce', 'worcestershire'):
                self.assertIn(fragment, body_html)
            if check_for_POST_params:
                # All POST parameters are shown.
                for name, value in self.breakfast_data.items():
                    self.assertIn(name, body_plain)
                    self.assertIn(value, body_plain)
                    self.assertIn(name, body_html)
                    self.assertIn(value, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            req = self.rf.post('/some_url/', self.breakfast_data)
            view(req)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            for fragment in ('cooked_eggs', 'scrambled', 'sauce', 'worcestershire'):
                self.assertNotIn(fragment, body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                # All POST parameters' names are shown.
                for name in self.breakfast_data:
                    self.assertIn(name, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            req = self.rf.post('/some_url/', self.breakfast_data)
            view(req)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            for fragment in ('cooked_eggs', 'scrambled', 'sauce', 'worcestershire'):
                self.assertNotIn(fragment, body)
            for name, value in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(name, body)
                # No POST parameters' values are shown.
                self.assertNotIn(value, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(TestCase, ExceptionReportTestMixin):
    """
    Ensure that sensitive information can be filtered out of error reports.
    Refs #14614.
    """
    rf = RequestFactory()
    def test_non_sensitive_request(self):
        """
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        # Without @sensitive_* decorators, DEBUG on or off makes no
        # difference: everything is reported.
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)
    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)
    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)
    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports for if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)
        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)
    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        # The custom filter used by this view is unsafe regardless of DEBUG.
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)
    def test_sensitive_method(self):
        """
        Ensure that the sensitive_variables decorator works with object
        methods.
        Refs #18379.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view,
                                        check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view,
                                     check_for_POST_params=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view,
                                      check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view,
                                   check_for_POST_params=False)
    def test_sensitive_function_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as arguments to the
        decorated function.
        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
    def test_sensitive_function_keyword_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as keyword arguments
        to the decorated function.
        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)
    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots(object):
            __slots__ = []
            def __call__(self):
                return "This should not be displayed"
        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)
    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)
    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(TestCase, ExceptionReportTestMixin):
    """
    Ensure that sensitive information can be filtered out of error reports.
    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # Every request made through this factory carries the AJAX header.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)
    def test_paranoid_request(self):
        """
        Ensure that no POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)
    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
|
|
"""Unit tests for reviewbot.tools.clang."""
from __future__ import unicode_literals
import os
try:
    # Python 2.7 (writePlist was removed in Python 3.9)
    from plistlib import writePlist as dump_plist
except ImportError:
    # Python 3.x
    from plistlib import dump as dump_plist
import six
from reviewbot.tools.clang import ClangTool
from reviewbot.tools.testing import (BaseToolTestCase,
ToolTestCaseMetaclass,
integration_test,
simulation_test)
from reviewbot.utils.filesystem import tmpfiles
from reviewbot.utils.process import execute
@six.add_metaclass(ToolTestCaseMetaclass)
class ClangToolTests(BaseToolTestCase):
    """Unit tests for reviewbot.tools.clang.ClangTool."""
    # Configuration consumed by BaseToolTestCase / ToolTestCaseMetaclass.
    tool_class = ClangTool
    tool_exe_config_key = 'clang'
    tool_exe_path = '/path/to/clang'
    # plist_data simulates clang static analyzer output: a diagnostic
    # without a 'path' yields a single-line comment, while one with event
    # ranges spans the full range (lines 7-8 below).
    @integration_test()
    @simulation_test(plist_data={
        'files': ['test.c'],
        'diagnostics': [
            {
                'description': (
                    'Called function pointer is null (null '
                    'dereference)'
                ),
                'location': {
                    'col': 5,
                    'file': 0,
                    'line': 5,
                },
            },
            {
                'description': (
                    "Value stored to 'i' during its "
                    "initialization is never read"
                ),
                'location': {
                    'col': 9,
                    'file': 0,
                    'line': 7,
                },
                'path': [
                    {
                        'kind': 'event',
                        'ranges': [[
                            {
                                'col': 9,
                                'file': 0,
                                'line': 7,
                            },
                            {
                                'col': 9,
                                'file': 0,
                                'line': 8,
                            },
                        ]],
                    },
                ],
            },
        ],
    })
    def test_execute_with_c(self):
        """Testing ClangTool.execute with C file"""
        review, review_file = self.run_tool_execute(
            filename='test.c',
            file_contents=(
                b'int main()\n'
                b'{\n'
                b'    void (*foo)(void);\n'
                b'    foo = 0;\n'
                b'    foo();\n'
                b'\n'
                b'    int i = (1 /\n'
                b'             0);\n'
                b'}\n'
            ))
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 5,
                'num_lines': 1,
                'text': (
                    'Called function pointer is null (null dereference)\n'
                    '\n'
                    'Column: 5'
                ),
                'issue_opened': True,
                'rich_text': False,
            },
            {
                'filediff_id': review_file.id,
                'first_line': 7,
                'num_lines': 2,
                'text': (
                    "Value stored to 'i' during its initialization is "
                    "never read\n"
                    "\n"
                    "Column: 9"
                ),
                'issue_opened': True,
                'rich_text': False,
            },
        ])
        # Plain C files get no language flag on the command line.
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                'test.c',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    @integration_test()
    @simulation_test(plist_data={
        'files': ['test.m'],
        'diagnostics': [
            {
                'description': (
                    "Value stored to 'i' during its initialization "
                    "is never read"
                ),
                'location': {
                    'col': 13,
                    'file': 0,
                    'line': 4,
                },
            },
            {
                'description': 'Division by zero',
                'location': {
                    'col': 19,
                    'file': 0,
                    'line': 4,
                },
                'path': [
                    {
                        'kind': 'event',
                        'ranges': [[
                            {
                                'col': 17,
                                'file': 0,
                                'line': 4,
                            },
                            {
                                'col': 21,
                                'file': 0,
                                'line': 4,
                            },
                        ]],
                    },
                ],
            },
        ],
    })
    def test_execute_with_objc(self):
        """Testing ClangTool.execute with ObjC file"""
        review, review_file = self.run_tool_execute(
            filename='test.m',
            file_contents=(
                b'int main()\n'
                b'{\n'
                b'    @autoreleasepool {\n'
                b'        int i = 1 / 0;\n'
                b'    }\n'
                b'\n'
                b'    return 0;\n'
                b'}\n'
            ))
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 4,
                'num_lines': 1,
                'text': (
                    "Value stored to 'i' during its initialization is "
                    "never read\n"
                    "\n"
                    "Column: 13"
                ),
                'issue_opened': True,
                'rich_text': False,
            },
            {
                'filediff_id': review_file.id,
                'first_line': 4,
                'num_lines': 1,
                'text': (
                    'Division by zero\n'
                    '\n'
                    'Column: 17'
                ),
                'issue_opened': True,
                'rich_text': False,
            },
        ])
        # .m files are compiled with the -ObjC language flag.
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                '-ObjC',
                'test.m',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    @integration_test()
    @simulation_test(output=(
        "test.m:3:6: error: use of undeclared identifier 'badcode'\n"
        "    [badcode]\n"
        "     ^\n"
        "1 error generated.\n"
    ))
    def test_execute_with_objc_and_compiler_error(self):
        """Testing ClangTool.execute with ObjC file and compiler error"""
        review, review_file = self.run_tool_execute(
            filename='test.m',
            file_contents=(
                b'int main()\n'
                b'{\n'
                b'    [badcode]\n'
                b'\n'
                b'    return 0;\n'
                b'}\n'
            ))
        # A compiler error produces one rich-text comment on line 1
        # containing the full compiler output.
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 1,
                'num_lines': 1,
                'text': (
                    "Clang could not analyze this file, due to the "
                    "following errors:\n"
                    "\n"
                    "```\n"
                    "test.m:3:6: error: use of undeclared identifier "
                    "'badcode'\n"
                    "    [badcode]\n"
                    "     ^\n"
                    "1 error generated.\n"
                    "```"
                ),
                'issue_opened': True,
                'rich_text': True,
            },
        ])
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                '-ObjC',
                'test.m',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    @integration_test()
    @simulation_test(plist_data={
        'files': ['test.mm'],
        'diagnostics': [
            {
                'description': (
                    "Value stored to 'i' during its initialization "
                    "is never read"
                ),
                'location': {
                    'col': 13,
                    'file': 0,
                    'line': 6,
                },
            },
            {
                'description': 'Division by zero',
                'location': {
                    'col': 19,
                    'file': 0,
                    'line': 6,
                },
                'path': [
                    {
                        'kind': 'event',
                        'ranges': [[
                            {
                                'col': 17,
                                'file': 0,
                                'line': 6,
                            },
                            {
                                'col': 21,
                                'file': 0,
                                'line': 6,
                            },
                        ]],
                    },
                ],
            },
        ],
    })
    def test_execute_with_objcpp(self):
        """Testing ClangTool.execute with ObjC++ file"""
        review, review_file = self.run_tool_execute(
            filename='test.mm',
            file_contents=(
                b'class Foo {};\n'
                b'\n'
                b'int main()\n'
                b'{\n'
                b'    @autoreleasepool {\n'
                b'        int i = 1 / 0;\n'
                b'    }\n'
                b'\n'
                b'    return 0;\n'
                b'}\n'
            ))
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 6,
                'num_lines': 1,
                'text': (
                    "Value stored to 'i' during its initialization is "
                    "never read\n"
                    "\n"
                    "Column: 13"
                ),
                'issue_opened': True,
                'rich_text': False,
            },
            {
                'filediff_id': review_file.id,
                'first_line': 6,
                'num_lines': 1,
                'text': (
                    'Division by zero\n'
                    '\n'
                    'Column: 17'
                ),
                'issue_opened': True,
                'rich_text': False,
            },
        ])
        # .mm files are compiled with the -ObjC++ language flag.
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                '-ObjC++',
                'test.mm',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    @integration_test()
    @simulation_test(output=(
        "test.mm:5:6: error: use of undeclared identifier 'badcode'\n"
        "    [badcode]\n"
        "     ^\n"
        "1 error generated.\n"
    ))
    def test_execute_with_objcpp_and_compiler_error(self):
        """Testing ClangTool.execute with ObjC++ file and compiler error"""
        review, review_file = self.run_tool_execute(
            filename='test.mm',
            file_contents=(
                b'class Foo {};\n'
                b'\n'
                b'int main()\n'
                b'{\n'
                b'    [badcode]\n'
                b'\n'
                b'    return 0;\n'
                b'}\n'
            ))
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 1,
                'num_lines': 1,
                'text': (
                    "Clang could not analyze this file, due to the "
                    "following errors:\n"
                    "\n"
                    "```\n"
                    "test.mm:5:6: error: use of undeclared identifier "
                    "'badcode'\n"
                    "    [badcode]\n"
                    "     ^\n"
                    "1 error generated.\n"
                    "```"
                ),
                'issue_opened': True,
                'rich_text': True,
            },
        ])
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                '-ObjC++',
                'test.mm',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    @integration_test()
    @simulation_test(plist_data={
        'files': ['test.c'],
        'diagnostics': [
            {
                'description': (
                    'Called function pointer is null (null '
                    'dereference)'
                ),
                'location': {
                    'col': 5,
                    'line': 5,
                    'file': 0,
                },
            },
            {
                'description': (
                    "Value stored to 'i' during its initialization is "
                    "never read"
                ),
                'location': {
                    'col': 13,
                    'line': 7,
                    'file': 0,
                },
                'path': [
                    {
                        'kind': 'event',
                        'ranges': [
                            [
                                {
                                    'col': 9,
                                    'file': 0,
                                    'line': 7,
                                },
                                {
                                    'col': 9,
                                    'file': 0,
                                    'line': 7,
                                },
                            ],
                            [
                                {
                                    'col': 13,
                                    'file': 0,
                                    'line': 7,
                                },
                                {
                                    'col': 15,
                                    'file': 0,
                                    'line': 8,
                                },
                            ],
                        ],
                    },
                ],
            },
        ],
    })
    def test_execute_with_cmdline_args(self):
        """Testing ClangTool.execute with cmdline_args setting"""
        review, review_file = self.run_tool_execute(
            filename='test.c',
            file_contents=(
                b'int main()\n'
                b'{\n'
                b'    void (*foo)(void);\n'
                b'    foo = 0;\n'
                b'    foo();\n'
                b'\n'
                b'    int i = (1 /\n'
                b'             0);\n'
                b'}\n'
            ),
            tool_settings={
                'cmdline_args': '-I/inc -W123',
            })
        self.assertEqual(review.comments, [
            {
                'filediff_id': review_file.id,
                'first_line': 5,
                'num_lines': 1,
                'text': (
                    'Called function pointer is null (null dereference)\n'
                    '\n'
                    'Column: 5'
                ),
                'issue_opened': True,
                'rich_text': False,
            },
            {
                'filediff_id': review_file.id,
                'first_line': 7,
                'num_lines': 2,
                'text': (
                    "Value stored to 'i' during its initialization is never "
                    "read\n"
                    "\n"
                    "Column: 9"
                ),
                'issue_opened': True,
                'rich_text': False,
            },
        ])
        # The configured cmdline_args are inserted before the filename.
        self.assertSpyCalledWith(
            execute,
            [
                self.tool_exe_path,
                '-S',
                '--analyze',
                '-Xanalyzer',
                '-analyzer-output=plist',
                '-I/inc',
                '-W123',
                'test.c',
                '-o',
                tmpfiles[-1],
            ],
            ignore_errors=True)
    def setup_simulation_test(self, plist_data=None, output=None):
        """Set up the simulation test for Clang.
        This will spy on :py:func:`~reviewbot.utils.process.execute`, making
        it write a plist file, if data is provided, or delete it if simulating
        a compiler error.
        Args:
            plist_data (dict, optional):
                The simulated plist data, if simulating a successful run.
            output (unicode, optional):
                The resulting compiler output, if simulating a compiler error.
        """
        @self.spy_for(execute)
        def _execute(cmdline, **kwargs):
            # The analyzer's plist output file is always the last argument
            # ('-o <file>') in the command lines built by ClangTool.
            filename = cmdline[-1]
            if plist_data:
                with open(cmdline[-1], 'wb') as fp:
                    dump_plist(plist_data, fp)
            else:
                # clang will delete the output file if there's a compiler
                # error.
                os.unlink(filename)
            return output
|
|
import os, subprocess
import lxml
import json
import pysam
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from django.test.client import Client
from tastypie.exceptions import NotRegistered, BadRequest
from core.models import BedEntry, QTLEntry, SNPEntry
from biodas import DAS, DasModelResource, DasResource
class BedResource(DasModelResource):
    # DAS resource backed by the BedEntry model; served as "bed".
    class Meta:
        resource_name = 'bed'
        queryset = BedEntry.objects.all()
class QTLResource(DasModelResource):
    # DAS resource backed by QTLEntry; advertises coordinate version 36.
    class Meta:
        version = 36
        resource_name = 'qtl'
        queryset = QTLEntry.objects.all()
class SNPResource(DasModelResource):
    # DAS resource backed by SNPEntry; advertises coordinate version 37.
    class Meta:
        version = 37
        resource_name = 'snps'
        queryset = SNPEntry.objects.all()
        # Fields suppressed from feature output. Both cases of each name
        # are listed — presumably to match however DasModelResource cases
        # field names; confirm against its implementation.
        excludes = ['chrom', 'CHROM', 'region', 'KENT_BIN', 'kent_bin']
        method = "NextGenSeq"
class FileBedResource(DasResource):
    """ An example of a BED file used as a resource.
    NOTE: This is not recommended for use for large bed files.
    """
    # NOTE(review): this class-level ``filename`` points at a 'test.bed'
    # beside this module, while Meta.filename below points into fixtures/.
    # One of the two is presumably dead — confirm which attribute
    # DasResource actually reads before removing either.
    filename = os.path.join(os.path.dirname(__file__), 'test.bed')
    class Meta:
        resource_name = 'testbed'
        filename = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'fixtures/test.bed')
class FileBamResource(DasResource):
    """ An example of a BAM file used as a resource
    """
    class Meta:
        resource_name = 'testbam'
        filename = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'fixtures/AKR_brain_test.bam')
class FileBamJsonResource(DasResource):
    """ An example of a BAM file used as a resource
    """
    class Meta:
        resource_name = 'testjsonbam'
        # Serve features as JSON rather than DAS XML.
        json = True
        filename = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'fixtures/AKR_brain_test.bam')
class ApiTestCase(TestCase):
    """Tests for DAS server registration and its top-level response."""

    urls = 'core.tests.api_urls'

    def test_register(self):
        """ Test basic registration of sources with the DAS server
        """
        api = DAS()
        self.assertEqual(len(api._registry), 0)

        # Each registration should grow the registry by exactly one.
        for expected_size, resource_cls in enumerate(
                (BedResource, QTLResource, SNPResource), start=1):
            api.register(resource_cls())
            self.assertEqual(len(api._registry), expected_size)

    def test_top_level(self):
        """Check the top-level view's status and DAS version header."""
        api = DAS()

        for resource in (BedResource(), QTLResource()):
            api.register(resource)

        response = api.top_level(HttpRequest())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['X-DAS-Version'], 'DAS/1.6')
class DasModelCalls(TestCase):
    """ Test actual get responses from django models
    """
    urls = 'core.tests.api_urls'
    def setUp(self):
        # QTL fixtures: one interval outside the commonly queried range,
        # one inside it, and one on a different chromosome.
        self.qtl = QTLEntry(chrom = 1, start = 27000, end = 29000,
                            gene="outside_interval", strand = True,
                            score = 20)
        self.qtl.save()
        self.qtl = QTLEntry(chrom = 1, start = 2000, end = 2600,
                            gene="within_interval", strand = True,
                            score = 50)
        self.qtl.save()
        self.qtl = QTLEntry(chrom = 2, start = 2000, end = 2600,
                            gene="chr2_test", strand = True,
                            score = 60)
        self.qtl.save()
        self.bed = BedEntry(chrom = 1, start = 2000, end = 2600,
                            gene="Testgene", strand = True)
        self.bed.save()
        # Two SNPs three bases apart on chr7, sharing one Kent bin.
        self.snp = SNPEntry(chrom = 7, start=116182054, end=116182054,
                            rsID="rs959173", kent_bin=1471, counts=30)
        self.snp.save()
        self.snp = SNPEntry(chrom = 7, start=116182057, end=116182057,
                            rsID="rs959173", kent_bin=1471, counts=70)
        self.snp.save()
    def test_top_level(self):
        """ Test top level discovery query.
        """
        resp = self.client.get('/api/das/sources/')
        self.assertEqual(resp.status_code, 200)
        root = lxml.etree.fromstring(resp.content)
        self.assertEqual(root.tag, 'SOURCES')
        # Check queries
        resp = self.client.get('/api/das/sources?version=36')
        root = lxml.etree.fromstring(resp.content)
        self.assertEqual(len(root), 1)
        #resp = self.client.get('/api/das/sources?version=36?capabilit=1.5')
    def test_resource_top_level(self):
        """ Test the top level for the resources
        """
        resp = self.client.get('/api/das/bed/')
        root = lxml.etree.fromstring(resp.content)
        self.assertEqual(len(root), 1)
        # NOTE(review): the qtl response below is fetched but ``root`` is
        # never re-parsed from it, so this re-asserts the bed result —
        # confirm whether ``root`` should be rebuilt from ``resp.content``.
        resp = self.client.get('/api/das/qtl/')
        self.assertEqual(len(root), 1)
    def test_feature_biodas_basics(self):
        """ Test for some of the top level required fields
        """
        resp = self.client.get('/api/das/qtl/features?segment=1:100,20000')
        dasgff = lxml.etree.fromstring(resp.content)
        type_gff = dasgff.xpath("//GFF")
        self.assertEqual(len(type_gff), 1)
        # Every SEGMENT returned must identify the queried reference ('1').
        type_segment = dasgff.xpath("//GFF/SEGMENT")
        for i in type_segment:
            self.assertEqual(i.get('id'), '1')
    def test_method_is_added(self):
        """ Make sure that the method and type are added correctly
        This is a required field
        """
        resp = self.client.get('/api/das/qtl/features?segment=1:100,20000')
        dasgff = lxml.etree.fromstring(resp.content)
        type_ele = dasgff.xpath("//GFF/SEGMENT/FEATURE/METHOD")
        self.assertGreater(len(type_ele), 0)
        # Every FEATURE must carry exactly one METHOD element.
        type_feat = dasgff.xpath("//GFF/SEGMENT/FEATURE")
        self.assertEqual(len(type_feat), len(type_ele))
    def test_type_is_added(self):
        """ Make sure type is added correctly
        This is a required field
        """
        resp = self.client.get('/api/das/snps/features?segment=7:116182053,116182059')
        dasgff = lxml.etree.fromstring(resp.content)
        type_ele = dasgff.xpath("//GFF/SEGMENT/FEATURE/TYPE")
        self.assertGreater(len(type_ele), 0)
        # Every FEATURE must carry exactly one TYPE element.
        type_feat = dasgff.xpath("//GFF/SEGMENT/FEATURE")
        self.assertEqual(len(type_feat), len(type_ele))
    def test_resource_queries(self):
        """ Test segment queries on the resources
        :TODO fix this.
        """
        resp = self.client.get('/api/das/qtl/features?segment=1:100,20000')
        dasgff = lxml.etree.fromstring(resp.content)
        #self.assertEqual(dasgff[0][0][0].get('label'), 'within_interval')
    def test_whole_segment_query(self):
        """ Test that a whole segment query returns the whole segment
        :TODO need to handle throttling
        """
        # chrom 1 holds two QTL fixtures and one BED fixture (see setUp).
        resp = self.client.get('/api/das/qtl/features?segment=1')
        segments = lxml.etree.fromstring(resp.content)[0][0]
        self.assertEqual(len(segments), 2)
        resp = self.client.get('/api/das/bed/features?segment=1')
        segments = lxml.etree.fromstring(resp.content)[0][0]
        self.assertEqual(len(segments), 1)
    def test_arbitrary_fields(self):
        """ Test the return of arbitrary fields from the model and test
        exclusion
        """
        # CHROM is in SNPResource.Meta.excludes and must not be emitted.
        resp =\
        self.client.get('/api/das/snps/features?segment=7:116182053,116182059')
        self.assertIn('START', resp.content)
        self.assertIn('COUNTS', resp.content)
        self.assertNotIn('CHROM', resp.content)
    def test_json_response(self):
        """ Test the return of arbitrary fields from the model and test
        exclusion
        """
        resp =\
        self.client.get('/api/das/snps/features?segment=7:116182053,116182059',
                        HTTP_ACCEPT='application/xml')
        self.assertIn('START', resp.content)
        self.assertIn('COUNTS', resp.content)
        self.assertNotIn('CHROM', resp.content)
    def test_kent_binning(self):
        """ Tests Kent Binning
        :TODO this needs work
        """
        self.assertEqual(1, 1)
class DasFileSourcesTest(TestCase):
    # Tests for the file-backed DAS resources (BED and BAM fixtures).
    def setUp(self):
        # Open the BAM fixture directly with pysam so tests can compare
        # DAS responses against the raw reads.
        self.fh = pysam.Samfile(os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'fixtures/AKR_brain_test.bam'))
    def test_resource_top_level(self):
        """ Test the top level response for a file resource
        """
        resp = self.client.get('/api/das/testbed/')
        root = lxml.etree.fromstring(resp.content)
        self.assertEqual(len(root), 1)
        # Add more checks to this
        # BAM file check
        resp = self.client.get('/api/das/testbam/')
        root = lxml.etree.fromstring(resp.content)
        self.assertEqual(len(root), 1)
    def test_feature_queries(self):
        """ Test region:start, end query on various file sources.
        """
        resp = self.client.get('/api/das/testbed/features?segment=chr1:60,200')
        segments = lxml.etree.fromstring(resp.content)[0][0]
        self.assertEqual(len(segments), 2)
    def test_whole_segment_query(self):
        """ Test that a whole segment query returns the whole segment
        """
        resp = self.client.get('/api/das/testbed/features?segment=chr1')
        segments = lxml.etree.fromstring(resp.content)[0][0]
        self.assertEqual(len(segments), 3)
    def test_bam_feature_queries(self):
        """ Test BAM feature queries
        """
        resp =\
            self.client.get(
                '/api/das/testbam/features?segment=chr7:3299628,3300000')
        segments = lxml.etree.fromstring(resp.content)[0][0]
        # The DAS response must contain one feature per read that pysam
        # fetches from the same region of the fixture.
        counter = 0
        reads = self.fh.fetch('chr7', 3299628, 3300000)
        for i in reads:
            counter += 1
        self.assertEqual(len(segments), counter)
    def test_stylesheet(self):
        resp = self.client.get('/api/das/testbam/stylesheet/')
        categories = lxml.etree.fromstring(resp.content)[0][0]
        self.assertGreaterEqual(len(categories),1)
    def test_json_feature_queries(self):
        """ Test json feature queries
        """
        resp =\
            self.client.get(
                '/api/das/testjsonbam/features?segment=chr7:3299628,3300000')
        self.assertEqual(len(json.loads(resp.content)), 3)
|
|
"""This module contains the general information for OsiStart ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class OsiStartConsts:
    """Constant values for the enumerated properties of the ``osiStart``
    managed object (admin state and the various share-type fields).
    """
    ADMIN_STATE_TRIGGER = "trigger"
    ADMIN_STATE_TRIGGERED = "triggered"
    ANSWER_FILE_SHARE_TYPE_SCP = "scp"
    ANSWER_FILE_SHARE_TYPE_SFTP = "sftp"
    ANSWER_FILE_SHARE_TYPE_WWW = "www"
    CONFIG_SHARE_TYPE_HTTP = "http"
    CONFIG_SHARE_TYPE_SCP = "scp"
    CONFIG_SHARE_TYPE_SFTP = "sftp"
    CONFIG_SHARE_TYPE_TFTP = "tftp"
    ISO_SHARE_TYPE_CIFS = "cifs"
    ISO_SHARE_TYPE_NFS = "nfs"
    ISO_SHARE_TYPE_SD = "sd"
    ISO_SHARE_TYPE_WWW = "www"
    REMOTE_SHARE_TYPE_SCP = "scp"
    REMOTE_SHARE_TYPE_SFTP = "sftp"
    REMOTE_SHARE_TYPE_TFTP = "tftp"
    # NOTE(review): declared out of group order by the code generator;
    # kept as-is (attribute order has no runtime effect here).
    CONFIG_SHARE_TYPE_WWW = "www"
class OsiStart(ManagedObject):
    """This is OsiStart class."""
    consts = OsiStartConsts()
    naming_props = set([])
    # MO metadata per platform ("classic" rack-mount vs "modular"):
    # class ids, introduced-in version, access mask, parent MOs and the
    # supported verbs.
    mo_meta = {
        "classic": MoMeta("OsiStart", "osiStart", "osiStart", VersionMeta.Version301c, "InputOutput", 0x1fffffff, [], ["admin"], [u'osiController'], [], ["Get"]),
        "modular": MoMeta("OsiStart", "osiStart", "osiStart", VersionMeta.Version301c, "InputOutput", 0x1fffffff, [], ["admin"], [u'osiController'], [], ["Get"])
    }
    # Per-platform property metadata: XML name, type, version, access,
    # dirty-mask bit, length limits, value regex, allowed enum values and
    # numeric ranges. Generated code — do not hand-edit individual entries.
    prop_meta = {
        "classic": {
            "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
            "answer_file_password": MoPropertyMeta("answer_file_password", "answerFilePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], []),
            "answer_file_share_file": MoPropertyMeta("answer_file_share_file", "answerFileShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8, 0, 510, None, [], []),
            "answer_file_share_ip": MoPropertyMeta("answer_file_share_ip", "answerFileShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "answer_file_share_path": MoPropertyMeta("answer_file_share_path", "answerFileSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x20, 0, 510, None, [], []),
            "answer_file_share_type": MoPropertyMeta("answer_file_share_type", "answerFileShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["scp", "sftp", "www"], []),
            "answer_file_username": MoPropertyMeta("answer_file_username", "answerFileUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [], []),
            "config_share_file": MoPropertyMeta("config_share_file", "configShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x100, 0, 510, None, [], []),
            "config_share_ip": MoPropertyMeta("config_share_ip", "configShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "config_share_password": MoPropertyMeta("config_share_password", "configSharePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, [], []),
            "config_share_path": MoPropertyMeta("config_share_path", "configSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x800, 0, 510, None, [], []),
            "config_share_type": MoPropertyMeta("config_share_type", "configShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, ["http", "scp", "sftp", "tftp"], []),
            "config_share_username": MoPropertyMeta("config_share_username", "configShareUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2000, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4000, 0, 255, None, [], []),
            "iso_share": MoPropertyMeta("iso_share", "isoShare", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x8000, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,255}""", [], []),
            "iso_share_ip": MoPropertyMeta("iso_share_ip", "isoShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10000, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
            "iso_share_type": MoPropertyMeta("iso_share_type", "isoShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x20000, None, None, None, ["cifs", "nfs", "sd", "www"], []),
            "password": MoPropertyMeta("password", "password", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x40000, None, None, None, [], []),
            "remote_share_file": MoPropertyMeta("remote_share_file", "remoteShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x80000, 0, 510, None, [], []),
            "remote_share_ip": MoPropertyMeta("remote_share_ip", "remoteShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x100000, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "remote_share_password": MoPropertyMeta("remote_share_password", "remoteSharePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x200000, None, None, None, [], []),
            "remote_share_path": MoPropertyMeta("remote_share_path", "remoteSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x400000, 0, 510, None, [], []),
            "remote_share_type": MoPropertyMeta("remote_share_type", "remoteShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x800000, None, None, None, ["scp", "sftp", "tftp"], []),
            "remote_share_username": MoPropertyMeta("remote_share_username", "remoteShareUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x1000000, None, None, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2000000, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4000000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "time_out": MoPropertyMeta("time_out", "timeOut", "uint", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8000000, None, None, None, [], ["30-240"]),
            "username": MoPropertyMeta("username", "username", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10000000, None, None, None, [], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "iso_share_file": MoPropertyMeta("iso_share_file", "isoShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "iso_share_path": MoPropertyMeta("iso_share_path", "isoSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        },
        "modular": {
            "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
            "answer_file_password": MoPropertyMeta("answer_file_password", "answerFilePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], []),
            "answer_file_share_file": MoPropertyMeta("answer_file_share_file", "answerFileShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8, 0, 510, None, [], []),
            "answer_file_share_ip": MoPropertyMeta("answer_file_share_ip", "answerFileShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "answer_file_share_path": MoPropertyMeta("answer_file_share_path", "answerFileSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x20, 0, 510, None, [], []),
            "answer_file_share_type": MoPropertyMeta("answer_file_share_type", "answerFileShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["scp", "sftp", "www"], []),
            "answer_file_username": MoPropertyMeta("answer_file_username", "answerFileUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [], []),
            "config_share_file": MoPropertyMeta("config_share_file", "configShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x100, 0, 510, None, [], []),
            "config_share_ip": MoPropertyMeta("config_share_ip", "configShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "config_share_password": MoPropertyMeta("config_share_password", "configSharePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, [], []),
            "config_share_path": MoPropertyMeta("config_share_path", "configSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x800, 0, 510, None, [], []),
            # NOTE(review): unlike "classic" (http/scp/sftp/tftp), the
            # modular platform allows "www" here — mirrors
            # OsiStartConsts.CONFIG_SHARE_TYPE_WWW; generated, confirm
            # against the IMC model before "fixing".
            "config_share_type": MoPropertyMeta("config_share_type", "configShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, ["scp", "sftp", "www"], []),
            "config_share_username": MoPropertyMeta("config_share_username", "configShareUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2000, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4000, 0, 255, None, [], []),
            "iso_share": MoPropertyMeta("iso_share", "isoShare", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x8000, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,255}""", [], []),
            "iso_share_ip": MoPropertyMeta("iso_share_ip", "isoShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10000, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "iso_share_type": MoPropertyMeta("iso_share_type", "isoShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x20000, None, None, None, ["cifs", "nfs", "sd", "www"], []),
            "password": MoPropertyMeta("password", "password", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x40000, None, None, None, [], []),
            "remote_share_file": MoPropertyMeta("remote_share_file", "remoteShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x80000, 0, 510, None, [], []),
            "remote_share_ip": MoPropertyMeta("remote_share_ip", "remoteShareIp", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x100000, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
            "remote_share_password": MoPropertyMeta("remote_share_password", "remoteSharePassword", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x200000, None, None, None, [], []),
            "remote_share_path": MoPropertyMeta("remote_share_path", "remoteSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x400000, 0, 510, None, [], []),
            "remote_share_type": MoPropertyMeta("remote_share_type", "remoteShareType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x800000, None, None, None, ["scp", "sftp", "tftp"], []),
            "remote_share_username": MoPropertyMeta("remote_share_username", "remoteShareUsername", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x1000000, None, None, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2000000, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4000000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "time_out": MoPropertyMeta("time_out", "timeOut", "uint", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8000000, None, None, None, [], ["30-240"]),
            "username": MoPropertyMeta("username", "username", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10000000, None, None, None, [], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "iso_share_file": MoPropertyMeta("iso_share_file", "isoShareFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "iso_share_path": MoPropertyMeta("iso_share_path", "isoSharePath", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        },
    }
    # XML attribute name -> Python attribute name, per platform.
    prop_map = {
        "classic": {
            "adminState": "admin_state", 
            "answerFilePassword": "answer_file_password", 
            "answerFileShareFile": "answer_file_share_file", 
            "answerFileShareIp": "answer_file_share_ip", 
            "answerFileSharePath": "answer_file_share_path", 
            "answerFileShareType": "answer_file_share_type", 
            "answerFileUsername": "answer_file_username", 
            "configShareFile": "config_share_file", 
            "configShareIp": "config_share_ip", 
            "configSharePassword": "config_share_password", 
            "configSharePath": "config_share_path", 
            "configShareType": "config_share_type", 
            "configShareUsername": "config_share_username", 
            "dn": "dn", 
            "isoShare": "iso_share", 
            "isoShareIp": "iso_share_ip", 
            "isoShareType": "iso_share_type", 
            "password": "password", 
            "remoteShareFile": "remote_share_file", 
            "remoteShareIp": "remote_share_ip", 
            "remoteSharePassword": "remote_share_password", 
            "remoteSharePath": "remote_share_path", 
            "remoteShareType": "remote_share_type", 
            "remoteShareUsername": "remote_share_username", 
            "rn": "rn", 
            "status": "status", 
            "timeOut": "time_out", 
            "username": "username", 
            "childAction": "child_action", 
            "isoShareFile": "iso_share_file", 
            "isoSharePath": "iso_share_path", 
        },
        "modular": {
            "adminState": "admin_state", 
            "answerFilePassword": "answer_file_password", 
            "answerFileShareFile": "answer_file_share_file", 
            "answerFileShareIp": "answer_file_share_ip", 
            "answerFileSharePath": "answer_file_share_path", 
            "answerFileShareType": "answer_file_share_type", 
            "answerFileUsername": "answer_file_username", 
            "configShareFile": "config_share_file", 
            "configShareIp": "config_share_ip", 
            "configSharePassword": "config_share_password", 
            "configSharePath": "config_share_path", 
            "configShareType": "config_share_type", 
            "configShareUsername": "config_share_username", 
            "dn": "dn", 
            "isoShare": "iso_share", 
            "isoShareIp": "iso_share_ip", 
            "isoShareType": "iso_share_type", 
            "password": "password", 
            "remoteShareFile": "remote_share_file", 
            "remoteShareIp": "remote_share_ip", 
            "remoteSharePassword": "remote_share_password", 
            "remoteSharePath": "remote_share_path", 
            "remoteShareType": "remote_share_type", 
            "remoteShareUsername": "remote_share_username", 
            "rn": "rn", 
            "status": "status", 
            "timeOut": "time_out", 
            "username": "username", 
            "childAction": "child_action", 
            "isoShareFile": "iso_share_file", 
            "isoSharePath": "iso_share_path", 
        },
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create an OsiStart MO under *parent_mo_or_dn*.

        All properties start as None; values may be supplied through
        **kwargs and are applied by ManagedObject.__init__.
        """
        self._dirty_mask = 0
        self.admin_state = None
        self.answer_file_password = None
        self.answer_file_share_file = None
        self.answer_file_share_ip = None
        self.answer_file_share_path = None
        self.answer_file_share_type = None
        self.answer_file_username = None
        self.config_share_file = None
        self.config_share_ip = None
        self.config_share_password = None
        self.config_share_path = None
        self.config_share_type = None
        self.config_share_username = None
        self.iso_share = None
        self.iso_share_ip = None
        self.iso_share_type = None
        self.password = None
        self.remote_share_file = None
        self.remote_share_ip = None
        self.remote_share_password = None
        self.remote_share_path = None
        self.remote_share_type = None
        self.remote_share_username = None
        self.status = None
        self.time_out = None
        self.username = None
        self.child_action = None
        self.iso_share_file = None
        self.iso_share_path = None
        ManagedObject.__init__(self, "OsiStart", parent_mo_or_dn, **kwargs)
|
|
import sys
import os
import glob
import argparse
from subprocess import call
from ..common.bounding_box import BoundingBox
import json
import itertools
from ..common import utils
from optimize_mesh import optimize_meshes
import math
import numpy as np
from scipy import spatial
import multiprocessing as mp
from rh_renderer import models
# Number of mesh points sampled when estimating a section's halo
# (see compute_points_model_halo).
SAMPLED_POINTS_NUM = 50
def compute_points_model_halo(url_optimized_mesh0, points_tree, sampled_points_num=None):
    """Estimate the halo (neighborhood radius) for a section's mesh points.

    Samples up to ``sampled_points_num`` mesh points, finds each sample's
    nearest other point via the KD-tree, and returns twice the smallest
    such distance.

    Args:
        url_optimized_mesh0: (N, 2) array of mesh source-point coordinates.
        points_tree: scipy.spatial.KDTree built over the same points.
        sampled_points_num: number of points to sample; defaults to the
            module-level SAMPLED_POINTS_NUM.

    Returns:
        The halo distance (2x the minimal sampled nearest-neighbor distance).
    """
    print("Computing Points Transform Model Halo")
    if sampled_points_num is None:
        sampled_points_num = SAMPLED_POINTS_NUM
    # Sample without replacement; clamp to the mesh size so meshes smaller
    # than the sample count don't make np.random.choice raise.
    num_points = url_optimized_mesh0.shape[0]
    sample_size = min(sampled_points_num, num_points)
    sampled_points_indices = np.random.choice(num_points, sample_size, replace=False)
    sampled_points = url_optimized_mesh0[sampled_points_indices]
    # Query the 2 nearest neighbors of each sample: the nearest is the
    # sample itself (distance 0), so column 1 holds the nearest-other
    # distance. The halo is twice the minimum of those.
    distances, _ = points_tree.query(sampled_points, 2)
    min_point_dist = np.min(distances[:, 1])
    halo = 2 * min_point_dist
    print("Points model halo: {}".format(halo))
    return halo
def get_points_transform_model(url_optimized_mesh, bbox, points_tree, halo):
    """Build a PointsTransformModel for one tile from the optimized mesh.

    Collects the (src, dest) mesh point pairs whose source point falls
    inside the tile's bbox expanded by ``halo`` and wraps them in a
    models.PointsTransformModel.

    Args:
        url_optimized_mesh: pair (src_points, dest_points) of (N, 2) arrays.
        bbox: tile bounding box as [min_x, max_x, min_y, max_y].
        points_tree: scipy.spatial.KDTree over the source points.
        halo: margin added around the bbox when collecting points.

    Returns:
        A models.PointsTransformModel, or None if no mesh point falls
        inside the haloed bbox.
    """
    # Expand the tile bbox ([min_x, max_x, min_y, max_y]) by the halo.
    bbox_with_halo = list(bbox)
    bbox_with_halo[0] -= halo
    bbox_with_halo[2] -= halo
    bbox_with_halo[1] += halo
    bbox_with_halo[3] += halo
    # Pre-filter the whole mesh with a cheap KD-tree ball query of radius
    # "diagonal + 2*halo + 1" around the bbox's top-left point; this is a
    # superset of the haloed bbox.
    top_left = np.array([bbox[0], bbox[2]])
    bottom_right = np.array([bbox[1], bbox[3]])
    pre_filtered_indices = points_tree.query_ball_point(top_left, np.linalg.norm(bottom_right - top_left) + 2 * halo + 1)
    if len(pre_filtered_indices) == 0:
        # BUG FIX: the message previously printed a literal "{}" because
        # .format() was never applied.
        print("Could not find any mesh points in bbox {}, skipping the tile".format(bbox_with_halo))
        return None
    # Keep only the pairs whose source point is inside the haloed bbox.
    filtered_src_points = []
    filtered_dest_points = []
    for p_src, p_dest in zip(url_optimized_mesh[0][np.array(pre_filtered_indices)], url_optimized_mesh[1][np.array(pre_filtered_indices)]):
        if (bbox_with_halo[0] <= p_src[0] <= bbox_with_halo[1]) and (bbox_with_halo[2] <= p_src[1] <= bbox_with_halo[3]):
            filtered_src_points.append(p_src)
            filtered_dest_points.append(p_dest)
    if len(filtered_src_points) == 0:
        print("Could not find any mesh points in bbox {}, skipping the tile".format(bbox_with_halo))
        return None
    # Create the tile transformation from the filtered correspondences.
    model = models.PointsTransformModel((filtered_src_points, filtered_dest_points))
    return model
def compute_new_bounding_box(tile_ts):
    """Computes a bounding box given the tile's transformations (if any),
    and the new model to be applied last"""
    # Non-affine transforms force us to push every boundary pixel of the
    # source image through the transformation chain.
    # Assumption: no interior pixel maps outside the transformed boundary.
    width = tile_ts["width"]
    height = tile_ts["height"]
    xs = np.arange(width, dtype=float)
    ys = np.arange(height, dtype=float)
    top_edge = np.column_stack((xs, np.zeros_like(xs)))
    bottom_edge = np.column_stack((xs, np.full_like(xs, float(height - 1))))
    left_edge = np.column_stack((np.zeros_like(ys), ys))
    right_edge = np.column_stack((np.full_like(ys, float(width - 1)), ys))
    boundaries = np.concatenate((top_edge, bottom_edge, left_edge, right_edge))
    # Apply the tile's existing transformations, in order.
    for modelspec in tile_ts.get("transforms", []):
        boundaries = models.Transforms.from_tilespec(modelspec).apply(boundaries)
    min_xy = np.min(boundaries, axis=0)
    max_xy = np.max(boundaries, axis=0)
    # Round at 5 decimals before floor/ceil to avoid float representation
    # noise introduced by the transformations.
    return [int(math.floor(round(min_xy[0], 5))),
            int(math.ceil(round(max_xy[0], 5))),
            int(math.floor(round(min_xy[1], 5))),
            int(math.ceil(round(max_xy[1], 5)))]
def save_json_file(out_fname, data):
    """Serialize `data` as pretty-printed, key-sorted JSON into `out_fname`."""
    with open(out_fname, 'w') as fh:
        json.dump(data, fh, sort_keys=True, indent=4)
    print('Wrote tilespec to {0}'.format(out_fname))
    sys.stdout.flush()
def save_optimized_mesh(ts_fname, url_optimized_mesh, out_dir):
    """Apply the section's optimized mesh to every tile of the tilespec file
    `ts_fname` and save the result (same basename) under `out_dir`.

    Tiles for which no mesh points fall inside their bbox are removed from
    the output tilespec.
    """
    print("Working on: {}".format(ts_fname))
    # Use the first tile to find the halo for the entire section
    points_tree = spatial.KDTree(url_optimized_mesh[0])
    halo = compute_points_model_halo(url_optimized_mesh[0], points_tree)
    ts_base = os.path.basename(ts_fname)
    out_fname = os.path.join(out_dir, ts_base)
    # read tilespec
    data = None
    with open(ts_fname, 'r') as data_file:
        data = json.load(data_file)
    if len(data) > 0:
        tiles_to_remove = []
        # change the transformation of each tile
        for tile_index, tile in enumerate(data):
            # Create the PointsTransformModel for the current tile
            tile_model = get_points_transform_model(url_optimized_mesh, tile["bbox"], points_tree, halo)
            if tile_model is None:
                tiles_to_remove.append(tile_index)
            else:
                # Add the model to the tile.  setdefault (not get) is required:
                # for a tile without a "transforms" entry, tile.get("transforms", [])
                # would append to a temporary list that is immediately discarded,
                # silently dropping the new transform.
                tile_transform = tile_model.to_modelspec()
                tile.setdefault("transforms", []).append(tile_transform)
                # Compute new bounding box
                tile["bbox"] = compute_new_bounding_box(tile)
        # Delete from the highest index down so earlier indices stay valid.
        for tile_index in sorted(tiles_to_remove, reverse=True):
            print("Removing tile {} from {}".format(data[tile_index]["mipmapLevels"]["0"]["imageUrl"], out_fname))
            del data[tile_index]
        # save the output tile spec
        save_json_file(out_fname, data)
    else:
        print('Nothing to write for tilespec {}'.format(ts_fname))
    sys.stdout.flush()
def save_optimized_meshes(all_tile_urls, optimized_meshes, out_dir, processes_num=1):
    """Save every section's optimized mesh in parallel, one tilespec per task."""
    pool = mp.Pool(processes=processes_num)
    print("Using {} processes to save the output jsons".format(processes_num))
    async_results = []
    for ts_url in all_tile_urls:
        fname = ts_url.replace('file://', '')
        async_results.append(
            pool.apply_async(save_optimized_mesh, (fname, optimized_meshes[fname], out_dir)))
    # .get() re-raises any exception that happened inside a worker.
    for async_result in async_results:
        async_result.get()
    pool.close()
    pool.join()
def read_ts_layers(tile_files):
    """Read the tilespec URL list from the first entry of `tile_files` and map
    each tilespec basename to its layer id.

    Returns a tuple (tsfile_to_layerid, actual_tile_urls).
    """
    tsfile_to_layerid = {}
    # Python-3-compatible print (was a Python-2 print statement; the rest of
    # this file already uses print()).
    print("Reading tilespec files")
    # TODO - make sure it's not a json files list
    actual_tile_urls = []
    with open(tile_files[0], 'r') as f:
        actual_tile_urls = [line.strip('\n') for line in f.readlines()]
    for url in actual_tile_urls:
        file_name = url.replace('file://', '')
        layerid = utils.read_layer_from_file(file_name)
        tsfile = os.path.basename(url)
        tsfile_to_layerid[tsfile] = layerid
    return tsfile_to_layerid, actual_tile_urls
def optimize_layers_elastic(tile_files, corr_files, out_dir, max_layer_distance, conf=None, skip_layers=None, threads_num=4):
    """Elastically optimize all layers' meshes and write aligned tilespecs
    into out_dir.

    NOTE(review): max_layer_distance and skip_layers are accepted but not used
    in this function body — confirm whether they should be forwarded.
    """
    tsfile_to_layerid, all_tile_urls = read_ts_layers(tile_files)
    # TODO: the tile_files order should imply the order of sections
    # TODO - make sure it's not a json files list
    with open(corr_files[0], 'r') as f:
        actual_corr_files = [line.replace('file://', '').strip('\n') for line in f.readlines()]
    conf_dict = {}
    hex_spacing = 1500  # default value (from block matching)
    if conf is not None:
        with open(conf, 'r') as f:
            params = json.load(f)
        conf_dict = params["OptimizeLayersElastic"]
        hex_spacing = params["MatchLayersBlockMatching"]["hex_spacing"]
    print(actual_corr_files)
    # Create a per-layer optimized mesh
    optimized_meshes = optimize_meshes(actual_corr_files, hex_spacing, conf_dict)
    # Save the output
    utils.create_dir(out_dir)
    save_optimized_meshes(all_tile_urls, optimized_meshes, out_dir, threads_num)
    print("Done.")
def main():
    """Command-line entry point: parse arguments and run the elastic layer
    optimization."""
    print(sys.argv)
    # Command line parser
    parser = argparse.ArgumentParser(description='Iterates over the tilespecs in a file, computing matches for each overlapping tile.')
    parser.add_argument('--tile_files', metavar='tile_files', type=str, nargs='+', required=True,
                        help='the list of tile spec files to align')
    parser.add_argument('--corr_files', metavar='corr_files', type=str, nargs='+', required=True,
                        help='the list of corr spec files that contain the matched layers')
    parser.add_argument('-o', '--output_dir', type=str,
                        help='an output directory that will include the aligned sections tiles (default: .)',
                        default='./')
    parser.add_argument('-c', '--conf_file_name', type=str,
                        help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if not supplied)',
                        default=None)
    parser.add_argument('-t', '--threads_num', type=int,
                        help='the number of threads to use (default: 1)',
                        default=1)
    parser.add_argument('-s', '--skip_layers', type=str,
                        help='the range of layers (sections) that will not be processed e.g., "2,3,9-11,18" (default: no skipped sections)',
                        default=None)
    parser.add_argument('-d', '--max_layer_distance', type=int,
                        help='the largest distance between two layers to be matched (default: 1)',
                        default=1)
    args = parser.parse_args()
    # Single-argument formatted prints behave identically on Python 2 and 3
    # (the originals were Python-2-only print statements).
    print("tile_files: {0}".format(args.tile_files))
    print("corr_files: {0}".format(args.corr_files))
    optimize_layers_elastic(args.tile_files, args.corr_files,
                            args.output_dir, args.max_layer_distance,
                            conf=args.conf_file_name,
                            skip_layers=args.skip_layers, threads_num=args.threads_num)
if __name__ == '__main__':
    main()
|
|
"""
OpenNSA JunOS backend.
Should work for EX and QFX switches.
"""
# configure snippet (matches the COMMAND_* templates below):
#
# ACTIVATION:
# configure
# set vlans opennsa-1234 vlan-id 1234
# set interfaces ge-0/0/1 unit 0 family ethernet-switching vlan members opennsa-1234
# set interfaces ge-0/0/2 unit 0 family ethernet-switching vlan members opennsa-1234
# commit
# DE-ACTIVATION:
# configure
# delete interfaces ge-0/0/1 unit 0 family ethernet-switching vlan members opennsa-1234
# delete interfaces ge-0/0/2 unit 0 family ethernet-switching vlan members opennsa-1234
# delete vlans opennsa-1234
# commit
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import constants as cnt, config
from opennsa.backends.common import genericbackend, ssh
# parameterized commands
COMMAND_CONFIGURE = 'configure'
COMMAND_COMMIT = 'commit'
COMMAND_SET_VLAN = 'set vlans opennsa-%i vlan-id %i'
COMMAND_SET_INTERFACE_VLAN = 'set interfaces %s unit 0 family ethernet-switching vlan members opennsa-%i'
COMMAND_DELETE_VLAN = 'delete vlans opennsa-%i'
COMMAND_DELETE_INTERFACE_VLAN = 'delete interfaces %s unit 0 family ethernet-switching vlan members opennsa-%i'
LOG_SYSTEM = 'JuniperEX'
def createConfigureCommands(source_nrm_port, dest_nrm_port, vlan):
    """Build the JunOS set-commands that create vlan `vlan` and add both ports to it."""
    vlan_cmd = COMMAND_SET_VLAN % (vlan, vlan)
    port_cmds = [ COMMAND_SET_INTERFACE_VLAN % (port, vlan) for port in (source_nrm_port, dest_nrm_port) ]
    return [ vlan_cmd ] + port_cmds
def createDeleteCommands(source_nrm_port, dest_nrm_port, vlan):
    """Build the JunOS delete-commands: remove both ports from the vlan, then the vlan itself."""
    port_cmds = [ COMMAND_DELETE_INTERFACE_VLAN % (port, vlan) for port in (source_nrm_port, dest_nrm_port) ]
    return port_cmds + [ COMMAND_DELETE_VLAN % vlan ]
class SSHChannel(ssh.SSHChannel):
    """SSH 'session' channel that drives the JunOS CLI over an interactive
    shell: waits for prompts, enters configure mode, sends each command,
    then commits and closes the channel."""
    name = 'session'
    def __init__(self, conn):
        ssh.SSHChannel.__init__(self, conn=conn)
        self.line = ''          # buffer for a partially received output line
        self.wait_defer = None  # Deferred fired once wait_line is observed
        self.wait_line = None   # substring of CLI output currently awaited
    @defer.inlineCallbacks
    def sendCommands(self, commands):
        """Enter configure mode, send `commands` one by one, commit, and close.
        Any failure is logged and re-raised to the caller."""
        LT = '\r' # line termination
        try:
            yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
            # Arm the prompt-waiter *before* writing, so a fast reply cannot
            # be missed.
            d = self.waitForLine('>')
            self.write(COMMAND_CONFIGURE + LT)
            yield d
            log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
            for cmd in commands:
                log.msg('CMD> %s' % cmd, system=LOG_SYSTEM)
                d = self.waitForLine('[edit]')
                self.write(cmd + LT)
                yield d
            # commit commands, check for 'commit complete' as success
            # not quite sure how to handle failure here
            ## test stuff
            #d = self.waitForLine('[edit]')
            #self.write('commit check' + LT)
            d = self.waitForLine('commit complete')
            self.write(COMMAND_COMMIT + LT)
            yield d
        except Exception as e:
            log.msg('Error sending commands: %s' % str(e))
            raise e
        log.msg('Commands successfully committed', debug=True, system=LOG_SYSTEM)
        self.sendEOF()
        self.closeIt()
    def waitForLine(self, line):
        """Return a Deferred that fires when `line` appears in channel output."""
        self.wait_line = line
        self.wait_defer = defer.Deferred()
        return self.wait_defer
    def matchLine(self, line):
        # Fire and disarm the waiter if the awaited substring is in this line.
        if self.wait_line and self.wait_defer:
            if self.wait_line in line.strip():
                d = self.wait_defer
                self.wait_line = None
                self.wait_defer = None
                d.callback(self)
            else:
                pass
    def dataReceived(self, data):
        # NOTE(review): this concatenation assumes `data` is str; on Python 3
        # Twisted delivers bytes here — confirm before porting.
        if len(data) == 0:
            pass
        else:
            self.line += data
            if '\n' in data:
                # Process every complete, non-blank line; reset the buffer.
                lines = [ line.strip() for line in self.line.split('\n') if line.strip() ]
                self.line = ''
                for l in lines:
                    self.matchLine(l)
class JuniperEXCommandSender:
    """Owns a (cached) SSH connection to the switch and pushes configuration
    command batches over fresh session channels."""
    def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path):
        self.ssh_connection_creator = \
             ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
        self.ssh_connection = None # cached connection
    def _getSSHChannel(self):
        """Return a Deferred firing with an opened SSHChannel, reusing the
        cached SSH connection when its transport is still running."""
        def setSSHConnectionCache(ssh_connection):
            log.msg('SSH Connection created and cached', system=LOG_SYSTEM)
            self.ssh_connection = ssh_connection
            return ssh_connection
        def gotSSHConnection(ssh_connection):
            channel = SSHChannel(conn = ssh_connection)
            ssh_connection.openChannel(channel)
            return channel.channel_open
        if self.ssh_connection and not self.ssh_connection.transport.factory.stopped:
            log.msg('Reusing SSH connection', debug=True, system=LOG_SYSTEM)
            return gotSSHConnection(self.ssh_connection)
        else:
            # since creating a new connection should be uncommon, we log it
            # this makes it possible to see if something goes wrong and creates connections continuously
            log.msg('Creating new SSH connection', system=LOG_SYSTEM)
            d = self.ssh_connection_creator.getSSHConnection()
            d.addCallback(setSSHConnectionCache)
            d.addCallback(gotSSHConnection)
            return d
    def _sendCommands(self, commands):
        # Open a channel and push the command batch through it.
        def gotChannel(channel):
            d = channel.sendCommands(commands)
            return d
        d = self._getSSHChannel()
        d.addCallback(gotChannel)
        return d
    def setupLink(self, source_nrm_port, dest_nrm_port, vlan):
        """Create the VLAN and add both ports to it on the switch."""
        commands = createConfigureCommands(source_nrm_port, dest_nrm_port, vlan)
        return self._sendCommands(commands)
    def teardownLink(self, source_nrm_port, dest_nrm_port, vlan):
        """Remove both ports from the VLAN and delete the VLAN on the switch."""
        commands = createDeleteCommands(source_nrm_port, dest_nrm_port, vlan)
        return self._sendCommands(commands)
# --------
class JunosEXTarget(object):
    """A switch interface, optionally qualified by a VLAN id."""
    def __init__(self, port, vlan=None):
        self.port = port
        self.vlan = vlan
    def __str__(self):
        if not self.vlan:
            return '<JunosEXTarget %s>' % self.port
        return '<JunosEXTarget %s#%i>' % (self.port, self.vlan)
class JuniperEXConnectionManager:
    """ConnectionManager for the Juniper EX backend: maps NSI ports/labels to
    switch interfaces and drives VLAN setup/teardown over SSH."""
    def __init__(self, port_map, host, port, host_fingerprint, user, ssh_public_key, ssh_private_key):
        self.port_map = port_map  # NSI port name -> switch interface
        self.command_sender = JuniperEXCommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key)
    def getResource(self, port, label):
        assert label is None or label.type_ == cnt.ETHERNET_VLAN, 'Label must be None or VLAN'
        # NOTE(review): the assert permits label=None, but labelValue() would
        # then raise AttributeError — confirm whether None can reach here.
        return label.labelValue() # vlan is a global resource, only one be used at a time
    def getTarget(self, port, label):
        assert label is None or label.type_ == cnt.ETHERNET_VLAN, 'Label must be None or VLAN'
        # NOTE(review): as in getResource, label=None would crash on
        # label.type_ despite being allowed by the assert — confirm.
        if label.type_ == cnt.ETHERNET_VLAN:
            vlan = int(label.labelValue())
            assert 1 <= vlan <= 4095, 'Invalid label value for vlan: %s' % label.labelValue()
        return JunosEXTarget(self.port_map[port], vlan)
    def createConnectionId(self, source_target, dest_target):
        # Random 6-digit id; uniqueness is probabilistic, not guaranteed.
        return 'EX-' + str(random.randint(100000,999999))
    def canSwapLabel(self, label_type):
        return False # not yet anyway
    def setupLink(self, connection_id, source_target, dest_target, bandwidth):
        assert source_target.vlan == dest_target.vlan, 'VLANs must match'
        def linkUp(_):
            log.msg('Link %s -> %s up' % (source_target, dest_target), system=LOG_SYSTEM)
        d = self.command_sender.setupLink(source_target.port, dest_target.port, dest_target.vlan)
        d.addCallback(linkUp)
        return d
    def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
        assert source_target.vlan == dest_target.vlan, 'VLANs must match'
        def linkDown(_):
            log.msg('Link %s -> %s down' % (source_target, dest_target), system=LOG_SYSTEM)
        d = self.command_sender.teardownLink(source_target.port, dest_target.port, dest_target.vlan)
        d.addCallback(linkDown)
        return d
def JuniperEXBackend(network_name, nrm_ports, parent_requester, cfg):
    """Factory: build a GenericBackend wired to a Juniper EX connection manager."""
    name = 'JuniperEX %s' % network_name
    nrm_map = { p.name: p for p in nrm_ports }             # for the generic backend
    port_map = { p.name: p.interface for p in nrm_ports }  # for the nrm backend
    # extract config items
    host = cfg[config.JUNIPER_HOST]
    port = cfg.get(config.JUNIPER_PORT, 22)
    host_fingerprint = cfg[config.JUNIPER_HOST_FINGERPRINT]
    user = cfg[config.JUNIPER_USER]
    ssh_public_key = cfg[config.JUNIPER_SSH_PUBLIC_KEY]
    ssh_private_key = cfg[config.JUNIPER_SSH_PRIVATE_KEY]
    cm = JuniperEXConnectionManager(port_map, host, port, host_fingerprint, user,
                                    ssh_public_key, ssh_private_key)
    return genericbackend.GenericBackend(network_name, nrm_map, cm, parent_requester, name)
|
|
from ref import sopr
# Materialize the issue codes once; list() keeps this correct on Python 3,
# where dict.keys() returns a view object rather than a list.
sopr_general_issue_codes = list(sopr.general_issue_codes.keys())
# JSON schema (validictory-style; "blank" marks strings allowed to be empty)
# for a transformed LD-1 lobbying registration document.
# Regex patterns use raw strings so "\d"/"\s" are regex escapes, not (invalid)
# Python string escapes, which Python 3.6+ warns about.
transformed_ld1_schema = {
    "type": "object",
    "properties": {
        "document_id": {
            "type": "string",
            "format": "uuid_hex",
        },
        "affiliated_organizations_url": {
            "type": ["null", "string"],
            "format": "url_http"
        },
        "signature": {
            "type": "string",
            "blank": False
        },
        "datetimes": {
            "type": "object",
            "properties": {
                "signature_date": {
                    "type": "string",
                    "format": "date-time"
                },
                "effective_date": {
                    "type": "string",
                    "format": "date-time"
                }
            }
        },
        "registration_type": {
            "type": "object",
            "properties": {
                "new_client_for_existing_registrant": {
                    "type": "boolean"
                },
                "new_registrant": {
                    "type": "boolean"
                },
                "is_amendment": {
                    "type": "boolean"
                }
            }
        },
        "registrant": {
            # Union type: exactly one of the two registrant shapes applies.
            "type": [
                {
                    "title": "Individual Registrant",
                    "type": "object",
                    "properties": {
                        "organization_or_lobbying_firm": {
                            "type": "boolean",
                            "enum": [False, ]
                        },
                        "self_employed_individual": {
                            "type": "boolean",
                            "enum": [True, ]
                        },
                        "registrant_org_name": {
                            "type": "null"
                        },
                        "registrant_individual_prefix": {
                            "type": "string"
                        },
                        "registrant_individual_firstname": {
                            "type": "string"
                        },
                        "registrant_individual_lastname": {
                            "type": "string"
                        },
                        "registrant_address_one": {
                            "type": "string",
                        },
                        "registrant_address_two": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_city": {
                            "type": "string"
                        },
                        "registrant_state": {
                            "type": "string",
                            "pattern": "[A-Z]{2}"
                        },
                        "registrant_zip": {
                            "type": "string",
                            "pattern": r"^\d{5}(?:[-\s]\d{4})?$"
                        },
                        "registrant_country": {
                            "type": "string"
                        },
                        "registrant_ppb_city": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_state": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_zip": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_country": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_international_phone": {
                            "type": "boolean"
                        },
                        "registrant_contact_name": {
                            "type": "string"
                        },
                        "registrant_contact_phone": {
                            "type": "string"
                        },
                        "registrant_contact_email": {
                            "type": "string",
                            "format": "email"
                        },
                        "registrant_general_description": {
                            "type": "string",
                        },
                        "registrant_house_id": {
                            "type": "string",
                            "pattern": r"\d+"
                        },
                        "registrant_senate_id": {
                            "type": "string",
                            "pattern": r"\d+"
                        }
                    }
                },
                {
                    "title": "Organizational Registrant",
                    "type": "object",
                    "properties": {
                        "organization_or_lobbying_firm": {
                            "type": "boolean",
                            "enum": [True, ]
                        },
                        "self_employed_individual": {
                            "type": "boolean",
                            "enum": [False, ]
                        },
                        "registrant_org_name": {
                            "type": "string"
                        },
                        "registrant_individual_prefix": {
                            "type": "null"
                        },
                        "registrant_individual_firstname": {
                            "type": "null"
                        },
                        "registrant_individual_lastname": {
                            "type": "null"
                        },
                        "registrant_address_one": {
                            "type": "string",
                        },
                        "registrant_address_two": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_city": {
                            "type": "string"
                        },
                        "registrant_state": {
                            "type": "string",
                            "pattern": "[A-Z]{2}"
                        },
                        "registrant_zip": {
                            "type": "string",
                            "pattern": r"^\d{5}(?:[-\s]\d{4})?$"
                        },
                        "registrant_country": {
                            "type": "string"
                        },
                        "registrant_ppb_city": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_state": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_zip": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_ppb_country": {
                            "type": "string",
                            "blank": True
                        },
                        "registrant_international_phone": {
                            "type": "boolean"
                        },
                        "registrant_contact_name": {
                            "type": "string"
                        },
                        "registrant_contact_phone": {
                            "type": "string"
                        },
                        "registrant_contact_email": {
                            "type": "string",
                            "format": "email"
                        },
                        "registrant_general_description": {
                            "type": "string",
                        },
                        "registrant_house_id": {
                            "type": "string",
                            "pattern": r"\d+"
                        },
                        "registrant_senate_id": {
                            "type": "string",
                            "pattern": r"\d+"
                        }
                    }
                },
            ]
        },
        "client": {
            # Union type: client is either the registrant itself or a third party.
            "type": [
                {
                    "title": "Client who is also the registrant",
                    "type": "object",
                    "properties": {
                        "client_self": {
                            "type": "boolean",
                            "enum": [True, ]
                        },
                        "client_name": {
                            "type": "string"
                        },
                        "client_general_description": {
                            "type": "string",
                            "blank": True
                        },
                        "client_address": {
                            "type": "string",
                            "blank": True
                        },
                        "client_city": {
                            "type": "string",
                            "blank": True
                        },
                        "client_state": {
                            "type": "string",
                            "blank": True
                        },
                        "client_zip": {
                            "type": "string",
                            "blank": True
                        },
                        "client_country": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_city": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_state": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_zip": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_country": {
                            "type": "string",
                            "blank": True
                        }
                    }
                },
                {
                    "title": "Client who is not the registrant",
                    "type": "object",
                    "properties": {
                        "client_self": {
                            "type": "boolean",
                            "enum": [False, ]
                        },
                        "client_name": {
                            "type": "string"
                        },
                        "client_general_description": {
                            "type": "string"
                        },
                        "client_address": {
                            "type": "string"
                        },
                        "client_city": {
                            "type": "string"
                        },
                        "client_zip": {
                            "type": "string"
                        },
                        "client_state": {
                            "type": "string"
                        },
                        "client_country": {
                            "type": "string"
                        },
                        "client_ppb_city": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_state": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_zip": {
                            "type": "string",
                            "blank": True
                        },
                        "client_ppb_country": {
                            "type": "string",
                            "blank": True
                        }
                    }
                }
            ]
        },
        "lobbying_issues": {
            # NOTE(review): no "type": "array" here (unlike foreign_entities) —
            # confirm whether that omission is intentional.
            "items": {
                "type": "object",
                "properties": {
                    "general_issue_area": {
                        "type": "string",
                        "enum": sopr_general_issue_codes
                    }
                }
            }
        },
        "affiliated_organizations": {
            "items": {
                "type": "object",
                "properties": {
                    "affiliated_organization_name": {
                        "type": "string"
                    },
                    "affiliated_organization_address": {
                        "type": "string"
                    },
                    "affiliated_organization_city": {
                        "type": "string"
                    },
                    "affiliated_organization_state": {
                        "type": "string"
                    },
                    "affiliated_organization_zip": {
                        "type": "string"
                    },
                    "affiliated_organization_country": {
                        "type": "string"
                    },
                    "affiliated_organization_ppb_state": {
                        "type": "string",
                        "blank": True
                    },
                    "affiliated_organization_ppb_city": {
                        "type": "string",
                        "blank": True
                    },
                    "affiliated_organization_ppb_country": {
                        "type": "string",
                        "blank": True
                    }
                }
            }
        },
        "lobbying_issues_detail": {
            "type": "string"
        },
        "foreign_entities": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "foreign_entity_name": {
                        "type": "string"
                    },
                    "foreign_entity_address": {
                        "type": "string"
                    },
                    "foreign_entity_city": {
                        "type": "string"
                    },
                    "foreign_entity_state": {
                        "type": "string"
                    },
                    "foreign_entity_country": {
                        "type": "string"
                    },
                    "foreign_entity_ppb_state": {
                        "type": "string"
                    },
                    "foreign_entity_ppb_country": {
                        "type": "string"
                    },
                    "foreign_entity_amount": {
                        "type": "number"
                    },
                    "foreign_entity_ownership_percentage": {
                        "type": "number"
                    }
                }
            }
        },
        "lobbyists": {
            "items": {
                "type": "object",
                "properties": {
                    "lobbyist_suffix": {
                        "type": "string",
                        "blank": True
                    },
                    "lobbyist_first_name": {
                        "type": "string"
                    },
                    "lobbyist_last_name": {
                        "type": "string"
                    },
                    "lobbyist_covered_official_position": {
                        "type": "string",
                        "blank": True
                    }
                }
            }
        }
    }
}
# Schema for a transformed LD-2 quarterly lobbying report.  Regex patterns use
# raw strings for the same reason as in transformed_ld1_schema.
transformed_ld2_schema = {
    "type": "object",
    "properties": {
        "document_id": {
            "type": "string",
            "format": "uuid_hex",
        },
        "client_registrant_senate_id": {
            "type": "string",
            "pattern": "[0-9]+-[0-9]"
        },
        "client_registrant_house_id": {
            "type": "string",
            "pattern": "[0-9]+"
        },
        "report_type": {
            "type": "object",
            "properties": {
                "year": {
                    "type": "string",
                    "pattern": r"\d{4}"
                },
                "quarter": {
                    "type": "string",
                    "pattern": "Q[1-4]"
                },
                "is_amendment": {
                    "type": "boolean"
                },
                "is_termination": {
                    "type": "boolean"
                },
                "no_activity": {
                    "type": "boolean"
                }
            }
        },
        "signature": {
            "type": "string"
        },
        "income_less_than_five_thousand": {
            "type": ["null", "boolean"]
        },
        "income_amount": {
            "type": ["null", "number"],
            "exclusiveMinimum": 5000
        },
        "expense_less_than_five_thousand": {
            "type": ["null", "boolean"]
        },
        "expense_reporting_method": {
            "type": ["string", "null"],
            "enum": ["a", "b", "c"]
        },
        "expense_amount": {
            "type": ["null", "number"],
            "exclusiveMinimum": 5000
        },
        "datetimes": {
            "type": "object",
            "properties": {
                "signature_date": {
                    "type": "string",
                    "format": "date-time"
                },
                "termination_date": {
                    "type": ["null", "string"],
                    "format": "date-time"
                }
            }
        },
        "registrant": {
            "type": "object",
            "properties": {
                "organization_or_lobbying_firm": {
                    "type": "boolean"
                },
                "self_employed_individual": {
                    "type": "boolean"
                },
                "registrant_name": {
                    "type": "string"
                },
                "registrant_address_one": {
                    "type": "string",
                },
                "registrant_address_two": {
                    "type": "string",
                    "blank": True
                },
                "registrant_city": {
                    "type": "string"
                },
                "registrant_state": {
                    "type": "string",
                    "pattern": "[A-Z]{2}"
                },
                "registrant_zip": {
                    "type": "string",
                    "pattern": r"^\d{5}(?:[-\s]\d{4})?$"
                },
                "registrant_country": {
                    "type": "string"
                },
                "registrant_ppb_city": {
                    "type": "string",
                    "blank": True
                },
                "registrant_ppb_state": {
                    "type": "string",
                    "blank": True
                },
                "registrant_ppb_zip": {
                    "type": "string",
                    "blank": True
                },
                "registrant_ppb_country": {
                    "type": "string",
                    "blank": True
                },
                "registrant_contact_name": {
                    "type": "string"
                },
                "registrant_contact_name_prefix": {
                    "type": "string"
                },
                "registrant_contact_phone": {
                    "type": "string"
                },
                "registrant_contact_email": {
                    "type": "string",
                    "format": "email"
                }
            }
        },
        "client": {
            # NOTE(review): unlike its siblings, this object has no
            # "type"/"properties" wrapper — confirm whether these keys are
            # actually applied as a schema by the validator.
            "client_name": {
                "type": "string"
            },
            "client_self": {
                "type": "boolean"
            },
            "client_state_or_local_government": {
                "type": "boolean"
            }
        },
        "lobbying_activities": {
            "items": {
                "type": "object",
                "properties": {
                    "general_issue_area": {
                        "type": "string",
                        "enum": sopr_general_issue_codes
                    },
                    "houses_and_agencies_none": {
                        "type": "boolean"
                    },
                    "specific_issues": {
                        "type": "string",
                        "blank": True
                    },
                    "houses_and_agencies": {
                        "type": "string",
                        "blank": True
                    },
                    "foreign_entity_interest_none": {
                        "type": "boolean"
                    },
                    "foreign_entity_interest": {
                        "type": "string",
                        "blank": True
                    },
                    "lobbyists": {
                        "items": {
                            "type": "object",
                            "properties": {
                                "lobbyist_covered_official_position": {
                                    "type": "string",
                                    "blank": True
                                },
                                "lobbyist_is_new": {
                                    "type": "boolean"
                                },
                                "lobbyist_first_name": {
                                    "type": "string",
                                },
                                "lobbyist_last_name": {
                                    "type": "string",
                                },
                                "lobbyist_suffix": {
                                    "type": "string",
                                    "blank": True
                                }
                            }
                        }
                    }
                }
            }
        },
        "registration_update": {
            "type": "object",
            "properties": {
                "client_address": {
                    "type": "string",
                    "blank": True
                },
                "client_city": {
                    "type": "string",
                    "blank": True
                },
                "client_state": {
                    "type": "string",
                    "blank": True
                },
                "client_zip": {
                    "type": "string",
                    "blank": True
                },
                "client_country": {
                    "type": "string",
                    "blank": True
                },
                "client_ppb_city": {
                    "type": "string",
                    "blank": True
                },
                "client_ppb_state": {
                    "type": "string",
                    "blank": True
                },
                "client_ppb_zip": {
                    "type": "string",
                    "blank": True
                },
                "client_ppb_country": {
                    "type": "string",
                    "blank": True
                },
                "client_general_description": {
                    "type": "string",
                    "blank": True
                },
                "removed_lobbyists": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "lobbyist_first_name": {
                                "type": "string"
                            },
                            "lobbyist_last_name": {
                                "type": "string"
                            }
                        }
                    }
                },
                "removed_lobbying_issues": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "general_issue_area": {
                                "type": "string",
                                "enum": sopr_general_issue_codes
                            }
                        }
                    }
                },
                "removed_foreign_entities": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "foreign_entity_name": {
                                "type": "string"
                            }
                        }
                    }
                },
                "removed_affiliated_organizations": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "affiliated_organization_name": {
                                "type": "string"
                            }
                        }
                    }
                },
                "added_affiliated_organizations": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "affiliated_organization_name": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_address": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_city": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_state": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_zip": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_country": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_ppb_state": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_ppb_city": {
                                "type": "string",
                                "blank": True
                            },
                            "affiliated_organization_ppb_country": {
                                "type": "string",
                                "blank": True
                            }
                        }
                    }
                },
                "added_foreign_entities": {
                    "items": {
                        "type": "object",
                        "properties": {
                            "foreign_entity_name": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_address": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_city": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_state": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_country": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_ppb_state": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_ppb_country": {
                                "type": "string",
                                "blank": True
                            },
                            "foreign_entity_amount": {
                                "type": "number",
                                "blank": True
                            },
                            "foreign_entity_ownership_percentage": {
                                "type": "number",
                                "blank": True
                            }
                        }
                    }
                }
            }
        }
    }
}
|
|
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
PasswordResetForm
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from zerver.lib.actions import do_change_password, is_inactive, user_email_is_unique
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.request import JsonableError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.users import check_full_name
from zerver.lib.utils import get_subdomain, check_subdomain
from zerver.models import Realm, get_user_profile_by_email, UserProfile, \
get_realm_by_email_domain, get_realm, \
get_unique_open_realm, email_to_domain, email_allowed_for_realm
from zproject.backends import password_auth_enabled
import logging
import re
import DNS
from typing import Any, Callable, List, Optional, Text, Dict
# Shown when an @mit.edu address is a Moira mailing list (or nonexistent)
# rather than a real, mail-receiving user.
MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
                       u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
                       u'If you want to sign up an alias for Zulip, ' + \
                       u'<a href="mailto:support@zulipchat.com">contact us</a>.'
# Shown when the account exists but belongs to a different subdomain's realm.
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
                        "organization associated with this subdomain. " + \
                        "Please contact %s with any questions!" % (FromAddress.SUPPORT,)
def email_is_not_mit_mailing_list(email):
    # type: (Text) -> None
    """Prevent MIT mailing lists from signing up for Zulip"""
    if "@mit.edu" not in email:
        return
    username = email.rsplit("@", 1)[0]
    # Check whether the user exists and can get mail: a real user has a
    # pobox TXT record; NXDOMAIN means a mailing list or nonexistent user.
    try:
        DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
    except DNS.Base.ServerError as e:
        if e.rcode != DNS.Status.NXDOMAIN:
            raise
        raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
class RegistrationForm(forms.Form):
    """Signup form: full name, password (its required-ness depends on the
    realm's auth backends), optional subdomain/short name, and — when this
    signup also creates a realm — the realm name and a ToS checkbox."""
    MAX_PASSWORD_LENGTH = 100
    full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
    # The required-ness of the password field gets overridden if it isn't
    # actually required for a realm
    password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH)
    realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False)
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # Since the superclass doesn't accept random extra kwargs, we
        # remove realm_creation from the kwargs dict before initializing.
        realm_creation = kwargs['realm_creation']
        del kwargs['realm_creation']
        super(RegistrationForm, self).__init__(*args, **kwargs)
        if settings.TERMS_OF_SERVICE:
            self.fields['terms'] = forms.BooleanField(required=True)
        # realm_name is only required when this signup creates a new realm.
        self.fields['realm_name'] = forms.CharField(
            max_length=Realm.MAX_REALM_NAME_LENGTH,
            required=realm_creation)
    def clean_full_name(self):
        # type: () -> Text
        # check_full_name raises JsonableError (an API-style error); convert
        # it into a form-level ValidationError.
        try:
            return check_full_name(self.cleaned_data['full_name'])
        except JsonableError as e:
            raise ValidationError(e.msg)
    def clean_realm_subdomain(self):
        # type: () -> str
        """Validate the requested subdomain / short name: at least 3 chars,
        only [a-z0-9-], no leading/trailing '-', not reserved or taken."""
        if settings.REALMS_HAVE_SUBDOMAINS:
            error_strings = {
                'too short': _("Subdomain needs to have length 3 or greater."),
                'extremal dash': _("Subdomain cannot start or end with a '-'."),
                'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
                'unavailable': _("Subdomain unavailable. Please choose a different one.")}
        else:
            error_strings = {
                'too short': _("Short name needs at least 3 characters."),
                'extremal dash': _("Short name cannot start or end with a '-'."),
                'bad character': _("Short name can only have lowercase letters, numbers, and '-'s."),
                'unavailable': _("Short name unavailable. Please choose a different one.")}
        subdomain = self.cleaned_data['realm_subdomain']
        if not subdomain:
            # Blank is acceptable; the field is not required.
            return ''
        if len(subdomain) < 3:
            raise ValidationError(error_strings['too short'])
        if subdomain[0] == '-' or subdomain[-1] == '-':
            raise ValidationError(error_strings['extremal dash'])
        if not re.match('^[a-z0-9-]*$', subdomain):
            raise ValidationError(error_strings['bad character'])
        if is_reserved_subdomain(subdomain) or \
           get_realm(subdomain) is not None:
            raise ValidationError(error_strings['unavailable'])
        return subdomain
class ToSForm(forms.Form):
    """Single-checkbox form for accepting the Terms of Service."""
    terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
    """Homepage signup form: validates that the email address is allowed to
    join the target realm (the `realm` kwarg, or one derived from the email
    domain when subdomains are not in use)."""
    email = forms.EmailField(validators=[is_inactive])
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        self.realm = kwargs.pop('realm', None)
        super(HomepageForm, self).__init__(*args, **kwargs)
    def clean_email(self):
        # type: () -> str
        """Returns the email if and only if the user's email address is
        allowed to join the realm they are trying to join."""
        email = self.cleaned_data['email']
        # With a unique open realm, anybody may sign up for it.
        if get_unique_open_realm():
            return email
        # Otherwise, the user is trying to join a specific realm.
        realm = self.realm
        if realm is None and not settings.REALMS_HAVE_SUBDOMAINS:
            realm = get_realm_by_email_domain(email)
        if realm is None:
            if settings.REALMS_HAVE_SUBDOMAINS:
                raise ValidationError(_("The organization you are trying to "
                                        "join using {email} does not "
                                        "exist.").format(email=email))
            else:
                raise ValidationError(_("Your email address, {email}, does not "
                                        "correspond to any existing "
                                        "organization.").format(email=email))
        if realm.invite_required:
            raise ValidationError(_("Please request an invite for {email} "
                                    "from the organization "
                                    "administrator.").format(email=email))
        if not email_allowed_for_realm(email, realm):
            # Only {email} appears in the template; the previous code also
            # passed an unused string_id= kwarg, which is dropped here.
            raise ValidationError(
                _("Your email address, {email}, is not in one of the domains "
                  "that are allowed to register for accounts in this organization.").format(
                      email=email))
        # Zephyr mirror realms additionally reject MIT mailing lists.
        if realm.is_zephyr_mirror_realm:
            email_is_not_mit_mailing_list(email)
        return email
def email_is_not_disposable(email):
    # type: (Text) -> None
    """Reject addresses from known disposable-email domains."""
    domain = email_to_domain(email)
    if is_disposable_domain(domain):
        raise ValidationError(_("Please use your real email address."))
class RealmCreationForm(forms.Form):
    # This form determines whether users can create a new realm.
    # The email must be unused and not from a disposable-email domain.
    email = forms.EmailField(validators=[user_email_is_unique, email_is_not_disposable])
class LoggingSetPasswordForm(SetPasswordForm):
    """SetPasswordForm variant that routes the change through
    do_change_password, so the password update goes through Zulip's
    action layer rather than Django's default save."""
    def save(self, commit=True):
        # type: (bool) -> UserProfile
        do_change_password(self.user, self.cleaned_data['new_password1'],
                           commit=commit)
        return self.user
class ZulipPasswordResetForm(PasswordResetForm):
    """PasswordResetForm that (1) permits non-bot users without a usable
    password to reset, (2) refuses to act when password auth is disabled,
    and (3) sends mail through zerver.lib.send_email."""
    def get_users(self, email):
        # type: (str) -> QuerySet
        """Given an email, return matching user(s) who should receive a reset.
        This is modified from the original in that it allows non-bot
        users who don't have a usable password to reset their
        passwords.
        """
        # Bug fix: password_auth_enabled is a function, so the previous bare
        # reference was always truthy and this guard could never fire; it
        # must be called.
        if not password_auth_enabled():
            logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
            return []
        result = UserProfile.objects.filter(email__iexact=email, is_active=True,
                                            is_bot=False)
        if len(result) == 0:
            logging.info("Password reset attempted for %s; no active account." % (email,))
        return result
    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        # type: (str, str, Dict[str, Any], str, str, str) -> None
        """
        Currently we don't support accounts in multiple subdomains using
        a single email address. We override this function so that we do
        not send a reset link to an email address if the reset attempt is
        done on the subdomain which does not match user.realm.subdomain.
        Once we start supporting accounts with the same email in
        multiple subdomains, we may be able to refactor this function.
        A second reason we override this function is so that we can send
        the mail through the functions in zerver.lib.send_email, to match
        how we send all other mail in the codebase.
        """
        user = get_user_profile_by_email(to_email)
        attempted_subdomain = get_subdomain(getattr(self, 'request'))
        context['attempted_realm'] = False
        # When the attempt came from a different subdomain than the user's
        # realm, record which realm was attempted instead of sending a link.
        if not check_subdomain(user.realm.subdomain, attempted_subdomain):
            context['attempted_realm'] = get_realm(attempted_subdomain)
        send_email('zerver/emails/password_reset', to_user_id=user.id,
                   from_name="Zulip Account Security",
                   from_address=FromAddress.NOREPLY, context=context)
    def save(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Currently we don't support accounts in multiple subdomains using
        a single email addresss. We override this function so that we can
        inject request parameter in context. This parameter will be used
        by send_mail function.
        Once we start supporting accounts with the same email in
        multiple subdomains, we may be able to delete or refactor this
        function.
        """
        setattr(self, 'request', kwargs.get('request'))
        super(ZulipPasswordResetForm, self).save(*args, **kwargs)
class CreateUserForm(forms.Form):
    # Minimal form for administrators creating a user directly: just a
    # display name and an email address.
    full_name = forms.CharField(max_length=100)
    email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
    """Login form adding realm-deactivation, account-deactivation and
    wrong-subdomain checks on top of Django's AuthenticationForm."""

    def clean_username(self):
        # type: () -> str
        """Validate the username (email) field and return it unchanged.

        Raises ValidationError (with HTML-safe message) when the user's
        realm is deactivated, the account is deactivated, or the login was
        attempted on the wrong subdomain.
        """
        email = self.cleaned_data['username']
        try:
            user_profile = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            # Unknown email: fall through and let the normal authentication
            # failure path produce the error message.
            return email
        if user_profile.realm.deactivated:
            error_msg = u"""Sorry for the trouble, but %s has been deactivated.
Please contact %s to reactivate this group.""" % (
                user_profile.realm.name,
                FromAddress.SUPPORT)
            raise ValidationError(mark_safe(error_msg))
        if not user_profile.is_active and not user_profile.is_mirror_dummy:
            error_msg = (u"Sorry for the trouble, but your account has been "
                         u"deactivated. Please contact %s to reactivate "
                         u"it.") % (FromAddress.SUPPORT,)
            raise ValidationError(mark_safe(error_msg))
        if not check_subdomain(get_subdomain(self.request), user_profile.realm.subdomain):
            logging.warning("User %s attempted to password login to wrong subdomain %s" %
                            (user_profile.email, get_subdomain(self.request)))
            raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))
        return email
class MultiEmailField(forms.Field):
    """Form field accepting a comma-separated list of email addresses."""

    def to_python(self, emails):
        # type: (Text) -> List[Text]
        """Normalize data to a list of strings."""
        if not emails:
            return []
        pieces = emails.split(',')
        return [piece.strip() for piece in pieces]

    def validate(self, emails):
        # type: (List[Text]) -> None
        """Check if value consists only of valid emails."""
        super(MultiEmailField, self).validate(emails)
        for address in emails:
            validate_email(address)
class FindMyTeamForm(forms.Form):
    """Form for locating the organizations associated with some emails."""
    emails = MultiEmailField(
        help_text=_("Add up to 10 comma-separated email addresses."))

    def clean_emails(self):
        # type: () -> List[Text]
        address_list = self.cleaned_data['emails']
        # Cap the number of addresses that may be queried at once.
        if len(address_list) > 10:
            raise forms.ValidationError(_("Please enter at most 10 emails."))
        return address_list
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of ClusterResolvers for GCE instance groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
@tf_export('distribute.cluster_resolver.GCEClusterResolver')
class GCEClusterResolver(ClusterResolver):
  """ClusterResolver for Google Compute Engine.

  This is an implementation of cluster resolvers for the Google Compute Engine
  instance group platform. By specifying a project, zone, and instance group,
  this will retrieve the IP address of all the instances within the instance
  group and return a ClusterResolver object suitable for use for distributed
  TensorFlow.

  Note: this cluster resolver cannot retrieve `task_type`, `task_id` or
  `rpc_layer`. To use it with some distribution strategies like
  `tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to
  specify `task_type` and `task_id` in the constructor.

  Usage example with tf.distribute.Strategy:

    ```Python
    # On worker 0
    cluster_resolver = GCEClusterResolver("my-project", "us-west1",
                                          "my-instance-group",
                                          task_type="worker", task_id=0)
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        cluster_resolver=cluster_resolver)

    # On worker 1
    cluster_resolver = GCEClusterResolver("my-project", "us-west1",
                                          "my-instance-group",
                                          task_type="worker", task_id=1)
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        cluster_resolver=cluster_resolver)
    ```
  """

  def __init__(self,
               project,
               zone,
               instance_group,
               port,
               task_type='worker',
               task_id=0,
               rpc_layer='grpc',
               credentials='default',
               service=None):
    """Creates a new GCEClusterResolver object.

    This takes in a few parameters and creates a GCEClusterResolver project. It
    will then use these parameters to query the GCE API for the IP addresses of
    each instance in the instance group.

    Args:
      project: Name of the GCE project.
      zone: Zone of the GCE instance group.
      instance_group: Name of the GCE instance group.
      port: Port of the listening TensorFlow server (default: 8470)
      task_type: Name of the TensorFlow job this GCE instance group of VM
        instances belong to.
      task_id: The task index for this particular VM, within the GCE
        instance group. In particular, every single instance should be assigned
        a unique ordinal index within an instance group manually so that they
        can be distinguished from each other.
      rpc_layer: The RPC layer TensorFlow should use to communicate across
        instances.
      credentials: GCE Credentials. If nothing is specified, this defaults to
        GoogleCredentials.get_application_default().
      service: The GCE API object returned by the googleapiclient.discovery
        function. (Default: discovery.build('compute', 'v1')). If you specify a
        custom service object, then the credentials parameter will be ignored.

    Raises:
      ImportError: If the googleapiclient is not installed.
    """
    self._project = project
    self._zone = zone
    self._instance_group = instance_group
    self._task_type = task_type
    self._task_id = task_id
    self._rpc_layer = rpc_layer
    self._port = port
    self._credentials = credentials

    # Resolve application-default credentials only when the client library is
    # importable; otherwise fall through so the ImportError below fires (unless
    # the caller supplied a prebuilt `service`, in which case credentials are
    # never needed).
    if credentials == 'default':
      if _GOOGLE_API_CLIENT_INSTALLED:
        self._credentials = GoogleCredentials.get_application_default()

    if service is None:
      if not _GOOGLE_API_CLIENT_INSTALLED:
        raise ImportError('googleapiclient must be installed before using the '
                          'GCE cluster resolver')
      self._service = discovery.build(
          'compute', 'v1',
          credentials=self._credentials)
    else:
      self._service = service

  def cluster_spec(self):
    """Returns a ClusterSpec object based on the latest instance group info.

    This returns a ClusterSpec object for use based on information from the
    specified instance group. We will retrieve the information from the GCE APIs
    every time this method is called.

    Returns:
      A ClusterSpec containing host information retrieved from GCE.
    """
    # Only consider instances that are currently running.
    request_body = {'instanceState': 'RUNNING'}
    request = self._service.instanceGroups().listInstances(
        project=self._project,
        zone=self._zone,
        instanceGroups=self._instance_group,
        body=request_body,
        orderBy='name')

    worker_list = []

    # The list API is paginated; listInstances_next returns None once the last
    # page has been consumed, terminating this loop.
    while request is not None:
      response = request.execute()

      items = response['items']
      for instance in items:
        instance_name = instance['instance'].split('/')[-1]

        # A second API call per instance resolves its internal IP address.
        instance_request = self._service.instances().get(
            project=self._project,
            zone=self._zone,
            instance=instance_name)

        if instance_request is not None:
          instance_details = instance_request.execute()
          ip_address = instance_details['networkInterfaces'][0]['networkIP']
          instance_url = '%s:%s' % (ip_address, self._port)
          worker_list.append(instance_url)

      request = self._service.instanceGroups().listInstances_next(
          previous_request=request,
          previous_response=response)

    # Sort so that every worker computes an identical ordering of the cluster.
    worker_list.sort()
    return ClusterSpec({self._task_type: worker_list})

  def master(self, task_type=None, task_id=None, rpc_layer=None):
    """Returns the address of the master for this cluster.

    Args:
      task_type: (Optional) Overrides the constructor-specified task type used
        to look up the master's address in the cluster spec.
      task_id: (Optional) Overrides the constructor-specified task id.
      rpc_layer: (Optional) Overrides the RPC protocol prefix; falls back to
        the rpc_layer given to the constructor.

    Returns:
      The master address, prefixed with '<rpc_layer>://' when an RPC layer is
      configured, or '' when no task coordinates are available.
    """
    task_type = task_type if task_type is not None else self._task_type
    task_id = task_id if task_id is not None else self._task_id

    if task_type is not None and task_id is not None:
      master = self.cluster_spec().task_address(task_type, task_id)
      if rpc_layer or self._rpc_layer:
        return '%s://%s' % (rpc_layer or self._rpc_layer, master)
      else:
        return master

    return ''

  @property
  def task_type(self):
    return self._task_type

  @property
  def task_id(self):
    return self._task_id

  @task_type.setter
  def task_type(self, task_type):
    # task_type is deliberately immutable: the ClusterSpec produced by
    # cluster_spec() keys the worker list on the construction-time task_type.
    raise RuntimeError(
        'You cannot reset the task_type of the GCEClusterResolver after it has '
        'been created.')

  @task_id.setter
  def task_id(self, task_id):
    # Unlike task_type, the task id may be re-pointed after construction.
    self._task_id = task_id

  @property
  def rpc_layer(self):
    return self._rpc_layer

  @rpc_layer.setter
  def rpc_layer(self, rpc_layer):
    self._rpc_layer = rpc_layer
|
|
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
from airflow.operators import PythonOperator
from airflow.hooks import RedisHook
from airflow.models import Variable
from subdags.format_utility import get_threshold
from subdags.format_utility import get_device_type_from_name
from subdags.format_utility import get_previous_device_states
from subdags.format_utility import memcachelist
from subdags.format_utility import forward_five_min
from subdags.format_utility import backtrack_x_min
from subdags.events_utility import get_device_alarm_tuple
from subdags.events_utility import update_device_state_values
from subdags.events_utility import update_last_device_down
from airflow.operators import ExternalTaskSensor
import json
import logging
import traceback
from airflow.hooks import MemcacheHook
import time
import math
import sys
#TODO: Create operator changed from previous
# Default arguments applied to every task in this DAG.
default_args = {
    'owner': 'wireless',
    'depends_on_past': False,
    'start_date': datetime.now() - timedelta(minutes=2),
    'email': ['vipulsharma144@gmail.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 0,
    'retry_delay': timedelta(minutes=1),
    'provide_context': True,
    'catchup': False,
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
############################## DAG CONFIG ENDS ##############################
# Registries of aggregation tasks, populated elsewhere in this module.
aggregate_nw_tasks={}
aggregate_sv_tasks={}
logging.basicConfig(level=logging.ERROR)
# Redis/Memcache connections used throughout the formatting tasks.
redis_hook_4 = RedisHook(redis_conn_id="redis_hook_4")
# NOTE(security): eval() on Airflow Variable contents executes arbitrary code
# from the metadata DB. Anyone with Variable-edit access can run code on the
# workers; ast.literal_eval would be safer for these plain dict/list payloads.
rules = eval(Variable.get('rules'))
memc_con = MemcacheHook(memc_cnx_id = 'memc_cnx')
exclude_network_datasource = eval(Variable.get("exclude_network_datasource"))
databases=eval(Variable.get('databases'))
redis_hook_5 = RedisHook(redis_conn_id="redis_hook_5")
redis_hook_2 = RedisHook(redis_conn_id="redis_cnx_2")
redis_availablity_0 = RedisHook(redis_conn_id="redis_availablity_0")
# Snapshots of previous device states, loaded once at DAG-parse time.
all_devices_states = get_previous_device_states(redis_hook_5)
all_devices_states_rta = get_previous_device_states(redis_hook_5,"rta")
redis_hook_network_alarms = RedisHook(redis_conn_id="redis_hook_network_alarms")
event_rules = eval(Variable.get('event_rules'))
operators = eval(Variable.get('operators')) #get operator Dict from
config = eval(Variable.get("system_config"))
debug_mode = eval(Variable.get("debug_mode"))
activate_all_tab = eval(Variable.get("activate_all_tab"))
result_nw_memc_key = []
result_sv_memc_key = []
# ANSI terminal color escape codes for console output.
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
################################# Init Global Var ends #################################
def format_etl(parent_dag_name, child_dag_name, start_date, schedule_interval, celery_queue):
network_slots = list(redis_hook_4.get_keys("nw_ospf*"))
service_slots = list(redis_hook_4.get_keys("sv_ospf*"))
network_slots.extend(redis_hook_4.get_keys("nw_vrfprv*")) #TODO: very bad approach to get pub and vrf daata
service_slots.extend(redis_hook_4.get_keys("sv_vrfprv*"))
network_slots.extend(redis_hook_4.get_keys("nw_pub*")) #TODO: very bad approach to get pub and vrf daata
service_slots.extend(redis_hook_4.get_keys("sv_pub*"))
logging.info("****Calling Format ETL Subdag****")
temp_dir_path = ""
dag_subdag_format = DAG(
dag_id="%s.%s" % (parent_dag_name, child_dag_name),
schedule_interval=schedule_interval,
start_date=start_date,
)
def get_severity_values(service, rules_map=None):
    """Return the ordered list of severity threshold values for *service*.

    Args:
        service: rule key, typically "<device_type>_<datasource>".
        rules_map: optional dict of rules to read from; defaults to the
            module-level ``rules`` Variable so existing callers are unchanged.

    Returns:
        The "value" entry of Severity1..SeverityN in order, or [] when the
        service has no configured rules.
    """
    if rules_map is None:
        rules_map = rules
    all_sev = rules_map.get(service)
    sev_values = []
    if not all_sev:
        # Bug fix: the original computed severity_len inside a try block and,
        # when an exception fired before the assignment, fell through to the
        # range() call with severity_len unbound (NameError). Guard explicitly
        # and return an empty list instead.
        logging.warning("No rules in rules varaible for %s"%(service))
        return sev_values
    for i in range(1, len(all_sev) + 1):
        sev_values.append(all_sev.get("Severity" + str(i))[1].get("value"))
    return sev_values
#TODO: Add EXOR iff required
def evaluate_condition(rules,current_value):
    """Evaluate a threshold rule list against a current value.

    `rules` here is one service's rule list (shadows the module-level dict):
    odd indices hold {'value', 'operator', 'name'} dicts and even indices hold
    'AND'/'OR' conjunctions. Returns True/False, or the *string* 'False' when
    neither eval branch below runs -- note that the string 'False' is truthy,
    so callers doing `if result:` would treat that fallback as a match.
    """
    # NOTE(security): this function builds expressions from rule/threshold
    # strings and runs them through eval(); rule content must be trusted.
    result = 'False'
    result_all = []
    for i in range(1,len(rules),2):
        threshold_value = rules[i].get('value')  #get threshold from rules dict
        operator = rules[i].get('operator')  #get operator from rules
        service_name = rules[i].get('name')
        symbol = operators.get(operator)  #get comparison symbol, e.g. '>' or '<='
        if threshold_value != '' and current_value != '':
            try:
                # Numeric comparison first...
                if eval("%s%s%s"%(float(current_value),str(symbol),float(threshold_value))):
                    result_all.append('True')
                else:
                    result_all.append('False')
            except (NameError, SyntaxError, TypeError, ValueError):
                # ...falling back to a quoted string comparison when either
                # side is not numeric.
                if eval('\''+str(current_value)+'\''+symbol+'\''+str(threshold_value)+'\''):
                    result_all.append('True')
                else:
                    result_all.append('False')
            except Exception:
                logging.info("Some WTF Exception")
                if eval('\''+str(current_value)+'\''+symbol+'\''+str(threshold_value)+'\''):
                    result_all.append('True')
                else:
                    result_all.append('False')
        else:
            result_all.append('False')
        try:
            # NOTE(review): due to operator precedence this parses as
            # `(== 'AND') or ((== 'OR') and (!= None))`; the None guard only
            # applies to the 'OR' comparison. Harmless for string inputs, but
            # probably not what was intended.
            if rules[i+1] == 'AND' or rules[i+1] == 'OR' and rules[i+1] != None:
                result_all.append(rules[i+1].lower())
        except IndexError:
            # No conjunction -- the rule list ended.
            continue
    if len(result_all) == 1:
        result = eval(result_all[0])
    elif len(result_all) % 2 != 0:
        # Odd length means booleans alternate with and/or tokens; join into a
        # single boolean expression and evaluate it.
        result = eval(" ".join(result_all))
    else:
        logging.info("Please Check the syntax of rules")
    return result
def calculate_severity(service,cur,host_state="",ds=""):
    """Map a current value to a severity name using the configured rules.

    Checks Severity1..SeverityN in order and returns the first severity whose
    condition matches `cur`. Returns the host state for pl data on a down
    host, 'unknown' when the service has no rules, 'up' for pl/rta data with
    no matching rule, and 'ok' otherwise.
    """
    final_severity = []
    global rules
    if not (ds == "pl" and host_state == "down"):
        try:
            total_severities = rules.get(service)  #TODO: Handle if service not found
            total_severities_len = len(total_severities)
            # Severity1 is checked first and therefore has top priority.
        except TypeError:
            # rules.get(service) returned None -> len(None) raised.
            logging.info("The specified service "+service+" does not have a rule specified in rules variable")
            return 'unknown'
        for i in range(1,total_severities_len+1):
            current_severity = ""
            sv_rules = total_severities.get("Severity"+str(i))
            if sv_rules[0]:
                current_severity = sv_rules[0]
            else:
                current_severity = 'unknown'
                logging.warning("Please provide severity name for " + str(service))
            result = evaluate_condition(sv_rules,cur)
            if result:
                # First matching severity wins.
                return current_severity
            else:
                continue
    elif host_state=="down" and ds == "pl":
        # Packet-loss data on a down host: report the host state itself.
        return host_state
    else:
        # NOTE(review): unreachable -- the `if not (...)` above together with
        # the elif covers every case, so this branch can never run.
        return "up"
    if (ds == "pl" or ds == "rta"):
        # No severity rule matched for ping datasources: host is healthy.
        return 'up'
    else:
        return "ok"
#only required for UP and Down servereties of network devices
#only required for UP and Down servereties of network devices
def calculate_refer(hostname,current_sev,ds,all_down_devices_states,processing_time):
    """Return the 'since' timestamp to report for a device's up/down state.

    For up/down severities: on an up->down transition, the current processing
    time; on a down->up transition (or no change), the previously stored
    'since'. For any other severity, the stored 'since' when the host is
    known. Implicitly returns None on lookup failures (all except paths and
    fall-throughs below), which callers stringify.
    """
    if current_sev == "down" or current_sev == "up":
        try:
            if all_down_devices_states.get(hostname).get('state') == None:
                logging.info("Got None")
                return 0
            if all_down_devices_states.get(hostname).get('state') != current_sev: # to see if severity has chnaged
                old_sev = all_down_devices_states.get(hostname).get('state')
                if current_sev == "down" and old_sev == "up":
                    return processing_time #backtrack 5 min slot to give last down time
                elif current_sev == "up" and old_sev == "down":
                    #return backtrack_x_min(processing_time,300)
                    return all_down_devices_states.get(hostname).get('since')
                    #return forward_five_min(int(time.time())) #Formawrd 5 min slot
            elif all_down_devices_states.get(hostname).get('state') == current_sev:
                #TODO : See this please something is wrong
                # NOTE(review): when 'since' equals the string 'None' nothing
                # is returned here -- the function falls through to None.
                if all_down_devices_states.get(hostname).get('since') != 'None':
                    since_time = all_down_devices_states.get(hostname).get('since')
                    return since_time
        except AttributeError:
            # .get(hostname) returned None -- host not in the state dict.
            logging.info("Problem in fetching the refer value .Not able to find the host %s "%hostname)
            #traceback.print_exc()
        except Exception:
            logging.info("Error while calculating refer")
    else:
        try:
            if all_down_devices_states.get(hostname):
                since_time = all_down_devices_states.get(hostname).get('since')
                return since_time
        except Exception:
            logging.info("Device %s not found in the dict for ds : %s"%(hostname,ds))
"""
This function is used to calculate age for the give severity and add it in the dict
"""
def calculate_age(hostname, current_severity, datasource, current_time, state_map=None):
    """Return the 'age' (since timestamp) for a device's current severity.

    Args:
        hostname: device host name used as the state-dict key.
        current_severity: severity just computed for the device.
        datasource: "pl" or "rta"; selects which module-level state snapshot
            to consult when state_map is not supplied.
        current_time: timestamp to report when the severity has changed.
        state_map: optional explicit {host: {'state', 'since'}} dict,
            overriding the module-level snapshots (added for testability;
            existing callers are unchanged).

    Returns:
        The stored 'since' when the severity is unchanged, current_time when
        it changed (or 'since' is the string 'None'), 0 on bad datasource or
        missing 'since', and None when the host lookup fails.
    """
    if state_map is not None:
        previous_device_state = state_map
    elif datasource == "pl":
        previous_device_state = all_devices_states
    elif datasource == "rta":
        previous_device_state = all_devices_states_rta
    else:
        logging.info("Datasource is neither rta nor pl")
        return 0
    try:
        device_state = previous_device_state.get(hostname)
        if device_state.get("state") == current_severity:
            age = device_state.get("since")
            # Bug fix: the original did `return age` unconditionally here,
            # which made the sanity checks below unreachable dead code -- a
            # stored string 'None' was returned verbatim instead of being
            # replaced with current_time.
            if age != None and age != '':
                if age == 'None':
                    return current_time
                else:
                    return age
            else:
                logging.info("Got the devices %s but unable to fetch the since key "%(hostname))
                return 0
        else:
            # Severity changed: the new state starts now.
            return current_time
    except Exception:
        # device_state is None (unknown host) or malformed; the refer update
        # path will create the entry later.
        logging.info("Unable to get state for device %s will be created when updating refer"%(hostname))
        traceback.print_exc()
###########################################################------------NETWORK--------------- ##################################################
def network_format(**kwargs):
    """Format one redis slot of raw ping poll data into per-datasource dicts.

    Reads the slot name from the Airflow task_instance_key_str, pulls the raw
    rows from redis, expands each row into one dict per datasource (rta, pl,
    ...) with severity/refer/age computed, and pushes the result list back to
    redis under "<slot>_result".
    """
    device_to_be_converted_down =eval(Variable.get("device_converted_down"))
    # Slot name is embedded in the task instance key, e.g.
    # nw_<machine>_<site>... -- recover it by slicing off the task prefix/suffix.
    redis_queue_slot=kwargs.get('task_instance_key_str').split("_")[2:-3]
    all_down_devices_states = get_previous_device_states(redis_hook_5,"down")
    state_has_changed = True #TODO: this variable is used to decided that do
    #we need to update teh refer dict in memc and redis \
    #i.e it is only changed if there is some updation in the dict , currently defaulted to dict
    logging.info("Getting from redis Key ->"+ "_".join(redis_queue_slot))
    redis_queue_slot="_".join(redis_queue_slot)
    slot_data = redis_hook_4.rget(redis_queue_slot)
    if len(slot_data) <= 0 :
        return "No Data"
    #slot_data = [[u'110556',u'10.171.132.2',0,1491822300,1491622173,u'rta=1.151ms;50.000;60.00;0;pl=30%;10;20;; rtmax=1.389ms;;;; rtmin=1.035ms;;;;']] * 10
    network_list = []
    for slot in slot_data:
        # NOTE(security): rows are stored as python reprs and rebuilt with
        # eval(); the redis content must be trusted.
        slot=eval(slot)
        # Template row; a .copy() is appended per datasource below.
        network_dict = {
            'site': 'unknown' ,
            'host': 'unknown',
            'service': 'unknown',
            'ip_address': 'unknown',
            'severity': 'unknown',
            'age': 'unknown',
            'ds': 'unknown',
            'cur': 'unknown',
            'war': 'unknown',
            'cric': 'unknown',
            'check_time': 'unknown',
            'local_timestamp': 'unknown' ,
            'refer':'unknown',
            'min_value':'unknown',
            'max_value':'unknown',
            'avg_value':'unknown',
            'machine_name':'unknown'
            }
        try:
            device_type = get_device_type_from_name(slot[0]) #HANDLE IF DEVICE NOT FOUND
        except ValueError:
            logging.error("Couldn't find Hostmk dict need to calculate the thresholds Please run SYNC DAG")
            #TODO: Automatically run sync dag here if not found
        except Exception:
            # NOTE(review): device_type stays unbound on these paths; the
            # key built below would then raise NameError.
            logging.error("Unable to find the deivce tpe for %s"%slot[0])
            traceback.print_exc()
        # Parse the perfdata string (last element) into per-datasource values.
        threshold_values = get_threshold(slot[-1])
        rt_min_cur = threshold_values.get('rtmin').get('cur')
        rt_max_cur = threshold_values.get('rtmax').get('cur')
        # slot[2] is the host state flag: 0 => up, non-zero => down.
        host_state = "up" if not int(slot[2]) else "down"
        network_dict['site'] = "_".join(redis_queue_slot.split("_")[1:4])
        network_dict['host'] = slot[0]
        network_dict['service'] ="ping"
        network_dict['ip_address'] = slot[1]
        network_dict['check_time'] = int((slot[3]))if slot[3] else 0
        network_dict['local_timestamp'] = forward_five_min(int(slot[3])) #cieled in next multiple of 5
        network_dict['min_value'] = rt_min_cur
        network_dict['max_value'] = rt_max_cur
        network_dict['avg_value'] = round((float(rt_max_cur)+float(rt_min_cur))/2,2)
        network_dict['machine_name'] = redis_queue_slot.split("_")[1]
        for data_source in threshold_values:
            if data_source in exclude_network_datasource:
                continue
            value = threshold_values.get(data_source).get("cur")
            key=str(device_type+"_"+data_source)
            thresholds_from_rules = get_severity_values(key)
            # NOTE(review): this re-derives host_state from `value == 100`
            # for *every* datasource (including rta), overwriting the
            # slot[2]-based state above -- confirm this is intended.
            if float(value) == 100:
                host_state = "down"
            else:
                host_state = "up"
            if network_dict['ip_address'] in device_to_be_converted_down:
                # Operator-maintained override list: force these IPs down.
                network_dict['severity'] ="down"
            else:
                network_dict['severity'] =calculate_severity(key,value,host_state,data_source)
            network_dict['ds'] = data_source
            network_dict['cur'] = value
            network_dict['war'] = thresholds_from_rules[1] if thresholds_from_rules else ''
            network_dict['cric'] = thresholds_from_rules[0] if thresholds_from_rules else ''
            network_dict['refer'] = str(calculate_refer(slot[0],network_dict['severity'],data_source,all_down_devices_states,network_dict['local_timestamp'])) #TODO: Here the same refer is beig calculate for pl and rta which will although be one only
            network_dict['age'] = calculate_age(slot[0],network_dict['severity'],data_source,network_dict['local_timestamp'])
            network_list.append(network_dict.copy())
    try:
        if len(network_list) > 0:
            redis_hook_4.rpush(str(redis_queue_slot)+"_result",network_list)
            logging.info("Redis Connection made and data inserted successfully")
            logging.info("Successfully Inserted data to Redis KEY :"+redis_queue_slot+"_result")
        else:
            # Push an empty marker so downstream aggregation still finds the key.
            logging.info("No Data At %s"%(str(redis_queue_slot)))
            redis_hook_4.rpush(str(redis_queue_slot)+"_result",[])
    except Exception:
        logging.info("Unable to Insert to Redis")
        traceback.print_exc()
################################################################SERVICE############################################################
def service_format(**kwargs):
    """Format one redis slot of raw service poll data into result dicts.

    Python 2 code (uses the `print` statement and `except X,e` syntax).
    Pulls raw rows from redis for this task's slot, builds one dict per row
    with severity/threshold data, pushes the list to "<slot>_result", and
    feeds several memcache helper keys used by KPI/provisioning consumers.
    """
    # Service groups that need extra memcache bookkeeping below.
    kpi_helper_services =['wimax_dl_intrf', 'wimax_ul_intrf', 'cambium_ul_jitter','cambium_rereg_count']
    rad5k_helper_service = ['rad5k_ss_dl_uas','rad5k_ss_ul_modulation']
    wimax_sector_id_list = ['wimax_pmp1_ul_util_bgp','wimax_pmp2_dl_util_bgp','wimax_pmp1_dl_util_bgp','wimax_pmp2_ul_util_bgp']
    provis_services= ['wimax_ul_rssi','wimax_dl_rssi','wimax_dl_cinr','wimax_dl_cinr','wimax_ss_ptx_invent','cambium_ul_rssi','cambium_dl_rssi','cambium_dl_jitter','cambium_ul_jitter','cambium_rereg_count','radwin_rssi','radwin_uas']
    rad5k_jet_helper_service = ['rad5kjet_ss_uas','rad5kjet_ss_ul_modulation']
    ss_provis_helper_serv_data = []
    # Template row for each formatted service record.
    data_dict_sample = {'age': 'unknown',
                        'check_time': 'unknown',
                        'ds': 'unknown',
                        'host': 'unknown',
                        'ip_address': 'unknown',
                        'local_timestamp': 'unknown',
                        'cric': 'unknown',
                        'cur': 'unknown',
                        'war':'unknown',
                        'refer': 'unknown',
                        'service': 'unknown',
                        'severity':'unknown',
                        'site': 'unknown',
                        'machine_name':'unknown'}
    redis_queue_slot=kwargs.get('task_instance_key_str').split("_")[2:-3]
    redis_queue_slot="_".join(redis_queue_slot)
    logging.info("Getting from redis Key ->"+ redis_queue_slot)
    site_name = "_".join(redis_queue_slot.split("_")[1:4])
    device_down_list = redis_hook_4.rget("current_down_devices_%s"%site_name)
    start_time = time.time()
    slot_data = redis_hook_4.rget(redis_queue_slot)
    logging.info("Time for redis Input = "+ str(time.time() - start_time))
    service_list = []
    start_time = time.time()
    try:
        if len(slot_data) > 0:
            for device_data in slot_data:
                # NOTE(review): this aliases the shared template dict rather
                # than copying it, so values written in one iteration persist
                # into the next; the .copy() at append time protects the
                # output rows, not the template.
                data_dict = data_dict_sample
                # NOTE(security): rows are python reprs rebuilt with eval();
                # redis content must be trusted.
                device_data = eval(device_data)
                refer = ""
                ds_values = device_data[7].split('=')
                # Skip malformed rows, devices currently down, and rows with
                # an empty perfdata field.
                if len(device_data) < 8 or device_data[0] in device_down_list or not len(device_data[-1]):
                    logging.info("Ommiting device %s"%device_data[0])
                    continue
                # Additional handling for wimax PMP sector ports: refer is the
                # sector id cached in memcache under "<host>_<pmpN>_sec".
                if device_data[2] in wimax_sector_id_list:
                    port_type = device_data[2].split("_")[1]
                    key1 = str(device_data[0])+"_"+str(port_type)+"_sec"
                    try:
                        refer = memc_con.get(key1)
                    except Exception:
                        logging.info("Unable to find wimax sector data from MEMC")
                severity_war_cric = get_severity_values(device_data[2])
                data_dict['host'] = device_data[0]
                data_dict['ip_address'] = device_data[1]
                data_dict['ds'] = ds_values[0] if len(ds_values) >= 1 else ''
                data_dict['check_time'] = int(device_data[4]) if device_data[4] else 0
                data_dict['local_timestamp'] = forward_five_min(int(device_data[4]))#Floored in previous multiple of 5
                data_dict['service'] = device_data[2]
                data_dict['cur'] = ds_values[1].split(';')[0] if len(ds_values) > 1 else ''
                data_dict['war'] = severity_war_cric[1] if len(severity_war_cric) > 1 else ''
                data_dict['cric'] = severity_war_cric[0] if len(severity_war_cric) > 0 else ''
                data_dict['severity'] =calculate_severity(device_data[2],ds_values[1].split(';')[0]) if len(ds_values) > 1 else '' #TODO: get data from static DB
                data_dict['age'] = int(device_data[5]) if device_data[5] else 0 #TODO: Calclate at my end change of severiaty
                data_dict['site'] = site_name
                data_dict['refer'] = refer
                # Services report a single sample, so min == max == avg == cur.
                data_dict['min_value'] = data_dict['cur']
                data_dict['max_value'] = data_dict['cur']
                data_dict['avg_value'] = data_dict['cur']
                data_dict['machine_name']=redis_queue_slot.split("_")[1]
                service_list.append(data_dict.copy())
                ip_address = data_dict['ip_address']
                # NOTE(review): leftover debug print for one hard-coded IP
                # (Python 2 print statement).
                if ip_address == '10.133.26.79':
                    print data_dict
                value = data_dict['cur']
                # Cache rad5k modulation/UAS samples for downstream helpers.
                if str(data_dict['service']) in rad5k_helper_service:
                    if str(data_dict['service']) == 'rad5k_ss_ul_modulation':
                        key = ip_address+ "_rad5k_ss_ul_mod"
                        key = str(key)
                        memcachelist(key,value,memc_con)
                    elif str(data_dict['service']) == 'rad5k_ss_dl_uas':
                        key = ip_address+ "_uas_list"
                        key = str(key)
                        memcachelist(key,value,memc_con)
                if str(data_dict['service']) in rad5k_jet_helper_service:
                    if str(data_dict['service']) == 'rad5kjet_ss_ul_modulation':
                        key = ip_address+ "_rad5kjet_ss_ul_mod"
                        key = str(key)
                        memcachelist(key,value,memc_con)
                    elif str(data_dict['service']) == 'rad5kjet_ss_uas':
                        key = ip_address+ "_rad5kjet_uas_list"
                        key = str(key)
                        memcachelist(key,value,memc_con)
                # Collect provisioning KPI rows for the availability push below.
                if str(data_dict['service']) in provis_services:
                    ss_provis_helper_serv_data.append({
                        'device_name': str(data_dict['host']),
                        'service_name': str(data_dict['service']),
                        'current_value': str(data_dict['cur'])
                    })
                if str(data_dict['service']) in kpi_helper_services:
                    key = ip_address+ "_"+str(data_dict['service'])
                    key = str(key)
                    logging.info("Setting %s"%(key))
                    memcachelist(key,data_dict['severity'],memc_con)
                if device_data[2] in provis_services:
                    key = "provis:"+str(device_data[0])+":"+str(device_data[2])
                    try:
                        memc_con.set(key,value)
                    except Exception:
                        logging.info("Unable to set Provisional KPI data into MEMC")
            logging.info("Time for FOR LOOP = "+ str(time.time() - start_time))
        else:
            logging.info("No Data for input")
    except IndexError,e:
        logging.info("Some Problem with the dataset ---> \n"+str(device_data))
        traceback.print_exc()
    except Exception:
        traceback.print_exc()
    try:
        json_obj = json.dumps(service_list)
        logging.info("About to dump data to Redis of size(JSON) of Service : KB -" + str(sys.getsizeof(json_obj)/1024 ))
        # NOTE(review): json.dumps([]) is "[]" (length 2), so this check is
        # always true and the else branch below is unreachable.
        if len(json_obj) > 0 :
            redis_hook_4.rpush(str(redis_queue_slot)+"_result",service_list)
            logging.info("Successfully Inserted data to redis KEY :"+redis_queue_slot+"_result of length "+str(len(service_list)))
        else:
            logging.info("0 Len data recieved after processing omitting ALL")
        if len(ss_provis_helper_serv_data) > 0:
            redis_hook_4.rpush("%s_provis_availablity"%(site_name),ss_provis_helper_serv_data)
        else:
            logging.info("No Provis helper data")
    except Exception:
        logging.info("Unable to Wite to redis key created")
        traceback.print_exc()
#TODO: Break this task in two task nw and sv aggregate data
#TODO: Break this task in two task nw and sv aggregate data
def aggregate_nw_data(**kwargs):
    """Aggregate per-slot network results for one machine and push the
    combined payload to redis and memcache under "nw_agg_nocout_<machine>"."""
    logging.info("Aggregating Network Data")
    nw_memc_keys = eval(Variable.get("network_memc_key"))
    # Machine name is encoded in the task instance key.
    task_for_machine=kwargs.get('task_instance_key_str').split("_")[3]
    global redis_hook_4
    nw_data = {}
    nw_data[str(task_for_machine)] = []
    #Filter the data with None values i.e slot which are not processed or no data there to report
    for key in nw_memc_keys:
        if task_for_machine in key:
            redis_data_nw = redis_hook_4.rget(key)
            current_machine = key.split("_")[1]
            if redis_data_nw != None:
                # NOTE(review): assumes key.split("_")[1] equals
                # task_for_machine; if it differs, .get() returns None and the
                # .append raises AttributeError -- confirm the key format.
                nw_data.get(current_machine).append(redis_data_nw)
            else:
                logging.info("There is No Network data for slot in memc : "+key )
        else:
            continue
    #TODO: change the code so the setting of key for both become independent i.e set above
    try:
        logging.info("Here Dumping data the number should be 1 the actual number is %d"%len(nw_data))
        # NOTE(review): the loop variable shadows the nw_memc_keys list above,
        # and the `return True` inside the loop exits after the first key --
        # correct only because nw_data is expected to hold exactly one key.
        for nw_memc_keys in nw_data:
            logging.info("About to dump data to Memcache of size of Network : " + str(sys.getsizeof(nw_data)))
            json_obj = json.dumps(nw_data)
            logging.info("About to dump data to Memcache of size(JSON) of Service : " + str(sys.getsizeof(json_obj)))
            redis_hook_4.rpush("nw_agg_nocout_"+str(nw_memc_keys),nw_data.get(nw_memc_keys))
            memc_con.set("nw_agg_nocout_"+str(nw_memc_keys),nw_data.get(nw_memc_keys))
            logging.info("Dumped Network data to Memcache of length " + str(len(nw_data)) +" at "+ "nw_nocout_"+str(nw_memc_keys))
            return True
    except Exception:
        logging.error("Unable to put in the combined Network data to memcache.")
        traceback.print_exc()
        return False
def store_availablity_data_in_redis(**kwargs):
    """Store one site's availability snapshot (network or service results)
    into a compressed Redis sorted set, scored by the 5-minute-aligned
    epoch timestamp.

    kwargs['params'] must provide 'site' and 'dev_type' ("network" or
    anything else for service).  Honors the global ``debug_mode`` flag.
    """
    nocout_site_name = kwargs.get('params').get('site')
    data_type = kwargs.get('params').get('dev_type')
    all_data = []
    # Round the wall clock down to the previous 5-minute boundary.
    this_time = datetime.now()
    t_stmp = this_time + timedelta(minutes=-(this_time.minute % 5))
    t_stmp = t_stmp.replace(second=0,microsecond=0)
    current_time = int(time.mktime(t_stmp.timetuple()))
    # fix: removed dead `data = redis_hook_4.get("%s")` -- it queried the
    # literal key "%s" and its result was immediately overwritten below.
    try:
        if data_type == "network":
            set_name = nocout_site_name + "_network"
            keys = redis_hook_4.get_keys("nw_%s_*_result"%(nocout_site_name))
        else:
            set_name = nocout_site_name + "_service"
            keys = redis_hook_4.get_keys("sv_%s_*_result"%(nocout_site_name))
        for key in keys:
            data = redis_hook_4.rget(key)
            for datum in data:
                # NOTE(review): eval() of queue payloads is unsafe if the
                # queue can be written by untrusted producers -- consider
                # ast.literal_eval.  TODO confirm payload producer.
                datum = eval(datum)
                all_data.extend(datum)
        if not debug_mode:
            try:
                redis_availablity_0.zadd_compress(set_name,current_time,all_data)
            except Exception:
                logging.error("Unable to add availablity data in redis check zadd_compress in redis_loader_hook")
        else:
            logging.info("Debug Mode is active Not inserting availablity data")
    except Exception as e:  # fix: was Python-2-only `except Exception,e`
        logging.info("Error in storing redis : %s"%(e))
def aggregate_sv_data(**kwargs):
    """Collect the per-slot service results for one machine and push the
    combined payload to Redis under ``sv_agg_nocout_<machine>``.

    Expects ``task_instance_key_str`` in kwargs; the machine name is its
    4th "_"-separated token.  Returns True after the first successful
    insert, False when nothing was inserted or an error occurred.
    """
    logging.info("Aggregating Service Data")
    # NOTE(review): eval() of an Airflow Variable is unsafe if the variable
    # can be edited by untrusted users -- consider ast.literal_eval.
    sv_memc_keys = eval(Variable.get("service_memc_key"))
    task_for_machine = kwargs.get('task_instance_key_str').split("_")[3]
    sv_data = {str(task_for_machine): []}
    # Filter the data with None values i.e slots which are not processed or
    # have no data to report.
    for key in sv_memc_keys:
        if task_for_machine not in key:
            continue
        redis_data_sv = redis_hook_4.rget(key)
        current_machine = key.split("_")[1]
        if redis_data_sv is not None:
            # fix: setdefault guards against a machine segment that differs
            # from task_for_machine (previously None.append -> AttributeError).
            sv_data.setdefault(current_machine, []).append(redis_data_sv)
        else:
            logging.warning("There is No Service data for slot in Redis : "+key )
    # TODO: change the code so the setting of key for both become independent i.e set above
    try:
        logging.info("Here Dumping data the number should be 1 the actual number is %d"%len(sv_data))
        # fix: loop variable no longer shadows sv_memc_keys.
        for machine_name in sv_data:
            logging.info("About to dump data to Memcache of size of Service : " + str(sys.getsizeof(sv_data)))
            json_obj = json.dumps(sv_data)
            logging.info("About to dump data to Redis of size(JSON) of Service :(SIZE) " + str(sys.getsizeof(json_obj)) + " (NAME) sv_agg_nocout_"+str(machine_name))
            if sv_data.get(machine_name):
                redis_hook_4.rpush("sv_agg_nocout_"+str(machine_name),sv_data.get(machine_name))
                logging.info("Inserted data in redis")
                return True
            else:
                logging.info("Unable to insert SV ")
    except Exception:
        logging.error("Unable to put in the combined service data to memcache.")
        traceback.print_exc()
    return False
#this function is used to get all the slots and then combine their data into one site data
def extract_and_distribute_nw(**kwargs):
    """Combine every processed slot's network result for one site and push
    the derived alarm tuples onto the site's ``network_smptt_<site>`` queue.

    kwargs['params'] must provide 'site'.  Reads the module-level
    ``network_slots`` list and ``event_rules``.
    """
    site = kwargs.get('params').get('site')
    all_pl_rta_trap_dict = {site: []}
    start_time = time.time()
    for redis_key in network_slots:
        if "_result" in redis_key:
            continue
        if site not in redis_key:
            continue
        # fix: initialise before the try -- previously a failed rget left
        # network_data unbound and the len() below raised NameError.
        network_data = []
        try:
            network_data = redis_hook_4.rget(redis_key+"_result")
        except Exception:
            logging.error("Unable to get the result key from redis")
        # Truthiness check also tolerates rget returning None for a
        # missing key (previously len(None) -> TypeError).
        if network_data:
            all_pl_rta_trap_dict.get(site).extend(get_device_alarm_tuple(network_data,event_rules)) #TODO: Imporove args
        else:
            logging.info("No Data Found in redis at key: %s"%(redis_key+"_result"))
    logging.info("TIME : %s"%(time.time() - start_time))
    start_time = time.time()
    if len(all_pl_rta_trap_dict.get(site)) > 0:
        redis_key = 'network_smptt_%s' % site
        try:
            redis_hook_4.rpush(redis_key,all_pl_rta_trap_dict.get(site))
            logging.info("successfully inserted data into key : %s"%(redis_key))
        except Exception:
            logging.error("Unable to insert data to redis.")
    else:
        logging.info("No Traps recieved")
    logging.info("TIME : %s"%(time.time() - start_time))
def aggregate_smptt(**kwargs):
    """Gather all ``network_smptt_<machine>_*`` slot queues for a machine
    and push the concatenated trap list onto the machine's snmptt queue.

    kwargs['params'] must provide 'machine'.  Honors the module-level
    ``debug_mode`` and ``activate_all_tab`` switches.
    """
    logging.info("Aggregating Network SMPTT Data")
    machine_data = []
    task_for_machine = kwargs.get('params').get('machine')
    network_slots_smptt = redis_hook_4.get_keys("network_smptt_%s_*"%task_for_machine)
    for machine_slots_in_redis in network_slots_smptt:
        machine_data.extend(redis_hook_4.rget(machine_slots_in_redis))
    logging.info("--->%s"%len(machine_data))
    if len(machine_data) > 0:
        # fix: removed a no-op loop that re-assigned each element to itself.
        logging.info("%s of length %s"%("queue:network:snmptt:%s"%task_for_machine,len(machine_data)))
        if not debug_mode and activate_all_tab:
            redis_hook_network_alarms.rpush("queue:network:snmptt:%s"%task_for_machine,machine_data)
        else:
            logging.info("Debug Mode is active , not inserting into redis")
    else:
        logging.info("No Data Foubnd onto site : %s" %task_for_machine)
#########################################################################TASKS#######################################################################
# Registries keyed by site / machine name; operators are created once here
# and looked up later when wiring task dependencies.
aggregate_nw_smptt_tasks = {}
event_site_tasks = {}
service_format_sensor_dict = {}
network_format_sensor_dict = {}
network_sensor_sites = []
service_sensor_sites = []
availablity_nw_tasks = {}
availablity_sv_tasks = {}
# Refreshes the cached device state values once SMPTT aggregation is done.
update_refer = PythonOperator(
    task_id="update_device_states",
    provide_context=False,
    python_callable=update_device_state_values,
    #params={"site":site},
    #redis_hook=redis_hook_4,
    dag=dag_subdag_format,
    queue=celery_queue
)
# Records the last time each device was observed down.
update_last_device_down_task = PythonOperator(
    task_id="update_last_device_down",
    provide_context=False,
    python_callable=update_last_device_down,
    #params={"site":site},
    #redis_hook=redis_hook_4,
    dag=dag_subdag_format,
    queue=celery_queue
)
# One aggregate task trio (service, network, snmptt) per database/machine.
# `databases` entries look like "<prefix>_<machine>" -- TODO confirm format.
for db in databases:
    db_name = db.split("_")[1]
    aggregate_sv_data_task = PythonOperator(
        task_id="aggregate_%s_sv_data"%db_name,
        provide_context=True,
        python_callable=aggregate_sv_data,
        #params={"ip":machine.get('ip'),"port":site.get('port')},
        dag=dag_subdag_format,
        trigger_rule = 'all_done',
        queue=celery_queue
    )
    aggregate_nw_data_task = PythonOperator(
        task_id="aggregate_%s_nw_data"%db_name,
        provide_context=True,
        python_callable=aggregate_nw_data,
        #params={"ip":machine.get('ip'),"port":site.get('port')},
        dag=dag_subdag_format,
        trigger_rule = 'all_done',
        queue=celery_queue,
    )
    aggregate_smptt_task = PythonOperator(
        task_id="aggregate_%s_nw_smptt_data"%db_name,
        provide_context=True,
        python_callable=aggregate_smptt,
        params={"machine":db_name},
        dag=dag_subdag_format,
        trigger_rule = 'all_done',
        queue=celery_queue
    )
    aggregate_nw_tasks[db_name] = aggregate_nw_data_task
    aggregate_sv_tasks[db_name] = aggregate_sv_data_task
    aggregate_nw_smptt_tasks[db_name] = aggregate_smptt_task
    # Device-state bookkeeping runs downstream of SMPTT aggregation.
    aggregate_smptt_task >> update_refer
    aggregate_smptt_task >> update_last_device_down_task
#####################################################################################################################################################
#####################################################################################################################################################
# Build the per-slot network "format" tasks plus their extract sensors, and
# wire them into the event / availability / aggregate tasks created above.
try:
    result_nw_memc_key = []
    for machine in config:
        sites = machine.get('sites')
        for site in sites:
            site_name = site.get('name')
            total_slot = int(Variable.get("nw_%s_slots"%(site_name)))
            machine_name = site_name.split("_")[0] #To make UAT Compatible
            # Sensor waiting on the upstream ETL.NETWORK extract task
            # (created once per site).
            if site_name not in network_sensor_sites:
                nw_extract_task_sensor = ExternalTaskSensor(
                    external_dag_id="ETL.NETWORK",
                    external_task_id="Network_extract_%s"%site_name,
                    task_id="sense_nw_%s_extract_task"%site_name,
                    poke_interval =2,
                    trigger_rule = 'all_done',
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                network_sensor_sites.append(site_name)
            # Per-site event discovery (feeds the machine's SMPTT aggregate).
            if site_name not in event_site_tasks.keys():
                event_nw = PythonOperator(
                    task_id="discover_event_nw_%s_"%(site_name),
                    provide_context=True,
                    python_callable=extract_and_distribute_nw,
                    params={"site":site_name},
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                event_site_tasks[site_name] = event_nw
                event_nw >> aggregate_nw_smptt_tasks.get(machine_name)
            # Per-site network availability store task.
            if site_name not in availablity_nw_tasks.keys():
                availablity_task = PythonOperator(
                    task_id="store_nw_%s"%(site_name),
                    provide_context=True,
                    python_callable=store_availablity_data_in_redis,
                    params={"site":site_name,'dev_type':"network"},
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                availablity_nw_tasks[site_name] = availablity_task
            # Per-site service availability store task (also used by the
            # service wiring section below).
            if site_name not in availablity_sv_tasks.keys():
                availablity_task = PythonOperator(
                    task_id="store_sv_%s"%(site_name),
                    provide_context=True,
                    python_callable=store_availablity_data_in_redis,
                    params={"site":site_name,'dev_type':"service"},
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                availablity_sv_tasks[site_name] = availablity_task
            # One format task per slot; result keys are collected so the
            # aggregate tasks can find them via the Airflow Variable below.
            for slot in range(1,total_slot+1):
                network_tasks = None
                task = ("nw_%s_slot_%s"%(site_name,slot))
                task_name = task.split("_")
                result_list = task.split("_")
                task_name.append("format")
                result_list.append("result")
                name = "_".join(task_name)
                result_nw_memc_key.append("_".join(result_list))
                network_tasks = PythonOperator(
                    task_id="%s"%name,
                    provide_context=True,
                    python_callable=network_format,
                    #params={"previous_all_device_states":previous_all_device_states},
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                try:
                    nw_extract_task_sensor >> network_tasks
                    network_tasks >> event_site_tasks.get(site_name)
                    network_tasks >> availablity_nw_tasks.get(site_name)
                    network_tasks >> aggregate_nw_tasks.get(machine_name)
                except Exception:
                    logging.info("Unable to attach tasks to %s"%site)
                    traceback.print_exc()
    # Publish the list of result keys for aggregate_nw_data to consume.
    Variable.set("network_memc_key",str(result_nw_memc_key))
except Exception:
    logging.error("There is an error while create format tasks for network data")
    traceback.print_exc()
#####################################################################################################################################################
#####################################################################################################################################################
# Build the per-slot service "format" tasks plus their extract sensors, and
# wire them into the aggregate / availability tasks created above.
try:
    result_sv_memc_key = []
    for machine in config:
        sites = machine.get('sites')
        for site in sites:
            site_name = site.get('name')
            total_slot = int(Variable.get("sv_%s_slots"%(site_name)))
            #machine_name = machine.get("Name")
            machine_name = site_name.split("_")[0]
            if site_name not in service_sensor_sites:
                service_format_task_sensor = ExternalTaskSensor( #here task for the same site could be made more than once but it get overriden at the end
                    external_dag_id="ETL.SERVICE",
                    external_task_id="Service_extract_%s"%site_name,
                    task_id="sense_sv_%s_extract_task"%site_name,
                    poke_interval =2,
                    trigger_rule = 'all_done',
                    #sla=timedelta(minutes=1),
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                service_sensor_sites.append(site_name)
            # One format task per slot; result keys are collected so
            # aggregate_sv_data can find them via the Variable below.
            for slot in range(1,total_slot+1):
                task = ("sv_%s_slot_%s"%(site_name,slot))
                task_name = task.split("_")
                result_list = task.split("_")
                task_name.append("format")
                result_list.append("result")
                result_sv_memc_key.append("_".join(result_list))
                name = "_".join(task_name)
                service_tasks = PythonOperator(
                    task_id="%s"%name,
                    provide_context=True,
                    python_callable=service_format,
                    #params={"ip":machine.get('ip'),"port":site.get('port')},
                    dag=dag_subdag_format,
                    queue=celery_queue
                )
                service_format_task_sensor >> service_tasks
                service_tasks >> aggregate_sv_tasks.get(machine_name)
                service_tasks >> availablity_sv_tasks.get(site_name)
    # Publish the list of result keys for aggregate_sv_data to consume.
    Variable.set("service_memc_key",str(result_sv_memc_key))
except Exception:
    logging.error("There is an error while create format tasks for Service data")
    traceback.print_exc()
# End of the subdag factory: hand the fully wired DAG back to the caller.
return dag_subdag_format
|
|
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import flask
import flask_sqlalchemy
import hashlib
import hmac
import json
import logging
import math
import os
import pbkdf2
import re
import sqlalchemy as sqlalchemy_base
import time
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.ext import hybrid
from scoreboard import attachments
from scoreboard import errors
from scoreboard import main
from scoreboard import utils
# Flask application and shared SQLAlchemy handle used by every model below.
app = main.get_app()
db = flask_sqlalchemy.SQLAlchemy(app)
class Team(db.Model):
    """A Team of Players (Team of 1 if not using Teams)."""

    tid = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True, nullable=False, index=True)
    score = db.Column(db.Integer, default=0)  # Denormalized
    last_solve = db.Column(db.DateTime, nullable=True)
    players = db.relationship(
        'User', backref=db.backref('team', lazy='joined'), lazy='dynamic')
    answers = db.relationship('Answer', backref='team', lazy='select',
                              cascade='delete')
    score_history = db.relationship(
        'ScoreHistory',
        backref=db.backref('team', lazy='joined'),
        lazy='select', cascade='delete')

    def __repr__(self):
        return '<Team: %s>' % self.name.encode('utf-8')

    def __str__(self):
        return self.name

    @property
    def code(self):
        # Team invite/verification code: HMAC-SHA256 of the team name under
        # the app secret, truncated to 12 hex characters.
        secret_key = (app.config.get('TEAM_SECRET_KEY') or
                      app.config.get('SECRET_KEY'))
        return hmac.new(utils.to_bytes(secret_key),
                        self.name.encode('utf-8'),
                        hashlib.sha256).hexdigest()[:12]

    @property
    def solves(self):
        # Number of challenges this team has answered.
        return len(self.answers)

    def update_score(self):
        """Recompute the denormalized score; record history on change."""
        old_score = self.score
        self.score = sum(a.current_points for a in self.answers)
        if self.score != old_score:
            # Add score history entry -- at most one per flush, tracked via
            # the transient _pending_sh flag on this instance.
            if not getattr(self, '_pending_sh', False):
                ScoreHistory.add_entry(self)
            self._pending_sh = True

    def can_access(self, user=None):
        """Check if player can access team."""
        user = user or User.current()
        if user.admin:
            return True
        return user.team == self

    @classmethod
    def create(cls, name):
        """Create and session-add a new team (caller commits)."""
        team = cls()
        db.session.add(team)
        team.name = name
        return team

    @classmethod
    def get_by_name(cls, name):
        """Return the team with this name, or None."""
        try:
            return cls.query.filter_by(name=name).one()
        except exc.InvalidRequestError:
            return None

    @classmethod
    def enumerate(cls, with_history=False, above_zero=False):
        """Yield (rank, team) ordered by score desc, then earliest solve."""
        if with_history:
            base = cls.query.options(orm.joinedload(cls.score_history))
        else:
            base = cls.query
        if above_zero:
            base = base.filter(cls.score > 0)
        sorting = base.order_by(cls.score.desc(), cls.last_solve)
        return enumerate(sorting.all(), 1)

    @classmethod
    def all(cls, with_history=True):
        """All teams ordered by name, with answers (and optionally history)
        eagerly loaded."""
        if with_history:
            base = cls.query.options(orm.joinedload(cls.score_history))
        else:
            base = cls.query
        base = base.options(orm.joinedload(cls.answers))
        base = base.order_by(cls.name)
        return base.all()

    @classmethod
    def current(cls):
        """Team of the logged-in user, cached on flask.g.

        Returns None (implicitly) when there is no current user.
        """
        try:
            return flask.g.team
        except AttributeError:
            user = User.current()
            if user:
                flask.g.team = user.team
                return user.team
            else:
                flask.g.team = None
class ScoreHistory(db.Model):
    """Point-in-time score snapshots for a team, keyed by (team, when)."""

    team_tid = db.Column(db.Integer, db.ForeignKey('team.tid'), nullable=False,
                         primary_key=True)
    when = db.Column(db.DateTime, nullable=False, primary_key=True,
                     default=datetime.datetime.utcnow)
    score = db.Column(db.Integer, default=0, nullable=False)

    @classmethod
    def add_entry(cls, team):
        """Snapshot the team's current score.

        Uses session.merge so a second snapshot with the same (team, when)
        primary key updates in place rather than raising.
        """
        entry = cls()
        entry.team = team
        entry.score = team.score
        db.session.merge(entry)
class User(db.Model):
    """A single User for login. Player or admin."""

    uid = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False, index=True)
    nick = db.Column(db.String(80), unique=True, nullable=False, index=True)
    pwhash = db.Column(db.String(48))  # pbkdf2.crypt == 48 bytes
    admin = db.Column(db.Boolean, default=False, index=True)
    team_tid = db.Column(db.Integer, db.ForeignKey('team.tid'))
    create_ip = db.Column(db.String(45))  # max 45 bytes for IPv6
    last_login_ip = db.Column(db.String(45))
    api_key = db.Column(db.String(32), index=True)
    api_key_updated = db.Column(db.DateTime)

    def set_password(self, password):
        """Hash and store the password (pbkdf2)."""
        self.pwhash = pbkdf2.crypt(password)

    def __repr__(self):
        return '<User: %s <%s>>' % (self.nick.encode('utf-8'), self.email)

    def __str__(self):
        return self.nick

    def promote(self):
        """Promote a user to admin.

        Raises AssertionError if the user's team already has solves; deletes
        the team afterwards when this user was its only member.
        """
        empty_team = self.team and set(self.team.players.all()) == set([self])
        if self.team and len(self.team.answers):
            raise AssertionError(
                'Cannot promote player whose team has solved answers!')
        self.admin = True
        team = self.team
        self.team = None
        if empty_team:
            db.session.delete(team)

    def get_token(self, token_type='pwreset', expires=None):
        """Generate a user-specific token.

        The MAC covers uid, expiry, type and the current pwhash, so changing
        the password invalidates outstanding tokens.
        """
        expires = expires or int(time.time()) + 7200  # 2 hours
        token_plain = '%d:%d:%s:%s' % (
            self.uid, expires, token_type, self.pwhash)
        mac = hmac.new(
            utils.to_bytes(app.config.get('SECRET_KEY')),
            utils.to_bytes(token_plain),
            hashlib.sha1).digest()
        token = utils.to_bytes('%d:' % expires) + mac
        return base64.urlsafe_b64encode(token)

    def verify_token(self, token, token_type='pwreset'):
        """Verify a user-specific token.

        Raises errors.ValidationError on malformed, expired or forged
        tokens; returns True on success.
        """
        token = utils.to_bytes(token)
        try:
            decoded = base64.urlsafe_b64decode(token)
            expires, mac = decoded.split(b':', 1)
        except ValueError:
            raise errors.ValidationError('Invalid token.')
        if float(expires) < time.time():
            raise errors.ValidationError('Expired token.')
        expected = self.get_token(token_type=token_type, expires=int(expires))
        # Constant-time comparison of the regenerated token.
        if not utils.compare_digest(expected, token):
            raise errors.ValidationError('Invalid token.')
        return True

    def reset_api_key(self):
        """Reset a user's api key."""
        new_key = os.urandom(16)
        try:
            self.api_key = new_key.hex()  # Python 3
        except AttributeError:
            self.api_key = new_key.encode('hex')  # Python 2
        # Fix: the column is api_key_updated; this previously assigned the
        # non-column attribute api_key_update, so the timestamp was never
        # persisted.
        self.api_key_updated = datetime.datetime.now()

    @classmethod
    def get_by_email(cls, email):
        """Return the user with this email, or None."""
        try:
            return cls.query.filter_by(email=email).one()
        except exc.InvalidRequestError:
            return None

    @classmethod
    def get_by_nick(cls, nick):
        """Return the user with this nick, or None."""
        try:
            return cls.query.filter_by(nick=nick).one()
        except exc.InvalidRequestError:
            return None

    @classmethod
    def get_by_api_key(cls, token):
        """Return the user owning this API key, or None."""
        if not token:
            return None
        try:
            return cls.query.filter_by(api_key=token).one()
        except exc.InvalidRequestError:
            return None

    @classmethod
    def login_user(cls, email, password):
        """Authenticate by email/password; returns the user or None.

        Records the login source IP when called inside a request.
        """
        try:
            user = cls.query.filter_by(email=email).one()
        except exc.InvalidRequestError:
            return None
        if pbkdf2.crypt(password, user.pwhash) == user.pwhash:
            if flask.has_request_context():
                user.last_login_ip = flask.request.remote_addr
                db.session.commit()
            return user
        return None

    @classmethod
    def create(cls, email, nick, password, team=None):
        """Create and session-add a user.

        The very first user in the database is auto-promoted to admin
        instead of joining a team.
        """
        first_user = True if not cls.query.count() else False
        user = cls()
        db.session.add(user)
        user.email = email
        user.nick = nick
        user.set_password(password)
        if not first_user:
            user.team = team
        else:
            user.promote()
        if flask.has_request_context():
            user.create_ip = flask.request.remote_addr
        return user

    @classmethod
    def current(cls):
        """User for the active session, cached on flask.g (None if anon)."""
        try:
            return flask.g.user
        except AttributeError:
            uid = flask.session.get('user')
            if uid is not None:
                # For some reason, .get() does not join!
                user = cls.query.options(orm.joinedload(cls.team)).filter(
                    cls.uid == uid).first()
                flask.g.user = user
                flask.g.team = user.team
                if user:
                    # Bump expiration time on session
                    utils.session_for_user(user)
                return user

    @classmethod
    def all(cls):
        """All users, admins first, then by nick."""
        return cls.query.order_by(
            cls.admin.desc(),
            cls.nick).all()
# Many-to-many join table between Challenge and Tag.
tag_challenge_association = db.Table(
    'tag_chall_association', db.Model.metadata,
    db.Column('challenge_cid', db.BigInteger,
              db.ForeignKey('challenge.cid')),
    db.Column('tag_tagslug', db.String(100),
              db.ForeignKey('tag.tagslug')))
class Tag(db.Model):
    """A Tag to be Applied to Challenges."""

    tagslug = db.Column(db.String(100), unique=True, primary_key=True,
                        nullable=False, index=True)
    name = db.Column(db.String(100), unique=True, nullable=False)
    description = db.Column(db.Text)
    challenges = db.relationship('Challenge',
                                 backref=db.backref('tags', lazy='joined'),
                                 secondary='tag_chall_association',
                                 lazy='joined')

    def __repr__(self):
        return '<Tag: %s/%s>' % (self.tagslug, self.name)

    def slugify(self):
        """Derive the slug (primary key) from name: lowercased words joined
        by hyphens."""
        self.tagslug = '-'.join(w.lower() for w in re.split(r'\W+', self.name))

    @classmethod
    def create(cls, name, description):
        """Create, slugify and session-add a tag (caller commits)."""
        tag = cls()
        tag.name = name
        tag.description = description
        tag.slugify()
        db.session.add(tag)
        return tag

    def get_challenges(self, unlocked_only=True, sort=True, force_query=False):
        """Challenges carrying this tag.

        Uses the already-loaded relationship when available; otherwise (or
        with force_query) issues a fresh query.
        """
        if (force_query or
                'challenges' in sqlalchemy_base.inspect(self).unloaded):
            return self._get_challenges_query(
                unlocked_only=unlocked_only, sort=sort)
        return self._get_challenges_cached(
            unlocked_only=unlocked_only, sort=sort)

    def _get_challenges_cached(self, unlocked_only=True, sort=True):
        # In-memory filter/sort over the loaded relationship.
        challenges = self.challenges
        if unlocked_only:
            challenges = [c for c in challenges if c.unlocked]
        if sort:
            challenges = sorted(challenges, key=lambda c: c.weight)
        return challenges

    def _get_challenges_query(self, unlocked_only=True, sort=True):
        # Database-side equivalent of _get_challenges_cached; returns a
        # query object rather than a list.
        challenges = Challenge.query.filter(
            Challenge.tags.any(tagslug=self.tagslug))
        if unlocked_only:
            unlocked_identity = True
            challenges = challenges.filter(
                Challenge.unlocked == unlocked_identity)
        if not sort:
            return challenges
        return challenges.order_by(Challenge.weight)
class Challenge(db.Model):
    """A single challenge to be played."""

    cid = db.Column(db.BigInteger, primary_key=True, autoincrement=False)
    name = db.Column(db.String(100), nullable=False)
    description = db.Column(db.Text, nullable=False)
    points = db.Column(db.Integer, nullable=False)
    min_points = db.Column(db.Integer, nullable=True)
    validator = db.Column(db.String(24), nullable=False,
                          default='static_pbkdf2')
    answer_hash = db.Column(db.String(48))  # Protect answers
    unlocked = db.Column(db.Boolean, default=False)
    weight = db.Column(db.Integer, nullable=False)  # Order for display
    prerequisite = db.Column(db.Text, nullable=False)  # Prerequisite Metadata
    cur_points = db.Column(db.Integer, nullable=True)
    answers = db.relationship('Answer',
                              backref=db.backref('challenge', lazy='joined'),
                              lazy='select')

    def __repr__(self):
        return '<Challenge: %d/%s>' % (self.cid, self.name)

    def is_answered(self, team=None, answers=None):
        """Whether `team` (default: current team) has solved this challenge.

        Pass `answers` (an iterable of Answer) to check in memory and avoid
        a database round-trip.
        """
        if team is None:
            team = Team.current()
        if not team:
            return False
        if answers is not None:
            for a in answers:
                if a.team_tid == team.tid and a.challenge_cid == self.cid:
                    return True
            return False
        return bool(Answer.query.filter(Answer.challenge == self,
                                        Answer.team == team).count())

    @hybrid.hybrid_property
    def solves(self):
        # Cached count of correct answers; invalidated via `del c._solves`.
        try:
            return self._solves
        except AttributeError:
            self._solves = len(self.answers)
            return self._solves

    @solves.expression
    def solves(cls):
        return func.count(cls.answers)

    @property
    def answered(self):
        # True if the current team has solved this challenge.
        if not Team.current():
            return False
        return self.is_answered(answers=Team.current().answers)

    @property
    def teaser(self):
        # Locked-but-visible teaser, only when TEASE_HIDDEN is configured.
        if not app.config.get('TEASE_HIDDEN'):
            return False
        if not Team.current():
            return False
        return not self.unlocked_for_team(Team.current())

    @property
    def current_points(self):
        """Current point value under the configured SCORING mode.

        'plain' keeps the static value; 'progressive' decays it towards
        min_points as solves accumulate.  Updates cur_points as a side
        effect.
        """
        mode = app.config.get('SCORING', 'plain')
        value = self.points
        if mode == 'plain':
            self.cur_points = value
        elif mode == 'progressive':
            speed = app.config.get('SCORING_SPEED', 12)
            min_points = 0 if self.min_points is None else self.min_points
            self.cur_points = self.log_score(
                value, min_points, speed, self.solves)
        return self.cur_points

    @staticmethod
    def log_score(max_points, min_points, midpoint, solves):
        """Logistic score decay from max_points towards min_points."""
        # Algorithm designed by symmetric
        # logit(u, l, m, s, x) =
        #   (u - l) * ((1.0 / (1.0 + exp((1.0/s) * (x - m)))) /
        #   (1.0 / (1.0 + exp((1.0/s) * (1 - m))))) + l
        if solves == 0:
            return max_points

        def log_func(midpoint, solves):
            spread = midpoint / 3.0
            delta = solves - midpoint
            return (
                1.0 / (1.0 + math.exp((1.0 / spread) * delta)))

        max_delta = (max_points - min_points)
        base_point = log_func(midpoint, 1.0)
        cur_point = log_func(midpoint, solves)
        return math.ceil(max_delta * cur_point / base_point + min_points)

    def unlocked_for_team(self, team):
        """Checks if prerequisites are met for this team."""
        if not self.unlocked:
            return False
        if not self.prerequisite:
            return True
        try:
            prereq = json.loads(self.prerequisite)
        except ValueError:
            logging.error('Unable to parse prerequisite data for challenge %d',
                          self.cid)
            return False
        if prereq['type'] == 'None':
            return True
        if not team:
            return False
        # Dispatch to prereq_<type> evaluator methods.
        try:
            eval_func = getattr(self, 'prereq_' + prereq['type'])
        except AttributeError:
            logging.error(
                'Could not find prerequisite function for challenge %d',
                self.cid)
            return False
        return eval_func(prereq, team)

    def prereq_solved(self, prereq, team):
        """Require that another challenge be solved first."""
        chall = Challenge.query.get(int(prereq['challenge']))
        if not chall:
            logging.error('Challenge %d prerequisite depends on '
                          'non-existent challenge %d.', self.cid,
                          int(prereq['challenge']))
            return False
        return chall.is_answered(team=team, answers=team.answers)

    @classmethod
    def create(cls, name, description, points, answer, unlocked=False,
               validator='static_pbkdf2'):
        """Create and session-add a challenge with the next display weight."""
        challenge = cls()
        challenge.name = name
        challenge.description = description
        challenge.cid = utils.generate_id()
        challenge.points = points
        challenge.answer_hash = answer
        challenge.unlocked = unlocked
        challenge.validator = validator
        weight = db.session.query(db.func.max(Challenge.weight)).scalar()
        challenge.weight = (weight + 1) if weight else 1
        challenge.prerequisite = ''
        db.session.add(challenge)
        return challenge

    def add_tags(self, tags):
        """Append each Tag in `tags` to this challenge."""
        for tag in tags:
            self.tags.append(tag)

    def delete(self):
        db.session.delete(self)

    def set_attachments(self, attachments):
        """Replace the attachment set from a list of {'aid', 'filename'}
        dicts, removing attachments no longer referenced."""
        aid_set = set()
        old_attachments = list(self.attachments)
        for a in attachments:
            aid_set.add(a['aid'])
            attachment = Attachment.query.get(a['aid'])
            if not attachment:
                logging.warning(
                    'Trying to add attachment %s that does not exist: %s' %
                    (a['filename'], a['aid']))
                # Fix: previously fell through and appended None to the
                # relationship; skip missing attachments like set_challenges
                # does for missing challenges.
                continue
            self.attachments.append(attachment)
        for a in old_attachments:
            if a.aid not in aid_set:
                self.attachments.remove(a)

    def set_prerequisite(self, prerequisite):
        """Store prerequisite metadata as JSON ('' when none)."""
        if not prerequisite:
            self.prerequisite = ''
            return
        if 'type' in prerequisite and prerequisite['type'] == 'None':
            self.prerequisite = ''
        else:
            self.prerequisite = json.dumps(prerequisite)

    def set_tags(self, tags):
        """Replace the tag set from a list of {'tagslug'} dicts, removing
        tags no longer referenced."""
        tag_set = set()
        old_tags = list(self.tags)
        for t in tags:
            tag_set.add(t['tagslug'])
            tag = Tag.query.get(t['tagslug'])
            if tag:
                self.tags.append(tag)
            else:
                app.logger.warning('Skipping tag %s which does not exist' %
                                   t['tagslug'])
        for t in old_tags:
            if t.tagslug not in tag_set:
                self.tags.remove(t)

    def update_answers(self, exclude_team=None):
        """Update answers for variable scoring."""
        mode = app.config.get('SCORING')
        if mode == 'plain':
            return
        if mode == 'progressive':
            # Every solver's score changes when the challenge value decays.
            for a in self.answers:
                if a.team == exclude_team:
                    continue
                a.team.update_score()

    @classmethod
    def get_joined_query(cls):
        """Get a prejoined-query with answers and teams."""
        return cls.query.options(
            orm.joinedload(cls.answers).joinedload(Answer.team))
# Many-to-many join table between Challenge and Attachment.
attach_challenge_association = db.Table(
    'attach_chall_association', db.Model.metadata,
    db.Column(
        'challenge_cid', db.BigInteger,
        db.ForeignKey('challenge.cid')),
    db.Column(
        'attachment_aid', db.String(64),
        db.ForeignKey('attachment.aid')))
class Attachment(db.Model):
    """Attachment to a challenge."""

    aid = db.Column(db.String(64), primary_key=True)
    filename = db.Column(db.String(100), nullable=False)
    content_type = db.Column(db.String(100))
    storage_path = db.Column(db.String(256))
    challenges = db.relationship(
        'Challenge', backref=db.backref('attachments', lazy='joined'),
        secondary='attach_chall_association', lazy='joined')

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return '<Attachment %s>' % self.aid

    def delete(self, from_disk=True):
        """Delete the row; optionally delete the stored file too (failures
        to remove the file are logged, not raised)."""
        if from_disk:
            try:
                attachments.backend.delete(self)
            except IOError as ex:
                app.logger.exception("Couldn't delete: %s", str(ex))
        db.session.delete(self)

    def set_challenges(self, challenges):
        """Replace the set of challenges referencing this attachment from a
        list of {'cid'} dicts; unknown cids are logged and skipped."""
        cid_set = set()
        old_challenges = list(self.challenges)
        for a in challenges:
            cid_set.add(a['cid'])
            challenge = Challenge.query.get(a['cid'])
            if not challenge:
                app.logger.warning('No challenge found with cid %d' % a['cid'])
                continue
            self.challenges.append(challenge)
        for a in old_challenges:
            if a.cid not in cid_set:
                self.challenges.remove(a)

    @classmethod
    def create(cls, aid, filename, content_type):
        """Create and session-add an attachment record (caller commits)."""
        attachment = cls()
        attachment.aid = aid
        attachment.filename = filename
        attachment.content_type = content_type
        db.session.add(attachment)
        return attachment
class Answer(db.Model):
    """Log a successfully submitted answer."""

    challenge_cid = db.Column(
        db.BigInteger, db.ForeignKey('challenge.cid'), primary_key=True)
    team_tid = db.Column(
        db.Integer, db.ForeignKey('team.tid'), primary_key=True)
    timestamp = db.Column(db.DateTime)
    answer_hash = db.Column(db.String(48))  # Store hash of team+answer
    submit_ip = db.Column(db.String(45))  # Source IP for submission
    first_blood = db.Column(db.Integer, default=0, nullable=False)

    @classmethod
    def create(cls, challenge, team, answer_text):
        """Record a solve; awards the first-blood bonus to the first solver
        of sufficiently valuable challenges."""
        answer = cls()
        answer.first_blood = 0
        if not challenge.solves:
            # Bonus only for challenges worth at least FIRST_BLOOD_MIN.
            if app.config.get('FIRST_BLOOD_MIN', 0) <= challenge.points:
                answer.first_blood = app.config.get('FIRST_BLOOD', 0)
        answer.challenge = challenge
        answer.team = team
        answer.timestamp = datetime.datetime.utcnow()
        if answer_text:
            # Store a salted hash, never the plaintext answer.
            answer.answer_hash = pbkdf2.crypt(team.name + answer_text)
        if flask.request:
            answer.submit_ip = flask.request.remote_addr
        db.session.add(answer)
        # remove cache here (challenge.solves was read above, so the cached
        # _solves attribute is guaranteed to exist)
        del challenge._solves
        return answer

    @property
    def current_points(self):
        # Solves submitted after game end are worth nothing.
        if utils.GameTime.state(self.timestamp) == "AFTER":
            return 0
        return self.challenge.current_points + self.first_blood
class News(db.Model):
    """News updates & broadcasts."""

    NEWS_TYPES = [
        'Broadcast',  # Admin broadcast
        'Unicast',  # Team-specific update
    ]
    nid = db.Column(db.Integer, primary_key=True)
    news_type = db.Column(db.Enum(*NEWS_TYPES), nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    author = db.Column(db.String(100))
    message = db.Column(db.Text)
    audience_team_tid = db.Column(db.Integer, db.ForeignKey('team.tid'))
    audience_team = db.relationship('Team')

    @classmethod
    def broadcast(cls, author, message):
        """Create a broadcast item visible to everyone (caller commits)."""
        news = cls(
            news_type='Broadcast',
            author=author,
            message=message)
        db.session.add(news)
        return news

    @classmethod
    def game_broadcast(cls, author=None, message=None):
        """System broadcast; silently dropped when the game is not open."""
        if message is None:
            raise ValueError('Missing message.')
        author = author or app.config.get('SYSTEM_NAME')
        if not utils.GameTime.open():
            return
        return cls.broadcast(author, message)

    @classmethod
    def unicast(cls, team, author, message):
        """Create a news item targeted at one team (Team object or tid)."""
        news = cls(
            news_type='Unicast',
            author=author,
            message=message)
        if isinstance(team, Team):
            news.audience_team = team
        elif isinstance(team, int):
            news.audience_team_tid = team
        else:
            raise ValueError('Invalid value for team.')
        db.session.add(news)
        return news

    @classmethod
    def for_team(cls, team, limit=10):
        """Latest items visible to `team`: broadcasts plus its unicasts."""
        return cls.query.filter(
            ((cls.news_type != 'Unicast') |
             (cls.audience_team == team))
        ).order_by(cls.timestamp.desc()).limit(limit)

    @classmethod
    def for_public(cls, limit=10):
        """Latest broadcast-only items for anonymous viewers."""
        return cls.query.filter(
            cls.news_type != 'Unicast'
        ).order_by(cls.timestamp.desc()).limit(limit)
class Page(db.Model):
    """Represent static pages to be rendered with Markdown."""

    # URL path of the page (primary key).
    path = db.Column(db.String(100), primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # Raw Markdown source of the page body.
    contents = db.Column(db.Text, nullable=False)
class NonceFlagUsed(db.Model):
    """Single-time used flags."""

    # (challenge, nonce) is the primary key: each nonce is redeemable once
    # per challenge.
    challenge_cid = db.Column(db.BigInteger, db.ForeignKey('challenge.cid'),
                              primary_key=True)
    nonce = db.Column(db.BigInteger, primary_key=True)
    # Team that consumed the nonce.
    team_tid = db.Column(db.Integer, db.ForeignKey('team.tid'))

    @classmethod
    def create(cls, challenge, nonce, team):
        """Record that `team` redeemed `nonce` for `challenge` (caller
        commits)."""
        entity = cls()
        entity.challenge_cid = challenge.cid
        entity.nonce = nonce
        entity.team_tid = team.tid
        db.session.add(entity)
# Shortcut for committing
def commit():
    """Commit the active SQLAlchemy session."""
    db.session.commit()
|
|
from collections.abc import Iterable
import numpy as np
from robosuite.models.objects import CompositeObject
from robosuite.utils.mjcf_utils import BLUE, CYAN, GREEN, RED, CustomMaterial, add_to_dict
class HammerObject(CompositeObject):
    """
    Generates a Hammer object with a cylindrical or box-shaped handle, cubic head, cylindrical face and triangular claw
    (used in Handover task)

    Args:
        name (str): Name of this Hammer object

        handle_shape (str): Either "box", for a box-shaped handle, or "cylinder", for a cylindrically-shaped handle

        handle_radius (float or 2-array of float): Either specific or range of values to draw randomly from
            uniformly for the handle radius

        handle_length (float or 2-array of float): Either specific or range of values to draw randomly from
            uniformly for the handle length

        handle_density (float or 2-array of float): Either specific or range of values to draw randomly from
            uniformly for the handle density (in SI units). Note that this value is scaled by head_density_ratio
            for the hammer head

        handle_friction (float or 2-array of float): Either specific or range of values to draw randomly from
            uniformly for the handle friction. Note that Mujoco default values are used for the head

        head_density_ratio (float): Ratio of density of handle to head (including face and claw)

        use_texture (bool): If true, geoms will be defined by realistic textures and rgba values will be ignored

        rgba_handle (4-array or None): If specified, sets handle rgba values

        rgba_head (4-array or None): If specified, sets head rgba values

        rgba_face (4-array or None): If specified, sets face rgba values

        rgba_claw (4-array or None): If specified, sets claw rgba values

    Raises:
        ValueError: [Invalid handle shape]
    """

    def __init__(
        self,
        name,
        handle_shape="box",
        handle_radius=(0.015, 0.02),
        handle_length=(0.1, 0.25),
        handle_density=(100, 250),
        handle_friction=(3.0, 5.0),
        head_density_ratio=2.0,
        use_texture=True,
        rgba_handle=None,
        rgba_head=None,
        rgba_face=None,
        rgba_claw=None,
    ):
        # Set name
        self._name = name

        # Set handle type and density ratio
        self.handle_shape = handle_shape
        self.head_density_ratio = head_density_ratio

        # Set radius and length ranges.  Scalars are promoted to degenerate
        # [x, x] ranges so the uniform sampling below always sees a 2-range.
        self.handle_radius_range = handle_radius if isinstance(handle_radius, Iterable) else [handle_radius] * 2
        self.handle_length_range = handle_length if isinstance(handle_length, Iterable) else [handle_length] * 2
        self.handle_density_range = handle_density if isinstance(handle_density, Iterable) else [handle_density] * 2
        self.handle_friction_range = handle_friction if isinstance(handle_friction, Iterable) else [handle_friction] * 2

        # Sample actual radius and length, as well as head half-size
        self.handle_radius = np.random.uniform(self.handle_radius_range[0], self.handle_radius_range[1])
        self.handle_length = np.random.uniform(self.handle_length_range[0], self.handle_length_range[1])
        self.handle_density = np.random.uniform(self.handle_density_range[0], self.handle_density_range[1])
        self.handle_friction = np.random.uniform(self.handle_friction_range[0], self.handle_friction_range[1])
        # Head half-size is tied to (up to 20% larger than) the sampled handle radius.
        self.head_halfsize = np.random.uniform(self.handle_radius, self.handle_radius * 1.2)

        # Initialize RGBA values and texture flag
        self.use_texture = use_texture
        self.rgba_handle = rgba_handle if rgba_handle is not None else RED
        self.rgba_head = rgba_head if rgba_head is not None else CYAN
        self.rgba_face = rgba_face if rgba_face is not None else BLUE
        self.rgba_claw = rgba_claw if rgba_claw is not None else GREEN

        # Create dictionary of values to create geoms for composite object and run super init
        super().__init__(**self._get_geom_attrs())

        # Define materials we want to use for this object
        tex_attrib = {
            "type": "cube",
        }
        mat_attrib = {
            "texrepeat": "3 3",
            "specular": "0.4",
            "shininess": "0.1",
        }
        metal = CustomMaterial(
            texture="SteelScratched",
            tex_name="metal",
            mat_name="metal_mat",
            tex_attrib=tex_attrib,
            mat_attrib=mat_attrib,
        )
        wood = CustomMaterial(
            texture="WoodLight",
            tex_name="wood",
            mat_name="wood_mat",
            tex_attrib=tex_attrib,
            mat_attrib=mat_attrib,
        )

        # Append materials to object
        self.append_material(metal)
        self.append_material(wood)

    def _get_geom_attrs(self):
        """
        Creates geom elements that will be passed to superclass CompositeObject constructor

        Returns:
            dict: args to be used by CompositeObject to generate geoms
        """
        # Overall bounding box: x spans the head assembly, y the head width,
        # z the handle plus the head block.
        full_size = np.array(
            (3.2 * self.head_halfsize, self.head_halfsize, self.handle_length + 2 * self.head_halfsize)
        )
        # Initialize dict of obj args that we'll pass to the CompositeObject constructor
        base_args = {
            "total_size": full_size / 2.0,
            "name": self.name,
            "locations_relative_to_center": True,
            "obj_types": "all",
        }
        obj_args = {}

        # Add handle component
        assert self.handle_shape in {
            "cylinder",
            "box",
        }, "Error loading hammer: Handle type must either be 'box' or 'cylinder', got {}.".format(self.handle_shape)
        add_to_dict(
            dic=obj_args,
            geom_types="cylinder" if self.handle_shape == "cylinder" else "box",
            geom_locations=(0, 0, 0),
            geom_quats=(1, 0, 0, 0),  # identity orientation
            geom_sizes=np.array([self.handle_radius, self.handle_length / 2.0])
            if self.handle_shape == "cylinder"
            else np.array([self.handle_radius, self.handle_radius, self.handle_length / 2.0]),
            geom_names="handle",
            geom_rgbas=None if self.use_texture else self.rgba_handle,
            geom_materials="wood_mat" if self.use_texture else None,
            geom_frictions=(self.handle_friction, 0.005, 0.0001),
            density=self.handle_density,
        )

        # Add head component (box sitting at the top of the handle)
        add_to_dict(
            dic=obj_args,
            geom_types="box",
            geom_locations=(0, 0, self.handle_length / 2.0 + self.head_halfsize),
            geom_quats=(1, 0, 0, 0),
            geom_sizes=np.array([self.head_halfsize * 2, self.head_halfsize, self.head_halfsize]),
            geom_names="head",
            geom_rgbas=None if self.use_texture else self.rgba_head,
            geom_materials="metal_mat" if self.use_texture else None,
            geom_frictions=None,
            density=self.handle_density * self.head_density_ratio,
        )

        # Add neck component
        add_to_dict(
            dic=obj_args,
            geom_types="cylinder",
            geom_locations=(self.head_halfsize * 2.2, 0, self.handle_length / 2.0 + self.head_halfsize),
            # ~90 degree rotation about y (assuming (w, x, y, z) quat convention)
            geom_quats=(0.707106, 0, 0.707106, 0),
            geom_sizes=np.array([self.head_halfsize * 0.8, self.head_halfsize * 0.2]),
            geom_names="neck",
            geom_rgbas=None if self.use_texture else self.rgba_face,
            geom_materials="metal_mat" if self.use_texture else None,
            geom_frictions=None,
            density=self.handle_density * self.head_density_ratio,
        )

        # Add face component
        add_to_dict(
            dic=obj_args,
            geom_types="cylinder",
            geom_locations=(self.head_halfsize * 2.8, 0, self.handle_length / 2.0 + self.head_halfsize),
            geom_quats=(0.707106, 0, 0.707106, 0),
            geom_sizes=np.array([self.head_halfsize, self.head_halfsize * 0.4]),
            geom_names="face",
            geom_rgbas=None if self.use_texture else self.rgba_face,
            geom_materials="metal_mat" if self.use_texture else None,
            geom_frictions=None,
            density=self.handle_density * self.head_density_ratio,
        )

        # Add claw component (box tilted ~45 degrees on the opposite side of the head)
        add_to_dict(
            dic=obj_args,
            geom_types="box",
            geom_locations=(-self.head_halfsize * 2, 0, self.handle_length / 2.0 + self.head_halfsize),
            geom_quats=(0.9238795, 0, 0.3826834, 0),
            geom_sizes=np.array([self.head_halfsize * 0.7072, self.head_halfsize * 0.95, self.head_halfsize * 0.7072]),
            geom_names="claw",
            geom_rgbas=None if self.use_texture else self.rgba_claw,
            geom_materials="metal_mat" if self.use_texture else None,
            geom_frictions=None,
            density=self.handle_density * self.head_density_ratio,
        )

        # Add back in base args
        obj_args.update(base_args)

        # Return this dict
        return obj_args

    @property
    def init_quat(self):
        """
        Generates a new random orientation for the hammer

        Returns:
            np.array: (x, y, z, w) quaternion orientation for the hammer
        """
        # Randomly sample between +/- flip (such that the hammer head faces one way or the other)
        return np.array([0.5, -0.5, 0.5, -0.5]) if np.random.rand() >= 0.5 else np.array([-0.5, -0.5, -0.5, -0.5])

    @property
    def handle_geoms(self):
        """
        Returns:
            list of str: geom names corresponding to hammer handle
        """
        return self.correct_naming(["handle"])

    @property
    def head_geoms(self):
        """
        Returns:
            list of str: geom names corresponding to hammer head
        """
        return self.correct_naming(["head"])

    @property
    def face_geoms(self):
        """
        Returns:
            list of str: geom names corresponding to hammer face
        """
        return self.correct_naming(["neck", "face"])

    @property
    def claw_geoms(self):
        """
        Returns:
            list of str: geom names corresponding to hammer claw
        """
        return self.correct_naming(["claw"])

    @property
    def all_geoms(self):
        """
        Returns:
            list of str: geom names corresponding to all hammer components
        """
        return self.handle_geoms + self.head_geoms + self.face_geoms + self.claw_geoms

    @property
    def bottom_offset(self):
        """
        Returns:
            np.array: (x, y, z) offset to the object's lowest point (half the handle thickness below center)
        """
        return np.array([0, 0, -self.handle_radius])

    @property
    def top_offset(self):
        """
        Returns:
            np.array: (x, y, z) offset to the object's highest point
        """
        return np.array([0, 0, self.handle_radius])

    @property
    def horizontal_radius(self):
        """
        Returns:
            float: approximate horizontal bounding radius of the hammer
        """
        return self.head_halfsize + 0.5 * self.handle_length
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './examples/VideoTemplate.ui'
#
# Created: Mon Feb 17 20:39:30 2014
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    """pyside-uic generated UI for the video benchmark example window.

    Machine-generated from VideoTemplate.ui -- do not edit by hand;
    regenerate from the .ui file instead.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree and layouts on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(695, 798)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_2 = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.downsampleCheck = QtGui.QCheckBox(self.centralwidget)
        self.downsampleCheck.setObjectName("downsampleCheck")
        self.gridLayout_2.addWidget(self.downsampleCheck, 8, 0, 1, 2)
        self.scaleCheck = QtGui.QCheckBox(self.centralwidget)
        self.scaleCheck.setObjectName("scaleCheck")
        self.gridLayout_2.addWidget(self.scaleCheck, 4, 0, 1, 1)
        # Radio buttons + stacked widget choosing the image display backend.
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.rawRadio = QtGui.QRadioButton(self.centralwidget)
        self.rawRadio.setObjectName("rawRadio")
        self.gridLayout.addWidget(self.rawRadio, 3, 0, 1, 1)
        self.gfxRadio = QtGui.QRadioButton(self.centralwidget)
        self.gfxRadio.setChecked(True)
        self.gfxRadio.setObjectName("gfxRadio")
        self.gridLayout.addWidget(self.gfxRadio, 2, 0, 1, 1)
        self.stack = QtGui.QStackedWidget(self.centralwidget)
        self.stack.setObjectName("stack")
        # Page 1: GraphicsView backend.
        self.page = QtGui.QWidget()
        self.page.setObjectName("page")
        self.gridLayout_3 = QtGui.QGridLayout(self.page)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.graphicsView = GraphicsView(self.page)
        self.graphicsView.setObjectName("graphicsView")
        self.gridLayout_3.addWidget(self.graphicsView, 0, 0, 1, 1)
        self.stack.addWidget(self.page)
        # Page 2: RawImageWidget backend.
        self.page_2 = QtGui.QWidget()
        self.page_2.setObjectName("page_2")
        self.gridLayout_4 = QtGui.QGridLayout(self.page_2)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.rawImg = RawImageWidget(self.page_2)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.rawImg.sizePolicy().hasHeightForWidth())
        self.rawImg.setSizePolicy(sizePolicy)
        self.rawImg.setObjectName("rawImg")
        self.gridLayout_4.addWidget(self.rawImg, 0, 0, 1, 1)
        self.stack.addWidget(self.page_2)
        # Page 3: OpenGL RawImageGLWidget backend.
        self.page_3 = QtGui.QWidget()
        self.page_3.setObjectName("page_3")
        self.gridLayout_5 = QtGui.QGridLayout(self.page_3)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.rawGLImg = RawImageGLWidget(self.page_3)
        self.rawGLImg.setObjectName("rawGLImg")
        self.gridLayout_5.addWidget(self.rawGLImg, 0, 0, 1, 1)
        self.stack.addWidget(self.page_3)
        self.gridLayout.addWidget(self.stack, 0, 0, 1, 1)
        self.rawGLRadio = QtGui.QRadioButton(self.centralwidget)
        self.rawGLRadio.setObjectName("rawGLRadio")
        self.gridLayout.addWidget(self.rawGLRadio, 4, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 4)
        # Data-type selector (items populated in retranslateUi).
        self.dtypeCombo = QtGui.QComboBox(self.centralwidget)
        self.dtypeCombo.setObjectName("dtypeCombo")
        self.dtypeCombo.addItem("")
        self.dtypeCombo.addItem("")
        self.dtypeCombo.addItem("")
        self.gridLayout_2.addWidget(self.dtypeCombo, 3, 2, 1, 1)
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 3, 0, 1, 1)
        self.rgbLevelsCheck = QtGui.QCheckBox(self.centralwidget)
        self.rgbLevelsCheck.setObjectName("rgbLevelsCheck")
        self.gridLayout_2.addWidget(self.rgbLevelsCheck, 4, 1, 1, 1)
        # Min/max level spinbox rows (rows 4-6 of the outer grid).
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.minSpin2 = SpinBox(self.centralwidget)
        self.minSpin2.setEnabled(False)
        self.minSpin2.setObjectName("minSpin2")
        self.horizontalLayout_2.addWidget(self.minSpin2)
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout_2.addWidget(self.label_3)
        self.maxSpin2 = SpinBox(self.centralwidget)
        self.maxSpin2.setEnabled(False)
        self.maxSpin2.setObjectName("maxSpin2")
        self.horizontalLayout_2.addWidget(self.maxSpin2)
        self.gridLayout_2.addLayout(self.horizontalLayout_2, 5, 2, 1, 1)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.minSpin1 = SpinBox(self.centralwidget)
        self.minSpin1.setObjectName("minSpin1")
        self.horizontalLayout.addWidget(self.minSpin1)
        self.label_2 = QtGui.QLabel(self.centralwidget)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout.addWidget(self.label_2)
        self.maxSpin1 = SpinBox(self.centralwidget)
        self.maxSpin1.setObjectName("maxSpin1")
        self.horizontalLayout.addWidget(self.maxSpin1)
        self.gridLayout_2.addLayout(self.horizontalLayout, 4, 2, 1, 1)
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.minSpin3 = SpinBox(self.centralwidget)
        self.minSpin3.setEnabled(False)
        self.minSpin3.setObjectName("minSpin3")
        self.horizontalLayout_3.addWidget(self.minSpin3)
        self.label_4 = QtGui.QLabel(self.centralwidget)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.label_4.setObjectName("label_4")
        self.horizontalLayout_3.addWidget(self.label_4)
        self.maxSpin3 = SpinBox(self.centralwidget)
        self.maxSpin3.setEnabled(False)
        self.maxSpin3.setObjectName("maxSpin3")
        self.horizontalLayout_3.addWidget(self.maxSpin3)
        self.gridLayout_2.addLayout(self.horizontalLayout_3, 6, 2, 1, 1)
        self.lutCheck = QtGui.QCheckBox(self.centralwidget)
        self.lutCheck.setObjectName("lutCheck")
        self.gridLayout_2.addWidget(self.lutCheck, 7, 0, 1, 1)
        self.alphaCheck = QtGui.QCheckBox(self.centralwidget)
        self.alphaCheck.setObjectName("alphaCheck")
        self.gridLayout_2.addWidget(self.alphaCheck, 7, 1, 1, 1)
        self.gradient = GradientWidget(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.gradient.sizePolicy().hasHeightForWidth())
        self.gradient.setSizePolicy(sizePolicy)
        self.gradient.setObjectName("gradient")
        self.gridLayout_2.addWidget(self.gradient, 7, 2, 1, 2)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem, 3, 3, 1, 1)
        self.fpsLabel = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.fpsLabel.setFont(font)
        self.fpsLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.fpsLabel.setObjectName("fpsLabel")
        self.gridLayout_2.addWidget(self.fpsLabel, 0, 0, 1, 4)
        self.rgbCheck = QtGui.QCheckBox(self.centralwidget)
        self.rgbCheck.setObjectName("rgbCheck")
        self.gridLayout_2.addWidget(self.rgbCheck, 3, 1, 1, 1)
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)
        # Frame-count / width / height spinboxes.
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.framesSpin = QtGui.QSpinBox(self.centralwidget)
        self.framesSpin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.framesSpin.setProperty("value", 10)
        self.framesSpin.setObjectName("framesSpin")
        self.horizontalLayout_4.addWidget(self.framesSpin)
        self.widthSpin = QtGui.QSpinBox(self.centralwidget)
        self.widthSpin.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
        self.widthSpin.setMaximum(10000)
        self.widthSpin.setProperty("value", 512)
        self.widthSpin.setObjectName("widthSpin")
        self.horizontalLayout_4.addWidget(self.widthSpin)
        self.heightSpin = QtGui.QSpinBox(self.centralwidget)
        self.heightSpin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.heightSpin.setMaximum(10000)
        self.heightSpin.setProperty("value", 512)
        self.heightSpin.setObjectName("heightSpin")
        self.horizontalLayout_4.addWidget(self.heightSpin)
        self.gridLayout_2.addLayout(self.horizontalLayout_4, 2, 1, 1, 2)
        self.sizeLabel = QtGui.QLabel(self.centralwidget)
        self.sizeLabel.setText("")
        self.sizeLabel.setObjectName("sizeLabel")
        self.gridLayout_2.addWidget(self.sizeLabel, 2, 3, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        self.stack.setCurrentIndex(2)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) strings."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.downsampleCheck.setText(QtGui.QApplication.translate("MainWindow", "Auto downsample", None, QtGui.QApplication.UnicodeUTF8))
        self.scaleCheck.setText(QtGui.QApplication.translate("MainWindow", "Scale Data", None, QtGui.QApplication.UnicodeUTF8))
        self.rawRadio.setText(QtGui.QApplication.translate("MainWindow", "RawImageWidget", None, QtGui.QApplication.UnicodeUTF8))
        self.gfxRadio.setText(QtGui.QApplication.translate("MainWindow", "GraphicsView + ImageItem", None, QtGui.QApplication.UnicodeUTF8))
        self.rawGLRadio.setText(QtGui.QApplication.translate("MainWindow", "RawGLImageWidget", None, QtGui.QApplication.UnicodeUTF8))
        self.dtypeCombo.setItemText(0, QtGui.QApplication.translate("MainWindow", "uint8", None, QtGui.QApplication.UnicodeUTF8))
        self.dtypeCombo.setItemText(1, QtGui.QApplication.translate("MainWindow", "uint16", None, QtGui.QApplication.UnicodeUTF8))
        self.dtypeCombo.setItemText(2, QtGui.QApplication.translate("MainWindow", "float", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("MainWindow", "Data type", None, QtGui.QApplication.UnicodeUTF8))
        self.rgbLevelsCheck.setText(QtGui.QApplication.translate("MainWindow", "RGB", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
        self.lutCheck.setText(QtGui.QApplication.translate("MainWindow", "Use Lookup Table", None, QtGui.QApplication.UnicodeUTF8))
        self.alphaCheck.setText(QtGui.QApplication.translate("MainWindow", "alpha", None, QtGui.QApplication.UnicodeUTF8))
        self.fpsLabel.setText(QtGui.QApplication.translate("MainWindow", "FPS", None, QtGui.QApplication.UnicodeUTF8))
        self.rgbCheck.setText(QtGui.QApplication.translate("MainWindow", "RGB", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Image size", None, QtGui.QApplication.UnicodeUTF8))
from pyqtgraph.widgets.RawImageWidget import RawImageGLWidget, RawImageWidget
from pyqtgraph import GradientWidget, SpinBox, GraphicsView
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys, os
import time
from pynfc import *
from threading import Thread
# ANSI escape codes for colored terminal status output.
RED='\033[01;31m'
GREEN='\033[01;32m'
NC='\033[0m' # No Color
import os
import time
# Hardware PWM channel (sysfs pwmchip0) used to drive the beeper.
pwm_num = 2
pwm_dir = '/sys/class/pwm/pwmchip0/pwm' + str(pwm_num)
def beeper_setup():
    """Export and configure the beeper PWM channel: disabled, 4 kHz, 50% duty.

    Idempotent: the channel is only exported when its sysfs directory does
    not already exist.
    """
    if not os.path.exists(pwm_dir):
        # Exporting creates /sys/class/pwm/pwmchip0/pwm<N>.
        with open('/sys/class/pwm/pwmchip0/export', 'w') as f:
            f.write(str(pwm_num) + '\n')
    # Context managers close (and flush) the sysfs handles deterministically
    # instead of relying on the garbage collector.
    with open(pwm_dir + '/enable', 'w') as f:
        f.write('0\n')
    with open(pwm_dir + '/period', 'w') as f:
        f.write('250000\n')  # period in ns -> 4 kHz tone
    with open(pwm_dir + '/duty_cycle', 'w') as f:
        f.write('125000\n')  # 50% duty cycle
def beeper_on():
    """Enable the beeper PWM output (closes the sysfs handle explicitly)."""
    with open(pwm_dir + '/enable', 'w') as f:
        f.write('1\n')
def beeper_off():
    """Disable the beeper PWM output (closes the sysfs handle explicitly)."""
    with open(pwm_dir + '/enable', 'w') as f:
        f.write('0\n')
def read_adc_channel(channel):
    """Return the averaged voltage (in volts) from an IIO ADC channel.

    Averages 10 raw samples and converts assuming a 12-bit ADC (4096 counts)
    with a 1.85 V full-scale reference.
    """
    n_times = 10
    raw_sum = 0
    path = '/sys/bus/iio/devices/iio:device0/in_voltage%d_raw' % channel
    for _ in range(n_times):
        # Re-open per sample so every read triggers a fresh conversion, and
        # close each handle promptly via the context manager.
        with open(path) as f:
            raw_sum += int(f.read())
    raw_mean = float(raw_sum) / n_times
    return raw_mean / 4096. * 1.85
def get_vin():
    """Input-supply voltage in volts, from ADC channel 1.

    The 7.061 factor is presumably the board's voltage-divider ratio --
    TODO confirm against the schematic.
    """
    return read_adc_channel(1) * 7.061
def get_vbat():
    """Battery voltage in volts, from ADC channel 7 (4x divider assumed -- verify)."""
    return read_adc_channel(7) * 4
def read_ultralight(n):
    """Read card memory from a Mifare Ultralight via the pynfc device *n*.

    Issues four 0x30 (READ) commands, one per iteration, starting at pages
    0, 4, 8 and 12, and returns the concatenated hex response data.
    Assumes n.sendAPDU returns (status, hex_string) -- TODO confirm against
    the pynfc API.
    """
    hex_data = ''
    for i in xrange(4):
        status, data = n.sendAPDU(['30', hex(i * 4)[2:].zfill(2)])
        hex_data += data
    return hex_data
#~ BUZZER = 32
# sysfs GPIO numbers for the door-lock driver and the manual exit button.
LOCK_GPIO = 52
EXIT_BUTTON_GPIO = 38
def export_gpio(gpio):
    """Export *gpio* to userspace via /sys/class/gpio/export."""
    with open('/sys/class/gpio/export','wt') as f:
        f.write("%d\n" % gpio)
def gpio_set_direction(gpio, direction):
    """Set *gpio* direction ("in" or "out") through sysfs."""
    with open('/sys/class/gpio/gpio%d/direction' % gpio,'wt') as f:
        f.write(direction + "\n")
def init_gpios():
    """Export the lock and exit-button GPIOs and configure their directions."""
    for gpio, direction in ((LOCK_GPIO, "out"), (EXIT_BUTTON_GPIO, "in")):
        export_gpio(gpio)
        gpio_set_direction(gpio, direction)
def gpio_set_value(gpio, value):
    """Drive *gpio* to *value* (0 or 1) through sysfs."""
    with open('/sys/class/gpio/gpio%d/value' % gpio,'wt') as f:
        f.write("%d\n" % value)
def gpio_get_value(gpio):
    """Return the current level of *gpio* as a bool."""
    with open('/sys/class/gpio/gpio%d/value' % gpio) as f:
        return bool(int(f.read().strip()))
def led_set_brightness(led, value):
    """Set the brightness of the named LED via /sys/class/leds."""
    with open('/sys/class/leds/%s/brightness' % led,'wt') as f:
        f.write("%d\n" % value)
def handle_exit_button():
    """Poll the exit button once a second forever; open the lock on press.

    The button reads active-low here (pressed == 0). Intended to run in a
    daemon thread; never returns.
    """
    while True:
        #~ print "handle_exit_button iter"
        value = gpio_get_value(EXIT_BUTTON_GPIO)
        if not value:
            grant_access()
        time.sleep(1)
def grant_access():
    """Open the lock for a few seconds with LED/beeper feedback.

    Sequence: green LED + short beep, lock released for ~3.5 s total,
    then lock re-engaged and red LED restored. Blocks while sleeping.
    """
    print "grant access"
    led_set_brightness('red', 0)
    led_set_brightness('green', 1)
    beeper_on()
    gpio_set_value(LOCK_GPIO, 0) # open lock
    time.sleep(0.5)
    beeper_off()
    time.sleep(3)
    gpio_set_value(LOCK_GPIO, 1) # close lock
    led_set_brightness('green', 0)
    led_set_brightness('red', 1)
if __name__ == '__main__':
init_gpios()
beeper_setup()
#~ exit_button_thread = Thread(target = handle_exit_button)
#~ exit_button_thread.daemon = True
#~ exit_button_thread.start()
nfc = NFC(0) # Select first NFC device
nfc.powerOn()
prev_result = None
led_color = 0
led_set_brightness('red', 1)
led_set_brightness('green', 0)
beeper_off()
gpio_set_value(LOCK_GPIO, 1)
while True:
# Select card
c = nfc.selectISO14443A()
access_granted = False
card_result = c.uid if c else None
if card_result != prev_result:
print "\033c"
if c:
print GREEN + "Card: " + NC + "[%s] %s" % (c.atqa, c.uid)
if c.atqa == '0044':
print "Found Mifare Ultralight card"
try:
ul_data = read_ultralight(nfc)
metro_num = int(ul_data[37:45], 16)
print 'Metro UL card: ' + RED + str(metro_num) + NC
if (metro_num % 2 == 0):
access_granted = True
except:
print "Error"
if access_granted:
print "access granted"
grant_access()
print "end"
else:
for i in xrange(3):
beeper_on()
led_set_brightness('red', 1)
time.sleep(0.1)
beeper_off()
led_set_brightness('red', 0)
time.sleep(0.1)
led_set_brightness('red', 1)
beeper_off()
else:
print "No card in field"
led_color = 0
prev_result = card_result
|
|
"""
Manage groups on Linux, OpenBSD and NetBSD
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
"""
import functools
import logging
import os
import salt.utils.files
import salt.utils.stringutils
try:
import grp
except ImportError:
pass
# Module-level logger.
log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = "group"
def __virtual__():
    """
    Set the user module if the kernel is Linux or OpenBSD
    """
    if __grains__["kernel"] not in ("Linux", "OpenBSD", "NetBSD"):
        # Unsupported kernel: tell the loader why we refuse to load.
        return (
            False,
            "The groupadd execution module cannot be loaded: "
            " only available on Linux, OpenBSD and NetBSD",
        )
    return __virtualname__
def add(name, gid=None, system=False, root=None):
    """
    Add the specified group

    name
        Name of the new group

    gid
        Use GID for the new group

    system
        Create a system account

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo 3456
    """
    cmd = ["groupadd"]
    if gid:
        cmd += ["-g {}".format(gid)]
    # OpenBSD's groupadd has no -r (system group) flag.
    if system and __grains__["kernel"] != "OpenBSD":
        cmd += ["-r"]
    if root is not None:
        cmd += ["-R", root]
    cmd += [name]

    result = __salt__["cmd.run_all"](cmd, python_shell=False)
    return result["retcode"] == 0
def delete(name, root=None):
    """
    Remove the named group

    name
        Name group to delete

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    """
    cmd = ["groupdel"]
    if root is not None:
        cmd += ["-R", root]
    cmd += [name]

    result = __salt__["cmd.run_all"](cmd, python_shell=False)
    return result["retcode"] == 0
def info(name, root=None):
    """
    Return information about a group

    name
        Name of the group

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    """
    # With a chroot, parse <root>/etc/group ourselves; otherwise use libc.
    if root is not None:
        getgrnam = functools.partial(_getgrnam, root=root)
    else:
        getgrnam = functools.partial(grp.getgrnam)

    try:
        return _format_info(getgrnam(name))
    except KeyError:
        # Unknown group: empty dict rather than an exception.
        return {}
def _format_info(data):
    """
    Return formatted information in a pretty way.
    """
    # Map the grp struct fields onto plain dict keys.
    return dict(
        name=data.gr_name,
        passwd=data.gr_passwd,
        gid=data.gr_gid,
        members=data.gr_mem,
    )
def getent(refresh=False, root=None):
    """
    Return info on all groups

    refresh
        Force a refresh of group information

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    """
    # Serve the cached result unless a refresh was requested.
    if not refresh and "group.getent" in __context__:
        return __context__["group.getent"]

    if root is not None:
        getgrall = functools.partial(_getgrall, root=root)
    else:
        getgrall = functools.partial(grp.getgrall)

    ret = [_format_info(grinfo) for grinfo in getgrall()]
    __context__["group.getent"] = ret
    return ret
def _chattrib(name, key, value, param, root=None):
    """
    Change a single attribute of the named group via groupmod.

    Returns False when the group does not exist, True when the attribute
    already has (or afterwards has) the requested value.
    """
    pre_info = info(name, root=root)
    if not pre_info:
        return False
    if value == pre_info[key]:
        # Nothing to do.
        return True

    cmd = ["groupmod"]
    if root is not None:
        cmd += ["-R", root]
    cmd += [param, value, name]
    __salt__["cmd.run"](cmd, python_shell=False)
    # Verify by re-reading the group entry.
    return info(name, root=root).get(key) == value
def chgid(name, gid, root=None):
    """
    Change the gid for a named group

    name
        Name of the group to modify

    gid
        Change the group ID to GID

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.chgid foo 4376
    """
    # Thin wrapper: delegate to the generic groupmod attribute changer.
    return _chattrib(name, "gid", gid, "-g", root=root)
def adduser(name, username, root=None):
    """
    Add a user in the group.

    name
        Name of the group to modify

    username
        Username to add to the group

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo bar

    Verifies if a valid username 'bar' as a member of an existing group 'foo',
    if not then adds it.
    """
    # Old platforms need different tooling/flags for group membership.
    on_redhat_5 = (
        __grains__.get("os_family") == "RedHat"
        and __grains__.get("osmajorrelease") == "5"
    )
    on_suse_11 = (
        __grains__.get("os_family") == "Suse"
        and __grains__.get("osmajorrelease") == "11"
    )

    if __grains__["kernel"] == "Linux":
        if on_redhat_5:
            cmd = ["gpasswd", "-a", username, name]
        elif on_suse_11:
            cmd = ["usermod", "-A", name, username]
        else:
            cmd = ["gpasswd", "--add", username, name]
        if root is not None:
            cmd.extend(("--root", root))
    else:
        # Non-Linux (e.g. *BSD): fall back to usermod.
        cmd = ["usermod", "-G", name, username]
        if root is not None:
            cmd.extend(("-R", root))

    retcode = __salt__["cmd.retcode"](cmd, python_shell=False)

    # Shell-style retcode: 0 means success.
    return not retcode
def deluser(name, username, root=None):
    """
    Remove a user from the group.

    name
        Name of the group to modify

    username
        Username to delete from the group

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo bar

    Removes a member user 'bar' from a group 'foo'. If group is not present
    then returns True.
    """
    # Old platforms need different tooling/flags for group membership.
    on_redhat_5 = (
        __grains__.get("os_family") == "RedHat"
        and __grains__.get("osmajorrelease") == "5"
    )
    on_suse_11 = (
        __grains__.get("os_family") == "Suse"
        and __grains__.get("osmajorrelease") == "11"
    )

    grp_info = __salt__["group.info"](name)
    try:
        if username in grp_info["members"]:
            if __grains__["kernel"] == "Linux":
                if on_redhat_5:
                    cmd = ["gpasswd", "-d", username, name]
                elif on_suse_11:
                    # NOTE(review): usermod -R here is SUSE's "remove from
                    # group" flag, unlike the chroot -R used elsewhere.
                    cmd = ["usermod", "-R", name, username]
                else:
                    cmd = ["gpasswd", "--del", username, name]
                if root is not None:
                    cmd.extend(("--root", root))
                retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
            elif __grains__["kernel"] == "OpenBSD":
                # OpenBSD has no "remove from group": rewrite the user's
                # secondary group list without *name*.
                out = __salt__["cmd.run_stdout"](
                    "id -Gn {}".format(username), python_shell=False
                )
                cmd = ["usermod", "-S"]
                cmd.append(",".join([g for g in out.split() if g != str(name)]))
                cmd.append("{}".format(username))
                retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
            else:
                log.error("group.deluser is not yet supported on this platform")
                return False
            return not retcode
        else:
            # User is not a member: nothing to remove.
            return True
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort: a missing group/member list is treated as
        # "already removed".
        return True
def members(name, members_list, root=None):
    """
    Replaces members of the group with a provided list.

    name
        Name of the group to modify

    members_list
        Username list to set into the group (comma-separated string)

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3,...'

    Replaces a membership list for a local group 'foo'.

        foo:x:1234:user1,user2,user3,...
    """
    # Old platforms need different tooling/flags for group membership.
    on_redhat_5 = (
        __grains__.get("os_family") == "RedHat"
        and __grains__.get("osmajorrelease") == "5"
    )
    on_suse_11 = (
        __grains__.get("os_family") == "Suse"
        and __grains__.get("osmajorrelease") == "11"
    )

    if __grains__["kernel"] == "Linux":
        if on_redhat_5:
            cmd = ["gpasswd", "-M", members_list, name]
        elif on_suse_11:
            # SUSE 11: drop each current member, then add the new list.
            for old_member in __salt__["group.info"](name).get("members"):
                __salt__["cmd.run"](
                    "groupmod -R {} {}".format(old_member, name), python_shell=False
                )
            cmd = ["groupmod", "-A", members_list, name]
        else:
            cmd = ["gpasswd", "--members", members_list, name]
        if root is not None:
            cmd.extend(("--root", root))
        retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
    elif __grains__["kernel"] == "OpenBSD":
        # OpenBSD cannot edit membership in place: recreate the group with
        # the same gid, then re-add each requested member.
        retcode = 1
        grp_info = __salt__["group.info"](name)
        if grp_info and name in grp_info["name"]:
            __salt__["cmd.run"]("groupdel {}".format(name), python_shell=False)
            __salt__["cmd.run"](
                "groupadd -g {} {}".format(grp_info["gid"], name), python_shell=False
            )
            for user in members_list.split(","):
                if user:
                    retcode = __salt__["cmd.retcode"](
                        ["usermod", "-G", name, user], python_shell=False
                    )
                    if not retcode == 0:
                        break
                # provided list is '': users previously deleted from group
                else:
                    retcode = 0
    else:
        log.error("group.members is not yet supported on this platform")
        return False

    return not retcode
def _getgrnam(name, root=None):
    """
    Alternative implementation of getgrnam that reads only <root>/etc/group.

    Raises KeyError (like grp.getgrnam) when the group is not found.
    """
    group_file = os.path.join(root or "/", "etc/group")
    with salt.utils.files.fopen(group_file) as fgroup:
        for raw_line in fgroup:
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            fields = raw_line.strip().split(":")
            if len(fields) < 4:
                log.debug("Ignoring group line: %s", raw_line)
                continue
            if fields[0] != name:
                continue
            # Generate a getpwnam compatible output: gid as int, members
            # as a list.
            fields[2] = int(fields[2])
            fields[3] = fields[3].split(",") if fields[3] else []
            return grp.struct_group(fields)
    raise KeyError("getgrnam(): name not found: {}".format(name))
def _getgrall(root=None):
    """
    Alternative implementation of getgrall that reads only <root>/etc/group.

    Yields grp.struct_group entries, one per well-formed line.
    """
    group_file = os.path.join(root or "/", "etc/group")
    with salt.utils.files.fopen(group_file) as fgroup:
        for raw_line in fgroup:
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            fields = raw_line.strip().split(":")
            if len(fields) < 4:
                log.debug("Ignoring group line: %s", raw_line)
                continue
            # Generate a getgrall compatible output: gid as int, members
            # as a list.
            fields[2] = int(fields[2])
            fields[3] = fields[3].split(",") if fields[3] else []
            yield grp.struct_group(fields)
|
|
# Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
# All sample tests in this module are skipped until this bug is resolved.
skip_msg = "Bug 1426241"
# Fake PCI-device DB rows (SR-IOV virtual functions) used to stub the
# pci_device_get_* DB API calls in PciSampleJsonTest below.
fake_db_dev_1 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'compute_node_id': 1,
    'address': '0000:04:10.0',
    'vendor_id': '8086',
    'numa_node': 0,
    'product_id': '1520',
    'dev_type': 'type-VF',
    'status': 'available',
    'dev_id': 'pci_0000_04_10_0',
    'label': 'label_8086_1520',
    'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
    'request_id': None,
    # extra_info is stored as a JSON-encoded string, matching the DB schema
    'extra_info': '{"key1": "value1", "key2": "value2"}'
    }
# Second fake device on the same compute node, different address/NUMA node.
fake_db_dev_2 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 2,
    'compute_node_id': 1,
    'address': '0000:04:10.1',
    'vendor_id': '8086',
    'numa_node': 1,
    'product_id': '1520',
    'dev_type': 'type-VF',
    'status': 'available',
    'dev_id': 'pci_0000_04_10_1',
    'label': 'label_8086_1520',
    'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
    'request_id': None,
    'extra_info': '{"key3": "value3", "key4": "value4"}'
    }
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
    # API sample tests for server responses with the os-pci extension loaded.
    extension_name = "os-pci"

    def setUp(self):
        # Unconditionally skip every test in this class (see skip_msg).
        # Raising before super().setUp() means no fixture setup runs.
        raise testtools.TestCase.skipException(skip_msg)

    def test_show(self):
        # GET a single server and verify it against the stored API sample.
        uuid = self._post_server()
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('server-get-resp', subs, response, 200)

    def test_detail(self):
        # GET the server detail listing and verify against the stored sample.
        self._post_server()
        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    # API sample tests for hypervisor responses including PCI device pools.
    ADMIN_API = True
    extra_extensions_to_load = ['os-hypervisors']
    extension_name = 'os-pci'

    def setUp(self):
        # Unconditionally skip every test in this class (see skip_msg).
        # NOTE(review): everything after this raise is intentionally kept but
        # unreachable until the skip is removed.
        raise testtools.TestCase.skipException(skip_msg)
        super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
        # OrderedDict so the serialized cpu_info matches the stored sample
        # byte-for-byte.
        cpu_info = collections.OrderedDict([
            ('arch', 'x86_64'),
            ('model', 'Nehalem'),
            ('vendor', 'Intel'),
            ('features', ['pge', 'clflush']),
            ('topology', {
                'cores': 1,
                'threads': 1,
                'sockets': 4,
                }),
            ])
        # Canned compute node returned by the mocked ComputeNode getters.
        self.fake_compute_node = objects.ComputeNode(
            cpu_info=jsonutils.dumps(cpu_info),
            current_workload=0,
            disk_available_least=0,
            host_ip="1.1.1.1",
            state="up",
            status="enabled",
            free_disk_gb=1028,
            free_ram_mb=7680,
            hypervisor_hostname="fake-mini",
            hypervisor_type="fake",
            hypervisor_version=1000,
            id=1,
            local_gb=1028,
            local_gb_used=0,
            memory_mb=8192,
            memory_mb_used=512,
            running_vms=0,
            vcpus=1,
            vcpus_used=0,
            service_id=2,
            host='043b3cacf6f34c90a7245151fc8ebcda',
            pci_device_pools=pci_device_pool.from_pci_stats(
                                      {"count": 5,
                                       "vendor_id": "8086",
                                       "product_id": "1520",
                                       "keya": "valuea",
                                       "extra_info": {
                                           "phys_function": '[["0x0000", '
                                                            '"0x04", "0x00",'
                                                            ' "0x1"]]',
                                           "key1": "value1"}}),)
        # Canned service record returned by the mocked Service getter.
        self.fake_service = objects.Service(
            id=2,
            host='043b3cacf6f34c90a7245151fc8ebcda',
            disabled=False,
            disabled_reason=None)

    @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
    @mock.patch("nova.objects.Service.get_by_compute_host")
    @mock.patch("nova.objects.ComputeNode.get_by_id")
    def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
        # Single-hypervisor GET should include the PCI device pools.
        mock_obj.return_value = self.fake_compute_node
        mock_svc_get.return_value = self.fake_service
        hypervisor_id = 1
        response = self._do_get('os-hypervisors/%s' % hypervisor_id)
        subs = {
            'hypervisor_id': hypervisor_id,
        }
        subs.update(self._get_regexes())
        self._verify_response('hypervisors-pci-show-resp',
                              subs, response, 200)

    @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
    @mock.patch("nova.objects.Service.get_by_compute_host")
    @mock.patch("nova.objects.ComputeNodeList.get_all")
    def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
        # Hypervisor detail listing should include the PCI device pools.
        mock_obj.return_value = [self.fake_compute_node]
        mock_svc_get.return_value = self.fake_service
        hypervisor_id = 1
        subs = {
            'hypervisor_id': hypervisor_id
        }
        response = self._do_get('os-hypervisors/detail')
        subs.update(self._get_regexes())
        self._verify_response('hypervisors-pci-detail-resp',
                              subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    # API sample tests for the os-pci resource endpoints themselves.
    ADMIN_API = True
    extension_name = "os-pci"

    def setUp(self):
        # Unconditionally skip every test in this class (see skip_msg).
        raise testtools.TestCase.skipException(skip_msg)

    def _fake_pci_device_get_by_id(self, context, id):
        # Stub for db.pci_device_get_by_id: always return the first fake row.
        return fake_db_dev_1

    def _fake_pci_device_get_all_by_node(self, context, id):
        # Stub for db.pci_device_get_all_by_node: both fake rows.
        return [fake_db_dev_1, fake_db_dev_2]

    def test_pci_show(self):
        self.stubs.Set(db, 'pci_device_get_by_id',
                       self._fake_pci_device_get_by_id)
        response = self._do_get('os-pci/1')
        subs = self._get_regexes()
        self._verify_response('pci-show-resp', subs, response, 200)

    def test_pci_index(self):
        self.stubs.Set(db, 'pci_device_get_all_by_node',
                       self._fake_pci_device_get_all_by_node)
        response = self._do_get('os-pci')
        subs = self._get_regexes()
        self._verify_response('pci-index-resp', subs, response, 200)

    def test_pci_detail(self):
        self.stubs.Set(db, 'pci_device_get_all_by_node',
                       self._fake_pci_device_get_all_by_node)
        response = self._do_get('os-pci/detail')
        subs = self._get_regexes()
        self._verify_response('pci-detail-resp', subs, response, 200)
|
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class ServerConfig(Config):
    """Core server settings: server_name, listeners, daemonization."""

    def read_config(self, config):
        # Pull mandatory and optional keys out of the parsed YAML config.
        self.server_name = config["server_name"]
        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client = config["web_client"]
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config["soft_file_limit"]
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
        self.listeners = config.get("listeners", [])
        # Legacy config support: an old-style "bind_port" entry overrides any
        # "listeners" block and synthesises equivalent listener dicts.
        bind_port = config.get("bind_port")
        if bind_port:
            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)
            names = ["client", "webclient"] if self.web_client else ["client"]
            # TLS listener serving client (and optionally webclient) plus
            # federation resources.
            self.listeners.append({
                "port": bind_port,
                "bind_address": bind_host,
                "tls": True,
                "type": "http",
                "resources": [
                    {
                        "names": names,
                        "compress": gzip_responses,
                    },
                    {
                        "names": ["federation"],
                        "compress": False,
                    }
                ]
            })
            # Legacy default: plaintext port is the TLS port minus 400
            # (e.g. 8448 -> 8048 is NOT the usual pairing; 8448 -> 8048?
            # NOTE(review): with the defaults below this yields 8448 - 400
            # = 8048 here but 8008 in default_config -- confirm intended).
            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append({
                    "port": unsecure_port,
                    "bind_address": bind_host,
                    "tls": False,
                    "type": "http",
                    "resources": [
                        {
                            "names": names,
                            "compress": gzip_responses,
                        },
                        {
                            "names": ["federation"],
                            "compress": False,
                        }
                    ]
                })
        # Optional twisted telnet manhole for debugging, bound to localhost.
        manhole = config.get("manhole")
        if manhole:
            self.listeners.append({
                "port": manhole,
                "bind_address": "127.0.0.1",
                "type": "manhole",
            })
        # Optional metrics listener (plaintext HTTP, localhost by default).
        metrics_port = config.get("metrics_port")
        if metrics_port:
            self.listeners.append({
                "port": metrics_port,
                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
                "tls": False,
                "resources": [
                    {
                        "names": ["metrics"],
                        "compress": False,
                    },
                ]
            })
        # Attempt to guess the content_addr for the v0 content repostitory
        content_addr = config.get("content_addr")
        if not content_addr:
            # Use the first plaintext HTTP listener; for/else raises if none.
            for listener in self.listeners:
                if listener["type"] == "http" and not listener.get("tls", False):
                    unsecure_port = listener["port"]
                    break
            else:
                raise RuntimeError("Could not determine 'content_addr'")
            host = self.server_name
            if ':' not in host:
                host = "%s:%d" % (host, unsecure_port)
            else:
                # Strip any explicit port from server_name, then append the
                # plaintext port.
                host = host.split(':')[0]
                host = "%s:%d" % (host, unsecure_port)
            content_addr = "http://%s" % (host,)
        self.content_addr = content_addr

    def default_config(self, config_dir_path, server_name):
        # Derive default ports from an explicit port in server_name, if any.
        if ":" in server_name:
            bind_port = int(server_name.split(":")[1])
            unsecure_port = bind_port - 400
        else:
            bind_port = 8448
            unsecure_port = 8008
        pid_file = self.abspath("homeserver.pid")
        # Returns the default YAML (interpolated via locals()); the string
        # body must not be edited casually -- its bytes are the emitted config.
        return """\
    ## Server ##
    # The domain name of the server, with optional explicit port.
    # This is used by remote servers to connect to this server,
    # e.g. matrix.org, localhost:8080, etc.
    server_name: "%(server_name)s"
    # When running as a daemon, the file to store the pid in
    pid_file: %(pid_file)s
    # Whether to serve a web client from the HTTP/HTTPS root resource.
    web_client: True
    # Set the soft limit on the number of file descriptors synapse can use
    # Zero is used to indicate synapse should set the soft limit to the
    # hard limit.
    soft_file_limit: 0
    # List of ports that Synapse should listen on, their purpose and their
    # configuration.
    listeners:
      # Main HTTPS listener
      # For when matrix traffic is sent directly to synapse.
      -
        # The port to listen for HTTPS requests on.
        port: %(bind_port)s
        # Local interface to listen on.
        # The empty string will cause synapse to listen on all interfaces.
        bind_address: ''
        # This is a 'http' listener, allows us to specify 'resources'.
        type: http
        tls: true
        # Use the X-Forwarded-For (XFF) header as the client IP and not the
        # actual client IP.
        x_forwarded: false
        # List of HTTP resources to serve on this listener.
        resources:
          -
            # List of resources to host on this listener.
            names:
              - client # The client-server APIs, both v1 and v2
              - webclient # The bundled webclient.
            # Should synapse compress HTTP responses to clients that support it?
            # This should be disabled if running synapse behind a load balancer
            # that can do automatic compression.
            compress: true
          - names: [federation] # Federation APIs
            compress: false
    # Unsecure HTTP listener,
    # For when matrix traffic passes through loadbalancer that unwraps TLS.
    - port: %(unsecure_port)s
      tls: false
      bind_address: ''
      type: http
      x_forwarded: false
      resources:
        - names: [client, webclient]
          compress: true
        - names: [federation]
          compress: false
    # Turn on the twisted telnet manhole service on localhost on the given
    # port.
    # - port: 9000
    #   bind_address: 127.0.0.1
    #   type: manhole
    """ % locals()

    def read_arguments(self, args):
        # Command-line flags override the values read from the config file.
        if args.manhole is not None:
            self.manhole = args.manhole
        if args.daemonize is not None:
            self.daemonize = args.daemonize
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    def add_arguments(self, parser):
        # Register the "server" argument group on the shared CLI parser.
        server_group = parser.add_argument_group("server")
        server_group.add_argument("-D", "--daemonize", action='store_true',
                                  default=None,
                                  help="Daemonize the home server")
        server_group.add_argument("--print-pidfile", action='store_true',
                                  default=None,
                                  help="Print the path to the pidfile just"
                                  " before daemonizing")
        server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
                                  type=int,
                                  help="Turn on the twisted telnet manhole"
                                  " service on the given port.")
|
|
'''
Test properties attached to a widget
'''
import unittest
from kivy.event import EventDispatcher
from functools import partial
class TestProperty(EventDispatcher):
    # Minimal EventDispatcher subclass; the tests below create kivy Property
    # objects and link them to an instance manually.
    pass
# Shared dispatcher instance each test attaches its properties to.
wid = TestProperty()
class PropertiesTestCase(unittest.TestCase):
    """Exercise kivy Property types (get/set, validation, observers) by
    linking property objects directly onto the module-level ``wid``."""

    def test_base(self):
        from kivy.properties import Property
        a = Property(-1)
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), -1)
        a.set(wid, 0)
        self.assertEqual(a.get(wid), 0)
        a.set(wid, 1)
        self.assertEqual(a.get(wid), 1)

    def test_observer(self):
        # Observers must fire only when the stored value actually changes.
        from kivy.properties import Property
        a = Property(-1)
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), -1)
        global observe_called
        observe_called = 0
        def observe(obj, value):
            global observe_called
            observe_called = 1
        a.bind(wid, observe)
        a.set(wid, 0)
        self.assertEqual(a.get(wid), 0)
        self.assertEqual(observe_called, 1)
        observe_called = 0
        a.set(wid, 0)
        self.assertEqual(a.get(wid), 0)
        self.assertEqual(observe_called, 0)
        a.set(wid, 1)
        self.assertEqual(a.get(wid), 1)
        self.assertEqual(observe_called, 1)

    def test_objectcheck(self):
        from kivy.properties import ObjectProperty
        a = ObjectProperty(False)
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), False)
        a.set(wid, True)
        self.assertEqual(a.get(wid), True)

    def test_stringcheck(self):
        from kivy.properties import StringProperty
        a = StringProperty()
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), '')
        a.set(wid, 'hello')
        self.assertEqual(a.get(wid), 'hello')
        try:
            a.set(wid, 88) # number shouldn't be accepted
            self.fail('string accept number, fail.')
        except ValueError:
            pass

    def test_numericcheck(self):
        from kivy.properties import NumericProperty
        a = NumericProperty()
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), 0)
        a.set(wid, 99)
        self.assertEqual(a.get(wid), 99)
        # NOTE: string rejection is deliberately not asserted here, since
        # NumericProperty accepts unit strings (see the units test below).
        #try:
        #    a.set(wid, '') # string shouldn't be accepted
        #    self.fail('number accept string, fail.')
        #except ValueError:
        #    pass

    def test_listcheck(self):
        from kivy.properties import ListProperty
        a = ListProperty()
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), [])
        a.set(wid, [1, 2, 3])
        self.assertEqual(a.get(wid), [1, 2, 3])

    def test_dictcheck(self):
        from kivy.properties import DictProperty
        a = DictProperty()
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), {})
        a.set(wid, {'foo': 'bar'})
        self.assertEqual(a.get(wid), {'foo': 'bar'})

    def test_propertynone(self):
        from kivy.properties import NumericProperty
        a = NumericProperty(0, allownone=True)
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), 0)
        try:
            a.set(wid, None)
            self.assertEqual(a.get(wid), None)
        except ValueError:
            pass
        a.set(wid, 1)
        self.assertEqual(a.get(wid), 1)

    def test_alias(self):
        # AliasProperty computed from two NumericProperties; also checks
        # that observers fire on dependency changes but not no-op sets.
        from kivy.properties import NumericProperty, AliasProperty
        wid.__class__.x = x = NumericProperty(0)
        x.link(wid, 'x')
        x.link_deps(wid, 'x')
        wid.__class__.width = width = NumericProperty(100)
        width.link(wid, 'width')
        width.link_deps(wid, 'width')
        def get_right(self):
            return x.get(self) + width.get(self)
        def set_right(self, value):
            x.set(self, value - width.get(self))
        right = AliasProperty(get_right, set_right, bind=('x', 'width'))
        right.link(wid, 'right')
        right.link_deps(wid, 'right')
        self.assertEqual(right.get(wid), 100)
        x.set(wid, 500)
        self.assertEqual(right.get(wid), 600)
        width.set(wid, 50)
        self.assertEqual(right.get(wid), 550)
        right.set(wid, 100)
        self.assertEqual(width.get(wid), 50)
        self.assertEqual(x.get(wid), 50)
        # test observer
        global observe_called
        observe_called = 0
        def observe(obj, value):
            global observe_called
            observe_called = 1
        right.bind(wid, observe)
        x.set(wid, 100)
        self.assertEqual(observe_called, 1)
        observe_called = 0
        x.set(wid, 100)
        self.assertEqual(observe_called, 0)
        width.set(wid, 900)
        self.assertEqual(observe_called, 1)
        observe_called = 0
        right.set(wid, 700)
        self.assertEqual(observe_called, 1)
        observe_called = 0
        right.set(wid, 700)
        self.assertEqual(observe_called, 0)

    def test_reference(self):
        from kivy.properties import NumericProperty, ReferenceListProperty
        x = NumericProperty(0)
        x.link(wid, 'x')
        x.link_deps(wid, 'x')
        y = NumericProperty(0)
        y.link(wid, 'y')
        y.link_deps(wid, 'y')
        pos = ReferenceListProperty(x, y)
        pos.link(wid, 'pos')
        pos.link_deps(wid, 'pos')
        self.assertEqual(x.get(wid), 0)
        self.assertEqual(y.get(wid), 0)
        self.assertEqual(pos.get(wid), [0, 0])
        x.set(wid, 50)
        self.assertEqual(pos.get(wid), [50, 0])
        y.set(wid, 50)
        self.assertEqual(pos.get(wid), [50, 50])
        pos.set(wid, [0, 0])
        self.assertEqual(pos.get(wid), [0, 0])
        self.assertEqual(x.get(wid), 0)
        self.assertEqual(y.get(wid), 0)
        # test observer
        global observe_called
        observe_called = 0
        def observe(obj, value):
            global observe_called
            observe_called = 1
        pos.bind(wid, observe)
        self.assertEqual(observe_called, 0)
        x.set(wid, 99)
        self.assertEqual(observe_called, 1)

    def test_reference_child_update(self):
        # Mutating the list returned by a ReferenceListProperty must be
        # reflected by subsequent reads.
        from kivy.properties import NumericProperty, ReferenceListProperty
        x = NumericProperty(0)
        x.link(wid, 'x')
        x.link_deps(wid, 'x')
        y = NumericProperty(0)
        y.link(wid, 'y')
        y.link_deps(wid, 'y')
        pos = ReferenceListProperty(x, y)
        pos.link(wid, 'pos')
        pos.link_deps(wid, 'pos')
        pos.get(wid)[0] = 10
        self.assertEqual(pos.get(wid), [10, 0])
        pos.get(wid)[:] = (20, 30)
        self.assertEqual(pos.get(wid), [20, 30])

    def test_dict(self):
        # In-place dict mutations must notify observers.
        from kivy.properties import DictProperty
        x = DictProperty()
        x.link(wid, 'x')
        x.link_deps(wid, 'x')
        # test observer
        global observe_called
        observe_called = 0
        def observe(obj, value):
            global observe_called
            observe_called = 1
        x.bind(wid, observe)
        observe_called = 0
        x.get(wid)['toto'] = 1
        self.assertEqual(observe_called, 1)
        observe_called = 0
        x.get(wid)['toto'] = 2
        self.assertEqual(observe_called, 1)
        observe_called = 0
        x.get(wid)['youupi'] = 2
        self.assertEqual(observe_called, 1)
        observe_called = 0
        del x.get(wid)['toto']
        self.assertEqual(observe_called, 1)
        observe_called = 0
        x.get(wid).update({'bleh': 5})
        self.assertEqual(observe_called, 1)

    def test_aliasproperty_with_cache(self):
        # observe_called counts getter invocations here, proving the cache
        # is used until a dependency (or the alias itself) changes.
        from kivy.properties import NumericProperty, AliasProperty
        global observe_called
        observe_called = 0
        class CustomAlias(EventDispatcher):
            basevalue = NumericProperty(1)
            def _get_prop(self):
                global observe_called
                observe_called += 1
                return self.basevalue * 2
            def _set_prop(self, value):
                self.basevalue = value / 2
            prop = AliasProperty(_get_prop, _set_prop,
                                 bind=('basevalue', ), cache=True)
        # initial checks
        wid = CustomAlias()
        self.assertEqual(observe_called, 0)
        self.assertEqual(wid.basevalue, 1)
        self.assertEqual(observe_called, 0)
        # first call, goes in cache
        self.assertEqual(wid.prop, 2)
        self.assertEqual(observe_called, 1)
        # second call, cache used
        self.assertEqual(wid.prop, 2)
        self.assertEqual(observe_called, 1)
        # change the base value, should trigger an update for the cache
        wid.basevalue = 4
        self.assertEqual(observe_called, 2)
        # now read the value again, should use the cache too
        self.assertEqual(wid.prop, 8)
        self.assertEqual(observe_called, 2)
        # change the prop itself, should trigger an update for the cache
        wid.prop = 4
        self.assertEqual(observe_called, 3)
        self.assertEqual(wid.basevalue, 2)
        self.assertEqual(wid.prop, 4)
        self.assertEqual(observe_called, 3)

    def test_bounded_numeric_property(self):
        from kivy.properties import BoundedNumericProperty
        bnp = BoundedNumericProperty(0.0, min=0.0, max=3.5)
        bnp.link(wid, 'bnp')
        bnp.set(wid, 1)
        bnp.set(wid, 0.0)
        bnp.set(wid, 3.1)
        bnp.set(wid, 3.5)
        self.assertRaises(ValueError, partial(bnp.set, wid, 3.6))
        self.assertRaises(ValueError, partial(bnp.set, wid, -3))

    def test_bounded_numeric_property_error_value(self):
        # errorvalue replaces out-of-range values instead of raising.
        from kivy.properties import BoundedNumericProperty
        bnp = BoundedNumericProperty(0, min=-5, max=5, errorvalue=1)
        bnp.link(wid, 'bnp')
        bnp.set(wid, 1)
        self.assertEqual(bnp.get(wid), 1)
        bnp.set(wid, 5)
        self.assertEqual(bnp.get(wid), 5)
        bnp.set(wid, 6)
        self.assertEqual(bnp.get(wid), 1)
        bnp.set(wid, -5)
        self.assertEqual(bnp.get(wid), -5)
        bnp.set(wid, -6)
        self.assertEqual(bnp.get(wid), 1)

    def test_bounded_numeric_property_error_handler(self):
        # errorhandler maps out-of-range values to a replacement.
        from kivy.properties import BoundedNumericProperty
        bnp = BoundedNumericProperty(
            0, min=-5, max=5,
            errorhandler=lambda x: 5 if x > 5 else -5)
        bnp.link(wid, 'bnp')
        bnp.set(wid, 1)
        self.assertEqual(bnp.get(wid), 1)
        bnp.set(wid, 5)
        self.assertEqual(bnp.get(wid), 5)
        bnp.set(wid, 10)
        self.assertEqual(bnp.get(wid), 5)
        bnp.set(wid, -5)
        self.assertEqual(bnp.get(wid), -5)
        bnp.set(wid, -10)
        self.assertEqual(bnp.get(wid), -5)

    def test_numeric_string_with_units_check(self):
        # Unit-suffixed strings are converted (e.g. '99in' -> pixels) and the
        # original unit is retrievable via get_format().
        from kivy.properties import NumericProperty
        a = NumericProperty()
        a.link(wid, 'a')
        a.link_deps(wid, 'a')
        self.assertEqual(a.get(wid), 0)
        a.set(wid, '55dp')
        self.assertEqual(a.get(wid), 55)
        self.assertEqual(a.get_format(wid), 'dp')
        a.set(wid, u'55dp')
        self.assertEqual(a.get(wid), 55)
        self.assertEqual(a.get_format(wid), 'dp')
        a.set(wid, '99in')
        self.assertEqual(a.get(wid), 9504.0)
        self.assertEqual(a.get_format(wid), 'in')
        a.set(wid, u'99in')
        self.assertEqual(a.get(wid), 9504.0)
        self.assertEqual(a.get_format(wid), 'in')
|
|
#! /usr/bin/python
#
# Copyright (C) 2009-2012 Mikhail Sobolev
# Contact: Mikhail Sobolev <mss@mawhrin.net>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple OPDS browser
(work in progress)
"""
import sys
from copy import copy
import webbrowser
from PyQt4 import QtGui, QtCore
from cgi import escape
from urllib import quote_plus
from pprint import pformat
from opds import load
def data_to_icon(data):
    """
    Convert raw image bytes into an icon.

    :param data: bytes
    :rtype: QIcon
    """
    pix = QtGui.QPixmap()
    pix.loadFromData(data)
    return QtGui.QIcon(pix)
# name -> QIcon cache populated lazily by get_icon()
_cached_icons = {}
def get_icon(name, filename):
    """
    Return the icon registered under *name*, loading it from *filename*
    on first use and caching it for subsequent calls.
    """
    if name not in _cached_icons:
        with open(filename, 'rb') as icon_file:
            _cached_icons[name] = data_to_icon(icon_file.read())
    return _cached_icons[name]
class OPDSGeneric(QtGui.QListWidgetItem):
    """Base list item for an OPDS feed entry: shows a default icon/title and
    renders a generic key/value HTML preview."""
    def __init__(self, entry, type=QtGui.QListWidgetItem.Type):
        super(OPDSGeneric, self).__init__(None, type)
        # Parsed OPDS entry dict (as produced by opds.load)
        self._entry = entry
        self.init_item()
    def init_item(self):
        """
        set item icon and text
        """
        self.setIcon(get_icon('default.png', 'images/default.png'))
        # title is stored as a (type, text) pair; [1] is the text
        self.setText(self._entry['title'][1])
    def html(self):
        """
        produce HTML representation for the entry
        """
        result = [
            '<h1>%s</h1>' % escape(self._entry['title'][1]),
            '<small>Last updated: %s</small>' % self._entry['updated']
        ]
        # Dump every remaining entry field as an escaped table row.
        result.append('<table>')
        for key in [key for key in sorted(self._entry.keys()) if key not in [ 'title', 'updated' ]]:
            result.append('<tr><td>%s</td><td>%s</td>' % (escape(key), escape(pformat(self._entry[key]))))
        result.append('</table>')
        return ''.join(result)
    def activate(self, _):
        """do nothing!"""
class OPDSCatalogue(OPDSGeneric):
    """List item for a sub-catalogue entry; activating it navigates the
    browser to the catalogue's link."""
    def __init__(self, entry):
        super(OPDSCatalogue, self).__init__(entry, QtGui.QListWidgetItem.Type+1)
        links = [ link for link in entry['links'] if is_catalogue_link(link) ]
        assert len(links) > 0, 'Oopsy-oops.'
        if len(links) > 1:
            # Multiple catalogue links are unexpected; warn and use the first.
            print >> sys.stderr, 'Got more than one catalogue link:'
            for link in links:
                print >> sys.stderr, '    ', pformat(link)
        self._link = links[0]['href']
    def activate(self, browser):
        """load catalogue link in the main browser"""
        browser.load_url(self._link)
class OPDSEntry(OPDSGeneric):
    """List item for a book (non-catalogue) entry; renders a detailed HTML
    preview with author, content, Dublin Core metadata and links."""
    def __init__(self, entry):
        super(OPDSEntry, self).__init__(entry, QtGui.QListWidgetItem.Type+2)
    def html(self):
        """
        prepare HTML for showing in the right pane
        """
        result = [
            '<h1>%s</h1>' % escape(self._entry['title'][1]),
            '<small>Last updated: %s</small>' % self._entry['updated']
        ]
        result.append('<h2>Author</h2>')
        if self._entry['author']:
            # NOTE(review): the closing '</dt>' after %(url)s below should
            # probably be '</dd>' -- left unchanged here since the string is
            # runtime output.
            result.append('''\
    <dl>
    <dt>Name</dt>
    <dd>%(name)s</dd>
    <dt>e-mail</dt>
    <dd>%(email)s</dd>
    <dt>URL</dt>
    <dd>%(url)s</dt>
    </dl>''' % self._entry['author'])
        else:
            result.append('<p>Author is unknown</p>')
        # content is stored as a (type, body) pair
        content_type, content_body = self._entry['content'][0], self._entry['content'][1]
        if content_type and content_body:
            result.append('<h2>Content</h2>')
            if content_type == 'text':
                result.append('<p>%s</p>' % escape(content_body))
            elif content_type == 'html': # MSS: if I understand correctly, spec requires proper escaping to be in place
                result.append(content_body)
            elif content_type == 'xhtml':
                result.append(content_body)
            else:
                result.append('<p><b>Unknown type</b>: %s</p>' % content_type)
                result.append('<p>%s</p>' % escape(content_body))
        # Dublin Core metadata: list of (tag, value, attrib) triples.
        if self._entry['dcore']:
            result.append('<h2>Dublin Core</h2>')
            result.append('<dl>')
            for tag, value, attrib in self._entry['dcore']:
                if attrib:
                    result.append('''\
    <dt>%s (%s)</dt>
    <dd>%s</dd>''' % (tag, pformat(attrib), value))
                else:
                    result.append('''\
    <dt>%s</dt>
    <dd>%s</dd>''' % (tag, value))
            result.append('</dl>')
        if self._entry['links']:
            result.append('<h2>Links</h2>')
            result.append('<ul>')
            for link in self._entry['links']:
                # Work on a copy so the entry's link dict is not mutated.
                tempo = copy(link)
                # +++ Hack +++
                if 'title' not in tempo:
                    tempo['title'] = tempo['href']
                if 'rel' in tempo:
                    tempo['rel'] = 'rel="%s"' % tempo['rel']
                else:
                    tempo['rel'] = ''
                # --- Hack ---
                result.append('<li><a href="%(href)s" %(rel)s type="%(type)s">%(title)s</a></li>' % tempo)
            result.append('</ul>')
        if self._entry['others']:
            result.append('<h2>Other elements</h2>')
            for item in self._entry['others']:
                result.append('<p>%s</p>' % escape(item))
        return ''.join(result)
    def activate(self, _):
        """not implemented"""
        print 'Not implemented!'
def is_catalogue_link(link):
    """check whether the specified link points to a catalogue"""
    if link['type'] != 'application/atom+xml':
        return False
    return 'rel' not in link
def is_catalogue(links):
    """check whether the specified set of links is for a catalogue
    (unfortunately, currently there's no better way to distinguish between
    catalogue & book entries
    """
    return any(is_catalogue_link(link) for link in links)
def get_item(entry):
    """
    determines the entry type (catalog/book) and creates an instance of the
    corresponding QListWidgetItem derivative

    :param entry: list of { 'type' : <type>, 'href' : <href>, ... }
    :rtype: OPDSGeneric/OPDSCatalogue/OPDSEntry
    """
    factory = OPDSCatalogue if is_catalogue(entry['links']) else OPDSEntry
    return factory(entry)
class OPDSBrowser(QtGui.QMainWindow):
    """Main window: a list of feed entries on the left, an HTML preview on
    the right, with home/back navigation and per-feed search."""
    def __init__(self, home_url):
        super(OPDSBrowser, self).__init__()
        self._home_url = home_url
        self._current_url = None
        self._history = []      # stack of previously viewed URLs
        self._cache = {}        # url -> parsed feed data (see _cached_data)
        self._create_widgets()
        splitter = QtGui.QSplitter()
        splitter.addWidget(self._items)
        splitter.addWidget(self._text_viewer)
        self.setCentralWidget(splitter)
        self.setWindowTitle('OPDS Browser')
        self.resize(800, 600)
        self.go_home()
    def _create_widgets(self):
        """create all necessary widgets"""
        self._items = QtGui.QListWidget()
        self._items.currentItemChanged.connect(self.update_preview)
        self._items.itemActivated.connect(self.load_item)
        self._text_viewer = QtGui.QTextBrowser()
        # Handle link clicks ourselves (open_link) instead of navigating.
        self._text_viewer.setOpenExternalLinks(False)
        self._text_viewer.setOpenLinks(False)
        self._text_viewer.anchorClicked.connect(self.open_link)
        # Among these three only _back is going to be used (when @ home,
        # there's no way back :))
        self._home = QtGui.QAction(QtGui.QIcon('images/gohome.png'), 'Home', self)
        self._home.triggered.connect(self.go_home)
        self._back = QtGui.QAction(QtGui.QIcon('images/previous.png'), 'Back', self)
        self._back.triggered.connect(self.go_back)
        self._add = QtGui.QAction(QtGui.QIcon('images/add.png'), 'Up', self)
        self._add.triggered.connect(self.add_item)
        self._search_what = QtGui.QComboBox()
        self._search = QtGui.QLineEdit(self)
        self._search.setSizePolicy(QtGui.QSizePolicy.Expanding, self._search.sizePolicy().verticalPolicy())
        self._search.returnPressed.connect(self.do_search)
        self._disable_search()
        # We do not really need this object in future, so not storing
        toolbar = self.addToolBar('main toolbar')
        toolbar.addAction(self._home)
        toolbar.addAction(self._back)
        toolbar.addSeparator()
        toolbar.addAction(self._add)
        toolbar = self.addToolBar('search toolbar')
        toolbar.addWidget(self._search_what)
        toolbar.addWidget(self._search)
    def _disable_search(self):
        """disable search functionality"""
        self._search_what.clear()
        self._search_what.addItem('<No search>')
        self._search_what.setDisabled(True)
        self._search.setDisabled(True)
    def _add_search_item(self, name, link):
        """adds a search item to the combobox"""
        # TODO: should I validate the link first (must have {searchTerms})?
        if not self._search_what.isEnabled():
            # First search link for this feed: drop the '<No search>' stub.
            self._search_what.clear()
            self._search_what.setEnabled(True)
            self._search.setEnabled(True)
        self._search_what.addItem(name, QtCore.QVariant(link))
    def open_link(self, link):
        """opens the link in an external browser"""
        webbrowser.open(link.toString())
    def _cached_data(self, url):
        """a simple cache for downloaded data"""
        if url not in self._cache:
            self._cache[url] = load(url, True)
        return self._cache[url]
    def _load_url(self, url):
        """loads the provided URL"""
        self._current_url = url
        data = self._cached_data(url)
        if data['title'][0] == 'text':
            self.setWindowTitle(data['title'][1])
        else:
            self.setWindowTitle('Unknown format (%s): %s' % data['title'])
        # Repopulate the entry list and preview the first item.
        self._items.clear()
        for entry in data['entries']:
            self._items.addItem(get_item(entry))
        self._items.setCurrentRow(0)
        # Rebuild the search combobox from the feed's search links.
        self._disable_search()
        for link in [ link for link in data['links'] if link['type'] == 'application/atom+xml' and link.get('rel') == 'search' ]:
            self._add_search_item(link.get('title', 'Search'), link['href'])
    def load_url(self, url):
        """
        loads the provided URL

        This method stores the currently viewed URL in the `_history` and then
        calls `_load_url`.
        """
        if self._current_url is not None:
            self._history.append(self._current_url)
        self._load_url(url)
    def update_preview(self, current, previous):
        """show the HTML preview of the newly selected list item"""
        if current is not None:
            assert isinstance(current, OPDSGeneric), 'something went really wrong'
            self._text_viewer.setHtml(current.html())
    def load_item(self, item):
        """activate the double-clicked item"""
        item.activate(self)
    def go_home(self):
        """do go home! :)"""
        self._history = [] # clear the history
        self._load_url(self._home_url)
    def go_back(self):
        """'back' implementation"""
        if len(self._history) > 0:
            self._load_url(self._history.pop())
        else:
            self._back.setDisabled(True)
    def add_item(self):
        """
        ...
        """
        print 'add'
    def do_search(self):
        """perform the search"""
        qv_link = self._search_what.itemData(self._search_what.currentIndex())
        if qv_link.isValid() and qv_link.canConvert(QtCore.QVariant.String):
            link = str(qv_link.toString())
            # Encode the query for URL substitution into {searchTerms}.
            terms = unicode(self._search.text()).encode('utf-8')
            self._search.selectAll()
            self.load_url(link.replace('{searchTerms}', quote_plus(terms)))
        else:
            pass # should I print something or show a dialog here?
def main(args):
    """Create the Qt application and browser window, then run the event
    loop until the window is closed."""
    application = QtGui.QApplication(args)
    window = OPDSBrowser('http://www.feedbooks.com/catalog.atom')
    window.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    main(sys.argv)
# vim:ts=4:sw=4:et
|
|
from django.test.client import RequestFactory
from template_debug.tests.base import TemplateDebugTestCase
from template_debug.utils import (_flatten, get_variables, get_details,
is_valid_in_template, get_attributes)
try:
from django.utils.six import PY3
except ImportError:
range = xrange
PY3 = False
class FlattenTestCase(TemplateDebugTestCase):
    """TestCase for _flatten"""
    def test_flattens_inner_list(self):
        "Assure arbitrarily nested lists are flattened"
        nested_list = [1, [2, [3, 4, [5], ], 6, 7], 8]
        self.assertEqual(list(_flatten(nested_list)), list(range(1, 9)))
    def test_flattens_tuples(self):
        "Assure nested tuples are also flattened"
        nested_tuples = (1, (2, 3, (4, ), 5), 6)
        self.assertEqual(list(_flatten(nested_tuples)), list(range(1, 7)))
    def test_flattens_sets(self):
        "Assure nested sets are flattened"
        # NOTE(review): relies on set iteration yielding 1, {2, 3}, 4 in
        # this order for small ints -- confirm this is intentional.
        nested_sets = set([1, frozenset([2, 3]), 4])
        self.assertEqual(list(_flatten(nested_sets)), list(range(1, 5)))
    def test_flatten_nested_combinations(self):
        "Assure nested iterables are flattened"
        nested = [1, frozenset([2, 3]), (4, (5,), 6), [7], 8]
        self.assertEqual(list(_flatten(nested)), list(range(1, 9)))
    def test_flatten_generator(self):
        "Assure generators are flattened"
        gens = [1, list(range(2, 4)), (num for num in (4, list(range(5, 7))))]
        self.assertEqual(list(_flatten(gens)), list(range(1, 7)))
    def test_flatten_string_unchanged(self):
        "Assure strings are left intact"
        data = ['abc', ['abc', ['abc']], 'abc']
        self.assertEqual(list(_flatten(data)), ['abc', 'abc', 'abc', 'abc'])
def test_processor(request):
    """Minimal context processor used by the tests; *request* is unused."""
    return {'custom_processor_var': 1}
class GetVariablesTestCase(TemplateDebugTestCase):
    """TestCase for get_variables"""
    def setUp(self):
        # Build a GET request and record the context-processor variables
        # expected to be present in every context.
        factory = RequestFactory()
        self.request = factory.get('/foo/')
        self.known_globals = ['request', 'user']
    def test_global_context_processors(self):
        """
        Assure get_variables contains known global context processors such as
        request and user
        """
        variables = set(get_variables(self._get_context(self.request)))
        self.assertTrue(variables.issuperset(set(self.known_globals)))
    def test_returned_variable(self):
        """
        Assure get_variables returns variables unique to the context
        """
        variables = get_variables(self._get_context(self.request, {}))
        self.assertTrue('a' not in variables)
        variables = get_variables(self._get_context(self.request, {'a': 3}))
        self.assertTrue('a' in variables)
    def test_custom_processors(self):
        # Custom context processors should contribute their variables only
        # when they are actually passed in.
        variables = get_variables(self._get_context(
            self.request, {}, processors=[])
        )
        self.assertTrue('custom_processor_var' not in variables)
        variables = get_variables(self._get_context(
            self.request, {}, processors=[test_processor])
        )
        self.assertTrue('custom_processor_var' in variables)
class TestClass(object):
def _private(self):
return 'private'
def takes_args(self, x):
return x
def alters_data(self):
return 'data was changed'
alters_data.alters_data = True
def valid_method(self):
return True
def has_kwargs(self, foobars=None):
return foobars
class IsValidInTemplateTestCase(TemplateDebugTestCase):
def setUp(self):
request = RequestFactory().get('/foo/')
test_object = TestClass()
context = self._get_context(request, {'test_object': test_object})
self.test_object = context['test_object']
def test_private(self):
is_valid = is_valid_in_template(self.test_object, '_private')
self.assertEqual(is_valid, False,
'is_valid should be false for private methods'
)
def test_takes_args(self):
is_valid = is_valid_in_template(self.test_object, 'takes_args')
self.assertEqual(is_valid, False,
'is_valid should be false methods that require arguments'
)
def test_alters_data(self):
is_valid = is_valid_in_template(self.test_object, 'alters_data')
self.assertEqual(is_valid, False,
'is_valid should be false for the methods with .alters_data = True'
)
def test_valid_method(self):
is_valid = is_valid_in_template(self.test_object, 'valid_method')
self.assertEqual(is_valid, True,
'is_valid should be true for methods that are accessible to templates'
)
def test_has_kwargs(self):
is_valid = is_valid_in_template(self.test_object, 'has_kwargs')
self.assertEqual(is_valid, True,
'is_valid should be true for methods that take kwargs'
)
class GetAttributesTestCase(TemplateDebugTestCase):
def setUp(self):
request = RequestFactory().get('/foo/')
test_object = TestClass()
context = self._get_context(request, {'test_object': test_object})
self.test_object = context['test_object']
def test_valid_list(self):
valid_attributes = set(get_attributes(self.test_object))
self.assertEqual(set(['has_kwargs', 'valid_method']), valid_attributes,
'has_kwargs and valid_method are the only valid routines of TestObject'
)
class GetDetailsTestCase(TemplateDebugTestCase):
def setUp(self):
self.user = self.create_user(username='test', password='test')
self.client.login(username='test', password='test')
def test_invalid_managers_hidden(self):
"""
Assure managers that aren't accessible from model instances are hidden
"""
user = self.get_context()['user']
user_details = get_details(user)
invalid_managers = []
for attr in dir(user):
try:
getattr(user, attr)
except:
invalid_managers.append(attr)
self.assertTrue(all([not manager in user_details.keys()
for manager in invalid_managers]))
def test_set_value_method(self):
"""Assure methods have their value set to 'method'"""
user_details = get_details(self.get_context()['user'])
self.assertEqual(user_details['get_full_name'], 'routine')
def test_set_value_managers(self):
user = self.get_context()['user']
user_details = get_details(user)
managers = []
for key in user_details.keys():
value = getattr(self.user, key, None)
kls = getattr(getattr(value, '__class__', ''), '__name__', '')
if kls in ('ManyRelatedManager', 'RelatedManager', 'EmptyManager'):
managers.append(key)
for key, value in user_details.items():
if key in managers:
self.assertTrue(value in
('ManyRelatedManager', 'RelatedManager', 'EmptyManager',)
)
def test_module_and_class_added(self):
user_details = get_details(self.get_context()['user'])
self.assertEqual(user_details['META_module_name'],
'django.utils.functional')
self.assertEqual(user_details['META_class_name'], 'User')
def test_get_details_c_extensions(self):
"""
Ensures get_details works on objects with callables that are
implemented in C extensions. inspect.getargspec fails with a TypeError
for such callables, and get_details needs to handle this gracefully
N.B. Only Python >=2.7 has bit_length C routine on Booleans so the test
has to be skipped for Python2.6
"""
if hasattr(True, 'bit_length'):
try:
details = get_details(True)
except TypeError:
self.fail('Fails to handle C routines for call to inspect.argspec')
self.assertEqual(details['bit_length'], 'routine')
user_details = get_details(self.get_context()['user'])
self.assertTrue(any((
user_details['META_module_name'], 'django.contrib.auth.models',
user_details['META_module_name'], 'django.utils.functional'
)))
self.assertTrue(any((
user_details['META_class_name'] == 'User',
user_details['META_class_name'] == 'AnonymousUser'
)))
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, line-too-long, unused-argument
"""Caffe2 frontend"""
from __future__ import absolute_import as _abs
import tvm
from nnvm import symbol as _sym
from .common import get_nnvm_op, Renamer, AttrConverter as AttrCvt
from .onnx_caffe2_utils import dimension_picker, dimension_constraint, infer_channels, revert_caffe2_pad
from . import onnx
__all__ = ['from_caffe2']
def _clean_up_pool_args(args):
""" A helper function to clean up common arguments in conv and pooling ops.
"""
assert isinstance(args, dict)
if 'stride_h' in args and 'stride_w' in args:
assert 'stride' not in args and 'strides' not in args
args['strides'] = [args['stride_h'], args['stride_w']]
args.pop('stride_h')
args.pop('stride_w')
elif 'stride' in args:
args['strides'] = [args['stride'], args['stride']]
args.pop('stride')
# rename 'kernel', 'kernels', to 'kernel_shape'
if 'kernel_h' in args and 'kernel_w' in args:
assert 'kernel' not in args and 'kernels' not in args
args['kernel_shape'] = [args['kernel_h'], args['kernel_w']]
args.pop('kernel_h')
args.pop('kernel_w')
elif 'kernel' in args:
args['kernel_shape'] = [args['kernel'], args['kernel']]
args.pop('kernel')
elif 'kernels' in args:
args['kernel_shape'] = args['kernels']
args.pop('kernels')
if 'pad_t' in args and 'pad_l' in args and 'pad_b' in args and 'pad_r' in args:
assert 'pad' not in args and 'pads' not in args
args['pads'] = [
args['pad_t'], args['pad_l'], args['pad_b'], args['pad_r']
]
for pad in ['pad_t', 'pad_l', 'pad_b', 'pad_r']:
args.pop(pad)
elif 'pad' in args:
args['pads'] = [args['pad'], args['pad']]
args.pop('pad')
if 'dilation_h' in args and 'dilation_w' in args:
assert 'dilation' not in args and 'dilations' not in args
args['dilations'] = [args['dilation_h'], args['dilation_w']]
args.pop('dilation_h')
args.pop('dilation_w')
elif 'dilation' in args:
args['dilations'] = [args['dilation'], args['dilation']]
args.pop('dilation')
return args
class Caffe2OpConverter(object):
""" A helper class for holding Caffe2 op converters.
"""
@classmethod
def get_converter(cls):
""" Get converter.
:return: converter, which should be `_impl`.
"""
if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
raise tvm.error.OpNotImplemented(
'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))
_caffe2_internal_args = {
# nnpack args
'algo',
'convolution_transform_strategy',
'float16_compute',
'shared_buffer',
# training args
'init_params',
'cudnn_exhaustive_search',
'exhaustive_search',
# training args
'adj',
'hwgq',
# args that we don't care
'legacy_pad',
}
class Pool(Caffe2OpConverter):
""" A helper class for pool op converters.
"""
name = ''
@classmethod
def _impl(cls, inputs, args, params):
_clean_up_pool_args(args)
if 'global_pooling' in args and args['global_pooling'] == 1:
op_name = dimension_picker('global_' + cls.name)
return get_nnvm_op(op_name(args))(*inputs)
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'strides': 'strides',
},
excludes={
# TVM poolop does not support dilation
'dilations',
},
ignores=_caffe2_internal_args | {'global_pooling', 'order'},
custom_check=dimension_constraint())(inputs, args, params)
class AveragePool(Pool):
name = 'avg_pool'
class MaxPool(Pool):
name = 'max_pool'
class Conv(Caffe2OpConverter):
""" Operator converter for Conv.
"""
@classmethod
def _impl(cls, inputs, args, params):
# get number of channels
channels = infer_channels(inputs[1], params)
args['channels'] = channels
_clean_up_pool_args(args)
return AttrCvt(
op_name=dimension_picker('conv'),
transforms={
'group': ('groups', 1),
'kernel_shape':
'kernel_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'strides':
'strides',
'dilations': ('dilation', (1, 1)),
'order':
('layout', ("NCHW"),
lambda x: x if isinstance(x, str) else x.decode('UTF-8')),
},
excludes={},
ignores=_caffe2_internal_args,
extras={'use_bias': len(inputs) == 3},
custom_check=dimension_constraint())(inputs, args, params)
class Concat(Caffe2OpConverter):
""" Operator converter for Concat.
"""
@classmethod
def _impl(cls, inputs, args, params):
def _get_axis_from_order_str(order):
order = order if isinstance(order, str) else order.decode('UTF-8')
if order == 'NCHW':
return 1
if order == 'NHWC':
return 3
raise tvm.error.OpAttributeInvalid('Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))
return AttrCvt(
op_name='concatenate',
transforms={
'order': ('axis', (1), _get_axis_from_order_str),
},
excludes={
'add_axis',
})(inputs, args, params)
class NormalizePlanarYUV(Caffe2OpConverter):
""" Operator converter for NormalizePlanarYUV.
caffe2 definition: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/norm_planar_yuv_op.cc
"""
@classmethod
def _impl(cls, inputs, args, params):
assert len(inputs) == 3
mean = _sym.expand_dims(inputs[1], axis=2, num_newaxis=2)
std = _sym.expand_dims(inputs[2], axis=2, num_newaxis=2)
return _sym.broadcast_div(_sym.broadcast_sub(inputs[0], mean), std)
class ResizeNearest(Caffe2OpConverter):
""" Operator converter for Upsample (nearest mode).
"""
@classmethod
def _impl(cls, inputs, args, params):
width_scale = args['width_scale'] if 'width_scale' in args else 1
height_scale = args['height_scale'] if 'height_scale' in args else 1
assert width_scale == height_scale
return _sym.upsampling(
inputs[0], scale=int(width_scale), method="NEAREST_NEIGHBOR")
class FC(Caffe2OpConverter):
""" Operator converter for FC.
"""
@classmethod
def _impl(cls, inputs, args, params):
inputs[0] = _sym.flatten(inputs[0])
args['units'] = infer_channels(inputs[1], params)
return AttrCvt(
'dense',
ignores=['axis', 'axis_w'],
extras={'use_bias': len(inputs) == 3},
)(inputs, args, params)
class SpatialBN(Caffe2OpConverter):
""" Operator converter for SpatialBN.
"""
@classmethod
def _impl(cls, inputs, args, params):
return AttrCvt(
op_name='batch_norm',
disables=['momentum'],
ignores=[
'order', 'spatial', 'is_test', 'consumed_inputs', 'num_batches'
])(inputs, args, params)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
# Minimal set of ops for squeezenet and resnet50
def _get_convert_map():
return {
# caffe2/onnx common operators
'Add': onnx.Add.get_converter(opset=1),
'Sum': onnx.Sum.get_converter(opset=1),
'Softmax': onnx.Softmax.get_converter(opset=1),
# nn
'AveragePool': AveragePool.get_converter(),
'MaxPool': MaxPool.get_converter(),
'Conv': Conv.get_converter(),
'Concat': Concat.get_converter(),
'FC': FC.get_converter(),
'SpatialBN': SpatialBN.get_converter(),
'ResizeNearest': ResizeNearest.get_converter(),
'Relu': AttrCvt('relu', {}, ignores=['order']),
'Sigmoid': Renamer('sigmoid'),
'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
# c2 image preprocessing ops
'NormalizePlanarYUV': NormalizePlanarYUV.get_converter(),
}
class Caffe2NetDef(object):
"""A helper class for handling nnvm graph copying from pb2.GraphProto.
Definition: https://github.com/pytorch/pytorch/blob/master/caffe2/proto/caffe2.proto
"""
def __init__(self):
self._nodes = {}
self._params = {}
self._visited_nodes = set()
self._ops = {}
def from_caffe2(self, init_net, predict_net):
"""Construct nnvm nodes from caffe2 graph.
Parameters
----------
workspace : Caffe2 workspace
predict_net : protobuf object
Returns
-------
sym : nnvm.sym.Symbol
The returned nnvm symbol
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
from caffe2.python import workspace
workspace.RunNetOnce(init_net)
# Input
input_name = predict_net.op[0].input[0]
# Params
self._params = {}
used_blobs = set()
for c2_op in predict_net.op:
for i in c2_op.input:
used_blobs.add(i)
for blob in workspace.Blobs():
if blob in used_blobs and blob != input_name:
self._params[blob] = tvm.nd.array(workspace.FetchBlob(blob))
# Variables
self._nodes = {}
for blob in predict_net.external_input:
self._nodes[blob] = _sym.Variable(name=blob)
# Ops
for c2_op in predict_net.op:
for blob in c2_op.output:
self._ops[blob] = c2_op
for c2_op in predict_net.op:
self._process_op(c2_op)
# Outputs
out = []
for blob in predict_net.external_output:
out.append(self._nodes[blob])
if len(out) > 1:
sym = _sym.Group(out)
else:
sym = out[0]
return sym, self._params
def _get_node(self, blob):
"""Get the nnvm Symbol of blob and detect cyclic dependency in the graph."""
if blob in self._nodes:
return self._nodes[blob]
assert blob not in self._visited_nodes, 'Cyclic dependency in the graph (in {})'.format(
blob)
self._visited_nodes.add(blob)
self._process_op(self._ops[blob])
return self._nodes[blob]
def _process_op(self, c2_op):
op_type = c2_op.type
args = self._parse_arg(c2_op.arg)
inputs = [self._get_node(i) for i in c2_op.input]
tvm_op = self._convert_operator(op_type, inputs, args)
# Ignore all outputs except the first one
self._nodes[c2_op.output[0]] = tvm_op[0]
def _parse_arg(self, arg):
"""Convert a list of Argument to a dict, with names as keys."""
args = {}
for a in arg:
for f in ['f', 'i', 's']:
if a.HasField(f):
args[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in args, "Only one type of attr is allowed"
args[a.name] = tuple(getattr(a, f))
for f in ['n']:
if a.HasField(f):
raise NotImplementedError(
"Field {} is not supported in nnvm.".format(f))
for f in ['nets']:
if list(getattr(a, f)):
raise NotImplementedError(
"Field {} is not supported in nnvm.".format(f))
if a.name not in args:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return args
def _convert_operator(self,
op_type,
inputs,
args,
identity_list=None,
convert_map=None):
"""Convert from Caffe2 operator to nnvm operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_type : str
Operator name, such as Convolution, FullyConnected
inputs : list of nnvm.Symbol
List of input symbols.
args : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to nnvm, callable are functions which
take args and return (new_op_type, new_args)
Returns
-------
sym : nnvm.Symbol
Converted nnvm Symbol
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _get_convert_map()
if op_type in identity_list:
sym = get_nnvm_op(op_type)(*inputs, **args)
elif op_type in convert_map:
# Add a sanitizing step to convert all byte strings in args to strings
sym = convert_map[op_type](inputs, args, self._params)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(op_type))
return sym
def from_caffe2(init_net, predict_net):
"""Load caffe2 graph which contains init_net and predict_net into nnvm graph.
Parameters
----------
init_net : protobuf object
Caffe2 NetDef containing the weights
predict_net : protobuf object
Caffe2 NetDef containing the graph
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.ndarray
Dict of converted parameters stored in tvm.ndarray format
"""
caffe2 = Caffe2NetDef()
return caffe2.from_caffe2(init_net, predict_net)
|
|
#
# File: sites.py
# Author: Ivan Gonzalez
#
""" A module for single sites.
"""
import numpy as np
from dmrg_exceptions import DMRGException
from numpy import sqrt
class Site(object):
"""A general single site
You use this class to create a single site. The site comes empty (i.e.
with no operators included), but for th identity operator. You should
add operators you need to make you site up.
Parameters
----------
dim : an int
Size of the Hilbert space. The dimension must be at least 1. A site of
dim = 1 represents the vaccum (or something strange like that, it's
used for demo purposes mostly.)
operators : a dictionary of string and numpy array (with ndim = 2).
Operators for the site.
Examples
--------
>>> from dmrg_solution.core.sites import Site
>>> brand_new_site = Site(2)
>>> # the Hilbert space has dimension 2
>>> print brand_new_site.dim
2
>>> # the only operator is the identity
>>> print brand_new_site.operators
{'id': array([[ 1., 0.],
[ 0., 1.]])}
"""
def __init__(self, dim):
"""Creates an empty site of dimension dim.
Raises
------
DMRGException
if `dim` < 1.
Notes
-----
Postcond : The identity operator (ones in the diagonal, zeros elsewhere)
is added to the `self.operators` dictionary.
"""
if dim < 1:
raise DMRGException("Site dim must be at least 1")
super(Site, self).__init__()
self.dim = dim
self.operators = { "id" : np.eye(self.dim, self.dim) }
def add_operator(self, operator_name):
"""Adds an operator to the site.
Parameters
----------
operator_name : string
The operator name.
Raises
------
DMRGException
if `operator_name` is already in the dict.
Notes
-----
Postcond:
- `self.operators` has one item more, and
- the newly created operator is a (`self.dim`, `self.dim`)
matrix of full of zeros.
Examples
--------
>>> from dmrg_solution.core.sites import Site
>>> new_site = Site(2)
>>> print new_site.operators.keys()
['id']
>>> new_site.add_operator('s_z')
>>> print new_site.operators.keys()
['s_z', 'id']
>>> # note that the newly created op has all zeros
>>> print new_site.operators['s_z']
[[ 0. 0.]
[ 0. 0.]]
"""
if str(operator_name) in self.operators.keys():
raise DMRGException("Operator name exists already")
else:
self.operators[str(operator_name)] = np.zeros((self.dim, self.dim))
class PauliSite(Site):
"""A site for spin 1/2 models.
You use this site for models where the single sites are spin
one-half sites. The Hilbert space is ordered such as the first state
is the spin dn, and the second state is the spin up. Therefore e.g.
you have the following relation between operator matrix elements:
.. math::
\langle \dnarrow \left| A \\right|\uparrow \\rangle = A_{0,1}
Notes
-----
Postcond : The site has already built-in the spin operators for s_z, s_p, s_m.
Examples
--------
>>> from dmrg_solution.core.sites import PauliSite
>>> pauli_site = PauliSite()
>>> # check all it's what you expected
>>> print pauli_site.dim
2
>>> print pauli_site.operators.keys()
['s_p', 's_z', 's_m', 'id']
>>> print pauli_site.operators['s_z']
[[-1. 0.]
[ 0. 1.]]
>>> print pauli_site.operators['s_x']
[[ 0. 1.]
[ 1. 0.]]
"""
def __init__(self):
"""Creates the spin one-half site with Pauli matrices.
Notes
-----
Postcond : the dimension is set to 2, and the Pauli matrices
are added as operators.
"""
super(PauliSite, self).__init__(2)
# add the operators
self.add_operator("s_z")
self.add_operator("s_x")
# for clarity
s_z = self.operators["s_z"]
s_x = self.operators["s_x"]
# set the matrix elements different from zero to the right values
s_z[0, 0] = -1.0
s_z[1, 1] = 1.0
s_x[0, 1] = 1.0
s_x[1, 0] = 1.0
class SpinOneHalfSite(Site):
"""A site for spin 1/2 models.
You use this site for models where the single sites are spin
one-half sites. The Hilbert space is ordered such as the first state
is the spin dn, and the second state is the spin up. Therefore e.g.
you have the following relation between operator matrix elements:
.. math::
\langle \dnarrow \left| A \\right|\uparrow \\rangle = A_{0,1}
Notes
-----
Postcond : The site has already built-in the spin operators for s_z, s_p, s_m.
Examples
--------
>>> from dmrg_solution.core.sites import SpinOneHalfSite
>>> spin_one_half_site = SpinOneHalfSite()
>>> # check all it's what you expected
>>> print spin_one_half_site.dim
2
>>> print spin_one_half_site.operators.keys()
['s_p', 's_z', 's_m', 'id']
>>> print spin_one_half_site.operators['s_z']
[[-0.5 0. ]
[ 0. 0.5]]
>>> print spin_one_half_site.operators['s_p']
[[ 0. 0.]
[ 1. 0.]]
>>> print spin_one_half_site.operators['s_m']
[[ 0. 1.]
[ 0. 0.]]
"""
def __init__(self):
"""Creates the spin one-half site.
Notes
-----
Postcond : the dimension is set to 2, and the Pauli matrices
are added as operators.
"""
super(SpinOneHalfSite, self).__init__(2)
# add the operators
self.add_operator("s_z")
self.add_operator("s_p")
self.add_operator("s_m")
self.add_operator("s_x")
# for clarity
s_z = self.operators["s_z"]
s_p = self.operators["s_p"]
s_m = self.operators["s_m"]
s_x = self.operators["s_x"]
# set the matrix elements different from zero to the right values
s_z[0, 0] = -0.5
s_z[1, 1] = 0.5
s_p[1, 0] = 1.0
s_m[0, 1] = 1.0
s_x[0, 1] = 0.5
s_x[1, 0] = 0.5
class ElectronicSite(Site):
"""A site for electronic models
You use this site for models where the single sites are electron
sites. The Hilbert space is ordered such as:
- the first state, labelled 0, is the empty site,
- the second, labelled 1, is spin dn,
- the third, labelled 2, is spin up, and
- the fourth, labelled 3, is double occupancy.
Notes
-----
Postcond: The site has already built-in the spin operators for:
- c_up : destroys an spin up electron,
- c_up_dag, creates an spin up electron,
- c_dn, destroys an spin down electron,
- c_dn_dag, creates an spin down electron,
- s_z, component z of spin,
- s_p, raises the component z of spin,
- s_m, lowers the component z of spin,
- n_up, number of electrons with spin up,
- n_dn, number of electrons with spin down,
- n, number of electrons, i.e. n_up+n_dn, and
- u, number of double occupancies, i.e. n_up*n_dn.
Examples
--------
>>> from dmrg_solution.core.sites import ElectronicSite
>>> hubbard_site = ElectronicSite()
>>> # check all it's what you expected
>>> print hubbard_site.dim
4
>>> print hubbard_site.operators.keys() # doctest: +ELLIPSIS
['s_p', ...]
>>> print hubbard_site.operators['n_dn']
[[ 0. 0. 0. 0.]
[ 0. 1. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 1.]]
>>> print hubbard_site.operators['n_up']
[[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 1. 0.]
[ 0. 0. 0. 1.]]
>>> print hubbard_site.operators['u']
[[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 1.]]
"""
def __init__(self):
super(ElectronicSite, self).__init__(8)
# add the operators
self.add_operator("rp_up")
self.add_operator("rp_down")
self.add_operator("rm_up")
self.add_operator("rm_down")
self.add_operator("rprm_up_plus")
self.add_operator("rprm_down_plus")
self.add_operator("rprm_up_minus")
self.add_operator("rprm_down_minus")
self.add_operator("rprm_up_plus_dag")
self.add_operator("rprm_down_plus_dag")
self.add_operator("rprm_up_minus_dag")
self.add_operator("rprm_down_minus_dag")
self.add_operator("dimer")
self.add_operator("n_up")
self.add_operator("n_down")
self.add_operator("n")
self.add_operator("u")
# for clarity
rm_up = self.operators["rm_up"]
rm_down = self.operators["rm_down"]
rp_up = self.operators["rp_up"]
rp_down = self.operators["rp_down"]
rprm_up_plus = self.operators["rprm_up_plus"]
rprm_down_plus = self.operators["rprm_down_plus"]
rprm_up_minus = self.operators["rprm_up_minus"]
rprm_down_minus = self.operators["rprm_down_minus"]
rprm_up_plus_dag = self.operators["rprm_up_plus_dag"]
rprm_down_plus_dag = self.operators["rprm_down_plus_dag"]
rprm_up_minus_dag = self.operators["rprm_up_minus_dag"]
rprm_down_minus_dag = self.operators["rprm_down_minus_dag"]
dimer = self.operators["dimer"]
n_up = self.operators["n_up"]
n_down = self.operators["n_down"]
n = self.operators["n"]
u = self.operators["u"]
# set the matrix elements different from zero to the right values
# TODO: missing s_p, s_m
rp_up[4,0] = 1./sqrt(2.)
rp_up[5,1] = -1./sqrt(2.)
rp_up[4,2] = 1./sqrt(2.)
rp_up[5,3] = -1./sqrt(2.)
rp_up[0,4] = 1./sqrt(2.)
rp_up[2,4] = 1./sqrt(2.)
rp_up[1,5] = 1./sqrt(2.)
rp_up[3,5] = 1./sqrt(2.)
rp_up[4,6] = 1.
rp_up[4,7] = 1.
rm_up[5,0] = -1./sqrt(2.)
rm_up[4,1] = 1./sqrt(2.)
rm_up[5,2] = 1./sqrt(2.)
rm_up[4,3] = -1./sqrt(2.)
rm_up[1,4] = -1./sqrt(2.)
rm_up[3,4] = 1./sqrt(2.)
rm_up[0,5] = -1./sqrt(2.)
rm_up[2,5] = 1./sqrt(2.)
rm_up[5,6] = 1.
rm_up[5,7] = 1.
rp_down[4,0] = 1./sqrt(2.)
rp_down[5,1] = 1./sqrt(2.)
rp_down[4,2] = 1./sqrt(2.)
rp_down[5,3] = -1./sqrt(2.)
rp_down[6,4] = 1.
rp_down[7,4] = 1.
rm_down[5,0] = -1./sqrt(2.)
rm_down[4,1] = -1./sqrt(2.)
rm_down[5,2] = 1./sqrt(2.)
rm_down[4,3] = -1./sqrt(2.)
rm_down[6,5] = -1.
rm_down[7,5] = -1.
# w, v = np.linalg.eigh(dimer)
# for i in range(len(w)):
# print w[i], v[:,i]
# rprm_up_plus =rp_up + rm_up
# rprm_up_minus = rp_up - rm_up
# rprm_down_plus = rp_down + rm_down
# rprm_down_minus = rp_down - rm_down
#
# rprm_up_plus_dag = rp_up.T + rm_up.T
# rprm_up_minus_dag = rp_up.T - rm_up.T
# rprm_down_plus_dag = rp_down.T + rm_down.T
# rprm_down_minus_dag = rp_down.T - rm_down.T
#
# n_even_up = 0.5*rprm_up_plus_dag.dot(rprm_up_plus)
# n_even_down = 0.5*rprm_down_plus_dag.dot(rprm_down_plus)
# n_odd_up = 0.5*rprm_up_minus_dag.dot(rprm_up_minus)
# n_odd_down = 0.5*rprm_down_minus_dag.dot(rprm_down_minus)
#
# n_up = n_even_up + n_odd_up
# n_down = n_even_down + n_odd_down
# n = n_up+n_down
#
# dimer = rp_up.T.dot(rp_up) + rp_down.T.dot(rp_down) - rm_up.T.dot(rm_up) - rm_down.T.dot(rm_down)
## u= (n_even_up - 0.5 *np.eye(8,8)).dot(n_even_down - 0.5 *np.eye(8,8))
## u= (n_odd_up - 0.5 *np.eye(8,8)).dot(n_odd_down - 0.5 *np.eye(8,8))
# u = np.add((n_even_up - 0.5 *np.eye(8,8)).dot(n_even_down - 0.5 *np.eye(8,8)), (n_odd_up - 0.5 *np.eye(8,8)).dot(n_odd_down - 0.5 *np.eye(8,8)))
#
# names = self.operators.keys()
# names.remove('id')
# names.remove('rp_up')
# names.remove('rp_down')
# names.remove('rm_up')
# names.remove('rm_down')
# for name in names:
# for i in range(len(eval(name))):
# for ii in range(len(eval(name))):
# if eval(name)[i,ii] != 0.0:
# print '\t' + name + '[%s,%s] = %s' %( i, ii, eval(name)[i,ii])
# print '\n'
u[0,0] = -0.5
u[1,1] = -0.5
u[2,2] = 0.5
u[2,6] = -0.707106781187
u[2,7] = -0.707106781187
u[3,3] = 0.5
u[4,4] = 0.5
u[5,5] = 0.5
u[6,2] = 0.707106781187
u[6,6] = -0.5
u[6,7] = -1.0
u[7,2] = 0.707106781187
u[7,6] = -1.0
u[7,7] = -0.5
rprm_down_minus[4,0] = 0.707106781187
rprm_down_minus[4,1] = 0.707106781187
rprm_down_minus[4,2] = 0.707106781187
rprm_down_minus[4,3] = 0.707106781187
rprm_down_minus[5,0] = 0.707106781187
rprm_down_minus[5,1] = 0.707106781187
rprm_down_minus[5,2] = -0.707106781187
rprm_down_minus[5,3] = -0.707106781187
rprm_down_minus[6,4] = 1.0
rprm_down_minus[6,5] = 1.0
rprm_down_minus[7,4] = 1.0
rprm_down_minus[7,5] = 1.0
rprm_up_plus[0,4] = 0.707106781187
rprm_up_plus[0,5] = -0.707106781187
rprm_up_plus[1,4] = -0.707106781187
rprm_up_plus[1,5] = 0.707106781187
rprm_up_plus[2,4] = 0.707106781187
rprm_up_plus[2,5] = 0.707106781187
rprm_up_plus[3,4] = 0.707106781187
rprm_up_plus[3,5] = 0.707106781187
rprm_up_plus[4,0] = 0.707106781187
rprm_up_plus[4,1] = 0.707106781187
rprm_up_plus[4,2] = 0.707106781187
rprm_up_plus[4,3] = -0.707106781187
rprm_up_plus[4,6] = 1.0
rprm_up_plus[4,7] = 1.0
rprm_up_plus[5,0] = -0.707106781187
rprm_up_plus[5,1] = -0.707106781187
rprm_up_plus[5,2] = 0.707106781187
rprm_up_plus[5,3] = -0.707106781187
rprm_up_plus[5,6] = 1.0
rprm_up_plus[5,7] = 1.0
n_up[0,0] = 1.0
n_up[1,1] = 1.0
n_up[2,2] = 1.0
n_up[2,6] = 1.41421356237
n_up[2,7] = 1.41421356237
n_up[3,3] = 1.0
n_up[4,4] = 2.0
n_up[5,5] = 2.0
n_up[6,2] = 1.41421356237
n_up[6,6] = 2.0
n_up[6,7] = 2.0
n_up[7,2] = 1.41421356237
n_up[7,6] = 2.0
n_up[7,7] = 2.0
rprm_up_plus_dag[0,4] = 0.707106781187
rprm_up_plus_dag[0,5] = -0.707106781187
rprm_up_plus_dag[1,4] = 0.707106781187
rprm_up_plus_dag[1,5] = -0.707106781187
rprm_up_plus_dag[2,4] = 0.707106781187
rprm_up_plus_dag[2,5] = 0.707106781187
rprm_up_plus_dag[3,4] = -0.707106781187
rprm_up_plus_dag[3,5] = -0.707106781187
rprm_up_plus_dag[4,0] = 0.707106781187
rprm_up_plus_dag[4,1] = -0.707106781187
rprm_up_plus_dag[4,2] = 0.707106781187
rprm_up_plus_dag[4,3] = 0.707106781187
rprm_up_plus_dag[5,0] = -0.707106781187
rprm_up_plus_dag[5,1] = 0.707106781187
rprm_up_plus_dag[5,2] = 0.707106781187
rprm_up_plus_dag[5,3] = 0.707106781187
rprm_up_plus_dag[6,4] = 1.0
rprm_up_plus_dag[6,5] = 1.0
rprm_up_plus_dag[7,4] = 1.0
rprm_up_plus_dag[7,5] = 1.0
rprm_up_minus[0,4] = 0.707106781187
rprm_up_minus[0,5] = 0.707106781187
rprm_up_minus[1,4] = 0.707106781187
rprm_up_minus[1,5] = 0.707106781187
rprm_up_minus[2,4] = 0.707106781187
rprm_up_minus[2,5] = -0.707106781187
rprm_up_minus[3,4] = -0.707106781187
rprm_up_minus[3,5] = 0.707106781187
rprm_up_minus[4,0] = 0.707106781187
rprm_up_minus[4,1] = -0.707106781187
rprm_up_minus[4,2] = 0.707106781187
rprm_up_minus[4,3] = 0.707106781187
rprm_up_minus[4,6] = 1.0
rprm_up_minus[4,7] = 1.0
rprm_up_minus[5,0] = 0.707106781187
rprm_up_minus[5,1] = -0.707106781187
rprm_up_minus[5,2] = -0.707106781187
rprm_up_minus[5,3] = -0.707106781187
rprm_up_minus[5,6] = -1.0
rprm_up_minus[5,7] = -1.0
rprm_down_plus_dag[0,4] = 0.707106781187
rprm_down_plus_dag[0,5] = -0.707106781187
rprm_down_plus_dag[1,4] = -0.707106781187
rprm_down_plus_dag[1,5] = 0.707106781187
rprm_down_plus_dag[2,4] = 0.707106781187
rprm_down_plus_dag[2,5] = 0.707106781187
rprm_down_plus_dag[3,4] = -0.707106781187
rprm_down_plus_dag[3,5] = -0.707106781187
rprm_down_plus_dag[4,6] = 1.0
rprm_down_plus_dag[4,7] = 1.0
rprm_down_plus_dag[5,6] = -1.0
rprm_down_plus_dag[5,7] = -1.0
rprm_down_plus[4,0] = 0.707106781187
rprm_down_plus[4,1] = -0.707106781187
rprm_down_plus[4,2] = 0.707106781187
rprm_down_plus[4,3] = -0.707106781187
rprm_down_plus[5,0] = -0.707106781187
rprm_down_plus[5,1] = 0.707106781187
rprm_down_plus[5,2] = 0.707106781187
rprm_down_plus[5,3] = -0.707106781187
rprm_down_plus[6,4] = 1.0
rprm_down_plus[6,5] = -1.0
rprm_down_plus[7,4] = 1.0
rprm_down_plus[7,5] = -1.0
rprm_up_minus_dag[0,4] = 0.707106781187
rprm_up_minus_dag[0,5] = 0.707106781187
rprm_up_minus_dag[1,4] = -0.707106781187
rprm_up_minus_dag[1,5] = -0.707106781187
rprm_up_minus_dag[2,4] = 0.707106781187
rprm_up_minus_dag[2,5] = -0.707106781187
rprm_up_minus_dag[3,4] = 0.707106781187
rprm_up_minus_dag[3,5] = -0.707106781187
rprm_up_minus_dag[4,0] = 0.707106781187
rprm_up_minus_dag[4,1] = 0.707106781187
rprm_up_minus_dag[4,2] = 0.707106781187
rprm_up_minus_dag[4,3] = -0.707106781187
rprm_up_minus_dag[5,0] = 0.707106781187
rprm_up_minus_dag[5,1] = 0.707106781187
rprm_up_minus_dag[5,2] = -0.707106781187
rprm_up_minus_dag[5,3] = 0.707106781187
rprm_up_minus_dag[6,4] = 1.0
rprm_up_minus_dag[6,5] = -1.0
rprm_up_minus_dag[7,4] = 1.0
rprm_up_minus_dag[7,5] = -1.0
n[0,0] = 2.0
n[1,1] = 2.0
n[2,2] = 2.0
n[2,6] = 1.41421356237
n[2,7] = 1.41421356237
n[3,3] = 2.0
n[4,4] = 4.0
n[5,5] = 4.0
n[6,2] = 1.41421356237
n[6,6] = 2.0
n[6,7] = 2.0
n[7,2] = 1.41421356237
n[7,6] = 2.0
n[7,7] = 2.0
n_down[0,0] = 1.0
n_down[1,1] = 1.0
n_down[2,2] = 1.0
n_down[3,3] = 1.0
n_down[4,4] = 2.0
n_down[5,5] = 2.0
dimer[0,2] = 2.0
dimer[0,6] = 1.41421356237
dimer[0,7] = 1.41421356237
dimer[2,0] = 2.0
dimer[4,4] = 2.0
dimer[5,5] = -2.0
dimer[6,0] = 1.41421356237
dimer[7,0] = 1.41421356237
rprm_down_minus_dag[0,4] = 0.707106781187
rprm_down_minus_dag[0,5] = 0.707106781187
rprm_down_minus_dag[1,4] = 0.707106781187
rprm_down_minus_dag[1,5] = 0.707106781187
rprm_down_minus_dag[2,4] = 0.707106781187
rprm_down_minus_dag[2,5] = -0.707106781187
rprm_down_minus_dag[3,4] = 0.707106781187
rprm_down_minus_dag[3,5] = -0.707106781187
rprm_down_minus_dag[4,6] = 1.0
rprm_down_minus_dag[4,7] = 1.0
rprm_down_minus_dag[5,6] = 1.0
rprm_down_minus_dag[5,7] = 1.0
|
|
#!/usr/bin/env python
# normalDate.py - version 1.0 - 20000717
#hacked by Robin Becker 10/Apr/2001
#major changes include
# using Types instead of type(0) etc
# BusinessDate class
# __radd__, __rsub__ methods
# formatMS stuff
# derived from an original version created
# by Jeff Bauer of Rubicon Research and used
# with his kind permission
__version__=''' $Id: normalDate.py,v 1.1 2006/05/26 19:19:44 thomas Exp $ '''
# Scalar (day-count) bounds, relative to this module's Jan 1, 1900 baseline.
_bigBangScalar = -4345732 # based on (-9999, 1, 1) BC/BCE minimum
_bigCrunchScalar = 2958463 # based on (9999,12,31) AD/CE maximum
# Days per month; index 0 is January.
_daysInMonthNormal = [31,28,31,30,31,30,31,31,30,31,30,31]
_daysInMonthLeapYear = [31,29,31,30,31,30,31,31,30,31,30,31]
# Weekday 0 is Monday (see dayOfWeek below).
_dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                  'Friday', 'Saturday', 'Sunday']
_monthName = ['January', 'February', 'March', 'April', 'May', 'June',
              'July','August','September','October','November','December']
# Python 2 type objects; used for explicit type tests throughout the module.
from types import IntType, StringType, ListType, TupleType
import string, re, time
# time.struct_time exists on Python >= 2.2; accept it as a date sequence too.
if hasattr(time,'struct_time'):
    _DateSeqTypes = (ListType,TupleType,time.struct_time)
else:
    _DateSeqTypes = (ListType,TupleType)
# Pattern for formatMS tokens like {yyyy}, {mm}, {ddd} (case-insensitive).
_fmtPat = re.compile('\\{(m{1,5}|yyyy|yy|d{1,4})\\}',re.MULTILINE|re.IGNORECASE)
# ISO yyyy-mm-dd (or yy-mm-dd) date strings.
_iso_re = re.compile(r'(\d\d\d\d|\d\d)-(\d\d)-(\d\d)')
def getStdMonthNames():
    'Full month names, lower-cased, January first (a list under Python 2).'
    return [string.lower(name) for name in _monthName]
def getStdShortMonthNames():
    'Three-letter month abbreviations, lower-cased.'
    return [name[:3] for name in getStdMonthNames()]
def getStdDayNames():
    'Full weekday names, lower-cased, Monday first (a list under Python 2).'
    return [string.lower(name) for name in _dayOfWeekName]
def getStdShortDayNames():
    'Three-letter weekday abbreviations, lower-cased.'
    return [name[:3] for name in getStdDayNames()]
def isLeapYear(year):
    """determine if specified year is leap year, returns Python boolean"""
    if year < 1600:
        # Julian rule before the Gregorian reform: every 4th year is leap.
        return 0 if year % 4 else 1
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    if year % 4:
        return 0
    if year % 100:
        return 1
    if year % 400:
        return 0
    return 1
class NormalDateException(Exception):
    """Exception class for NormalDate; raised for invalid dates,
    out-of-range scalars and bad argument types."""
    pass
class NormalDate:
    """
    NormalDate is a specialized class to handle dates without
    all the excess baggage (time zones, daylight savings, leap
    seconds, etc.) of other date structures. The minimalist
    strategy greatly simplifies its implementation and use.
    Internally, NormalDate is stored as an integer with values
    in a discontinuous range of -99990101 to 99991231. The
    integer value is used principally for storage and to simplify
    the user interface. Internal calculations are performed by
    a scalar based on Jan 1, 1900.
    Valid NormalDate ranges include (-9999,1,1) B.C.E. through
    (9999,12,31) C.E./A.D.
    1.0 - No changes, except the version number. After 3 years of use
    by various parties I think we can consider it stable.
    0.8 - added Prof. Stephen Walton's suggestion for a range method
    - module author resisted the temptation to use lambda <0.5 wink>
    0.7 - added Dan Winkler's suggestions for __add__, __sub__ methods
    0.6 - modifications suggested by Kevin Digweed to fix:
    - dayOfWeek, dayOfWeekAbbrev, clone methods
    - permit NormalDate to be a better behaved superclass
    0.5 - minor tweaking
    0.4 - added methods __cmp__, __hash__
    - added Epoch variable, scoped to the module
    - added setDay, setMonth, setYear methods
    0.3 - minor touch-ups
    0.2 - fixed bug for certain B.C.E leap years
    - added Jim Fulton's suggestions for short alias class name =ND
    and __getstate__, __setstate__ methods
    Special thanks: Roedy Green
    """
    def __init__(self, normalDate=None):
        """
        Accept 1 of 4 values to initialize a NormalDate:
        1. None - creates a NormalDate for the current day
        2. integer in yyyymmdd format
        3. string in yyyymmdd format
        4. tuple in (yyyy, mm, dd) - localtime/gmtime can also be used
        """
        if normalDate is None:
            self.setNormalDate(time.localtime(time.time()))
        else:
            self.setNormalDate(normalDate)
    def add(self, days):
        """add days to date; use negative integers to subtract"""
        if not type(days) is IntType:
            raise NormalDateException( \
                'add method parameter must be integer type')
        self.normalize(self.scalar() + days)
    def __add__(self, days):
        """add integer to normalDate and return a new, calculated value"""
        if not type(days) is IntType:
            raise NormalDateException( \
                '__add__ parameter must be integer type')
        cloned = self.clone()
        cloned.add(days)
        return cloned
    def __radd__(self,days):
        '''for completeness'''
        return self.__add__(days)
    def clone(self):
        """return a cloned instance of this normalDate"""
        # self.__class__ (not NormalDate) so subclasses clone as themselves.
        return self.__class__(self.normalDate)
    def __cmp__(self, target):
        # Python 2 rich-comparison fallback; any non-NormalDate compares
        # greater than nothing (returns 1).
        if target is None:
            return 1
        elif not hasattr(target, 'normalDate'):
            return 1
        else:
            return cmp(self.normalDate, target.normalDate)
    def day(self):
        """return the day as integer 1-31"""
        # Slice from the right so negative (B.C.E.) dates work too.
        return int(repr(self.normalDate)[-2:])
    def dayOfWeek(self):
        """return integer representing day of week, Mon=0, Tue=1, etc."""
        return apply(dayOfWeek, self.toTuple())
    def dayOfWeekAbbrev(self):
        """return day of week abbreviation for current date: Mon, Tue, etc."""
        return _dayOfWeekName[self.dayOfWeek()][:3]
    def dayOfWeekName(self):
        """return day of week name for current date: Monday, Tuesday, etc."""
        return _dayOfWeekName[self.dayOfWeek()]
    def dayOfYear(self):
        """day of year"""
        if self.isLeapYear():
            daysByMonth = _daysInMonthLeapYear
        else:
            daysByMonth = _daysInMonthNormal
        priorMonthDays = 0
        for m in xrange(self.month() - 1):
            priorMonthDays = priorMonthDays + daysByMonth[m]
        return self.day() + priorMonthDays
    def daysBetweenDates(self, normalDate):
        """
        return value may be negative, since calculation is
        self.scalar() - arg
        """
        if type(normalDate) is _NDType:
            return self.scalar() - normalDate.scalar()
        else:
            return self.scalar() - NormalDate(normalDate).scalar()
    def equals(self, target):
        # NOTE(review): the inner 'target is None' branch is unreachable --
        # None can never be of _NDType.
        if type(target) is _NDType:
            if target is None:
                return self.normalDate is None
            else:
                return self.normalDate == target.normalDate
        else:
            return 0
    def endOfMonth(self):
        """returns (cloned) last day of month"""
        return self.__class__(self.__repr__()[-8:-2]+str(self.lastDayOfMonth()))
    def firstDayOfMonth(self):
        """returns (cloned) first day of month"""
        return self.__class__(self.__repr__()[-8:-2]+"01")
    def formatUS(self):
        """return date as string in common US format: MM/DD/YY"""
        d = self.__repr__()
        return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-6:-4])
    def formatUSCentury(self):
        """return date as string in 4-digit year US format: MM/DD/YYYY"""
        d = self.__repr__()
        return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-8:-4])
    # _fmt* helpers are dispatched by name from formatMS(); one per token.
    def _fmtM(self):
        return str(self.month())
    def _fmtMM(self):
        return '%02d' % self.month()
    def _fmtMMM(self):
        return self.monthAbbrev()
    def _fmtMMMM(self):
        return self.monthName()
    def _fmtMMMMM(self):
        return self.monthName()[0]
    def _fmtD(self):
        return str(self.day())
    def _fmtDD(self):
        return '%02d' % self.day()
    def _fmtDDD(self):
        return self.dayOfWeekAbbrev()
    def _fmtDDDD(self):
        return self.dayOfWeekName()
    def _fmtYY(self):
        return '%02d' % (self.year()%100)
    def _fmtYYYY(self):
        return str(self.year())
    def formatMS(self,fmt):
        '''format like MS date using the notation
        {YY} --> 2 digit year
        {YYYY} --> 4 digit year
        {M} --> month as digit
        {MM} --> 2 digit month
        {MMM} --> abbreviated month name
        {MMMM} --> monthname
        {MMMMM} --> first character of monthname
        {D} --> day of month as digit
        {DD} --> 2 digit day of month
        {DDD} --> abrreviated weekday name
        {DDDD} --> weekday name
        '''
        r = fmt[:]
        f = 0
        # Repeatedly find the next {token}, replace it via the matching
        # _fmtXX method, and resume the search just past the replacement.
        while 1:
            m = _fmtPat.search(r,f)
            if m:
                y = getattr(self,'_fmt'+string.upper(m.group()[1:-1]))()
                i, j = m.span()
                r = (r[0:i] + y) + r[j:]
                f = i + len(y)
            else:
                return r
    def __getstate__(self):
        """minimize persistent storage requirements"""
        return self.normalDate
    def __hash__(self):
        return hash(self.normalDate)
    def __int__(self):
        return self.normalDate
    def isLeapYear(self):
        """
        determine if specified year is leap year, returning true (1) or
        false (0)
        """
        return isLeapYear(self.year())
    def _isValidNormalDate(self, normalDate):
        """checks for date validity in [-]yyyymmdd format"""
        if type(normalDate) is not IntType:
            return 0
        if len(repr(normalDate)) > 9:
            return 0
        # Zero-pad to 8 digits (9 with sign) so the fixed slices below work.
        if normalDate < 0:
            dateStr = "%09d" % normalDate
        else:
            dateStr = "%08d" % normalDate
        if len(dateStr) < 8:
            return 0
        elif len(dateStr) == 9:
            if (dateStr[0] != '-' and dateStr[0] != '+'):
                return 0
        year = int(dateStr[:-4])
        if year < -9999 or year > 9999 or year == 0:
            return 0 # note: zero (0) is not a valid year
        month = int(dateStr[-4:-2])
        if month < 1 or month > 12:
            return 0
        if isLeapYear(year):
            maxDay = _daysInMonthLeapYear[month - 1]
        else:
            maxDay = _daysInMonthNormal[month - 1]
        day = int(dateStr[-2:])
        if day < 1 or day > maxDay:
            return 0
        if year == 1582 and month == 10 and day > 4 and day < 15:
            return 0 # special case of 10 days dropped: Oct 5-14, 1582
        return 1
    def lastDayOfMonth(self):
        """returns last day of the month as integer 28-31"""
        if self.isLeapYear():
            return _daysInMonthLeapYear[self.month() - 1]
        else:
            return _daysInMonthNormal[self.month() - 1]
    def localeFormat(self):
        """override this method to use your preferred locale format"""
        return self.formatUS()
    def month(self):
        """returns month as integer 1-12"""
        return int(repr(self.normalDate)[-4:-2])
    def monthAbbrev(self):
        """returns month as a 3-character abbreviation, i.e. Jan, Feb, etc."""
        return _monthName[self.month() - 1][:3]
    def monthName(self):
        """returns month name, i.e. January, February, etc."""
        return _monthName[self.month() - 1]
    def normalize(self, scalar):
        """convert scalar to normalDate"""
        if scalar < _bigBangScalar:
            msg = "normalize(%d): scalar below minimum" % \
                  _bigBangScalar
            raise NormalDateException(msg)
        if scalar > _bigCrunchScalar:
            msg = "normalize(%d): scalar exceeds maximum" % \
                  _bigCrunchScalar
            raise NormalDateException(msg)
        from math import floor
        # Estimate the year from the scalar, using different offsets for the
        # Gregorian era, early C.E. and B.C.E. ranges...
        if scalar >= -115860:
            year = 1600 + int(floor((scalar + 109573) / 365.2425))
        elif scalar >= -693597:
            year = 4 + int(floor((scalar + 692502) / 365.2425))
        else:
            year = -4 + int(floor((scalar + 695058) / 365.2425))
        days = scalar - firstDayOfYear(year) + 1
        # ...then correct the estimate by at most one year in either direction.
        if days <= 0:
            year = year - 1
            days = scalar - firstDayOfYear(year) + 1
        daysInYear = 365
        if isLeapYear(year):
            daysInYear = daysInYear + 1
        if days > daysInYear:
            year = year + 1
            days = scalar - firstDayOfYear(year) + 1
        # add 10 days if between Oct 15, 1582 and Dec 31, 1582
        if (scalar >= -115860 and scalar <= -115783):
            days = days + 10
        if isLeapYear(year):
            daysByMonth = _daysInMonthLeapYear
        else:
            daysByMonth = _daysInMonthNormal
        dc = 0; month = 12
        for m in xrange(len(daysByMonth)):
            dc = dc + daysByMonth[m]
            if dc >= days:
                month = m + 1
                break
        # add up the days in prior months
        priorMonthDays = 0
        for m in xrange(month - 1):
            priorMonthDays = priorMonthDays + daysByMonth[m]
        day = days - priorMonthDays
        self.setNormalDate((year, month, day))
    def range(self, days):
        """Return a range of normalDates as a list. Parameter
        may be an int or normalDate."""
        if type(days) is not IntType:
            days = days - self # if not int, assume arg is normalDate type
        r = []
        # 'range' here is the builtin, not this method (no recursion).
        for i in range(days):
            r.append(self + i)
        return r
    def __repr__(self):
        """print format: [-]yyyymmdd"""
        # Note: When disassembling a NormalDate string, be sure to
        # count from the right, i.e. epochMonth = int(`Epoch`[-4:-2]),
        # or the slice won't work for dates B.C.
        if self.normalDate < 0:
            return "%09d" % self.normalDate
        else:
            return "%08d" % self.normalDate
    def scalar(self):
        """days since baseline date: Jan 1, 1900"""
        (year, month, day) = self.toTuple()
        days = firstDayOfYear(year) + day - 1
        if self.isLeapYear():
            for m in xrange(month - 1):
                days = days + _daysInMonthLeapYear[m]
        else:
            for m in xrange(month - 1):
                days = days + _daysInMonthNormal[m]
        # Compensate for the 10 days dropped at the 1582 calendar reform.
        if year == 1582:
            if month > 10 or (month == 10 and day > 4):
                days = days - 10
        return days
    def setDay(self, day):
        """set the day of the month"""
        maxDay = self.lastDayOfMonth()
        if day < 1 or day > maxDay:
            msg = "day is outside of range 1 to %d" % maxDay
            raise NormalDateException(msg)
        (y, m, d) = self.toTuple()
        self.setNormalDate((y, m, day))
    def setMonth(self, month):
        """set the month [1-12]"""
        if month < 1 or month > 12:
            raise NormalDateException('month is outside range 1 to 12')
        (y, m, d) = self.toTuple()
        self.setNormalDate((y, month, d))
    def setNormalDate(self, normalDate):
        """
        accepts date as scalar string/integer (yyyymmdd) or tuple
        (year, month, day, ...)"""
        tn=type(normalDate)
        if tn is IntType:
            self.normalDate = normalDate
        elif tn is StringType:
            try:
                self.normalDate = int(normalDate)
            except:
                # Not a plain yyyymmdd number; try ISO yyyy-mm-dd form.
                m = _iso_re.match(normalDate)
                if m:
                    self.setNormalDate(m.group(1)+m.group(2)+m.group(3))
                else:
                    raise NormalDateException("unable to setNormalDate(%s)" % `normalDate`)
        elif tn in _DateSeqTypes:
            # Accepts any (y, m, d, ...) sequence, e.g. time.localtime().
            self.normalDate = int("%04d%02d%02d" % normalDate[:3])
        elif tn is _NDType:
            self.normalDate = normalDate.normalDate
        if not self._isValidNormalDate(self.normalDate):
            raise NormalDateException("unable to setNormalDate(%s)" % `normalDate`)
    def setYear(self, year):
        if year == 0:
            raise NormalDateException('cannot set year to zero')
        elif year < -9999:
            raise NormalDateException('year cannot be less than -9999')
        elif year > 9999:
            raise NormalDateException('year cannot be greater than 9999')
        (y, m, d) = self.toTuple()
        self.setNormalDate((year, m, d))
    # Pickle support: __getstate__ returns the int, so restore via
    # setNormalDate directly.
    __setstate__ = setNormalDate
    def __sub__(self, v):
        if type(v) is IntType:
            return self.__add__(-v)
        return self.scalar() - v.scalar()
    def __rsub__(self,v):
        if type(v) is IntType:
            return NormalDate(v) - self
        else:
            return v.scalar() - self.scalar()
    def toTuple(self):
        """return date as (year, month, day) tuple"""
        return (self.year(), self.month(), self.day())
    def year(self):
        """return year in yyyy format, negative values indicate B.C."""
        return int(repr(self.normalDate)[:-4])
################# Utility functions #################
def bigBang():
    """return lower boundary as a NormalDate: (-9999, 1, 1) B.C.E."""
    return NormalDate((-9999, 1, 1))
def bigCrunch():
    """return upper boundary as a NormalDate: (9999, 12, 31) C.E."""
    return NormalDate((9999, 12, 31))
def dayOfWeek(y, m, d):
    """return integer representing day of week, Mon=0, Tue=1, etc.

    Uses a Zeller-style congruence; Jan/Feb are treated as months 13/14
    of the previous year so the leap day falls at the end of the cycle.
    """
    if m == 1 or m == 2:
        m = m + 12
        y = y - 1
    # Floor division ('//') is identical to '/' on ints under Python 2 but,
    # unlike '/', stays integral (and therefore correct) under true division
    # (Python 3 or 'from __future__ import division').
    return (d + 2*m + 3*(m+1)//5 + y + y//4 - y//100 + y//400) % 7
def firstDayOfYear(year):
    """number of days to the first of the year, relative to Jan 1, 1900"""
    if type(year) is not IntType:
        msg = "firstDayOfYear() expected integer, got %s" % type(year)
        raise NormalDateException(msg)
    if year == 0:
        raise NormalDateException('first day of year cannot be zero (0)')
    elif year < 0: # BCE calculation
        # NOTE(review): int((year - 1) / 4) relies on Python 2's floor '/'
        # for negative ints; under true division int() truncates toward zero
        # instead -- verify before porting to Python 3.
        firstDay = (year * 365) + int((year - 1) / 4) - 693596
    else: # CE calculation
        leapAdjust = int((year + 3) / 4)
        # After 1600, subtract century non-leap years, add back 400-years.
        if year > 1600:
            leapAdjust = leapAdjust - int((year + 99 - 1600) / 100) + \
                         int((year + 399 - 1600) / 400)
        firstDay = year * 365 + leapAdjust - 693963
        # Account for the 10 days dropped at the 1582 Gregorian reform.
        if year > 1582:
            firstDay = firstDay - 10
    return firstDay
def FND(d):
    '''convert to ND if required'''
    if type(d) is _NDType:
        return d
    return ND(d)
# Module-scope singletons and aliases.
Epoch=bigBang()
ND=NormalDate
_NDType = type(Epoch)
# BDEpoch (Oct 18, 1582) is the first Monday after the Gregorian calendar
# gap; BDEpochScalar is its day-scalar relative to Jan 1, 1900.
BDEpoch=ND(15821018)
BDEpochScalar = -115857
class BusinessDate(NormalDate):
"""
Specialised NormalDate
"""
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not type(days) is IntType:
raise NormalDateException('add method parameter must be integer type')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to BusinessDate and return a new, calculated value"""
if not type(days) is IntType:
raise NormalDateException('__add__ parameter must be integer type')
cloned = self.clone()
cloned.add(days)
return cloned
def __sub__(self, v):
return type(v) is IntType and self.__add__(-v) or self.scalar() - v.scalar()
def asNormalDate(self):
return ND(self.normalDate)
def daysBetweenDates(self, normalDate):
return self.asNormalDate.daysBetweenDates(normalDate)
def _checkDOW(self):
if self.dayOfWeek()>4: raise NormalDateException("%s isn't a business day" % `self.normalDate`)
def normalize(self, i):
i = int(i)
NormalDate.normalize(self,(i/5)*7+i%5+BDEpochScalar)
def scalar(self):
d = self.asNormalDate()
i = d - BDEpoch #luckily BDEpoch is a Monday so we don't have a problem
#concerning the relative weekday
return 5*(i/7) + i%7
def setNormalDate(self, normalDate):
NormalDate.setNormalDate(self,normalDate)
self._checkDOW()
if __name__ == '__main__':
    # Smoke test / demo (Python 2 print statements): exercises arithmetic,
    # weekday lookup, the formatMS token notation and BusinessDate scalars.
    today = NormalDate()
    print "NormalDate test:"
    print "  Today (%s) is: %s %s" % (today, today.dayOfWeekAbbrev(), today.localeFormat())
    yesterday = today - 1
    print "  Yesterday was: %s %s" % (yesterday.dayOfWeekAbbrev(), yesterday.localeFormat())
    tomorrow = today + 1
    print "  Tomorrow will be: %s %s" % (tomorrow.dayOfWeekAbbrev(), tomorrow.localeFormat())
    print "  Days between tomorrow and yesterday: %d" % (tomorrow - yesterday)
    print today.formatMS('{d}/{m}/{yy}')
    print today.formatMS('{dd}/{m}/{yy}')
    print today.formatMS('{ddd} {d}/{m}/{yy}')
    print today.formatMS('{dddd} {d}/{m}/{yy}')
    print today.formatMS('{d}/{mm}/{yy}')
    print today.formatMS('{d}/{mmm}/{yy}')
    print today.formatMS('{d}/{mmmm}/{yy}')
    print today.formatMS('{d}/{m}/{yyyy}')
    b = BusinessDate('20010116')
    print 'b=',b,'b.scalar()', b.scalar()
|
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v3 import inherited_roles_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestInheritedRolesClient(base.BaseServiceTest):
    """Unit tests for InheritedRolesClient.

    Each public test delegates to a _test_* helper that stubs the HTTP verb
    and checks the client method's request/response handling.  Fix: added the
    previously missing projects_user/bytes_body and projects_group/str_body
    variants so all four create_* methods are covered symmetrically.
    """
    FAKE_LIST_INHERITED_ROLES = {
        "roles": [
            {
                "id": "1",
                "name": "test",
                "links": "example.com"
            },
            {
                "id": "2",
                "name": "test2",
                "links": "example.com"
            }
        ]
    }
    def setUp(self):
        super(TestInheritedRolesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = inherited_roles_client.InheritedRolesClient(
            fake_auth, 'identity', 'regionOne')
    def _test_create_inherited_role_on_domains_user(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_domains_user,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def _test_list_inherited_project_role_for_user_on_domain(
            self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_inherited_project_role_for_user_on_domain,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_LIST_INHERITED_ROLES,
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123")
    def _test_create_inherited_role_on_domains_group(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_domains_group,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
    def _test_list_inherited_project_role_for_group_on_domain(
            self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_inherited_project_role_for_group_on_domain,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_LIST_INHERITED_ROLES,
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123")
    def _test_create_inherited_role_on_projects_user(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_projects_user,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def _test_create_inherited_role_on_projects_group(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_projects_group,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
    def test_create_inherited_role_on_domains_user_with_str_body(self):
        self._test_create_inherited_role_on_domains_user()
    def test_create_inherited_role_on_domains_user_with_bytes_body(self):
        self._test_create_inherited_role_on_domains_user(bytes_body=True)
    def test_create_inherited_role_on_domains_group_with_str_body(self):
        self._test_create_inherited_role_on_domains_group()
    def test_create_inherited_role_on_domains_group_with_bytes_body(self):
        self._test_create_inherited_role_on_domains_group(bytes_body=True)
    def test_create_inherited_role_on_projects_user_with_str_body(self):
        self._test_create_inherited_role_on_projects_user()
    def test_create_inherited_role_on_projects_user_with_bytes_body(self):
        self._test_create_inherited_role_on_projects_user(bytes_body=True)
    def test_create_inherited_role_on_projects_group_with_str_body(self):
        self._test_create_inherited_role_on_projects_group()
    def test_create_inherited_role_on_projects_group_with_bytes_body(self):
        self._test_create_inherited_role_on_projects_group(bytes_body=True)
    def test_list_inherited_project_role_for_user_on_domain_with_str_body(
            self):
        self._test_list_inherited_project_role_for_user_on_domain()
    def test_list_inherited_project_role_for_user_on_domain_with_bytes_body(
            self):
        self._test_list_inherited_project_role_for_user_on_domain(
            bytes_body=True)
    def test_list_inherited_project_role_for_group_on_domain_with_str_body(
            self):
        self._test_list_inherited_project_role_for_group_on_domain()
    def test_list_inherited_project_role_for_group_on_domain_with_bytes_body(
            self):
        self._test_list_inherited_project_role_for_group_on_domain(
            bytes_body=True)
    def test_delete_inherited_role_from_user_on_domain(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_user_on_domain,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def test_check_user_inherited_project_role_on_domain(self):
        self.check_service_client_function(
            self.client.check_user_inherited_project_role_on_domain,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def test_delete_inherited_role_from_group_on_domain(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_group_on_domain,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
    def test_check_group_inherited_project_role_on_domain(self):
        self.check_service_client_function(
            self.client.check_group_inherited_project_role_on_domain,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
    def test_delete_inherited_role_from_user_on_project(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_user_on_project,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def test_check_user_has_flag_on_inherited_to_project(self):
        self.check_service_client_function(
            self.client.check_user_has_flag_on_inherited_to_project,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)
    def test_delete_inherited_role_from_group_on_project(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_group_on_project,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
    def test_check_group_has_flag_on_inherited_to_project(self):
        self.check_service_client_function(
            self.client.check_group_has_flag_on_inherited_to_project,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
|
|
# Copyright (c) 2012 Terence Honles <terence@honles.com> (maintainer)
# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com> (author)
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from os import strerror
from platform import machine, system
from signal import signal, SIGINT, SIG_DFL
from stat import S_IFDIR
from traceback import print_exc
import logging
# Compatibility shims for old interpreters.
try:
    from functools import partial
except ImportError:
    # Pure-Python fallback for pre-2.5 interpreters, taken from
    # http://docs.python.org/library/functools.html#functools.partial
    def partial(func, *args, **keywords):
        def newfunc(*fargs, **fkeywords):
            newkeywords = keywords.copy()
            newkeywords.update(fkeywords)
            return func(*(args + fargs), **newkeywords)
        newfunc.func = func
        newfunc.args = args
        newfunc.keywords = keywords
        return newfunc
try:
    basestring
except NameError:
    # Python 3 has no basestring; str is the only string base type there.
    basestring = str
class c_timespec(Structure):
    # Mirrors C struct timespec: whole seconds plus nanoseconds.
    _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
    # Access/modification time pair passed to utimens().
    _fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
    # _fields_ is assigned later, once the platform is known (see below).
    pass # Platform dependent
# Locate and load the platform's libfuse shared library.
_system = system()
_machine = machine()
if _system == 'Darwin':
    # Load libiconv globally first, then try the known OS X FUSE ports.
    _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
    _libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or
                     find_library('fuse'))
else:
    _libfuse_path = find_library('fuse')
if not _libfuse_path:
    raise EnvironmentError('Unable to find libfuse')
else:
    _libfuse = CDLL(_libfuse_path)
# MacFuse exports macfuse_version; it needs different struct layouts below.
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
    _system = 'Darwin-MacFuse'
# Platform-specific C type aliases, xattr callback signatures and the
# struct stat layout.  Field order and widths must match the platform ABI
# exactly -- do not reorder or retype.
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
    ENOTSUP = 45
    c_dev_t = c_int32
    c_fsblkcnt_t = c_ulong
    c_fsfilcnt_t = c_ulong
    c_gid_t = c_uint32
    c_mode_t = c_uint16
    c_off_t = c_int64
    c_pid_t = c_int32
    c_uid_t = c_uint32
    # BSD/OS X xattr calls take an extra position (uint32) argument.
    setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_int, c_uint32)
    getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_uint32)
    if _system == 'Darwin':
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('st_mode', c_mode_t),
            ('st_nlink', c_uint16),
            ('st_ino', c_uint64),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('st_rdev', c_dev_t),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec),
            ('st_birthtimespec', c_timespec),
            ('st_size', c_off_t),
            ('st_blocks', c_int64),
            ('st_blksize', c_int32),
            ('st_flags', c_int32),
            ('st_gen', c_int32),
            ('st_lspare', c_int32),
            ('st_qspare', c_int64)]
    else:
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('st_ino', c_uint32),
            ('st_mode', c_mode_t),
            ('st_nlink', c_uint16),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('st_rdev', c_dev_t),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec),
            ('st_size', c_off_t),
            ('st_blocks', c_int64),
            ('st_blksize', c_int32)]
elif _system == 'Linux':
    ENOTSUP = 95
    c_dev_t = c_ulonglong
    c_fsblkcnt_t = c_ulonglong
    c_fsfilcnt_t = c_ulonglong
    c_gid_t = c_uint
    c_mode_t = c_uint
    c_off_t = c_longlong
    c_pid_t = c_int
    c_uid_t = c_uint
    setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_int)
    getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t)
    # struct stat differs per architecture on Linux.
    if _machine == 'x86_64':
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('st_ino', c_ulong),
            ('st_nlink', c_ulong),
            ('st_mode', c_mode_t),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('__pad0', c_int),
            ('st_rdev', c_dev_t),
            ('st_size', c_off_t),
            ('st_blksize', c_long),
            ('st_blocks', c_long),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec)]
    elif _machine == 'ppc':
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('st_ino', c_ulonglong),
            ('st_mode', c_mode_t),
            ('st_nlink', c_uint),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('st_rdev', c_dev_t),
            ('__pad2', c_ushort),
            ('st_size', c_off_t),
            ('st_blksize', c_long),
            ('st_blocks', c_longlong),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec)]
    else:
        # i686, use as fallback for everything else
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('__pad1', c_ushort),
            ('__st_ino', c_ulong),
            ('st_mode', c_mode_t),
            ('st_nlink', c_uint),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('st_rdev', c_dev_t),
            ('__pad2', c_ushort),
            ('st_size', c_off_t),
            ('st_blksize', c_long),
            ('st_blocks', c_longlong),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec),
            ('st_ino', c_ulonglong)]
else:
    raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
    # Mirrors struct statvfs (filesystem statistics); redefined for FreeBSD
    # further down.
    _fields_ = [
        ('f_bsize', c_ulong),
        ('f_frsize', c_ulong),
        ('f_blocks', c_fsblkcnt_t),
        ('f_bfree', c_fsblkcnt_t),
        ('f_bavail', c_fsblkcnt_t),
        ('f_files', c_fsfilcnt_t),
        ('f_ffree', c_fsfilcnt_t),
        ('f_favail', c_fsfilcnt_t)]
# FreeBSD uses 64-bit block/file counts, Linux-style xattr signatures and a
# differently ordered statvfs, so the earlier definitions are replaced here.
if _system == 'FreeBSD':
    c_fsblkcnt_t = c_uint64
    c_fsfilcnt_t = c_uint64
    setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_int)
    getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t)
    class c_statvfs(Structure):
        _fields_ = [
            ('f_bavail', c_fsblkcnt_t),
            ('f_bfree', c_fsblkcnt_t),
            ('f_blocks', c_fsblkcnt_t),
            ('f_favail', c_fsfilcnt_t),
            ('f_ffree', c_fsfilcnt_t),
            ('f_files', c_fsfilcnt_t),
            ('f_bsize', c_ulong),
            ('f_flag', c_ulong),
            ('f_frsize', c_ulong)]
class fuse_file_info(Structure):
    # Mirrors struct fuse_file_info; the c_uint fields with a third element
    # are C bit-fields (width in bits).
    _fields_ = [
        ('flags', c_int),
        ('fh_old', c_ulong),
        ('writepage', c_int),
        ('direct_io', c_uint, 1),
        ('keep_cache', c_uint, 1),
        ('flush', c_uint, 1),
        ('padding', c_uint, 29),
        ('fh', c_uint64),
        ('lock_owner', c_uint64)]
class fuse_context(Structure):
    # Mirrors struct fuse_context: identity of the process making a request.
    _fields_ = [
        ('fuse', c_voidp),
        ('uid', c_uid_t),
        ('gid', c_gid_t),
        ('pid', c_pid_t),
        ('private_data', c_voidp)]
# Tell ctypes the C function returns a pointer to the struct above.
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
class fuse_operations(Structure):
    # Mirrors struct fuse_operations: one callback slot per filesystem
    # operation.  Slot order must match the C struct; c_voidp entries are
    # deprecated slots that are never populated.
    _fields_ = [
        ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
        ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
        ('getdir', c_voidp), # Deprecated, use readdir
        ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
        ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
        ('unlink', CFUNCTYPE(c_int, c_char_p)),
        ('rmdir', CFUNCTYPE(c_int, c_char_p)),
        ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
        ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
        ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
        ('utime', c_voidp), # Deprecated, use utimens
        ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
                           c_off_t, POINTER(fuse_file_info))),
        ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
                            c_off_t, POINTER(fuse_file_info))),
        ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
        ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
        ('setxattr', setxattr_t),
        ('getxattr', getxattr_t),
        ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
        ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
                              CFUNCTYPE(c_int, c_voidp, c_char_p,
                                        POINTER(c_stat), c_off_t),
                              c_off_t, POINTER(fuse_file_info))),
        ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int,
                               POINTER(fuse_file_info))),
        ('init', CFUNCTYPE(c_voidp, c_voidp)),
        ('destroy', CFUNCTYPE(c_voidp, c_voidp)),
        ('access', CFUNCTYPE(c_int, c_char_p, c_int)),
        ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t,
                             POINTER(fuse_file_info))),
        ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t,
                                POINTER(fuse_file_info))),
        ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
                               POINTER(fuse_file_info))),
        ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info),
                           c_int, c_voidp)),
        ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
        ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))),
    ]
def time_of_timespec(ts):
    """Convert a timespec struct (tv_sec + tv_nsec) to a float of seconds.

    The divisor is a float: with the integer ``10 ** 9`` this module's
    Python 2 division would truncate the nanosecond fraction to 0.
    """
    return ts.tv_sec + ts.tv_nsec / 1E9
def set_st_attrs(st, attrs):
    """Copy the entries of *attrs* onto the stat structure *st*.

    Time attributes are split into the sec/nsec pair of the matching
    ``*spec`` field; any other key is copied verbatim when *st* exposes
    an attribute of that name, and silently ignored otherwise.
    """
    time_keys = ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime')
    for name, value in attrs.items():
        if name in time_keys:
            spec = getattr(st, name + 'spec')
            whole_seconds = int(value)
            spec.tv_sec = whole_seconds
            spec.tv_nsec = int((value - whole_seconds) * 10 ** 9)
            continue
        if hasattr(st, name):
            setattr(st, name, value)
def fuse_get_context():
    'Returns a (uid, gid, pid) tuple'
    context = _libfuse.fuse_get_context().contents
    return (context.uid, context.gid, context.pid)
class FuseOSError(OSError):
    """OSError subclass used by Operations to report an errno back to FUSE."""

    def __init__(self, errno):
        # Attach the standard message for this errno alongside the code.
        message = strerror(errno)
        super(FuseOSError, self).__init__(errno, message)
class FUSE(object):
    '''
    This class is the lower level interface and should not be subclassed under
    normal use. Its methods are called by fuse.

    Assumes API version 2.6 or later.
    '''

    # (kwarg name, fuse command line flag) pairs consumed by __init__.
    OPTIONS = (
        ('foreground', '-f'),
        ('debug', '-d'),
        ('nothreads', '-s'),
    )

    def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8',
                 **kwargs):
        '''
        Setting raw_fi to True will cause FUSE to pass the fuse_file_info
        class as is to Operations, instead of just the fh field.

        This gives you access to direct_io, keep_cache, etc.

        Remaining keyword arguments become '-o' mount options; this call
        blocks inside fuse_main until the filesystem is unmounted.
        '''
        self.operations = operations
        self.raw_fi = raw_fi
        self.encoding = encoding

        args = ['fuse']
        args.extend(flag for arg, flag in self.OPTIONS
                    if kwargs.pop(arg, False))
        kwargs.setdefault('fsname', operations.__class__.__name__)
        args.append('-o')
        args.append(','.join(self._normalize_fuse_options(**kwargs)))
        args.append(mountpoint)

        args = [arg.encode(encoding) for arg in args]
        argv = (c_char_p * len(args))(*args)

        # Install a C callback only for operations the Operations object
        # actually provides; a prototype of c_voidp marks deprecated slots
        # (e.g. utime) that must be left NULL.
        fuse_ops = fuse_operations()
        for name, prototype in fuse_operations._fields_:
            if prototype != c_voidp and getattr(operations, name, None):
                op = partial(self._wrapper, getattr(self, name))
                setattr(fuse_ops, name, prototype(op))

        # Restore default SIGINT handling while fuse_main runs so Ctrl-C
        # unmounts cleanly; ValueError means we are not in the main thread.
        try:
            old_handler = signal(SIGINT, SIG_DFL)
        except ValueError:
            old_handler = SIG_DFL

        err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
                                      sizeof(fuse_ops), None)

        try:
            signal(SIGINT, old_handler)
        except ValueError:
            pass

        del self.operations     # Invoke the destructor
        if err:
            raise RuntimeError(err)

    @staticmethod
    def _normalize_fuse_options(**kargs):
        '''Yield mount-option strings: a bare key for True flags, nothing
        for False flags, and key=value for everything else.'''
        for key, value in kargs.items():
            if isinstance(value, bool):
                if value is True: yield key
            else:
                yield '%s=%s' % (key, value)

    @staticmethod
    def _wrapper(func, *args, **kwargs):
        '''Decorator for the methods that follow.

        Translates a None/falsy return into 0 and OSError into the negated
        errno that FUSE expects; the bare except is deliberate — nothing may
        propagate across the C callback boundary.
        '''
        try:
            return func(*args, **kwargs) or 0
        except OSError as e:
            return -(e.errno or EFAULT)
        except:
            print_exc()
            return -EFAULT

    def getattr(self, path, buf):
        # Plain getattr is fgetattr without a file handle.
        return self.fgetattr(path, buf, None)

    def readlink(self, path, buf, bufsize):
        ret = self.operations('readlink', path.decode(self.encoding)) \
                  .encode(self.encoding)

        # copies a string into the given buffer
        # (null terminated and truncated if necessary)
        data = create_string_buffer(ret[:bufsize - 1])
        memmove(buf, data, len(data))
        return 0

    def mknod(self, path, mode, dev):
        return self.operations('mknod', path.decode(self.encoding), mode, dev)

    def mkdir(self, path, mode):
        return self.operations('mkdir', path.decode(self.encoding), mode)

    def unlink(self, path):
        return self.operations('unlink', path.decode(self.encoding))

    def rmdir(self, path):
        return self.operations('rmdir', path.decode(self.encoding))

    def symlink(self, source, target):
        'creates a symlink `target -> source` (e.g. ln -s source target)'
        return self.operations('symlink', target.decode(self.encoding),
                               source.decode(self.encoding))

    def rename(self, old, new):
        return self.operations('rename', old.decode(self.encoding),
                               new.decode(self.encoding))

    def link(self, source, target):
        'creates a hard link `target -> source` (e.g. ln source target)'
        return self.operations('link', target.decode(self.encoding),
                               source.decode(self.encoding))

    def chmod(self, path, mode):
        return self.operations('chmod', path.decode(self.encoding), mode)

    def chown(self, path, uid, gid):
        # Check if any of the arguments is a -1 that has overflowed
        if c_uid_t(uid + 1).value == 0:
            uid = -1
        if c_gid_t(gid + 1).value == 0:
            gid = -1

        return self.operations('chown', path.decode(self.encoding), uid, gid)

    def truncate(self, path, length):
        return self.operations('truncate', path.decode(self.encoding), length)

    def open(self, path, fip):
        fi = fip.contents
        if self.raw_fi:
            return self.operations('open', path.decode(self.encoding), fi)
        else:
            # Stash the user-supplied numeric handle on the fuse_file_info.
            fi.fh = self.operations('open', path.decode(self.encoding),
                                    fi.flags)
            return 0

    def read(self, path, buf, size, offset, fip):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh

        ret = self.operations('read', path.decode(self.encoding), size,
                              offset, fh)

        if not ret: return 0

        retsize = len(ret)
        assert retsize <= size, \
            'actual amount read %d greater than expected %d' % (retsize, size)

        # Copy the returned bytes straight into the kernel buffer.
        memmove(buf, ret, retsize)
        return retsize

    def write(self, path, buf, size, offset, fip):
        data = string_at(buf, size)

        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh

        return self.operations('write', path.decode(self.encoding), data,
                               offset, fh)

    def statfs(self, path, buf):
        stv = buf.contents
        attrs = self.operations('statfs', path.decode(self.encoding))
        # Only copy keys that exist on the statvfs struct.
        for key, val in attrs.items():
            if hasattr(stv, key):
                setattr(stv, key, val)
        return 0

    def flush(self, path, fip):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh
        return self.operations('flush', path.decode(self.encoding), fh)

    def release(self, path, fip):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh
        return self.operations('release', path.decode(self.encoding), fh)

    def fsync(self, path, datasync, fip):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh
        return self.operations('fsync', path.decode(self.encoding), datasync,
                               fh)

    def setxattr(self, path, name, value, size, options, *args):
        return self.operations('setxattr', path.decode(self.encoding),
                               name.decode(self.encoding),
                               string_at(value, size), options, *args)

    def getxattr(self, path, name, value, size, *args):
        ret = self.operations('getxattr', path.decode(self.encoding),
                              name.decode(self.encoding), *args)

        retsize = len(ret)
        # allow size queries
        if not value: return retsize

        # do not truncate
        if retsize > size: return -ERANGE

        buf = create_string_buffer(ret, retsize)    # Does not add trailing 0
        memmove(value, buf, retsize)

        return retsize

    def listxattr(self, path, namebuf, size):
        attrs = self.operations('listxattr', path.decode(self.encoding)) or ''
        # Attribute names are NUL-separated and NUL-terminated.
        ret = '\x00'.join(attrs).encode(self.encoding) + '\x00'

        retsize = len(ret)
        # allow size queries
        if not namebuf: return retsize

        # do not truncate
        if retsize > size: return -ERANGE

        buf = create_string_buffer(ret, retsize)
        memmove(namebuf, buf, retsize)

        return retsize

    def removexattr(self, path, name):
        return self.operations('removexattr', path.decode(self.encoding),
                               name.decode(self.encoding))

    def opendir(self, path, fip):
        # Ignore raw_fi
        fip.contents.fh = self.operations('opendir',
                                          path.decode(self.encoding))
        return 0

    def readdir(self, path, buf, filler, offset, fip):
        # Ignore raw_fi
        for item in self.operations('readdir', path.decode(self.encoding),
                                    fip.contents.fh):
            # Entries may be plain names or (name, attrs, offset) tuples
            # (basestring: this module targets Python 2).
            if isinstance(item, basestring):
                name, st, offset = item, None, 0
            else:
                name, attrs, offset = item
                if attrs:
                    st = c_stat()
                    set_st_attrs(st, attrs)
                else:
                    st = None

            # A non-zero filler return means the kernel buffer is full.
            if filler(buf, name.encode(self.encoding), st, offset) != 0:
                break

        return 0

    def releasedir(self, path, fip):
        # Ignore raw_fi
        return self.operations('releasedir', path.decode(self.encoding),
                               fip.contents.fh)

    def fsyncdir(self, path, datasync, fip):
        # Ignore raw_fi
        return self.operations('fsyncdir', path.decode(self.encoding),
                               datasync, fip.contents.fh)

    def init(self, conn):
        return self.operations('init', '/')

    def destroy(self, private_data):
        return self.operations('destroy', '/')

    def access(self, path, amode):
        return self.operations('access', path.decode(self.encoding), amode)

    def create(self, path, mode, fip):
        fi = fip.contents
        path = path.decode(self.encoding)

        if self.raw_fi:
            return self.operations('create', path, mode, fi)
        else:
            fi.fh = self.operations('create', path, mode)
            return 0

    def ftruncate(self, path, length, fip):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh
        return self.operations('truncate', path.decode(self.encoding),
                               length, fh)

    def fgetattr(self, path, buf, fip):
        memset(buf, 0, sizeof(c_stat))

        st = buf.contents
        # fip is NULL when called via getattr; otherwise unwrap the handle.
        if not fip:
            fh = fip
        elif self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh

        attrs = self.operations('getattr', path.decode(self.encoding), fh)
        set_st_attrs(st, attrs)
        return 0

    def lock(self, path, fip, cmd, lock):
        if self.raw_fi:
            fh = fip.contents
        else:
            fh = fip.contents.fh
        return self.operations('lock', path.decode(self.encoding), fh, cmd,
                               lock)

    def utimens(self, path, buf):
        if buf:
            atime = time_of_timespec(buf.contents.actime)
            mtime = time_of_timespec(buf.contents.modtime)
            times = (atime, mtime)
        else:
            times = None

        return self.operations('utimens', path.decode(self.encoding), times)

    def bmap(self, path, blocksize, idx):
        return self.operations('bmap', path.decode(self.encoding), blocksize,
                               idx)
import datetime
class Operations(object):
    '''
    This class should be subclassed and passed as an argument to FUSE on
    initialization. All operations should raise a FuseOSError exception on
    error.

    When in doubt of what an operation should do, check the FUSE header file
    or the corresponding system call man page.
    '''

    def __call__(self, op, *args):
        '''Dispatch operation *op* to the method of the same name, raising
        EFAULT for operations this object does not implement.'''
        # Uncomment to see each filesystem call.
        # now = datetime.datetime.now()
        # print '[fuse %02d:%02d:%02d.%d]' % (now.hour, now.minute, now.second, now.microsecond),
        # print op, tuple(x if len(str(x)) < 100 else str(x)[:100] + '...' for x in args)
        if not hasattr(self, op):
            raise FuseOSError(EFAULT)
        return getattr(self, op)(*args)

    def access(self, path, amode):
        return 0

    # Optional operations are disabled by setting the attribute to None, so
    # FUSE.__init__ does not install a callback for them.
    bmap = None

    def chmod(self, path, mode):
        raise FuseOSError(EROFS)

    def chown(self, path, uid, gid):
        raise FuseOSError(EROFS)

    def create(self, path, mode, fi=None):
        '''
        When raw_fi is False (default case), fi is None and create should
        return a numerical file handle.

        When raw_fi is True the file handle should be set directly by create
        and return 0.
        '''
        raise FuseOSError(EROFS)

    def destroy(self, path):
        'Called on filesystem destruction. Path is always /'
        pass

    def flush(self, path, fh):
        return 0

    def fsync(self, path, datasync, fh):
        return 0

    def fsyncdir(self, path, datasync, fh):
        return 0

    def getattr(self, path, fh=None):
        '''
        Returns a dictionary with keys identical to the stat C structure of
        stat(2).

        st_atime, st_mtime and st_ctime should be floats.

        NOTE: There is an incompatibility between Linux and Mac OS X
        concerning st_nlink of directories. Mac OS X counts all files inside
        the directory, while Linux counts only the subdirectories.
        '''
        if path != '/':
            raise FuseOSError(ENOENT)
        # 0o755 spelling is valid on Python 2.6+ and required on Python 3,
        # where the legacy 0755 form is a syntax error.
        return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)

    def getxattr(self, path, name, position=0):
        raise FuseOSError(ENOTSUP)

    def init(self, path):
        '''
        Called on filesystem initialization. (Path is always /)

        Use it instead of __init__ if you start threads on initialization.
        '''
        pass

    def link(self, target, source):
        'creates a hard link `target -> source` (e.g. ln source target)'
        raise FuseOSError(EROFS)

    def listxattr(self, path):
        return []

    lock = None

    def mkdir(self, path, mode):
        raise FuseOSError(EROFS)

    def mknod(self, path, mode, dev):
        raise FuseOSError(EROFS)

    def open(self, path, flags):
        '''
        When raw_fi is False (default case), open should return a numerical
        file handle.

        When raw_fi is True the signature of open becomes:
            open(self, path, fi)
        and the file handle should be set directly.
        '''
        return 0

    def opendir(self, path):
        'Returns a numerical file handle.'
        return 0

    def read(self, path, size, offset, fh):
        'Returns a string containing the data requested.'
        raise FuseOSError(EIO)

    def readdir(self, path, fh):
        '''
        Can return either a list of names, or a list of (name, attrs, offset)
        tuples. attrs is a dict as in getattr.
        '''
        return ['.', '..']

    def readlink(self, path):
        raise FuseOSError(ENOENT)

    def release(self, path, fh):
        return 0

    def releasedir(self, path, fh):
        return 0

    def removexattr(self, path, name):
        raise FuseOSError(ENOTSUP)

    def rename(self, old, new):
        raise FuseOSError(EROFS)

    def rmdir(self, path):
        raise FuseOSError(EROFS)

    def setxattr(self, path, name, value, options, position=0):
        raise FuseOSError(ENOTSUP)

    def statfs(self, path):
        '''
        Returns a dictionary with keys identical to the statvfs C structure of
        statvfs(3).

        On Mac OS X f_bsize and f_frsize must be a power of 2
        (minimum 512).
        '''
        return {}

    def symlink(self, target, source):
        'creates a symlink `target -> source` (e.g. ln -s source target)'
        raise FuseOSError(EROFS)

    def truncate(self, path, length, fh=None):
        raise FuseOSError(EROFS)

    def unlink(self, path):
        raise FuseOSError(EROFS)

    def utimens(self, path, times=None):
        'Times is a (atime, mtime) tuple. If None use current time.'
        return 0

    def write(self, path, data, offset, fh):
        raise FuseOSError(EROFS)
class LoggingMixIn:
    """Mix-in that logs every dispatched operation and its result.

    Uses ``except OSError as e`` (valid on Python 2.6+ and 3) instead of
    the Python-2-only ``except OSError, e`` spelling.
    """

    log = logging.getLogger('fuse.log-mixin')

    def __call__(self, op, path, *args):
        self.log.debug('-> %s %s %s', op, path, repr(args))
        ret = '[Unhandled Exception]'
        try:
            ret = getattr(self, op)(path, *args)
            return ret
        except OSError as e:
            # Log the error string, then let FUSE turn it into an errno.
            ret = str(e)
            raise
        finally:
            self.log.debug('<- %s %s', op, repr(ret))
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.common import AzureException
from dateutil import parser
from azure.storage.common._http import HTTPResponse
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from azure.storage.common._common_conversion import (
_decode_base64_to_text,
_to_str,
_get_content_md5
)
from azure.storage.common._deserialization import (
_parse_properties,
_to_int,
_parse_metadata,
_convert_xml_to_signed_identifiers,
_bool,
)
from .models import (
Container,
Blob,
BlobBlock,
BlobBlockList,
BlobBlockState,
BlobProperties,
PageRange,
ContainerProperties,
AppendBlockProperties,
PageBlobProperties,
ResourceProperties,
BlobPrefix,
AccountInformation,
UserDelegationKey, BatchSubResponse)
from ._encryption import _decrypt_blob
from azure.storage.common.models import _list
from azure.storage.common._error import (
_validate_content_match,
_ERROR_DECRYPTION_FAILURE,
)
from io import BytesIO
# CRLF terminator used when splitting/parsing multipart batch responses.
_HTTP_LINE_ENDING = "\r\n"
def _parse_cpk_headers(response, properties):
server_encrypted = response.headers.get('x-ms-request-server-encrypted')
if server_encrypted is not None:
properties.request_server_encrypted = _bool(server_encrypted)
properties.encryption_key_sha256 = response.headers.get('x-ms-encryption-key-sha256')
def _parse_base_properties(response):
    '''
    Extracts basic response headers.
    '''
    headers = response.headers
    resource_properties = ResourceProperties()
    resource_properties.last_modified = parser.parse(headers.get('last-modified'))
    resource_properties.etag = headers.get('etag')
    _parse_cpk_headers(response, resource_properties)
    return resource_properties
def _parse_page_properties(response):
    '''
    Extracts page response headers.
    '''
    headers = response.headers
    put_page = PageBlobProperties()
    put_page.last_modified = parser.parse(headers.get('last-modified'))
    put_page.etag = headers.get('etag')
    put_page.sequence_number = _to_int(headers.get('x-ms-blob-sequence-number'))
    _parse_cpk_headers(response, put_page)
    return put_page
def _parse_append_block(response):
    '''
    Extracts append block response headers.
    '''
    headers = response.headers
    append_block = AppendBlockProperties()
    append_block.last_modified = parser.parse(headers.get('last-modified'))
    append_block.etag = headers.get('etag')
    append_block.append_offset = _to_int(headers.get('x-ms-blob-append-offset'))
    append_block.committed_block_count = _to_int(
        headers.get('x-ms-blob-committed-block-count'))
    _parse_cpk_headers(response, append_block)
    return append_block
def _parse_snapshot_blob(response, name):
    '''
    Extracts snapshot return header.
    '''
    return _parse_blob(response, name,
                       response.headers.get('x-ms-snapshot'))
def _parse_lease(response):
'''
Extracts lease time and ID return headers.
'''
lease = {'time': response.headers.get('x-ms-lease-time')}
if lease['time']:
lease['time'] = _to_int(lease['time'])
lease['id'] = response.headers.get('x-ms-lease-id')
return lease
def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, BlobProperties)
# For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
content_settings = getattr(props, 'content_settings')
if 'content-range' in response.headers:
if 'x-ms-blob-content-md5' in response.headers:
setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
else:
delattr(content_settings, 'content_md5')
if validate_content:
computed_md5 = _get_content_md5(response.body)
_validate_content_match(response.headers['content-md5'], computed_md5)
if key_encryption_key is not None or key_resolver_function is not None:
try:
response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
response, start_offset, end_offset)
except:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
return Blob(name, snapshot, response.body, props, metadata)
def _parse_container(response, name):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, ContainerProperties)
return Container(name, props, metadata)
def _convert_xml_to_signed_identifiers_and_access(response):
    """Parse the ACL XML and attach the container's public-access header."""
    acl = _convert_xml_to_signed_identifiers(response)
    acl.public_access = response.headers.get('x-ms-blob-public-access')
    return acl
def _convert_xml_to_containers(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.blob.core.windows.net">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Containers>
<Container>
<Name>container-name</Name>
<Properties>
<Last-Modified>date/time-value</Last-Modified>
<Etag>etag</Etag>
<LeaseStatus>locked | unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<PublicAccess>blob | container</PublicAccess>
<HasImmutabilityPolicy>true | false</HasImmutabilityPolicy>
<HasLegalHold>true | false</HasLegalHold>
</Properties>
<Metadata>
<metadata-name>value</metadata-name>
</Metadata>
</Container>
</Containers>
<NextMarker>marker-value</NextMarker>
</EnumerationResults>
'''
if response is None or response.body is None:
return None
containers = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
setattr(containers, 'next_marker', list_element.findtext('NextMarker'))
containers_element = list_element.find('Containers')
for container_element in containers_element.findall('Container'):
# Name element
container = Container()
container.name = container_element.findtext('Name')
# Metadata
metadata_root_element = container_element.find('Metadata')
if metadata_root_element is not None:
container.metadata = dict()
for metadata_element in metadata_root_element:
container.metadata[metadata_element.tag] = metadata_element.text
# Properties
properties_element = container_element.find('Properties')
container.properties.etag = properties_element.findtext('Etag')
container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
container.properties.lease_status = properties_element.findtext('LeaseStatus')
container.properties.lease_state = properties_element.findtext('LeaseState')
container.properties.lease_duration = properties_element.findtext('LeaseDuration')
container.properties.public_access = properties_element.findtext('PublicAccess')
container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy')
container.properties.has_legal_hold = properties_element.findtext('HasLegalHold')
# Add container to list
containers.append(container)
return containers
# Maps XML property tags from a blob listing to
# (sub-object attribute or None, property name, converter) triples,
# used by _convert_xml_to_blob_list to populate BlobProperties. A None
# first element means the value is set directly on blob.properties;
# otherwise it is set on the named sub-object (content_settings, lease,
# copy).
LIST_BLOBS_ATTRIBUTE_MAP = {
    'Last-Modified': (None, 'last_modified', parser.parse),
    'Etag': (None, 'etag', _to_str),
    'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int),
    'BlobType': (None, 'blob_type', _to_str),
    'Content-Length': (None, 'content_length', _to_int),
    'ServerEncrypted': (None, 'server_encrypted', _bool),
    'Content-Type': ('content_settings', 'content_type', _to_str),
    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
    'Content-Language': ('content_settings', 'content_language', _to_str),
    'Content-MD5': ('content_settings', 'content_md5', _to_str),
    'Cache-Control': ('content_settings', 'cache_control', _to_str),
    'LeaseStatus': ('lease', 'status', _to_str),
    'LeaseState': ('lease', 'state', _to_str),
    'LeaseDuration': ('lease', 'duration', _to_str),
    'CopyId': ('copy', 'id', _to_str),
    'CopySource': ('copy', 'source', _to_str),
    'CopyStatus': ('copy', 'status', _to_str),
    'CopyProgress': ('copy', 'progress', _to_str),
    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
    'CopyStatusDescription': ('copy', 'status_description', _to_str),
    'AccessTier': (None, 'blob_tier', _to_str),
    'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
    'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
    'ArchiveStatus': (None, 'rehydration_status', _to_str),
    'DeletedTime': (None, 'deleted_time', parser.parse),
    'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int),
    'Creation-Time': (None, 'creation_time', parser.parse),
}
def _convert_xml_to_blob_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Delimiter>string-value</Delimiter>
<Blobs>
<Blob>
<Name>blob-name</name>
<Deleted>true</Deleted>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date-time-value</Last-Modified>
<Etag>etag</Etag>
<Content-Length>size-in-bytes</Content-Length>
<Content-Type>blob-content-type</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
<BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
<LeaseStatus>locked|unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<CopyId>id</CopyId>
<CopyStatus>pending | success | aborted | failed </CopyStatus>
<CopySource>source url</CopySource>
<CopyProgress>bytes copied/bytes total</CopyProgress>
<CopyCompletionTime>datetime</CopyCompletionTime>
<CopyStatusDescription>error string</CopyStatusDescription>
<AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
<AccessTierChangeTime>date-time-value</AccessTierChangeTime>
<AccessTierInferred>true</AccessTierInferred>
<DeletedTime>datetime</DeletedTime>
<RemainingRetentionDays>int</RemainingRetentionDays>
<Creation-Time>date-time-value</Creation-Time>
</Properties>
<Metadata>
<Name>value</Name>
</Metadata>
</Blob>
<BlobPrefix>
<Name>blob-prefix</Name>
</BlobPrefix>
</Blobs>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
blob_list = _list()
list_element = ETree.fromstring(response.body)
setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
blobs_element = list_element.find('Blobs')
blob_prefix_elements = blobs_element.findall('BlobPrefix')
if blob_prefix_elements is not None:
for blob_prefix_element in blob_prefix_elements:
prefix = BlobPrefix()
prefix.name = blob_prefix_element.findtext('Name')
blob_list.append(prefix)
for blob_element in blobs_element.findall('Blob'):
blob = Blob()
blob.name = blob_element.findtext('Name')
blob.snapshot = blob_element.findtext('Snapshot')
deleted = blob_element.findtext('Deleted')
if deleted:
blob.deleted = _bool(deleted)
# Properties
properties_element = blob_element.find('Properties')
if properties_element is not None:
for property_element in properties_element:
info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
if info is None:
setattr(blob.properties, property_element.tag, _to_str(property_element.text))
elif info[0] is None:
setattr(blob.properties, info[1], info[2](property_element.text))
else:
attr = getattr(blob.properties, info[0])
setattr(attr, info[1], info[2](property_element.text))
# Metadata
metadata_root_element = blob_element.find('Metadata')
if metadata_root_element is not None:
blob.metadata = dict()
for metadata_element in metadata_root_element:
blob.metadata[metadata_element.tag] = metadata_element.text
# Add blob to list
blob_list.append(blob)
return blob_list
def _convert_xml_to_blob_name_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="http://myaccount.blob.core.windows.net/" ContainerName="mycontainer">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Delimiter>string-value</Delimiter>
<Blobs>
<Blob>
<Name>blob-name</name>
<Deleted>true</Deleted>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date-time-value</Last-Modified>
<Etag>etag</Etag>
<Content-Length>size-in-bytes</Content-Length>
<Content-Type>blob-content-type</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<x-ms-blob-sequence-number>sequence-number</x-ms-blob-sequence-number>
<BlobType>BlockBlob|PageBlob|AppendBlob</BlobType>
<LeaseStatus>locked|unlocked</LeaseStatus>
<LeaseState>available | leased | expired | breaking | broken</LeaseState>
<LeaseDuration>infinite | fixed</LeaseDuration>
<CopyId>id</CopyId>
<CopyStatus>pending | success | aborted | failed </CopyStatus>
<CopySource>source url</CopySource>
<CopyProgress>bytes copied/bytes total</CopyProgress>
<CopyCompletionTime>datetime</CopyCompletionTime>
<CopyStatusDescription>error string</CopyStatusDescription>
<AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot</AccessTier>
<AccessTierChangeTime>date-time-value</AccessTierChangeTime>
<AccessTierInferred>true</AccessTierInferred>
<DeletedTime>datetime</DeletedTime>
<RemainingRetentionDays>int</RemainingRetentionDays>
<Creation-Time>date-time-value</Creation-Time>
</Properties>
<Metadata>
<Name>value</Name>
</Metadata>
</Blob>
<BlobPrefix>
<Name>blob-prefix</Name>
</BlobPrefix>
</Blobs>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
blob_list = _list()
list_element = ETree.fromstring(response.body)
setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))
blobs_element = list_element.find('Blobs')
blob_prefix_elements = blobs_element.findall('BlobPrefix')
if blob_prefix_elements is not None:
for blob_prefix_element in blob_prefix_elements:
blob_list.append(blob_prefix_element.findtext('Name'))
for blob_element in blobs_element.findall('Blob'):
blob_list.append(blob_element.findtext('Name'))
return blob_list
def _convert_xml_to_block_list(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<BlockList>
<CommittedBlocks>
<Block>
<Name>base64-encoded-block-id</Name>
<Size>size-in-bytes</Size>
</Block>
</CommittedBlocks>
<UncommittedBlocks>
<Block>
<Name>base64-encoded-block-id</Name>
<Size>size-in-bytes</Size>
</Block>
</UncommittedBlocks>
</BlockList>
Converts xml response to block list class.
'''
if response is None or response.body is None:
return None
block_list = BlobBlockList()
list_element = ETree.fromstring(response.body)
committed_blocks_element = list_element.find('CommittedBlocks')
if committed_blocks_element is not None:
for block_element in committed_blocks_element.findall('Block'):
block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
block_size = int(block_element.findtext('Size'))
block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
block._set_size(block_size)
block_list.committed_blocks.append(block)
uncommitted_blocks_element = list_element.find('UncommittedBlocks')
if uncommitted_blocks_element is not None:
for block_element in uncommitted_blocks_element.findall('Block'):
block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
block_size = int(block_element.findtext('Size'))
block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
block._set_size(block_size)
block_list.uncommitted_blocks.append(block)
return block_list
def _convert_xml_to_page_ranges(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<PageList>
<PageRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</PageRange>
<ClearRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</ClearRange>
<PageRange>
<Start>Start Byte</Start>
<End>End Byte</End>
</PageRange>
</PageList>
'''
if response is None or response.body is None:
return None
page_list = list()
list_element = ETree.fromstring(response.body)
for page_range_element in list_element:
if page_range_element.tag == 'PageRange':
is_cleared = False
elif page_range_element.tag == 'ClearRange':
is_cleared = True
else:
pass # ignore any unrecognized Page Range types
page_list.append(
PageRange(
int(page_range_element.findtext('Start')),
int(page_range_element.findtext('End')),
is_cleared
)
)
return page_list
def _parse_account_information(response):
    """Read the account SKU and kind headers into an AccountInformation."""
    info = AccountInformation()
    info.sku_name = response.headers['x-ms-sku-name']
    info.account_kind = response.headers['x-ms-account-kind']
    return info
def _convert_xml_to_user_delegation_key(response):
"""
<?xml version="1.0" encoding="utf-8"?>
<UserDelegationKey>
<SignedOid> Guid </SignedOid>
<SignedTid> Guid </SignedTid>
<SignedStart> String, formatted ISO Date </SignedStart>
<SignedExpiry> String, formatted ISO Date </SignedExpiry>
<SignedService>b</SignedService>
<SignedVersion> String, rest api version used to create delegation key </SignedVersion>
<Value>Ovg+o0K/0/2V8upg7AwlyAPCriEcOSXKuBu2Gv/PU70Y7aWDW3C2ZRmw6kYWqPWBaM1GosLkcSZkgsobAlT+Sw==</value>
</UserDelegationKey >
Converts xml response to UserDelegationKey class.
"""
if response is None or response.body is None:
return None
delegation_key = UserDelegationKey()
key_element = ETree.fromstring(response.body)
delegation_key.signed_oid = key_element.findtext('SignedOid')
delegation_key.signed_tid = key_element.findtext('SignedTid')
delegation_key.signed_start = key_element.findtext('SignedStart')
delegation_key.signed_expiry = key_element.findtext('SignedExpiry')
delegation_key.signed_service = key_element.findtext('SignedService')
delegation_key.signed_version = key_element.findtext('SignedVersion')
delegation_key.value = key_element.findtext('Value')
return delegation_key
def _ingest_batch_response(batch_response, batch_sub_requests):
    """
    Takes the response to a batch request and parses the response into the separate responses.

    :param :class:`~azure.storage.common._http.HTTPResponse` batch_response:
        batchResponse The response of the HTTP batch request generated by this object.
    :param batch_sub_requests:
        The sub-requests that produced the batch, indexed by Content-ID.
    :return: sub-responses parsed from batch HTTP response
    :rtype: list of :class:`~azure.storage.blob.models.BatchSubResponse`
    """
    # header value format: `multipart/mixed; boundary=<delimiter>`
    delimiter = batch_response.headers.get('content-type').split("=")[1]
    body_text = batch_response.body.decode('utf-8')

    # Split on the "--<delim>\r\n" separators, then strip the slightly
    # different closing delimiter "\r\n--<delim>--" off the final entry.
    raw_parts = body_text.split("--" + delimiter + _HTTP_LINE_ENDING)
    raw_parts[-1] = raw_parts[-1].split(
        _HTTP_LINE_ENDING + "--" + delimiter + "--")[0]

    parsed_batch_sub_response_list = []
    for raw_part in raw_parts:
        if not raw_part:
            continue
        http_response = _parse_sub_response_to_http_response(raw_part)
        is_successful = 200 <= http_response.status < 300
        sub_request = batch_sub_requests[
            _to_int(http_response.headers.get('Content-ID'))]
        parsed_batch_sub_response_list.append(
            BatchSubResponse(is_successful, http_response, sub_request))

    return parsed_batch_sub_response_list
def _parse_sub_response_to_http_response(sub_response):
    """
    Parses one raw batch sub-response into an HTTPResponse.

    Wire format of a sub-response (MIME part headers, then the HTTP message):
        Header: Value (1 or more times)
        <empty line>
        HTTP/<version> <statusCode> <statusName>
        Header: Value (1 or more times)
        <empty line>
        body (if any)

    Body bytes start after the second empty line; anything before that is
    either the status line, a header, or an empty separator line.

    :param str sub_response:
        The raw text of this sub-response.
    :return: An HTTPResponse with status, message, headers and body populated.
    """
    empty_line = _HTTP_LINE_ENDING.encode('utf-8')
    num_empty_lines = 0
    batch_http_sub_response = HTTPResponse(None, '', dict(), b'')
    body_stream = BytesIO(sub_response.encode('utf-8'))
    try:
        while True:
            line = body_stream.readline()
            if line == b'':
                # End of the sub-response reached.
                return batch_http_sub_response
            if line.startswith("HTTP".encode('utf-8')):
                # Status line: "HTTP/<version> <statusCode> <statusName>".
                batch_http_sub_response.status = _to_int(
                    line.decode('utf-8').split(" ")[1])
            elif line == empty_line:
                num_empty_lines += 1
            elif line.startswith("x-ms-error-code".encode('utf-8')):
                batch_http_sub_response.message = \
                    line.decode('utf-8').split(": ")[1].rstrip()
            elif num_empty_lines == 2:
                # BUG FIX: was `num_empty_lines is 2` -- identity comparison
                # with an int literal, which only worked via CPython's small
                # int cache and raises SyntaxWarning on modern interpreters.
                batch_http_sub_response.body += line
            else:
                # BUG FIX: use partition instead of split(": ")[1] so header
                # values that themselves contain ": " are not truncated.
                header, _, value = line.decode('utf-8').partition(": ")
                batch_http_sub_response.headers[header] = value.rstrip()
    finally:
        body_stream.close()
|
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for saver."""
import os
import tempfile
import time
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import cluster_factory
from lingvo.core import py_utils
from lingvo.core import saver
from lingvo.core import test_utils
from lingvo.tasks.image.params import mnist
import numpy as np
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import test
# pylint: enable=g-direct-tensorflow-import
class SaverTest(test_utils.TestCase, parameterized.TestCase):
  """Tests for saver.Saver: save/restore round-trips and retention policies."""

  @staticmethod
  def _buildGraphAndSaver(logdir,
                          keep_latest_n=5,
                          keep_every_n_hours=None,
                          save_async=False):
    """Builds a LeNet5 MNIST graph plus a Saver writing into `logdir`.

    Every variable gets an IsFinite sanity check and the global step an
    InRange(0, 10) check, so a Save() with global_step > 10 must fail.

    Returns:
      (graph, saver.Saver instance, op that increments the global step).
    """
    tf.random.set_seed(123)
    g = tf.Graph()
    with g.as_default():
      p = mnist.LeNet5().Task()
      p.input = mnist.LeNet5().Train()
      with cluster_factory.ForTestingWorker(mode='sync', job='controller'):
        _ = p.Instantiate()
      gsv = py_utils.GetOrCreateGlobalStepVar()
      inc = gsv.assign_add(1)
      variables = tf.all_variables()
      sanity_checks = [([gsv], saver.InRange(0, 10))]
      for var in variables:
        sanity_checks.append(([var], saver.IsFinite()))
      sav = saver.Saver(
          logdir,
          variables,
          sanity_checks,
          keep_latest_n=keep_latest_n,
          keep_every_n_hours=keep_every_n_hours,
          async_save=save_async)
    return g, sav, inc

  @staticmethod
  def _checkpointIds(logdir):
    """Returns the ids of all checkpoints in `logdir`, sorted ascending."""
    filenames = tf.io.gfile.glob('{}/*'.format(logdir))
    print('\n'.join(filenames))
    ckpt_ids = []
    for f in filenames:
      # Each checkpoint leaves exactly one .meta file; use those to count.
      if f.endswith('.meta'):
        ckpt_id = saver.Saver.GetCheckpointId(f)
        ckpt_ids.append(ckpt_id)
    # Sort ascending.
    ckpt_ids.sort()
    return ckpt_ids

  @parameterized.parameters(True, False)
  def testBasic(self, save_async):
    """Save/restore round-trip; sanity checks abort out-of-range saves."""
    logdir = tempfile.mkdtemp()
    # Create a dummy file that looks like a checkpoint that shouldn't
    # be touched.
    with tf.io.gfile.GFile(logdir + '/ckpt-foo', 'w') as f:
      f.write('contents')

    g, sav, inc = self._buildGraphAndSaver(logdir, save_async=save_async)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      for _ in range(10):
        sess.run(inc)
        _ = sav.Save(sess)
      # Restore to the latest.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)
      # Restore to a specific checkpoint.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess, checkpoint_id=6)
      # Increments global_step out of range, Save() fails.
      for _ in range(5):
        sess.run(inc)
      with self.assertRaises(tf.errors.AbortedError):
        _ = sav.Save(sess)
        # Async saving throws the error only in the next attempt
        _ = sav.Save(sess)

    filenames = tf.io.gfile.glob('{}/*'.format(logdir))
    filenames = [x[len(logdir) + 1:] for x in filenames]
    self.assertIn('checkpoint', filenames)
    meta_files = []
    for f in filenames:
      if f.endswith('.meta'):
        meta_files.append(f)
    # A .meta for each checkpoint.
    self.assertLen(meta_files, 6)
    # 1 for checkpoint. 3 files per checkpoint. 5 good checkpoints, 1 bad.
    # 1 extra file contains the error message, and 1 dummy file
    self.assertLen(filenames, 1 + (5 + 1) * 3 + 1 + 1)

  @test.mock.patch.object(saver, 'time')
  def testBothPolicies(self, mock_time):
    """Test indefinite retention policy and recent policy."""
    fake_time = time.time()
    mock_time.time.return_value = fake_time
    logdir = tempfile.mkdtemp()
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=2, keep_every_n_hours=5)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      for _ in range(9):
        sess.run(inc)
        _ = sav.Save(sess)
        # Advance mock time one-ish hour.
        fake_time += 3601.0
        mock_time.time.return_value = fake_time
    ckpt_ids = self._checkpointIds(logdir)
    # 1,6 are kept due to indefinite policy
    # 8,9 due to recent policy.
    self.assertEqual([1, 6, 8, 9], ckpt_ids)

  def testRecentOnlyPreempt(self):
    """Test only recent retention policy when there's pre-emptions."""
    logdir = tempfile.mkdtemp()
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=5, keep_every_n_hours=None)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      for _ in range(5):
        sess.run(inc)
        _ = sav.Save(sess)
      # Restore to the latest.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)

    # Simulate a pre-emption, create a brand new graph/saver.
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=5, keep_every_n_hours=None)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)
      for _ in range(4):
        sess.run(inc)
        _ = sav.Save(sess)
    ckpt_ids = self._checkpointIds(logdir)
    # Expect only the most recent 5.
    self.assertEqual([5, 6, 7, 8, 9], ckpt_ids)

  def testIndefinitePreempt(self):
    """Test indefinite retention policy when there's pre-emptions."""
    logdir = tempfile.mkdtemp()
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=0, keep_every_n_hours=1e-9)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      for _ in range(5):
        sess.run(inc)
        _ = sav.Save(sess)
      # Restore to the latest.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)

    # Simulate a pre-emption, create a brand new graph/saver and run
    # a few steps.
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=1, keep_every_n_hours=1e-9)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)
      for _ in range(4):
        sess.run(inc)
        _ = sav.Save(sess)
    ckpt_ids = self._checkpointIds(logdir)
    # We expect all 9 checkpoints.
    self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9], ckpt_ids)

  @test.mock.patch.object(saver, 'time')
  def testBothPoliciesPreempt(self, mock_time):
    """Test indefinite retention policy and recent policy."""
    fake_time = time.time()
    mock_time.time.return_value = fake_time
    logdir = tempfile.mkdtemp()
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=2, keep_every_n_hours=5)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      for _ in range(6):
        sess.run(inc)
        _ = sav.Save(sess)
        # Advance mock time one-ish hour.
        fake_time += 3601.0
        mock_time.time.return_value = fake_time

    # Large gap so the first post-preemption checkpoint qualifies for
    # indefinite retention.
    fake_time += 100000.0
    mock_time.time.return_value = fake_time

    # Simulate a pre-emption, create a brand new graph/saver and run
    # a few steps.
    g, sav, inc = self._buildGraphAndSaver(
        logdir, keep_latest_n=2, keep_every_n_hours=5)
    with self.session(graph=g) as sess:
      # Creates a few checkpoints.
      sess.run(tf.global_variables_initializer())
      _ = sav.Restore(sess)
      for _ in range(4):
        sess.run(inc)
        _ = sav.Save(sess)
        fake_time += 3601.0
        mock_time.time.return_value = fake_time
    ckpt_ids = self._checkpointIds(logdir)
    # 1,6,7 are kept due to indefinite policy
    # 9,10 due to recent policy.
    self.assertEqual([1, 6, 7, 9, 10], ckpt_ids)

  def testSingleCheckpoint(self):
    """A bare global-step graph with no sanity checks saves cleanly."""
    logdir = tempfile.mkdtemp()
    g = tf.Graph()
    with g.as_default():
      _ = py_utils.GetOrCreateGlobalStepVar()
      sav = saver.Saver(logdir, tf.all_variables(), [], keep_latest_n=1)
    with self.session(graph=g) as sess:
      sess.run(tf.global_variables_initializer())
      _ = sav.Save(sess)

  def testWriteReadNpArrays(self):
    """WriteNpArrays/ReadNpArrays round-trips a NestedMap of ndarrays."""
    prefix = os.path.join(tempfile.mkdtemp(), 'nptest')
    nmap = py_utils.NestedMap()
    nmap.train = np.random.normal(size=(3, 3))
    nmap.test = np.random.normal(size=(1, 3))
    nmap.foo = py_utils.NestedMap()
    nmap.foo.bar = np.arange(10).astype(np.int32).reshape([2, 5])
    saver.WriteNpArrays(prefix, nmap)
    files = sorted(tf.io.gfile.glob(prefix + '*'))
    self.assertLen(files, 2)
    self.assertEqual(files[0], prefix + '.data-00000-of-00001')
    self.assertEqual(files[1], prefix + '.index')
    # Reader needs only the dtypes; values must match what was written.
    read_nmap = saver.ReadNpArrays(prefix, nmap.Transform(lambda x: x.dtype))
    self.assertTrue(nmap.IsCompatible(read_nmap))
    self.assertAllEqual(nmap.train, read_nmap.train)
    self.assertAllEqual(nmap.test, read_nmap.test)
    self.assertAllEqual(nmap.foo.bar, read_nmap.foo.bar)
# Run the TF test driver when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
|
|
from elasticsearch import Elasticsearch
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
from scipy import stats
import datetime as dt
import math
import json
import pprint
# Read the runtime configuration, one value per line; lines at index 4 and 5
# hold the Elasticsearch host and port.
# NOTE(review): "r+" requests write access although the file is only read --
# plain "r" would suffice; confirm nothing relies on the writable handle.
with open("config", "r+") as txt:
    contents = list(map(str.rstrip, txt))

# Shared Elasticsearch client; generous timeout for large scroll queries.
esCon = Elasticsearch([{
    'host': contents[4], 'port': contents[5]
}], timeout=1000)

# Pretty-printer used for console dumps of the per-metric matrices.
pp = pprint.PrettyPrinter(indent=4)
def utcDate(time):
    """Convert a POSIX timestamp to a timezone-aware UTC datetime."""
    return dt.datetime.fromtimestamp(time, tz=dt.timezone.utc)
def utcStamp(time):
    """Convert a 'YYYY-MM-DDTHH:MM:SS' string to a POSIX timestamp (UTC).

    BUG FIX: the original format used '%X', the *locale-dependent* "time
    representation" directive, which only matches 'HH:MM:SS' under C-like
    locales; '%H:%M:%S' parses the input unambiguously everywhere.
    """
    parsed = dt.datetime.strptime(time, '%Y-%m-%dT%H:%M:%S')
    return parsed.replace(tzinfo=dt.timezone.utc).timestamp()
# Keep each Elasticsearch scroll context alive for three minutes between fetches.
scrollPreserve="3m"
# Analysis window boundaries (UTC).
startDate = "2017-02-14T00:00:00"
endDate = "2017-02-15T00:00:00"
utcStart = utcStamp(startDate)
utcEnd = utcStamp(endDate)
# Seconds in one day: 24 * 60 * 60.
oneDay = np.multiply(24,np.multiply(60,60))
# Maximum hits requested per Elasticsearch page.
querySize = 10000
def esConAgg(field):
    """Return the distinct values of `field` in the index as a numpy array.

    Runs a terms aggregation over the "net-health" index and collects the
    bucket keys.  Returns None when the aggregation yields no buckets.
    """
    queryBody = {"aggs": {
        "dev": {
            "terms": {"field": field}
        }
    }
    }
    scannerCon = esCon.search(index="net-health",
                              body=queryBody,
                              doc_type="DIGIRECO",
                              size=querySize,
                              search_type="query_then_fetch",
                              scroll=scrollPreserve)
    buckets = scannerCon['aggregations']['dev']['buckets']
    # BUG FIX: the original compared the bucket *list* against the int 0
    # (`buckets == 0`), which is never true, so the None branch was
    # unreachable even for empty results.  Test emptiness directly.
    if not buckets:
        return None
    # Build the array in one shot instead of repeated np.append (O(n^2)).
    return np.array([bucket['key'] for bucket in buckets])
def esClear(ids):
    """Release the server-side scroll contexts identified by `ids`."""
    return esCon.clear_scroll(scroll_id=ids)
# Metric fields harvested from each Elasticsearch hit; every one is paired
# with the job's meanCpuEff and meanInputGB into rows of an (N, 3) matrix.
_QUERY_METRICS = ('meansrcThroughput', 'meandestThroughput',
                  'meansrcPacket', 'meandestPacket',
                  'meansrcLatency', 'meandestLatency')


def _collect_metric(store, metric, source):
    """Append one (metric, meanCpuEff, meanInputGB) row to store[metric].

    Creates the (1, 3) array on first sight of `metric`, otherwise stacks the
    new row under the existing (N, 3) array.
    """
    row = np.array([source[metric],
                    source["meanCpuEff"],
                    source["meanInputGB"]])
    if metric not in store:
        store[metric] = np.reshape(row, (1, 3))
    else:
        store[metric] = np.vstack((store[metric], row))


def esConQuery(src, dest, slot):
    """Fetch all hits for a (src, dest, worker-host) triple on the query day.

    Returns a dict keyed by workflow name; each value maps a metric name from
    _QUERY_METRICS to an (N, 3) numpy array of
    (metric, meanCpuEff, meanInputGB) rows.  Returns None when the query
    matches nothing.

    NOTE(review): the date filter uses the module-level `utcStart`, which the
    daily loop in main() never advances -- every call appears to query the
    same day; confirm the intended window.
    """
    queryBody = {"query":
                 {"bool": {
                     "must": [
                         {"match": {"src": src}},
                         {"match": {"dest": dest}},
                         {"match": {"LastRemoteHost": slot}},
                         {"range": {
                             "beginDate": {
                                 "gt": int(utcStart),
                                 "lt": int((utcStart + oneDay))
                             }
                         }}
                     ]
                 }}, "sort": {"beginDate": {"order": "desc"}}
                 }
    scannerCon = esCon.search(index="net-health",
                              body=queryBody,
                              doc_type="DIGIRECO",
                              size=querySize,
                              search_type="query_then_fetch",
                              scroll=scrollPreserve)
    scrollIdCon = scannerCon['_scroll_id']
    conTotalRec = scannerCon["hits"]["total"]
    if conTotalRec == 0:
        return None

    # NOTE(review): the hits returned by the initial search above are never
    # read; scrolling effectively starts at the second page -- confirm this
    # is intentional.
    idList = []
    arrRet = {}
    while conTotalRec > 0:
        idList.append(str(scrollIdCon))
        responseCon = esCon.scroll(scroll_id=scrollIdCon,
                                   scroll=scrollPreserve)
        for hit in responseCon["hits"]["hits"]:
            source = hit["_source"]
            workflow = str(source["Workflow"])
            metrics = arrRet.setdefault(workflow, {})
            for metric in _QUERY_METRICS:
                if metric in source:
                    _collect_metric(metrics, metric, source)
        conTotalRec -= len(responseCon['hits']['hits'])
    esClear(idList)
    return arrRet
#print(esConAgg("src"))
#print(esConAgg("dest"))
# One entry per network metric produced by esConQuery:
# (metric key, human-readable axis label, provenance line for the report).
_METRIC_REPORTS = (
    ('meansrcThroughput', 'Source Throughput',
     'Throughput value measured at work site'),
    ('meandestThroughput', 'Destination Throughput',
     'Throughput value measured at data site'),
    ('meansrcPacket', 'Source Packet Loss',
     'Packet loss value measured at work site'),
    ('meandestPacket', 'Destination Packet Loss',
     'Packet loss value measured at data site'),
    ('meansrcLatency', 'Source Latency',
     'Latency value measured at work site'),
    ('meandestLatency', 'Destination Latency',
     'Latency value measured at data site'),
)


def _report_metric(pc, ww, countBit, data, metric, label, location_line,
                   hit, ping, pong, slot, workDate):
    """Regress CPU efficiency and input volume against one network metric.

    `data` is an (N, 3) array of (metric, meanCpuEff, meanInputGB) rows.
    When both regressions are significant (p < 0.05) the scatter plots are
    appended to the PDF `pc` and the fit statistics to the report `ww`; a
    negative CPU-efficiency slope additionally bumps the `countBit` tallies
    (mutated in place).
    """
    print(metric)
    pp.pprint(data)
    cslope, cintercept, cr_value, cp_value, cstd_err = stats.linregress(
        data[:, 0], data[:, 1])
    eslope, eintercept, er_value, ep_value, estd_err = stats.linregress(
        data[:, 0], data[:, 2])
    if not (cp_value < 0.05 and ep_value < 0.05):
        return
    if cslope < 0:
        countBit[str(ping + "TO" + pong)] += 1
        countBit[str(slot)] += 1
        countBit["total"] += 1
    fig, axs = plt.subplots(2, sharex=True)
    axs[0].scatter(data[:, 0], data[:, 1])
    axs[1].scatter(data[:, 0], data[:, 2])
    axs[0].set_ylabel("meanCpuEff")
    axs[1].set_ylabel("meanInputGB")
    axs[1].set_xlabel(label + " (" + hit + ")")
    axs[0].set_title(str(ping + " to " + pong + " on " +
                         workDate.strftime('%d-%B-%Y')))
    pc.savefig(fig)
    plt.close(fig)
    ww.write(str("Workflow: " + hit + "\n"))
    ww.write(str("Work site: " + ping + "\n"))
    ww.write(str("Data site: " + pong + "\n"))
    ww.write(str(location_line + "\n"))
    ww.write(str("X: " + label + " Y: meanCpuEff\n"))
    ww.write(str("c_Slope: " + str(cslope) + "\n"))
    ww.write(str("c_Intercept: " + str(cintercept) + "\n"))
    ww.write(str("c_R Value: " + str(cr_value) + "\n"))
    ww.write(str("c_P Value: " + str(cp_value) + "\n"))
    ww.write(str("c_std err: " + str(cstd_err) + "\n"))
    ww.write(str("X: " + label + " Y: Event Rate\n"))
    ww.write(str("e_Slope: " + str(eslope) + "\n"))
    ww.write(str("e_Intercept: " + str(eintercept) + "\n"))
    ww.write(str("e_R Value: " + str(er_value) + "\n"))
    ww.write(str("e_P Value: " + str(ep_value) + "\n"))
    ww.write(str("e_std err: " + str(estd_err) + "\n"))
    ww.write(str("\n\n"))


def main(utcStart):
    """Correlate grid-job performance with network metrics, day by day.

    Walks each day from `utcStart` to the module-level `utcEnd`, queries
    every (source site, destination site, worker host) combination, and for
    each of the six network metrics writes significant regressions as scatter
    plots to PDFOut/CMS_OverByte.pdf and statistics to WorkOut/OverByte.out.
    The original six near-identical ~35-line blocks are now a single loop
    over _METRIC_REPORTS driving _report_metric.
    """
    with PdfPages('PDFOut/CMS_OverByte.pdf') as pc:
        d = pc.infodict()
        d['Title'] = 'CMS Scatter Plots'
        d['Author'] = u'Jerrod T. Dixon\xe4nen'
        d['Subject'] = 'Plot of network affects on grid jobs'
        d['Keywords'] = 'PdfPages matplotlib CMS grid'
        d['CreationDate'] = dt.datetime.today()
        d['ModDate'] = dt.datetime.today()

        countBit = {"total": 0}   # significant negative-slope tallies
        countPong = {"total": 0}  # hit counts per data-destination site

        with open("WorkOut/OverByte.out", "w") as ww:
            while utcStart <= utcEnd:
                srcSites = esConAgg("src")
                destSites = esConAgg("dest")
                prevSites = esConAgg("LastRemoteHost")
                workDate = utcDate(utcStart)
                for ping in srcSites:
                    for pong in destSites:
                        for slot in prevSites:
                            # NOTE(review): esConQuery filters on the
                            # module-level utcStart, not this loop's local
                            # copy, so every day appears to query the same
                            # window; confirm intent.
                            qResults = esConQuery(ping, pong, slot)
                            if qResults is None:
                                continue
                            ww.write(str(workDate.strftime('%d-%B-%Y') + "\n"))
                            for hit in qResults:
                                countBit.setdefault(str(ping + "TO" + pong), 0)
                                countPong.setdefault(str(pong), 0)
                                countBit.setdefault(str(slot), 0)
                                countPong[str(pong)] += 1
                                countPong["total"] += 1
                                for metric, label, location in _METRIC_REPORTS:
                                    if metric in qResults[hit]:
                                        _report_metric(
                                            pc, ww, countBit,
                                            qResults[hit][metric], metric,
                                            label, location, hit, ping,
                                            pong, slot, workDate)
                utcStart = utcStart + oneDay

            # Summary section: relative frequency of negative-slope findings.
            ww.write("\n\n\n")
            counter = 0
            for hit in countBit:
                if hit != "total":
                    counter += countBit[str(hit)]
                    ww.write(str(hit + " occurs " +
                                 str(countBit[str(hit)] / countBit["total"]) +
                                 "\n"))
            ww.write("\n")
            ww.write("Total occurs " + str(counter / countBit["total"]))
            ww.write("\n\n\n")
            for hit in countPong:
                if hit != "total":
                    ww.write(str(hit + " data location " +
                                 str(countPong[str(hit)] / countPong["total"]) +
                                 "\n"))
# Run Main code
# Entry point: processes every day from the configured start date and emits
# the PDF plots plus the text report.
print("start")
main(utcStart)
print("finish")
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import warnings
import numpy
from numpy import array
from pyspark import RDD, since
from pyspark.streaming import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import DenseVector, SparseVector, _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint, LinearModel, _regression_train_wrapper,
StreamingLinearAlgorithm)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'LogisticRegressionWithLBFGS',
'SVMModel', 'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes',
'StreamingLogisticRegressionWithSGD']
class LinearClassificationModel(LinearModel):
    """
    A private abstract class representing a multiclass classification
    model. The categories are represented by int values: 0, 1, 2, etc.
    """
    def __init__(self, weights, intercept):
        super(LinearClassificationModel, self).__init__(weights, intercept)
        # When None, predict() yields raw scores instead of 0/1 labels.
        self._threshold = None

    @since('1.4.0')
    def predict(self, test):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        raise NotImplementedError

    @since('1.4.0')
    def setThreshold(self, value):
        """
        Sets the decision threshold separating positive predictions from
        negative ones: an example whose raw prediction score is greater
        than or equal to this threshold is labelled positive, anything
        below it negative. Binary classification only.
        """
        self._threshold = value

    @property
    @since('1.4.0')
    def threshold(self):
        """
        The threshold (if any) applied when converting raw prediction
        scores into 0/1 predictions. Binary classification only.
        """
        return self._threshold

    @since('1.4.0')
    def clearThreshold(self):
        """
        Removes the threshold so that `predict` outputs raw prediction
        scores. Binary classification only.
        """
        self._threshold = None
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
:param weights:
Weights computed for every feature.
:param intercept:
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
    not be a single value, so the intercepts will be part of the
weights.)
:param numFeatures:
The dimension of the features.
:param numClasses:
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(array([0.0, 1.0]))
1
>>> lrm.predict(array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
.. versionadded:: 0.9.0
"""
def __init__(self, weights, intercept, numFeatures, numClasses):
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // (self._numClasses - 1)
self._weightsMatrix = self._coeff.toArray().reshape(self._numClasses - 1,
self._dataWithBiasSize)
    @property
    @since('1.4.0')
    def numFeatures(self):
        """
        Dimension of the features.
        """
        # Coerced to int in __init__; read-only for callers.
        return self._numFeatures
    @property
    @since('1.4.0')
    def numClasses(self):
        """
        Number of possible outcomes for k classes classification problem
        in Multinomial Logistic Regression.
        """
        # 2 for binary models (the default), k for multinomial models.
        return self._numClasses
    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        For a binary model this returns the 0/1 label, or the raw
        probability when the threshold has been cleared.  For a
        multinomial model the predicted class index is returned.
        """
        if isinstance(x, RDD):
            # Distribute the prediction over the RDD, one vector at a time.
            return x.map(lambda v: self.predict(v))
        x = _convert_to_vector(x)
        if self.numClasses == 2:
            margin = self.weights.dot(x) + self._intercept
            # Evaluate the sigmoid in a numerically stable way: never call
            # exp() with a large positive argument.
            if margin > 0:
                prob = 1 / (1 + exp(-margin))
            else:
                exp_margin = exp(margin)
                prob = exp_margin / (1 + exp_margin)
            if self._threshold is None:
                # clearThreshold() was called: return the raw probability.
                return prob
            else:
                return 1 if prob > self._threshold else 0
        else:
            # Multinomial: class 0 is the base class with implicit margin 0.0;
            # pick the class with the largest margin among the others.
            best_class = 0
            max_margin = 0.0
            if x.size + 1 == self._dataWithBiasSize:
                # Weights were trained with a bias column appended, so the
                # last entry of each row is the per-class intercept.
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
                        self._weightsMatrix[i][x.size]
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            else:
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i])
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            return best_class
    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        # Mirror this model into its JVM counterpart and delegate the
        # actual persistence to the Scala implementation.
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
            _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
        java_model.save(sc._jsc.sc(), path)
    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        # Load through the JVM implementation, then rebuild the Python-side
        # model from its weights, intercept, dimensions and threshold.
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        numFeatures = java_model.numFeatures()
        numClasses = java_model.numClasses()
        threshold = java_model.getThreshold().get()
        model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
        # Restore the decision threshold saved with the Scala model.
        model.setThreshold(threshold)
        return model
class LogisticRegressionWithSGD(object):
    """
    Train a classification model for Binary Logistic Regression using
    Stochastic Gradient Descent.
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or
        LogisticRegressionWithLBFGS.
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.01, regType="l2", intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a logistic regression model on the given data.
        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:
            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        # Emit the deprecation with an explicit DeprecationWarning category:
        # warnings.warn defaults to UserWarning, which hides the message from
        # tooling that filters on DeprecationWarning.
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
            "LogisticRegressionWithLBFGS.", DeprecationWarning)
        # Closure handed to the shared training wrapper; `i` is the vector of
        # initial weights chosen by _regression_train_wrapper.
        def train(rdd, i):
            return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
                                 float(step), float(miniBatchFraction), i, float(regParam),
                                 regType, bool(intercept), bool(validateData),
                                 float(convergenceTol))
        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS(object):
    """
    Train a classification model for Multinomial/Binary Logistic Regression
    using Limited-memory BFGS.
    .. versionadded:: 1.2.0
    """
    @classmethod
    @since('1.2.0')
    def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
              intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
        """
        Train a logistic regression model on the given data.
        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:
            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param corrections:
          The number of corrections used in the LBFGS update.
          If a known updater is used for binary classification,
          it calls the ml implementation and this parameter will
          have no effect. (default: 10)
        :param tolerance:
          The convergence tolerance of iterations for L-BFGS.
          (default: 1e-6)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param numClasses:
          The number of classes (i.e., outcomes) a label can take in
          Multinomial Logistic Regression.
          (default: 2)
        >>> data = [
        ...     LabeledPoint(0.0, [0.0, 1.0]),
        ...     LabeledPoint(1.0, [1.0, 0.0]),
        ... ]
        >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
        >>> lrm.predict([1.0, 0.0])
        1
        >>> lrm.predict([0.0, 1.0])
        0
        """
        # Closure handed to the shared training wrapper; `startWeights` is the
        # initial weight vector chosen by _regression_train_wrapper.
        def train(rdd, startWeights):
            return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations),
                                 startWeights, float(regParam), regType, bool(intercept),
                                 int(corrections), float(tolerance), bool(validateData),
                                 int(numClasses))
        if initialWeights is None:
            numFeatures = len(data.first().features)
            if numClasses == 2:
                # Binary model: one weight per feature.
                initialWeights = [0.0] * numFeatures
            else:
                # Multinomial model: (numClasses - 1) stacked weight rows,
                # each widened by one slot when an intercept is requested.
                rowSize = numFeatures + 1 if intercept else numFeatures
                initialWeights = [0.0] * (rowSize * (numClasses - 1))
        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
    """
    Model for Support Vector Machines (SVMs).
    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(1.0, [2.0]),
    ...     LabeledPoint(1.0, [3.0])
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
    >>> svm.predict([1.0])
    1
    >>> svm.predict(sc.parallelize([[1.0]])).collect()
    [1]
    >>> svm.clearThreshold()
    >>> svm.predict(array([1.0]))
    1.44...
    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> svm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> svm.predict(SparseVector(2, {0: -1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> svm.save(sc, path)
    >>> sameModel = SVMModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {1: 1.0}))
    1
    >>> sameModel.predict(SparseVector(2, {0: -1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...    rmtree(path)
    ... except:
    ...    pass
    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept):
        super(SVMModel, self).__init__(weights, intercept)
        # SVM decision rule defaults to sign(margin): threshold at 0.0.
        self._threshold = 0.0
    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        if isinstance(x, RDD):
            # Distribute the prediction over the RDD, one vector at a time.
            return x.map(lambda v: self.predict(v))
        x = _convert_to_vector(x)
        # Raw decision value; returned as-is when clearThreshold() was
        # called, otherwise compared against the threshold for a 0/1 label.
        margin = self.weights.dot(x) + self.intercept
        if self._threshold is None:
            return margin
        else:
            return 1 if margin > self._threshold else 0
    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        # Delegate persistence to the JVM implementation.
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)
    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        # Restore the decision threshold saved with the Scala model.
        threshold = java_model.getThreshold().get()
        model = SVMModel(weights, intercept)
        model.setThreshold(threshold)
        return model
class SVMWithSGD(object):
    """
    Train a Support Vector Machine using Stochastic Gradient Descent.
    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, regType="l2",
              intercept=False, validateData=True, convergenceTol=0.001):
        """
        Train a support vector machine on the given data.
        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regType:
          The type of regularizer used for training our model.
          Allowed values:
            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        # Closure handed to the shared training wrapper; `startWeights` is
        # the initial weight vector chosen by _regression_train_wrapper.
        def trainSVM(rdd, startWeights):
            return callMLlibFunc("trainSVMModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), startWeights,
                                 regType, bool(intercept), bool(validateData),
                                 float(convergenceTol))
        return _regression_train_wrapper(trainSVM, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader):
    """
    Model for Naive Bayes classifiers.
    :param labels:
      List of labels.
    :param pi:
      Log of class priors, whose dimension is C, number of labels.
    :param theta:
      Log of class conditional probabilities, whose dimension is C-by-D,
      where D is number of features.
    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 0.0]),
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(data))
    >>> model.predict(array([0.0, 1.0]))
    0.0
    >>> model.predict(array([1.0, 0.0]))
    1.0
    >>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
    [1.0]
    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(sparse_data))
    >>> model.predict(SparseVector(2, {1: 1.0}))
    0.0
    >>> model.predict(SparseVector(2, {0: 1.0}))
    1.0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = NaiveBayesModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    .. versionadded:: 0.9.0
    """
    def __init__(self, labels, pi, theta):
        # labels: class labels; pi: log class priors; theta: log class
        # conditional probabilities (one row per class).
        self.labels = labels
        self.pi = pi
        self.theta = theta
    @since('0.9.0')
    def predict(self, x):
        """
        Return the most likely class for a data vector
        or an RDD of vectors
        """
        if isinstance(x, RDD):
            # Distribute the prediction over the RDD, one vector at a time.
            return x.map(lambda v: self.predict(v))
        x = _convert_to_vector(x)
        # Unnormalized log-posterior per class is pi + theta . x; argmax
        # selects the most likely class label.
        return self.labels[numpy.argmax(self.pi + x.dot(self.theta.transpose()))]
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        # Convert numpy arrays to plain lists so they can cross into the JVM.
        java_labels = _py2java(sc, self.labels.tolist())
        java_pi = _py2java(sc, self.pi.tolist())
        java_theta = _py2java(sc, self.theta.tolist())
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
            java_labels, java_pi, java_theta)
        java_model.save(sc._jsc.sc(), path)
    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
            sc._jsc.sc(), path)
        # Can not unpickle array.array from Pyrolite in Python3 with "bytes"
        py_labels = _java2py(sc, java_model.labels(), "latin1")
        py_pi = _java2py(sc, java_model.pi(), "latin1")
        py_theta = _java2py(sc, java_model.theta(), "latin1")
        return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes(object):
    """
    Trainer for Naive Bayes classification models.
    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, lambda_=1.0):
        """
        Train a Naive Bayes model given an RDD of (label, features)
        vectors.
        This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
        can handle all kinds of discrete data. For example, by
        converting documents into TF-IDF vectors, it can be used for
        document classification. By making every vector a 0-1 vector,
        it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
        The input feature values must be nonnegative.
        :param data:
          RDD of LabeledPoint.
        :param lambda_:
          The smoothing parameter.
          (default: 1.0)
        """
        # Sanity-check the element type on the first record before shipping
        # the whole RDD to the JVM trainer.
        if not isinstance(data.first(), LabeledPoint):
            raise ValueError("`data` should be an RDD of LabeledPoint")
        labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
        return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a logistic regression model on streaming data.
    Training uses Stochastic Gradient Descent to update the model based on
    each new batch of incoming data from a DStream.
    Each batch of data is assumed to be an RDD of LabeledPoints.
    The number of data points per batch can vary, but the number
    of features must be constant. An initial weight
    vector must be provided.
    :param stepSize:
      Step size for each iteration of gradient descent.
      (default: 0.1)
    :param numIterations:
      Number of iterations run for each batch of data.
      (default: 50)
    :param miniBatchFraction:
      Fraction of each batch of data to use for updates.
      (default: 1.0)
    :param regParam:
      L2 Regularization parameter.
      (default: 0.0)
    :param convergenceTol:
      Value used to determine when to terminate iterations.
      (default: 0.001)
    .. versionadded:: 1.5.0
    """
    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.0,
                 convergenceTol=0.001):
        # SGD hyper-parameters reused for every incoming batch.
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.regParam = regParam
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        # No model until setInitialWeights() is called.
        self._model = None
        super(StreamingLogisticRegressionWithSGD, self).__init__(
            model=self._model)
    @since('1.5.0')
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.
        This must be set before running trainOn and predictOn.
        """
        initialWeights = _convert_to_vector(initialWeights)
        # LogisticRegressionWithSGD does only binary classification.
        self._model = LogisticRegressionModel(
            initialWeights, 0, initialWeights.size, 2)
        return self
    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)
        def update(rdd):
            # LogisticRegressionWithSGD.train raises an error for an empty RDD.
            if not rdd.isEmpty():
                # Retrain on each batch, warm-starting from the current
                # model's weights.
                self._model = LogisticRegressionWithSGD.train(
                    rdd, self.numIterations, self.stepSize,
                    self.miniBatchFraction, self._model.weights,
                    regParam=self.regParam, convergenceTol=self.convergenceTol)
        dstream.foreachRDD(update)
def _test():
    """Run this module's doctests against a local SparkSession and exit
    with a non-zero status if any of them fail."""
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.mllib.classification
    globs = pyspark.mllib.classification.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.classification tests")\
        .getOrCreate()
    # Doctests reference `sc` directly.
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Use sys.exit: the bare `exit` builtin is injected by site.py and is
        # not guaranteed to exist (e.g. when Python runs with -S).
        sys.exit(-1)
if __name__ == "__main__":
    _test()
|
|
"""
Provides an APIView class that is the base of all views in REST framework.
"""
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import connections, models
from django.http import Http404
from django.http.response import HttpResponseBase
from django.utils.cache import cc_delim_re, patch_vary_headers
from django.utils.encoding import smart_str
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from rest_framework import exceptions, status
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.schemas import DefaultSchema
from rest_framework.settings import api_settings
from rest_framework.utils import formatting
def get_view_name(view):
    """
    Given a view instance, return a textual name to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.
    This function is the default for the `VIEW_NAME_FUNCTION` setting.
    """
    # An explicit `name` attribute (e.g. set by a ViewSet) always wins.
    explicit = getattr(view, 'name', None)
    if explicit is not None:
        return explicit
    # Otherwise derive a name from the class: drop the conventional
    # suffixes, then split CamelCase into separate words.
    derived = view.__class__.__name__
    derived = formatting.remove_trailing_string(derived, 'View')
    derived = formatting.remove_trailing_string(derived, 'ViewSet')
    derived = formatting.camelcase_to_spaces(derived)
    # A ViewSet may additionally supply a suffix (e.g. 'List', 'Instance').
    suffix = getattr(view, 'suffix', None)
    return derived + ' ' + suffix if suffix else derived
def get_view_description(view, html=False):
    """
    Given a view instance, return a textual description to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.
    This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
    """
    # Prefer an explicit `description` attribute (e.g. set by a ViewSet);
    # fall back to the view class's docstring.
    description = getattr(view, 'description', None)
    if description is None:
        description = view.__class__.__doc__ or ''
        description = formatting.dedent(smart_str(description))
    return formatting.markup_description(description) if html else description
def set_rollback():
    """
    Mark every ATOMIC_REQUESTS connection currently inside an atomic block
    for rollback, so the transaction is not committed after an API error.
    """
    for connection in connections.all():
        if connection.settings_dict['ATOMIC_REQUESTS'] and connection.in_atomic_block:
            connection.set_rollback(True)
def exception_handler(exc, context):
    """
    Returns the response that should be used for any given exception.
    By default we handle the REST framework `APIException`, and also
    Django's built-in `Http404` and `PermissionDenied` exceptions.
    Any unhandled exceptions may return `None`, which will cause a 500 error
    to be raised.
    """
    # Coerce Django's own exceptions into their DRF equivalents so they are
    # rendered through the normal API machinery.
    if isinstance(exc, Http404):
        exc = exceptions.NotFound()
    elif isinstance(exc, PermissionDenied):
        exc = exceptions.PermissionDenied()
    if not isinstance(exc, exceptions.APIException):
        # Unknown exception type: signal the caller to re-raise (500).
        return None
    headers = {}
    auth_header = getattr(exc, 'auth_header', None)
    if auth_header:
        headers['WWW-Authenticate'] = auth_header
    wait = getattr(exc, 'wait', None)
    if wait:
        headers['Retry-After'] = '%d' % wait
    data = exc.detail if isinstance(exc.detail, (list, dict)) else {'detail': exc.detail}
    set_rollback()
    return Response(data, status=exc.status_code, headers=headers)
class APIView(View):
    """
    Base class for all views in REST framework.
    Extends Django's `View` with request wrapping, content negotiation,
    authentication, permission and throttle checks, versioning, and
    API-style exception handling around the HTTP method handlers.
    """
    # The following policies may be set at either globally, or per-view.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
    parser_classes = api_settings.DEFAULT_PARSER_CLASSES
    authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
    throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES
    permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
    content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
    metadata_class = api_settings.DEFAULT_METADATA_CLASS
    versioning_class = api_settings.DEFAULT_VERSIONING_CLASS
    # Allow dependency injection of other settings to make testing easier.
    settings = api_settings
    schema = DefaultSchema()
    @classmethod
    def as_view(cls, **initkwargs):
        """
        Store the original class on the view function.
        This allows us to discover information about the view when we do URL
        reverse lookups. Used for breadcrumb generation.
        """
        # Guard against accidentally caching a QuerySet at class level.
        if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):
            def force_evaluation():
                raise RuntimeError(
                    'Do not evaluate the `.queryset` attribute directly, '
                    'as the result will be cached and reused between requests. '
                    'Use `.all()` or call `.get_queryset()` instead.'
                )
            cls.queryset._fetch_all = force_evaluation
        view = super().as_view(**initkwargs)
        view.cls = cls
        view.initkwargs = initkwargs
        # Note: session based authentication is explicitly CSRF validated,
        # all other authentication is CSRF exempt.
        return csrf_exempt(view)
    @property
    def allowed_methods(self):
        """
        Wrap Django's private `_allowed_methods` interface in a public property.
        """
        return self._allowed_methods()
    @property
    def default_response_headers(self):
        headers = {
            'Allow': ', '.join(self.allowed_methods),
        }
        if len(self.renderer_classes) > 1:
            # More than one renderer means the body varies by Accept header.
            headers['Vary'] = 'Accept'
        return headers
    def http_method_not_allowed(self, request, *args, **kwargs):
        """
        If `request.method` does not correspond to a handler method,
        determine what kind of exception to raise.
        """
        raise exceptions.MethodNotAllowed(request.method)
    def permission_denied(self, request, message=None, code=None):
        """
        If request is not permitted, determine what kind of exception to raise.
        """
        # Unauthenticated requests get a 401 rather than a 403.
        if request.authenticators and not request.successful_authenticator:
            raise exceptions.NotAuthenticated()
        raise exceptions.PermissionDenied(detail=message, code=code)
    def throttled(self, request, wait):
        """
        If request is throttled, determine what kind of exception to raise.
        """
        raise exceptions.Throttled(wait)
    def get_authenticate_header(self, request):
        """
        If a request is unauthenticated, determine the WWW-Authenticate
        header to use for 401 responses, if any.
        """
        authenticators = self.get_authenticators()
        if authenticators:
            return authenticators[0].authenticate_header(request)
    def get_parser_context(self, http_request):
        """
        Returns a dict that is passed through to Parser.parse(),
        as the `parser_context` keyword argument.
        """
        # Note: Additionally `request` and `encoding` will also be added
        #       to the context by the Request object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {})
        }
    def get_renderer_context(self):
        """
        Returns a dict that is passed through to Renderer.render(),
        as the `renderer_context` keyword argument.
        """
        # Note: Additionally 'response' will also be added to the context,
        #       by the Response object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }
    def get_exception_handler_context(self):
        """
        Returns a dict that is passed through to EXCEPTION_HANDLER,
        as the `context` argument.
        """
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }
    def get_view_name(self):
        """
        Return the view name, as used in OPTIONS responses and in the
        browsable API.
        """
        func = self.settings.VIEW_NAME_FUNCTION
        return func(self)
    def get_view_description(self, html=False):
        """
        Return some descriptive text for the view, as used in OPTIONS responses
        and in the browsable API.
        """
        func = self.settings.VIEW_DESCRIPTION_FUNCTION
        return func(self, html)
    # API policy instantiation methods
    def get_format_suffix(self, **kwargs):
        """
        Determine if the request includes a '.json' style format suffix
        """
        if self.settings.FORMAT_SUFFIX_KWARG:
            return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)
    def get_renderers(self):
        """
        Instantiates and returns the list of renderers that this view can use.
        """
        return [renderer() for renderer in self.renderer_classes]
    def get_parsers(self):
        """
        Instantiates and returns the list of parsers that this view can use.
        """
        return [parser() for parser in self.parser_classes]
    def get_authenticators(self):
        """
        Instantiates and returns the list of authenticators that this view can use.
        """
        return [auth() for auth in self.authentication_classes]
    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        return [permission() for permission in self.permission_classes]
    def get_throttles(self):
        """
        Instantiates and returns the list of throttles that this view uses.
        """
        return [throttle() for throttle in self.throttle_classes]
    def get_content_negotiator(self):
        """
        Instantiate and return the content negotiation class to use.
        """
        # Cached per-view-instance.
        if not getattr(self, '_negotiator', None):
            self._negotiator = self.content_negotiation_class()
        return self._negotiator
    def get_exception_handler(self):
        """
        Returns the exception handler that this view uses.
        """
        return self.settings.EXCEPTION_HANDLER
    # API policy implementation methods
    def perform_content_negotiation(self, request, force=False):
        """
        Determine which renderer and media type to use to render the response.
        """
        renderers = self.get_renderers()
        conneg = self.get_content_negotiator()
        try:
            return conneg.select_renderer(request, renderers, self.format_kwarg)
        except Exception:
            # With `force` (used while rendering error responses) fall back
            # to the first renderer rather than propagating the failure.
            if force:
                return (renderers[0], renderers[0].media_type)
            raise
    def perform_authentication(self, request):
        """
        Perform authentication on the incoming request.
        Note that if you override this and simply 'pass', then authentication
        will instead be performed lazily, the first time either
        `request.user` or `request.auth` is accessed.
        """
        # Accessing `.user` triggers authentication on the Request object.
        request.user
    def check_permissions(self, request):
        """
        Check if the request should be permitted.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_permission(request, self):
                self.permission_denied(
                    request,
                    message=getattr(permission, 'message', None),
                    code=getattr(permission, 'code', None)
                )
    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_object_permission(request, self, obj):
                self.permission_denied(
                    request,
                    message=getattr(permission, 'message', None),
                    code=getattr(permission, 'code', None)
                )
    def check_throttles(self, request):
        """
        Check if request should be throttled.
        Raises an appropriate exception if the request is throttled.
        """
        throttle_durations = []
        for throttle in self.get_throttles():
            if not throttle.allow_request(request, self):
                throttle_durations.append(throttle.wait())
        if throttle_durations:
            # Filter out `None` values which may happen in case of config / rate
            # changes, see #1438
            durations = [
                duration for duration in throttle_durations
                if duration is not None
            ]
            duration = max(durations, default=None)
            self.throttled(request, duration)
    def determine_version(self, request, *args, **kwargs):
        """
        If versioning is being used, then determine any API version for the
        incoming request. Returns a two-tuple of (version, versioning_scheme)
        """
        if self.versioning_class is None:
            return (None, None)
        scheme = self.versioning_class()
        return (scheme.determine_version(request, *args, **kwargs), scheme)
    # Dispatch methods
    def initialize_request(self, request, *args, **kwargs):
        """
        Returns the initial request object.
        """
        # Wrap the Django HttpRequest in a REST framework Request, attaching
        # the parsing/authentication/negotiation policies of this view.
        parser_context = self.get_parser_context(request)
        return Request(
            request,
            parsers=self.get_parsers(),
            authenticators=self.get_authenticators(),
            negotiator=self.get_content_negotiator(),
            parser_context=parser_context
        )
    def initial(self, request, *args, **kwargs):
        """
        Runs anything that needs to occur prior to calling the method handler.
        """
        self.format_kwarg = self.get_format_suffix(**kwargs)
        # Perform content negotiation and store the accepted info on the request
        neg = self.perform_content_negotiation(request)
        request.accepted_renderer, request.accepted_media_type = neg
        # Determine the API version, if versioning is in use.
        version, scheme = self.determine_version(request, *args, **kwargs)
        request.version, request.versioning_scheme = version, scheme
        # Ensure that the incoming request is permitted
        self.perform_authentication(request)
        self.check_permissions(request)
        self.check_throttles(request)
    def finalize_response(self, request, response, *args, **kwargs):
        """
        Returns the final response object.
        """
        # Make the error obvious if a proper response is not returned
        assert isinstance(response, HttpResponseBase), (
            'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
            'to be returned from the view, but received a `%s`'
            % type(response)
        )
        if isinstance(response, Response):
            if not getattr(request, 'accepted_renderer', None):
                # Negotiation may not have run (e.g. exception raised before
                # `initial`); force a fallback renderer.
                neg = self.perform_content_negotiation(request, force=True)
                request.accepted_renderer, request.accepted_media_type = neg
            response.accepted_renderer = request.accepted_renderer
            response.accepted_media_type = request.accepted_media_type
            response.renderer_context = self.get_renderer_context()
        # Add new vary headers to the response instead of overwriting.
        vary_headers = self.headers.pop('Vary', None)
        if vary_headers is not None:
            patch_vary_headers(response, cc_delim_re.split(vary_headers))
        for key, value in self.headers.items():
            response[key] = value
        return response
    def handle_exception(self, exc):
        """
        Handle any exception that occurs, by returning an appropriate response,
        or re-raising the error.
        """
        if isinstance(exc, (exceptions.NotAuthenticated,
                            exceptions.AuthenticationFailed)):
            # WWW-Authenticate header for 401 responses, else coerce to 403
            auth_header = self.get_authenticate_header(self.request)
            if auth_header:
                exc.auth_header = auth_header
            else:
                exc.status_code = status.HTTP_403_FORBIDDEN
        exception_handler = self.get_exception_handler()
        context = self.get_exception_handler_context()
        response = exception_handler(exc, context)
        if response is None:
            # The handler declined to handle it: re-raise as a server error.
            self.raise_uncaught_exception(exc)
        response.exception = True
        return response
    def raise_uncaught_exception(self, exc):
        # In DEBUG, choose plaintext vs HTML tracebacks based on the
        # negotiated renderer before re-raising.
        if settings.DEBUG:
            request = self.request
            renderer_format = getattr(request.accepted_renderer, 'format')
            use_plaintext_traceback = renderer_format not in ('html', 'api', 'admin')
            request.force_plaintext_errors(use_plaintext_traceback)
        raise exc
    # Note: Views are made CSRF exempt from within `as_view` as to prevent
    # accidental removal of this exemption in cases where `dispatch` needs to
    # be overridden.
    def dispatch(self, request, *args, **kwargs):
        """
        `.dispatch()` is pretty much the same as Django's regular dispatch,
        but with extra hooks for startup, finalize, and exception handling.
        """
        self.args = args
        self.kwargs = kwargs
        request = self.initialize_request(request, *args, **kwargs)
        self.request = request
        self.headers = self.default_response_headers  # deprecate?
        try:
            self.initial(request, *args, **kwargs)
            # Get the appropriate handler method
            if request.method.lower() in self.http_method_names:
                handler = getattr(self, request.method.lower(),
                                  self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed
            response = handler(request, *args, **kwargs)
        except Exception as exc:
            response = self.handle_exception(exc)
        self.response = self.finalize_response(request, response, *args, **kwargs)
        return self.response
    def options(self, request, *args, **kwargs):
        """
        Handler method for HTTP 'OPTIONS' request.
        """
        if self.metadata_class is None:
            return self.http_method_not_allowed(request, *args, **kwargs)
        data = self.metadata_class().determine_metadata(request, self)
        return Response(data, status=status.HTTP_200_OK)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import urlparse
from eventlet.green import httplib
from oslo.config import cfg
import six
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)

# Tunables for the EC2 API layer: auth-failure lockout behaviour, the
# keystone endpoint used to verify EC2 signatures, and validation switches.
ec2_opts = [
    cfg.IntOpt('lockout_attempts',
               default=5,
               help='Number of failed auths before lockout.'),
    cfg.IntOpt('lockout_minutes',
               default=15,
               help='Number of minutes to lockout if triggered.'),
    cfg.IntOpt('lockout_window',
               default=15,
               help='Number of minutes for lockout window.'),
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
    cfg.BoolOpt('ec2_private_dns_show_ip',
                default=False,
                help='Return the IP address as private dns hostname in '
                     'describe instances'),
    cfg.BoolOpt('ec2_strict_validation',
                default=True,
                help='Validate security group names'
                     ' according to EC2 specification'),
    cfg.IntOpt('ec2_timestamp_expiry',
               default=300,
               help='Time in seconds before ec2 timestamp expires'),
]

CONF = cfg.CONF
CONF.register_opts(ec2_opts)
# 'use_forwarded_for' is declared in nova.api.auth; imported here so the
# middleware below can honour X-Forwarded-For headers.
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
    """Outermost EC2 middleware: converts any unhandled exception from the
    stack below into a generic 500 fault response."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            response = req.get_response(self.application)
        except Exception as exc:
            # Full traceback goes to the log; the client only sees a
            # generic internal-server-error fault.
            LOG.exception(_("FaultWrapper: %s"), unicode(exc))
            return faults.Fault(webob.exc.HTTPInternalServerError())
        return response
class RequestLogging(wsgi.Middleware):
    """Emits an access-log style line for every EC2 API request."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        started_at = timeutils.utcnow()
        response = req.get_response(self.application)
        self.log_request_completion(response, req, started_at)
        return response

    def log_request_completion(self, response, request, start):
        """Log one line summarising the finished request/response pair."""
        # Controller/action are only known once Requestify has run.
        apireq = request.environ.get('ec2.request', None)
        controller = apireq.controller if apireq else None
        action = apireq.action if apireq else None
        ctxt = request.environ.get('nova.context', None)
        elapsed = timeutils.utcnow() - start
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            elapsed.seconds,
            elapsed.microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)
class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.

    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag

    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
    that period.

    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts.
    """

    def __init__(self, application):
        """middleware can use fake for testing."""
        self.mc = memorycache.get_client()
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Consecutive 403s are counted per access key in the cache.
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= CONF.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(detail=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
            elif failures >= CONF.lockout_attempts:
                # Threshold reached: extend the entry's lifetime so the
                # lockout lasts lockout_minutes from now.
                LOG.warn(_('Access key %(access_key)s has had %(failures)d '
                           'failed authentications and will be locked out '
                           'for %(lock_mins)d minutes.'),
                         {'access_key': access_key,
                          'failures': failures,
                          'lock_mins': CONF.lockout_minutes})
                self.mc.set(failures_key, str(failures),
                            time=CONF.lockout_minutes * 60)
        return res
class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context.

    Sends the request's signature and parameters to the configured
    keystone EC2 endpoint; on success a 'nova.context' RequestContext is
    stored in the WSGI environ, otherwise an EC2 AuthFailure response is
    returned.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')
        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        # Keystone's dedicated ec2tokens endpoint takes a bare payload;
        # the generic auth endpoint expects the OS-KSEC2 wrapper.
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}
        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        # Close as soon as the body is fully read so the connection is not
        # leaked on the error-return path below (previously it was only
        # closed on the success path).
        conn.close()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=response.status)
        result = jsonutils.loads(data)
        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError) as e:
            # Response shape did not match a keystone token; treat as failure.
            LOG.exception(_("Keystone failure: %s") % e)
            msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)
        req.environ['nova.context'] = ctxt
        return self.application
class NoAuth(wsgi.Middleware):
    """Add user:project as 'nova.context' to WSGI environ.

    Skips real authentication entirely: the access key is parsed as
    "user:project" and an admin context is built from it.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        # A bare "user" (no colon) means project == user.
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        if not project_id:
            project_id = user_id
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)
        req.environ['nova.context'] = ctx
        return self.application
class Requestify(wsgi.Middleware):
    """Turn a raw EC2 query request into an APIRequest object.

    Validates the timestamp, strips the protocol-level parameters, and
    stores the resulting APIRequest under 'ec2.request' in the WSGI
    environ for the downstream Authorizer/Validator/Executor.
    """

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        # Controller is given as a dotted import path and instantiated here.
        self.controller = importutils.import_object(controller)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Protocol-level parameters that are not arguments of the action.
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            expired = ec2utils.is_ec2_timestamp_expired(req.params,
                          expires=CONF.ec2_timestamp_expiry)
            if expired:
                msg = _("Timestamp failed validation.")
                LOG.exception(msg)
                raise webob.exc.HTTPForbidden(detail=msg)
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                # v1 clients may omit SignatureMethod, so it must not be a
                # required key below; drop it if present.
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except KeyError:
            raise webob.exc.HTTPBadRequest()
        except exception.InvalidRequest as err:
            raise webob.exc.HTTPBadRequest(explanation=unicode(err))
        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s'),
                      {'key': key, 'value': value})
        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application
class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.

    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """

    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Static policy table: controller name -> {action -> allowed roles}.
        # 'all' means any authenticated user; a missing entry defaults to
        # ['none'], which only admins pass (see _matches_any_role).
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Context and request were placed in the environ by earlier
        # middleware (EC2KeystoneAuth/NoAuth and Requestify).
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s'),
                      {'controller': controller, 'action': action},
                      context=context)
            raise webob.exc.HTTPUnauthorized()

    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        # Admins bypass the table entirely; 'all'/'none' are sentinels.
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
    """Validate EC2 request arguments before they reach the Executor."""

    # NOTE: the statements below execute once, at class-definition time.
    # They install the ec2-id validator and the default validation map on
    # the shared `validator` module; `validate_ec2_id` is deliberately a
    # plain function (no self) since it is stored as a module attribute.
    def validate_ec2_id(val):
        """Return True if val is a syntactically valid EC2-style id."""
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True

    validator.validate_ec2_id = validate_ec2_id

    # Map of request argument name -> validation callable.
    validator.DEFAULT_VALIDATOR = {
        'instance_id': validator.validate_ec2_id,
        'volume_id': validator.validate_ec2_id,
        'image_id': validator.validate_ec2_id,
        'attribute': validator.validate_str(),
        'image_location': validator.validate_image_path,
        'public_ip': utils.is_valid_ipv4,
        'region_name': validator.validate_str(),
        'group_name': validator.validate_str(max_length=255),
        'group_description': validator.validate_str(max_length=255),
        'size': validator.validate_int(),
        'user_data': validator.validate_user_data
    }

    def __init__(self, application):
        super(Validator, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Reject the request outright if any known argument fails its check.
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
    """Helper to extract EC2 error code from exception.

    For other than EC2 exceptions (those without ec2_code attribute),
    use exception name.
    """
    # EC2-aware exceptions carry ec2_code; anything else maps to its
    # class name.
    return getattr(ex, 'ec2_code', type(ex).__name__)
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """
    Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    # Exceptions may carry an HTTP-like 'code' attribute; default to 500.
    status = getattr(ex, 'code', None)
    if not status:
        status = 500

    if unexpected:
        log_fun = LOG.error
        # Include exception text only when it is a client-level (non-5xx)
        # error; 5xx messages may contain sensitive detail.
        if ex.args and status < 500:
            log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
        else:
            log_msg = _("Unexpected %(ex_name)s raised")
    else:
        log_fun = LOG.debug
        if ex.args:
            log_msg = _("%(ex_name)s raised: %(ex_str)s")
        else:
            log_msg = _("%(ex_name)s raised")

    # NOTE(jruzicka): For compatibility with EC2 API, treat expected
    # exceptions as client (4xx) errors. The exception error code is 500
    # by default and most exceptions inherit this from NovaException even
    # though they are actually client errors in most cases.
    if status >= 500:
        status = 400

    context = req.environ['nova.context']
    request_id = context.request_id
    log_msg_args = {
        'ex_name': type(ex).__name__,
        'ex_str': unicode(ex)
    }
    log_fun(log_msg % log_msg_args, context=context)

    # Forward the exception text to the client only when safe: expected
    # errors, or unexpected ones that were already client-level.
    if ex.args and not message and (not unexpected or status < 500):
        message = unicode(ex.args[0])

    if unexpected:
        # Log filtered environment for unexpected errors.
        # (Python 2: env.keys() returns a list, so popping while iterating
        # over it is safe.)
        env = req.environ.copy()
        for k in env.keys():
            if not isinstance(env[k], six.string_types):
                env.pop(k)
        log_fun(_('Environment: %s') % jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
    """Execute an EC2 API request.

    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.)  Returns an XML
    response, or a 400 upon failure.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        api_request = req.environ['ec2.request']
        try:
            result = api_request.invoke(context)
        # The three NotFound cases below re-render the message with the
        # EC2-style id so clients never see internal ids.
        except exception.InstanceNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.msg_fmt % {'instance_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.VolumeNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.msg_fmt % {'volume_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.SnapshotNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.msg_fmt % {'snapshot_id': ex2_id if False else ec2_id}
            return ec2_error_ex(ex, req, message=message)
        # Expected errors: reported to the client as EC2 faults (4xx).
        except (exception.CannotDisassociateAutoAssignedFloatingIP,
                exception.FloatingIpAssociated,
                exception.FloatingIpNotFound,
                exception.ImageNotActive,
                exception.InvalidInstanceIDMalformed,
                exception.InvalidKeypair,
                exception.InvalidParameterValue,
                exception.InvalidPortRange,
                exception.InvalidVolume,
                exception.KeyPairExists,
                exception.KeypairNotFound,
                exception.MissingParameter,
                exception.NoFloatingIpInterface,
                exception.NoMoreFixedIps,
                exception.NotAuthorized,
                exception.QuotaError,
                exception.SecurityGroupExists,
                exception.SecurityGroupLimitExceeded,
                exception.SecurityGroupRuleExists,
                exception.VolumeUnattached,
                # Following aren't translated to valid EC2 errors.
                exception.ImageNotFound,
                exception.ImageNotFoundEC2,
                exception.InvalidAttribute,
                exception.InvalidRequest,
                exception.NotFound) as ex:
            return ec2_error_ex(ex, req)
        except Exception as ex:
            # Anything else is unexpected: ec2_error_ex suppresses the
            # message and logs the (filtered) environment.
            return ec2_error_ex(ex, req, unexpected=True)
        else:
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
from __future__ import print_function
import time
import socket
import subprocess
import sys
import os
import signal
from optparse import OptionParser
# Command-line options for the test driver.
parser = OptionParser()
parser.add_option('--genpydirs', type='string', dest='genpydirs',
                  default='default,slots,newstyle,newstyleslots,dynamic,dynamicslots',
                  help='directory extensions for generated code, used as suffixes for \"gen-py-*\" added sys.path for individual tests')
parser.add_option("--port", type="int", dest="port", default=9090,
                  help="port number for server to listen on")
parser.add_option('-v', '--verbose', action="store_const",
                  dest="verbose", const=2,
                  help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
                  dest="verbose", const=0,
                  help="minimal output")
parser.set_defaults(verbose=1)
options, args = parser.parse_args()

# Generated-code directories to exercise, e.g. "gen-py-slots".
generated_dirs = []
for gp_dir in options.genpydirs.split(','):
    generated_dirs.append('gen-py-%s' % (gp_dir))

# Standalone scripts run before the client/server combination tests.
SCRIPTS = ['TSimpleJSONProtocolTest.py',
           'SerializationTest.py',
           'TestEof.py',
           'TestSyntax.py',
           'TestSocket.py']
# Servers that require the framed transport on the client side.
FRAMED = ["TNonblockingServer"]
SKIP_ZLIB = ['TNonblockingServer', 'THttpServer']
SKIP_SSL = ['TNonblockingServer', 'THttpServer']
# Extra post-test delay (seconds) for servers that reap children via alarm.
EXTRA_DELAY = dict(TProcessPoolServer=5.5)

PROTOS = [
    'accel',
    'binary',
    'compact',
    'json']

SERVERS = [
    "TSimpleServer",
    "TThreadedServer",
    "TThreadPoolServer",
    "TProcessPoolServer",  # new!
    "TForkingServer",
    "TNonblockingServer",
    "THttpServer"]

# Test for presence of multiprocessing module, and if it is not present, then
# remove it from the list of available servers.
# (Catch ImportError specifically instead of a bare except, which would
# also swallow unrelated failures such as KeyboardInterrupt.)
try:
    import multiprocessing
except ImportError:
    print('Warning: the multiprocessing module is unavailable. Skipping tests for TProcessPoolServer')
    SERVERS.remove('TProcessPoolServer')

try:
    import ssl
except ImportError:
    print('Warning, no ssl module available. Skipping all SSL tests.')
    SKIP_SSL.extend(SERVERS)

# commandline permits a single class name to be specified to override SERVERS=[...]
if len(args) == 1:
    if args[0] in SERVERS:
        SERVERS = args
    else:
        print('Unavailable server type "%s", please choose one of: %s' % (args[0], SERVERS))
        sys.exit(0)
def relfile(fname):
    """Resolve fname relative to the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, fname)
def runScriptTest(genpydir, script):
    """Run one standalone test script as a subprocess.

    Raises Exception if the script exits nonzero.

    Note: the previous version also launched the script with
    subprocess.Popen before subprocess.call, which executed the test twice
    in parallel and leaked the first (never waited/killed) process; that
    stray Popen has been removed.
    """
    script_args = [sys.executable, relfile(script)]
    script_args.append('--genpydir=%s' % genpydir)
    print('\nTesting script: %s\n----' % (' '.join(script_args)))
    ret = subprocess.call(script_args)
    if ret != 0:
        raise Exception("Script subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(script_args)))
def runServiceTest(genpydir, server_class, proto, port, use_zlib, use_ssl):
    """Run one client/server test combination.

    Starts TestServer.py as a subprocess, waits until it accepts
    connections on `port`, runs TestClient.py against it, then kills the
    server. Raises Exception if either process fails.
    """
    # Build command line arguments
    server_args = [sys.executable, relfile('TestServer.py')]
    cli_args = [sys.executable, relfile('TestClient.py')]
    for which in (server_args, cli_args):
        which.append('--genpydir=%s' % genpydir)
        which.append('--protocol=%s' % proto)  # accel, binary or compact
        which.append('--port=%d' % port)  # default to 9090
        if use_zlib:
            which.append('--zlib')
        if use_ssl:
            which.append('--ssl')
        if options.verbose == 0:
            which.append('-q')
        if options.verbose == 2:
            which.append('-v')
    # server-specific option to select server class
    server_args.append(server_class)
    # client-specific cmdline options
    if server_class in FRAMED:
        cli_args.append('--transport=framed')
    else:
        cli_args.append('--transport=buffered')
    if server_class == 'THttpServer':
        cli_args.append('--http=/')
    if options.verbose > 0:
        print('Testing server %s: %s' % (server_class, ' '.join(server_args)))
    serverproc = subprocess.Popen(server_args)

    def ensureServerAlive():
        """Raise if the server subprocess has already exited."""
        if serverproc.poll() is not None:
            print(('FAIL: Server process (%s) failed with retcode %d')
                  % (' '.join(server_args), serverproc.returncode))
            raise Exception('Server subprocess %s died, args: %s'
                            % (server_class, ' '.join(server_args)))

    # Wait for the server to start accepting connections on the given port.
    sock = socket.socket()
    sleep_time = 0.1  # Seconds
    max_attempts = 100
    try:
        attempt = 0
        while sock.connect_ex(('127.0.0.1', port)) != 0:
            attempt += 1
            if attempt >= max_attempts:
                raise Exception("TestServer not ready on port %d after %.2f seconds"
                                % (port, sleep_time * attempt))
            ensureServerAlive()
            time.sleep(sleep_time)
    finally:
        sock.close()

    try:
        if options.verbose > 0:
            print('Testing client: %s' % (' '.join(cli_args)))
        ret = subprocess.call(cli_args)
        if ret != 0:
            raise Exception("Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args)))
    finally:
        # check that server didn't die
        ensureServerAlive()
        extra_sleep = EXTRA_DELAY.get(server_class, 0)
        if extra_sleep > 0 and options.verbose > 0:
            # (Fixed: the two adjacent literals previously concatenated to
            # "childprocesses" — a space was missing.)
            print('Giving %s (proto=%s,zlib=%s,ssl=%s) an extra %d seconds for child '
                  'processes to terminate via alarm'
                  % (server_class, proto, use_zlib, use_ssl, extra_sleep))
        time.sleep(extra_sleep)
        os.kill(serverproc.pid, signal.SIGKILL)
        serverproc.wait()
test_count = 0
# run tests without a client/server first
print('----------------')
print(' Executing individual test scripts with various generated code directories')
print(' Directories to be tested: ' + ', '.join(generated_dirs))
print(' Scripts to be tested: ' + ', '.join(SCRIPTS))
print('----------------')
for genpydir in generated_dirs:
    for script in SCRIPTS:
        runScriptTest(genpydir, script)

# Then run the full client/server matrix: every server class crossed with
# every generated-code flavor, protocol, and zlib/SSL toggle, minus the
# combinations listed in SKIP_ZLIB/SKIP_SSL.
print('----------------')
print(' Executing Client/Server tests with various generated code directories')
print(' Servers to be tested: ' + ', '.join(SERVERS))
print(' Directories to be tested: ' + ', '.join(generated_dirs))
print(' Protocols to be tested: ' + ', '.join(PROTOS))
print(' Options to be tested: ZLIB(yes/no), SSL(yes/no)')
print('----------------')
for try_server in SERVERS:
    for genpydir in generated_dirs:
        for try_proto in PROTOS:
            for with_zlib in (False, True):
                # skip any servers that don't work with the Zlib transport
                if with_zlib and try_server in SKIP_ZLIB:
                    continue
                for with_ssl in (False, True):
                    # skip any servers that don't work with SSL
                    if with_ssl and try_server in SKIP_SSL:
                        continue
                    test_count += 1
                    if options.verbose > 0:
                        print('\nTest run #%d: (includes %s) Server=%s, Proto=%s, zlib=%s, SSL=%s' % (test_count, genpydir, try_server, try_proto, with_zlib, with_ssl))
                    runServiceTest(genpydir, try_server, try_proto, options.port, with_zlib, with_ssl)
                    if options.verbose > 0:
                        print('OK: Finished (includes %s) %s / %s proto / zlib=%s / SSL=%s. %d combinations tested.' % (genpydir, try_server, try_proto, with_zlib, with_ssl, test_count))
|
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import requests
import urllib
import logging
import boundary.util as util
class ApiCall(object):
    """Base helper for making authenticated REST calls to the TrueSight
    Pulse (formerly Boundary) API.

    Sub-classes customize a call by overriding `get_api_parameters`,
    `handle_key_word_args`, and the result handlers, then invoking
    `api_call()`.
    """

    def __init__(self, api_host="api.truesight.bmc.com", email=None, api_token=None):
        """
        :param api_host: api end point host
        :param email: TrueSight Pulse account e-mail
        :param api_token: TrueSight Pulse api token
        :return: returns nothing

        :Example:

        from boundary import API

        api = API(email="foo@bary.com", api_token="api.xxxxxxxxxx-yyyy"
        """
        self._kwargs = None
        # Dispatch table: HTTP verb -> bound request method.
        self._methods = {"DELETE": self._do_delete,
                         "GET": self._do_get,
                         "POST": self._do_post,
                         "PUT": self._do_put}
        # NOTE(review): legacy default host; normally replaced below by
        # _get_environment() and/or the api_host argument.
        self._api_host = "premium-api.boundary.com"
        self._email = None
        self._api_token = None
        self._curl = False

        # All member variables related to REST CALL
        self._scheme = "https"
        self._method = "GET"
        self._headers = None
        self._data = None
        self._url = None
        self._path = None
        self._url_parameters = None
        self._api_result = None

        self.logLevel = None

        # Set the api_host, email, api token set by environment
        # variables then override with those passed in
        self._get_environment()
        if api_host is not None:
            self._api_host = api_host
        if email is not None:
            self._email = email
        if api_token is not None:
            self._api_token = api_token

    #
    # data
    #
    @property
    def data(self):
        """
        Value of the HTTP payload
        :return:
        """
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    #
    # headers
    #
    @property
    def headers(self):
        # Dict of HTTP headers to send, or None.
        return self._headers

    @headers.setter
    def headers(self, headers):
        self._headers = headers

    #
    # method
    #
    @property
    def method(self):
        """
        HTTP verb used for the call (one of DELETE/GET/POST/PUT).
        """
        return self._method

    @method.setter
    def method(self, value):
        """
        Before assigning the value validate that is in one of the
        HTTP methods we implement
        """
        keys = self._methods.keys()
        if value not in keys:
            raise AttributeError("Method value not in " + str(keys))
        else:
            self._method = value

    #
    # path
    #
    @property
    def path(self):
        # URL path portion of the API endpoint (no leading slash).
        return self._path

    @path.setter
    def path(self, value):
        self._path = value

    #
    # url_parameters
    #
    @property
    def url_parameters(self):
        # Mapping of query-string parameters, or None.
        return self._url_parameters

    @url_parameters.setter
    def url_parameters(self, url_parameters):
        self._url_parameters = url_parameters

    def _get_environment(self):
        """
        Gets the configuration stored in environment variables

        TSP_EMAIL / TSP_API_TOKEN / TSP_API_HOST override the instance
        settings; note the host falls back to the production default when
        TSP_API_HOST is unset.
        """
        if 'TSP_EMAIL' in os.environ:
            self._email = os.environ['TSP_EMAIL']
        if 'TSP_API_TOKEN' in os.environ:
            self._api_token = os.environ['TSP_API_TOKEN']
        if 'TSP_API_HOST' in os.environ:
            self._api_host = os.environ['TSP_API_HOST']
        else:
            self._api_host = 'api.truesight.bmc.com'

    def _get_url_parameters(self):
        """
        Encode URL parameters
        """
        url_parameters = ''
        if self._url_parameters is not None:
            # NOTE: urllib.urlencode is the Python 2 location of this API.
            url_parameters = '?' + urllib.urlencode(self._url_parameters)
        return url_parameters

    def metric_get(self, enabled=False, custom=False):
        """
        Returns a metric definition identified by name
        :param enabled: Return only enabled metrics
        :param custom: Return only custom metrics
        :return Metrics:
        """
        # NOTE(review): the second query parameter has no key (renders as
        # "...&False"), and `self._handle_results` / `self.metrics` are not
        # defined in this class -- presumably supplied by a subclass; verify
        # against callers before relying on this method.
        self.path = 'v1/metrics?enabled={0}&{1}'.format(enabled, custom)
        self._call_api()
        self._handle_results()
        return self.metrics

    def get_api_parameters(self):
        # Hook for sub-classes: set path/method/data before the call.
        pass

    def handle_api_results(self):
        # Hook for sub-classes: post-process self._api_result.
        pass

    def _do_get(self):
        """
        HTTP Get Request
        """
        return requests.get(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))

    def _do_delete(self):
        """
        HTTP Delete Request
        """
        return requests.delete(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))

    def _do_post(self):
        """
        HTTP Post Request
        """
        return requests.post(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))

    def _do_put(self):
        """
        HTTP Put Request
        """
        return requests.put(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))

    def good_response(self, status_code):
        """
        Determines what status codes represent a good response from an API call.
        """
        return status_code == requests.codes.ok

    def form_url(self):
        # Assemble scheme://host/path?query from the instance settings.
        return "{0}://{1}/{2}{3}".format(self._scheme, self._api_host, self._path, self._get_url_parameters())

    def _curl_output(self):
        """Print an equivalent curl command line for the configured call.

        Note: this prints the api token in clear text.
        """
        headers = ""
        if self._headers is not None:
            for key in self._headers:
                headers = headers + ' -H "{0}: {1}"'.format(key, self._headers[key])
        data = None
        if self._data is not None:
            data = " -d '{0}'".format(self._data)
        else:
            data = ''
        url = ' "{0}"'.format(self.form_url())
        print('curl -X {0} -u "{1}:{2}"{3}{4}{5}'.format(self._method,
                                                         self._email,
                                                         self._api_token,
                                                         headers,
                                                         data,
                                                         url))

    def _call_api(self):
        """
        Make an API call to get the metric definition
        """
        self._url = self.form_url()
        if self._headers is not None:
            logging.debug(self._headers)
        if self._data is not None:
            logging.debug(self._data)
        if len(self._get_url_parameters()) > 0:
            logging.debug(self._get_url_parameters())
        # Dispatch on the configured HTTP verb.
        result = self._methods[self._method]()
        if not self.good_response(result.status_code):
            logging.error(self._url)
            logging.error(self._method)
            if self._data is not None:
                logging.error(self._data)
            logging.error(result)
        self._api_result = result

    def handle_key_word_args(self):
        # Hook for sub-classes: translate self._kwargs into call settings.
        pass

    def api_call(self):
        """Template method: configure, execute, and parse one API call."""
        self._get_environment()
        self.handle_key_word_args()
        self.get_api_parameters()
        self._call_api()
        return self._handle_api_results()

    def _handle_api_results(self):
        """Return the parsed JSON body on HTTP 200, else None."""
        result = None
        # Only process if we get HTTP result of 200
        if self._api_result.status_code == requests.codes.ok:
            result = json.loads(self._api_result.text)
        return result
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for probability densities."""
import abc
import pickle
from annealed_flow_transport import train_vae
import annealed_flow_transport.aft_types as tp
import annealed_flow_transport.cox_process_utils as cp_utils
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.scipy.linalg as slinalg
from jax.scipy.special import logsumexp
from jax.scipy.stats import multivariate_normal
from jax.scipy.stats import norm
import numpy as np
import tensorflow_datasets as tfds
# Type aliases used in annotations throughout this module.
NpArray = np.ndarray
Array = jnp.ndarray
ConfigDict = tp.ConfigDict
class LogDensity(metaclass=abc.ABCMeta):
    """Abstract base class from which all log densities should inherit.

    Sub-classes implement `_check_constructor_inputs` and
    `evaluate_log_density`; calling the instance evaluates the density
    with input/output shape checking.
    """

    def __init__(self, config: ConfigDict, num_dim: int):
        self._check_constructor_inputs(config, num_dim)
        self._config = config
        self._num_dim = num_dim

    @abc.abstractmethod
    def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
        """Check the config and number of dimensions of the class.

        Will typically raise Assertion like errors.

        Args:
            config: Configuration for the log density.
            num_dim: Number of dimensions expected for the density.
        """

    def __call__(self, x: Array) -> Array:
        """Evaluate the log density with automatic shape checking.

        This calls evaluate_log_density which needs to be implemented
        in derived classes.

        Args:
            x: Array of shape (num_batch, num_dim) containing input points.
        Returns:
            Array of shape (num_batch,) with corresponding log densities.
        """
        self._check_input_shape(x)
        output = self.evaluate_log_density(x)
        self._check_output_shape(x, output)
        return output

    @abc.abstractmethod
    def evaluate_log_density(self, x: Array) -> Array:
        """Evaluate the log density.

        Args:
            x: Array of shape (num_batch, num_dim) containing input to log density
        Returns:
            Array of shape (num_batch,) containing values of log densities.
        """

    def _check_input_shape(self, vector_in: Array):
        # Batch dimension is unconstrained; feature dimension must match.
        chex.assert_shape(vector_in, (None, self._num_dim))

    def _check_output_shape(self, vector_in: Array, vector_out: Array):
        # Output must be one scalar log density per batch element.
        num_batch = vector_in.shape[0]
        chex.assert_shape(vector_out, (num_batch,))

    def _check_members_types(self, config: ConfigDict, expected_members_types):
        """Verify that config contains each (name, type) pair given."""
        for elem, elem_type in expected_members_types:
            if elem not in config:
                raise ValueError("LogDensity config element not found: ", elem)
            if not isinstance(config[elem], elem_type):
                msg = "LogDensity config element " + elem + " is not of type " + str(
                    elem_type)
                raise TypeError(msg)

    def _check_expected_num_dim(self,
                                num_dim: int,
                                expected_num_dim: int,
                                class_name: str):
        """In the case where num_dim has an expected static value, confirm this."""
        if expected_num_dim != num_dim:
            msg = "num_dim is expected to be "+str(expected_num_dim)
            msg += " for density "+class_name
            raise ValueError(msg)
class NormalDistribution(LogDensity):
    """A univariate normal distribution with configurable scale and location.

    num_dim should be 1 and config should include scalars "loc" and "scale".
    """

    def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
        self._check_expected_num_dim(num_dim, 1, type(self).__name__)
        required = [("loc", float),
                    ("scale", float)]
        self._check_members_types(config, required)

    def evaluate_log_density(self, x: Array) -> Array:
        # norm.logpdf keeps the trailing singleton axis of x; drop it so the
        # result has shape (num_batch,).
        per_point = norm.logpdf(x,
                                loc=self._config.loc,
                                scale=self._config.scale)
        return per_point[:, 0]
class MultivariateNormalDistribution(LogDensity):
    """A normalized multivariate normal distribution.

    Every element of the mean vector equals config.shared_mean and the
    covariance matrix is diagonal with entries config.diagonal_cov.
    """

    def _check_constructor_inputs(self, config: ConfigDict, unused_dim: int):
        required = [("shared_mean", float),
                    ("diagonal_cov", float)]
        self._check_members_types(config, required)

    def evaluate_log_density(self, x: Array) -> Array:
        shared_mean = self._config.shared_mean * jnp.ones(self._num_dim)
        diagonal_cov = jnp.diag(self._config.diagonal_cov * jnp.ones(self._num_dim))
        return multivariate_normal.logpdf(x, mean=shared_mean, cov=diagonal_cov)
class FunnelDistribution(LogDensity):
    """The funnel distribution from https://arxiv.org/abs/physics/0009028.

    num_dim should be 10. config is unused in this case.
    """

    def _check_constructor_inputs(self, unused_config: ConfigDict, num_dim: int):
        self._check_expected_num_dim(num_dim, 10, type(self).__name__)

    def evaluate_log_density(self, x: Array) -> Array:
        num_other = self._num_dim - 1

        def single_point_log_density(point):
            # The first coordinate v follows N(0, 3^2).
            v = point[0]
            log_density_v = norm.logpdf(v, loc=0., scale=3.)
            # The remaining coordinates are iid N(0, exp(v)) given v.
            cov_other = jnp.exp(v) * jnp.eye(num_other)
            log_density_other = multivariate_normal.logpdf(
                point[1:],
                mean=jnp.zeros(num_other),
                cov=cov_other)
            chex.assert_equal_shape([log_density_v, log_density_other])
            return log_density_v + log_density_other

        return jax.vmap(single_point_log_density)(x)
class LogGaussianCoxPines(LogDensity):
    """Log Gaussian Cox process posterior in 2D for pine saplings data.

    This follows Heng et al 2020 https://arxiv.org/abs/1708.08396 .

    config.file_path should point to a csv file of num_points columns
    and 2 rows containing the Finnish pines data.

    config.use_whitened is a boolean specifying whether or not to use a
    reparameterization in terms of the Cholesky decomposition of the prior.
    See Section G.4 of https://arxiv.org/abs/2102.07501 for more detail.
    The experiments in the paper have this set to False.

    num_dim should be the square of the lattice sites per dimension.
    So for a 40 x 40 grid num_dim should be 1600.
    """

    def __init__(self,
                 config: ConfigDict,
                 num_dim: int):
        super().__init__(config, num_dim)

        # Discretization is as in Controlled Sequential Monte Carlo
        # by Heng et al 2017 https://arxiv.org/abs/1708.08396
        self._num_latents = num_dim
        self._num_grid_per_dim = int(np.sqrt(num_dim))

        bin_counts = jnp.array(
            cp_utils.get_bin_counts(self.get_pines_points(config.file_path),
                                    self._num_grid_per_dim))

        self._flat_bin_counts = jnp.reshape(bin_counts, (self._num_latents))

        # This normalizes by the number of elements in the grid
        self._poisson_a = 1./self._num_latents
        # Parameters for LGCP are as estimated in Moller et al, 1998
        # "Log Gaussian Cox processes" and are also used in Heng et al.
        self._signal_variance = 1.91
        self._beta = 1./33

        self._bin_vals = cp_utils.get_bin_vals(self._num_grid_per_dim)

        def short_kernel_func(x, y):
            # Kernel with the hyperparameters above baked in.
            return cp_utils.kernel_func(x, y, self._signal_variance,
                                        self._num_grid_per_dim, self._beta)

        self._gram_matrix = cp_utils.gram(short_kernel_func, self._bin_vals)
        self._cholesky_gram = jnp.linalg.cholesky(self._gram_matrix)
        self._white_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
            2. * jnp.pi)

        # log det of the Gram matrix via the diagonal of its Cholesky factor.
        half_log_det_gram = jnp.sum(jnp.log(jnp.abs(jnp.diag(self._cholesky_gram))))
        self._unwhitened_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
            2. * jnp.pi) - half_log_det_gram
        # The mean function is a constant with value mu_zero.
        self._mu_zero = jnp.log(126.) - 0.5*self._signal_variance

        # Select the parameterization once at construction time.
        if self._config.use_whitened:
            self._posterior_log_density = self.whitened_posterior_log_density
        else:
            self._posterior_log_density = self.unwhitened_posterior_log_density

    def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
        expected_members_types = [("use_whitened", bool)]
        self._check_members_types(config, expected_members_types)
        num_grid_per_dim = int(np.sqrt(num_dim))
        if num_grid_per_dim * num_grid_per_dim != num_dim:
            msg = ("num_dim needs to be a square number for LogGaussianCoxPines "
                   "density.")
            raise ValueError(msg)

        if not config.file_path:
            msg = "Please specify a path in config for the Finnish pines data csv."
            raise ValueError(msg)

    def get_pines_points(self, file_path):
        """Get the pines data points."""
        with open(file_path, "rt") as input_file:
            b = np.genfromtxt(input_file, delimiter=",")
        return b

    def whitened_posterior_log_density(self, white: Array) -> Array:
        """Unnormalized posterior log density of one whitened latent vector."""
        # Standard normal prior in whitened coordinates.
        quadratic_term = -0.5 * jnp.sum(white**2)
        prior_log_density = self._white_gaussian_log_normalizer + quadratic_term
        latent_function = cp_utils.get_latents_from_white(white, self._mu_zero,
                                                          self._cholesky_gram)
        log_likelihood = cp_utils.poisson_process_log_likelihood(
            latent_function, self._poisson_a, self._flat_bin_counts)
        return prior_log_density + log_likelihood

    def unwhitened_posterior_log_density(self, latents: Array) -> Array:
        """Unnormalized posterior log density of one latent vector."""
        white = cp_utils.get_white_from_latents(latents, self._mu_zero,
                                                self._cholesky_gram)
        prior_log_density = -0.5 * jnp.sum(
            white * white) + self._unwhitened_gaussian_log_normalizer
        log_likelihood = cp_utils.poisson_process_log_likelihood(
            latents, self._poisson_a, self._flat_bin_counts)
        return prior_log_density + log_likelihood

    def evaluate_log_density(self, x: Array) -> Array:
        # Batch over points with vmap; each call handles a single latent vector.
        return jax.vmap(self._posterior_log_density)(x)
class ChallengingTwoDimensionalMixture(LogDensity):
    """A challenging mixture of Gaussians in two dimensions.

    num_dim should be 2. config is unused in this case.
    """

    def _check_constructor_inputs(self, unused_config: ConfigDict, num_dim: int):
        self._check_expected_num_dim(num_dim, 2, type(self).__name__)

    def raw_log_density(self, x: Array) -> Array:
        """A raw log density that we will then symmetrize.

        Args:
          x: Array of shape (2,) — a single unbatched point.
        Returns:
          Scalar log density of a three-component Gaussian mixture at x.
        """
        mean_a = jnp.array([3.0, 0.])
        mean_b = jnp.array([-2.5, 0.])
        mean_c = jnp.array([2.0, 3.0])
        means = jnp.stack((mean_a, mean_b, mean_c), axis=0)
        cov_a = jnp.array([[0.7, 0.], [0., 0.05]])
        cov_b = jnp.array([[0.7, 0.], [0., 0.05]])
        cov_c = jnp.array([[1.0, 0.95], [0.95, 1.0]])
        covs = jnp.stack((cov_a, cov_b, cov_c), axis=0)
        # Equal mixture weights for the three components.
        log_weights = jnp.log(jnp.array([1./3, 1./3., 1./3.]))
        # Evaluate all component log pdfs at once via Cholesky factors:
        # solve L y = (x - mu), then |y|^2 is the Mahalanobis term.
        l = jnp.linalg.cholesky(covs)
        y = slinalg.solve_triangular(l, x[None, :] - means, lower=True, trans=0)
        mahalanobis_term = -1/2 * jnp.einsum("...i,...i->...", y, y)
        n = means.shape[-1]
        # log normalizer: -n/2 log(2 pi) - sum(log diag(L)) per component.
        normalizing_term = -n / 2 * np.log(2 * np.pi) - jnp.log(
            l.diagonal(axis1=-2, axis2=-1)).sum(axis=1)
        individual_log_pdfs = mahalanobis_term + normalizing_term
        mixture_weighted_pdfs = individual_log_pdfs + log_weights
        return logsumexp(mixture_weighted_pdfs)

    def make_2d_invariant(self, log_density, x: Array) -> Array:
        # Symmetrize by averaging the density at x and at its
        # coordinate-swapped counterpart.
        # NOTE(review): np.flip (not jnp.flip) is applied to a value that is
        # traced under jax.vmap — presumably dispatches to the JAX array;
        # confirm this is intended.
        density_a = log_density(x)
        density_b = log_density(np.flip(x))
        return jnp.logaddexp(density_a, density_b) - jnp.log(2)

    def evaluate_log_density(self, x: Array) -> Array:
        density_func = lambda x: self.make_2d_invariant(self.raw_log_density, x)
        return jax.vmap(density_func)(x)
class AutoEncoderLikelihood(LogDensity):
    """Generative decoder log p(x,z| theta) as a function of latents z.

    This evaluates log p(x,z| theta) = log p(x| z, theta) + log p(z) for a VAE.
    Here x is a binarized MNIST image, z are real valued latents, theta denotes
    the generator neural network parameters.

    Since x is fixed and z is a random variable this is the log of an
    unnormalized z density p(x, z | theta).
    The normalizing constant is a marginal p(x | theta) = int p(x, z | theta) dz.
    The normalized target density is the posterior over latents p(z|x, theta).

    The likelihood uses a pretrained generator neural network.
    It is contained in a pickle file specified by config.params_filename
    A script producing such a pickle file can be found in train_vae.py
    The resulting pretrained network used in the AFT paper
    can be found at data/vae.pickle

    The binarized MNIST test set image used is specified by config.image_index
    """

    def __init__(self, config: ConfigDict, num_dim: int):
        super().__init__(config, num_dim)
        self._vae_params = self._get_vae_params(config.params_filename)
        test_batch_size = 1
        test_ds = train_vae.load_dataset(tfds.Split.TEST, test_batch_size)
        # Advance the iterator so the batch read below is the one at position
        # config.image_index in the binarized MNIST test split.
        for unused_index in range(self._config.image_index):
            unused_batch = next(test_ds)
        self._test_image = next(test_ds)["image"]
        assert self._test_image.shape[0] == 1  # Batch size needs to be 1.
        assert self._test_image.shape[1:] == train_vae.MNIST_IMAGE_SHAPE
        self.entropy_eval = hk.transform(self.cross_entropy_eval_func)

    def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
        self._check_expected_num_dim(num_dim, 30, type(self).__name__)
        expected_members_types = [("params_filename", str),
                                  ("image_index", int)
                                  ]
        # Bug fix: this list was previously constructed but never checked.
        self._check_members_types(config, expected_members_types)
        num_mnist_test = 10000
        in_range = config.image_index >= 0 and config.image_index < num_mnist_test
        if not in_range:
            msg = "VAE image_index must be greater than or equal to zero "
            msg += "and strictly less than "+str(num_mnist_test)+"."
            raise ValueError(msg)

    def _get_vae_params(self, ckpt_filename):
        """Load pretrained generator parameters from a pickle checkpoint."""
        # NOTE: pickle.load on an untrusted file can execute arbitrary code;
        # the checkpoint path is assumed to be trusted experiment data.
        with open(ckpt_filename, "rb") as f:
            vae_params = pickle.load(f)
        return vae_params

    def cross_entropy_eval_func(self, data: Array, latent: Array) -> Array:
        """Evaluate the binary cross entropy for given latent and data.

        Needs to be called within a Haiku transform.

        Args:
          data: Array of shape (1, image_shape)
          latent: Array of shape (num_latent_dim,)
        Returns:
          Array, value of binary cross entropy for single data point in question.
        """
        chex.assert_rank(latent, 1)
        chex.assert_rank(data, 4)  # Shape should be (1, 28, 28, 1) hence rank 4.
        vae = train_vae.ConvVAE()
        # New axis here required for batch size = 1 for VAE compatibility.
        batch_latent = latent[None, :]
        logits = vae.decoder(batch_latent)
        chex.assert_equal_shape([logits, data])
        return train_vae.binary_cross_entropy_from_logits(logits, data)

    def log_prior(self, latent: Array) -> Array:
        """Latent shape (num_dim,) -> standard multivariate log density."""
        chex.assert_rank(latent, 1)
        log_norm_gaussian = -0.5*self._num_dim * jnp.log(2.*jnp.pi)
        data_term = - 0.5 * jnp.sum(jnp.square(latent))
        return data_term + log_norm_gaussian

    def total_log_probability(self, latent: Array) -> Array:
        """log p(x, z | theta) for one latent z and the fixed test image x."""
        chex.assert_rank(latent, 1)
        log_prior = self.log_prior(latent)
        # The transformed function is deterministic, so any rng key works.
        dummy_rng_key = 0
        # Data point log likelihood is negative of loss for batch size of 1.
        log_likelihood = -1. * self.entropy_eval.apply(
            self._vae_params, dummy_rng_key, self._test_image, latent)
        total_log_probability = log_prior + log_likelihood
        return total_log_probability

    def evaluate_log_density(self, x: Array) -> Array:
        return jax.vmap(self.total_log_probability)(x)
|
|
import os
import shutil
import sys
import traceback
import six
import chainer
def _reduce(grad_list):
if not grad_list:
return None
if len(grad_list) >= 2:
grad_list[:] = [chainer.functions.add(*grad_list)]
return grad_list[0]
def _pure(grad):
return [] if grad is None else [grad]
def _pop_or_none(grad_list):
return grad_list.pop() if grad_list else None
def _grad_var_from_alive_node(node):
# Used by `accumulate_grad_inputs` option of `GradTable`
var = node.get_variable_or_none()
if var is None:
return None
else:
gv = var.grad_var
var.grad_var = None
return gv
class GradTable(object):
    """Maps variable nodes to references of their gradients.

    Gradients are stored as lists so the backprop process can keep a
    reference to them. Keeping each list at length <= 1 gives strict
    accumulation of gradients; longer lists accumulate lazily.

    Args:
        accumulate_grad_inputs (bool): Fallback to grad_var of input
            variables. However, the current implementation reproduces the
            legacy behavior, i.e. to read ``grad_var`` of node when the
            node has not been added.
    """

    def __init__(self, accumulate_grad_inputs=False):
        self.grads = {}
        self._load_if_new = accumulate_grad_inputs

    def __setitem__(self, node, grad):
        assert node is not None
        self.grads[node] = _pure(grad)

    def accumulate(self, node, grad):
        self.get_as_list(node).append(grad)

    def get_as_list(self, node):
        """Return (creating if needed) the gradient list for ``node``."""
        assert node is not None
        entry = self.grads.get(node)
        if entry is None:
            if self._load_if_new and node.creator_node is None:
                node._check_old_style_gradient()
                # Accumulate the gradient only if the node is a leaf.
                entry = _pure(_grad_var_from_alive_node(node))
            else:
                entry = []
            self.grads[node] = entry
        return entry

    def pop(self, node):
        """Remove and return the reduced gradient for ``node`` (or None)."""
        if node is None:
            return None
        stored = self.grads.pop(node, None)
        if stored is not None:
            return _reduce(stored)
        if self._load_if_new:
            return _grad_var_from_alive_node(node)
        return None

    def assert_no_grads(self):
        for gx in self.grads.values():
            assert gx == []
def backprop_step(
        func, target_input_indexes, grad_outputs, grad_inputs, is_debug):
    """Accumulates gradients of a FunctionNode

    This routine is used by :meth:`chainer.Variable.backward` and
    :func:`chainer.grad`.

    Args:
        func (~chainer.FunctionNode): The function for which gradients are
            accumulated.
        target_input_indexes (tuple of int): Sorted indices of the inputs
            that require gradients. It is guaranteed that this tuple contains
            at least one element.
        grad_outputs (tuple of Variable): Gradients w.r.t. the output
            variables. If the gradient w.r.t. an output variable is not
            given, the corresponding element is ``None``.
        grad_inputs (dict): References of the gradients w.r.t. the input
            variables.
        is_debug (bool): ``True`` if the debug mode is enabled.
    """
    if is_debug:
        assert isinstance(target_input_indexes, tuple)
        assert target_input_indexes == tuple(sorted(target_input_indexes))
        assert isinstance(grad_outputs, tuple)
    # Dispatch on whether the function overrides backward_accumulate
    # (compared by code object identity, not method identity).
    if func.backward_accumulate.__code__ \
            is not chainer.FunctionNode.backward_accumulate.__code__:
        # backward_accumulate is overridden
        grad_inputs_tuple = tuple([
            _pop_or_none(grad_inputs[func.inputs[i]])
            for i in target_input_indexes
        ])

        # Call backward_accumulate()
        try:
            gxs = func.backward_accumulate(
                target_input_indexes, grad_outputs, grad_inputs_tuple)
        except Exception as e:
            _reraise_with_stack(func, e)
    else:  # otherwise, backward should be overridden
        # Call backward()
        try:
            gxs = func.backward(
                target_input_indexes, grad_outputs)
        except Exception as e:
            _reraise_with_stack(func, e)

        if is_debug:
            for gx in gxs:
                if not (gx is None or isinstance(gx, chainer.Variable)):
                    raise ValueError(func._get_error_message(
                        'type of gradients returned from backward is '
                        'incorrect: '
                        '{} != expected {}'.format(
                            type(gx), chainer.Variable)))

        # backward may return gradients either for all inputs or only for
        # the requested ones; normalize to the requested subset.
        len_gxs = len(gxs)
        if len_gxs == len(func.inputs):
            gxs = tuple([gxs[i] for i in target_input_indexes])
        elif len_gxs != len(target_input_indexes):
            msg = 'number of gradients returned from backward is incorrect: '
            if len(func.inputs) == len(target_input_indexes):
                msg += (
                    '%s != expected %s' % (len_gxs, len(func.inputs)))
            else:
                msg += (
                    '%s != expected %s or %s'
                    % (len_gxs, len(func.inputs), len(target_input_indexes)))
            raise ValueError(func._get_error_message(msg))

    # Append each returned gradient to the accumulation list of its input.
    for i, gx in six.moves.zip(target_input_indexes, gxs):
        if gx is None or gx.raw_array is None:
            continue

        grad_inputs[func.inputs[i]].append(gx)

        if is_debug:
            node_x = func.inputs[i]
            g_input_list = grad_inputs[node_x]
            if gx.shape != node_x.shape:
                raise ValueError(func._get_error_message(
                    'shape of gradients returned from backward is '
                    'incorrect: '
                    'input-index={}, actual {} != expected {}'.format(
                        i, gx.shape, node_x.shape)))
            if gx is not None and g_input_list:
                # Compare against the first accumulated gradient.
                g_input = g_input_list[0]
                if gx.shape != g_input.shape:
                    raise ValueError(func._get_error_message(
                        'shape of gradients returned from backward is '
                        'incorrect: '
                        'input-index={}, actual {} != expected {}'.format(
                            i, gx.shape, g_input.shape)))
                if gx.dtype != g_input.dtype:
                    raise ValueError(func._get_error_message(
                        'dtype of gradients returned from backward is '
                        'incorrect: '
                        'input-index={}, actual {} != expected {}'.format(
                            i, gx.dtype, g_input.dtype)))
    del gxs

    if is_debug:
        # each grad is a list of variables
        # iter_gxs expands it as a sequence of variables.
        def iter_gxs(gxs):
            for gx in gxs:
                for gx_elem in gx:
                    yield gx_elem

        for gx in iter_gxs(grad_inputs.values()):
            if chainer.backend._contains_nan(gx.data):
                raise RuntimeError(
                    'NaN is detected on backward computation of {}'
                    .format(func.label))

    if not func.lazy_grad_sum:
        # Eagerly collapse each accumulation list to at most one gradient.
        for gx in grad_inputs.values():
            _reduce(gx)
def _get_columns():
# Returns the terminal column width.
if sys.version_info >= (3, 3):
cols, rows = shutil.get_terminal_size()
return cols
return int(os.getenv('COLUMNS', 80))
def _reraise_with_stack(func, e):
    """Re-raise the active exception, annotated with func's creation stack.

    Must be called from inside an ``except`` block: the final bare
    ``raise`` re-raises the exception currently being handled. When
    ``func.stack`` is available, the message of ``e`` is extended in place
    with the stacktrace recorded at the function's creation.
    """
    if func.stack is not None:
        # Reraise any type of exceptions including the following:
        # - Chainer raises RuntimeError for NaN values; and
        # - NumPy raises FloatingPointError for invalid values.
        # TODO(kataoka): unify variable._check_grad_type and below
        additional_message = \
            '\n{}\nStacktrace of the function is below:\n{}'.format(
                '-' * _get_columns(),
                # Drop the last frame (the call into this machinery itself).
                ''.join(traceback.format_list(func.stack[:-1])))
        if e.args:
            e.args = (e.args[0] + additional_message,) + e.args[1:]
        else:
            e.args = (additional_message,)
    raise
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
def load_libnat():
    """Import the compiled Levenshtein edit-distance extension.

    Prefers the CUDA build and falls back to the CPU build, returning a
    ``(module, use_cuda)`` pair. Raises ImportError when neither build is
    available.
    """
    try:
        from fairseq import libnat_cuda

        return libnat_cuda, True
    except ImportError as cuda_err:
        print(str(cuda_err) + "... fall back to CPU version")
        try:
            from fairseq import libnat

            return libnat, False
        except ImportError as cpu_err:
            import sys

            sys.stderr.write(
                "ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n"
            )
            raise cpu_err
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
    """Compute insertion supervision targets from an edit-distance alignment.

    Args:
        in_tokens: (batch, in_len) source token ids, padded with padding_idx.
        out_tokens: (batch, out_len) target token ids, padded with padding_idx.
        padding_idx: id of the padding symbol.
        unk_idx: id used to mask positions that must be inserted.

    Returns:
        (masked_tgt_masks, masked_tgt_tokens, mask_ins_targets):
        a bool mask of inserted target positions, the target tokens with
        those positions replaced by unk_idx, and per-slot insertion counts.
    """
    libnat, use_cuda = load_libnat()

    def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx):
        # CUDA path: labels are derived from the Levenshtein alignment
        # computed entirely inside the extension.
        in_masks = in_tokens.ne(padding_idx)
        out_masks = out_tokens.ne(padding_idx)
        mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels(
            out_tokens.int(),
            libnat.levenshtein_distance(
                in_tokens.int(),
                out_tokens.int(),
                in_masks.sum(1).int(),
                out_masks.sum(1).int(),
            ),
        )
        # Restrict insertion marks to real (non-pad) target positions.
        masked_tgt_masks = masked_tgt_masks.bool() & out_masks
        # Insertion counts are per gap between source tokens; zero them at
        # padded source positions.
        mask_ins_targets = mask_ins_targets.type_as(in_tokens)[
            :, 1 : in_masks.size(1)
        ].masked_fill_(~in_masks[:, 1:], 0)
        masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
        return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets

    def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx):
        # CPU path: compute the edit path in Python lists, then tensorize.
        in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)

        in_tokens_list = [
            [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
        ]
        out_tokens_list = [
            [t for t in s if t != padding_idx]
            for i, s in enumerate(out_tokens.tolist())
        ]

        full_labels = libnat.suggested_ed2_path(
            in_tokens_list, out_tokens_list, padding_idx
        )
        # Number of insertions per slot; a leading padding marker means none.
        mask_inputs = [
            [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels
        ]

        # generate labels
        masked_tgt_masks = []
        for mask_input in mask_inputs:
            mask_label = []
            for beam_size in mask_input[1:-1]:  # HACK 1:-1
                # One kept token (0) followed by beam_size inserted ones (1).
                mask_label += [0] + [1 for _ in range(beam_size)]
            masked_tgt_masks.append(
                mask_label + [0 for _ in range(out_seq_len - len(mask_label))]
            )
        mask_ins_targets = [
            mask_input[1:-1]
            + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))]
            for mask_input in mask_inputs
        ]

        # transform to tensor
        masked_tgt_masks = torch.tensor(
            masked_tgt_masks, device=out_tokens.device
        ).bool()
        mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device)
        masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
        return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets

    if use_cuda:
        return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx)
    return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
def _get_del_targets(in_tokens, out_tokens, padding_idx):
    """Compute per-token deletion labels from an edit-distance alignment.

    Args:
        in_tokens: (batch, in_len) source token ids, padded with padding_idx.
        out_tokens: (batch, out_len) target token ids, padded with padding_idx.
        padding_idx: id of the padding symbol.

    Returns:
        Tensor of 0/1 deletion labels aligned with the input tokens.
    """
    libnat, use_cuda = load_libnat()

    def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx):
        in_masks = in_tokens.ne(padding_idx)
        out_masks = out_tokens.ne(padding_idx)

        word_del_targets = libnat.generate_deletion_labels(
            in_tokens.int(),
            libnat.levenshtein_distance(
                in_tokens.int(),
                out_tokens.int(),
                in_masks.sum(1).int(),
                out_masks.sum(1).int(),
            ),
        )
        # Padded source positions are never marked for deletion.
        word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(
            ~in_masks, 0
        )
        return word_del_targets

    def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx):
        out_seq_len = out_tokens.size(1)
        with torch.cuda.device_of(in_tokens):
            in_tokens_list = [
                [t for t in s if t != padding_idx]
                for i, s in enumerate(in_tokens.tolist())
            ]
            out_tokens_list = [
                [t for t in s if t != padding_idx]
                for i, s in enumerate(out_tokens.tolist())
            ]

        full_labels = libnat.suggested_ed2_path(
            in_tokens_list, out_tokens_list, padding_idx
        )
        # The last row of each label set holds the deletion labels.
        word_del_targets = [b[-1] for b in full_labels]
        word_del_targets = [
            labels + [0 for _ in range(out_seq_len - len(labels))]
            for labels in word_del_targets
        ]

        # transform to tensor
        word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device)
        return word_del_targets

    if use_cuda:
        return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx)
    return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx)
def _apply_ins_masks(
    in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx
):
    """Insert ``mask_ins_pred[b, i]`` unk placeholders after each token.

    NOTE: mutates ``in_tokens``, ``mask_ins_pred`` and ``in_scores`` in
    place (masked_fill_) before building the outputs.

    Returns:
        (out_tokens, out_scores) with placeholders inserted; out_scores is
        None when in_scores is None.
    """
    in_masks = in_tokens.ne(padding_idx)
    in_lengths = in_masks.sum(1)

    # HACK: hacky way to shift all the paddings to eos first.
    in_tokens.masked_fill_(~in_masks, eos_idx)
    mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0)

    out_lengths = in_lengths + mask_ins_pred.sum(1)
    out_max_len = out_lengths.max()
    # True at positions that belong to each (variable-length) output row.
    out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None]

    # Destination index of each original token after the insertions.
    reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1)
    # Start from all-unk inside each row (padding outside), then scatter the
    # original tokens to their new positions; the gaps remain unk.
    out_tokens = (
        in_tokens.new_zeros(in_tokens.size(0), out_max_len)
        .fill_(padding_idx)
        .masked_fill_(out_masks, unk_idx)
    )
    out_tokens[:, 0] = in_tokens[:, 0]
    out_tokens.scatter_(1, reordering, in_tokens[:, 1:])

    out_scores = None
    if in_scores is not None:
        in_scores.masked_fill_(~in_masks, 0)
        out_scores = in_scores.new_zeros(*out_tokens.size())
        out_scores[:, 0] = in_scores[:, 0]
        out_scores.scatter_(1, reordering, in_scores[:, 1:])

    return out_tokens, out_scores
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx):
word_ins_masks = in_tokens.eq(unk_idx)
out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks])
if in_scores is not None:
out_scores = in_scores.masked_scatter(
word_ins_masks, word_ins_scores[word_ins_masks]
)
else:
out_scores = None
return out_tokens, out_scores
def _apply_del_words(
    in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx
):
    # apply deletion to a tensor
    # NOTE: mutates ``word_del_pred`` in place; bos/eos are never deleted,
    # padding is always "deleted" so kept tokens compact to the left.
    in_masks = in_tokens.ne(padding_idx)
    bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx)

    max_len = in_tokens.size(1)
    word_del_pred.masked_fill_(~in_masks, 1)
    word_del_pred.masked_fill_(bos_eos_masks, 0)

    # Stable sort trick: deleted positions are pushed to index max_len so
    # that sorting yields a permutation that packs survivors first.
    reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1]

    out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering)

    out_scores = None
    if in_scores is not None:
        out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering)

    out_attn = None
    if in_attn is not None:
        # Apply the same permutation to every attention column.
        _mask = word_del_pred[:, :, None].expand_as(in_attn)
        _reordering = reordering[:, :, None].expand_as(in_attn)
        out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering)

    return out_tokens, out_scores, out_attn
def _skip(x, mask):
"""
Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors.
"""
if isinstance(x, int):
return x
if x is None:
return None
if isinstance(x, torch.Tensor):
if x.size(0) == mask.size(0):
return x[mask]
elif x.size(1) == mask.size(0):
return x[:, mask]
if isinstance(x, list):
return [_skip(x_i, mask) for x_i in x]
if isinstance(x, dict):
return {k: _skip(v, mask) for k, v in x.items()}
raise NotImplementedError
def _skip_encoder_out(encoder, encoder_out, mask):
if not mask.any():
return encoder_out
else:
return encoder.reorder_encoder_out(
encoder_out, mask.nonzero(as_tuple=False).squeeze()
)
def _fill(x, mask, y, padding_idx):
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None:
return y
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
dims = [x.size(0), y.size(1) - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = padding_idx
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
|
|
###############################################################################
#
# ptsvgconstraints.py - represent layout constraints for writing as Dunnart SVG
#
# File: ptsvgconstraints.py
# Author: Alex Stivala
# Created: February 2008
#
# $Id: ptsvgconstraints.py 3064 2009-12-18 03:15:57Z alexs $
#
# A PTSVGConstraint may be one of several types for separation constraints,
# distribution constraints, alignment constraints, etc.
#
###############################################################################
import sys
from ptsvgnode import PTSVGNode,PTGRAPH_NS,get_residue_strings,PTSVGNodeTerminus
from ptsecstruct import stride_chainid_to_pdb_chainid
from ptutils import get_int_icode,char_if_not_blank,biopdbresid_to_pdbresseq
from Bio.PDB import *
#-----------------------------------------------------------------------------
#
# Constants
#
#-----------------------------------------------------------------------------

DUNNART_ALIGN_TOP    = 0     # dunnart constants (guideline.h)
DUNNART_ALIGN_MIDDLE = 1
DUNNART_ALIGN_BOTTOM = 2
DUNNART_ALIGN_LEFT   = 3
DUNNART_ALIGN_CENTER = 4
DUNNART_ALIGN_RIGHT  = 5

DUNNART_GUIDE_TYPE_VERT = 100
DUNNART_GUIDE_TYPE_HORI = 101

# list of connector colors. Use only dark colors (and full
# opacity) since connectors are drawn as thin lines so light
# colors will be hard to see.
DUNNART_DEFAULT_LINE_COLOR = "000000ff" # black
DUNNART_LINE_COLORS = [ DUNNART_DEFAULT_LINE_COLOR,
                        "0000ffff", # blue
                        "551a8bff", # purple4
                        "8b008bff", # magenta4
                        "8b0000ff", # darkred
                        "8b2323ff", # brown4
                        "8fbc8fff", # dark sea green
                        "191970ff", # midnight blue
                        "9400d3ff"  # dark violet
                      ]

#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------

class PTSVGIndGuide:
    """
    PTSVGIndGuide represents a guideline for aligning shapes on.
    """
    def __init__(self, xmlid, pos, direction):
        """
        Create a PTSVGIndGuide given unique XML id, position, and direction.

        Parameters:
           xmlid - unique XML id for this guide
           pos - position (x or y depending on direction)
           direction - DUNNART_GUIDE_TYPE_VERT or _HORI

        Raises Exceptions:
           ValueError for bad direction
        """
        if direction not in [DUNNART_GUIDE_TYPE_HORI, DUNNART_GUIDE_TYPE_VERT]:
            # str() is required: direction is an int, so concatenating it
            # directly would raise TypeError instead of the intended
            # ValueError.
            raise ValueError('bad direction ' + str(direction))
        self.xmlid = xmlid
        self.pos = pos
        self.direction = direction

    def write_svg(self, fh):
        """
        Write this indguide to the SVG file

        Parameters:
           fh - open filehandle to write SVG XML text to

        Return value:
           None
        """
        fh.write('  <dunnart:node dunnart:type="indGuide" ' +
                 'dunnart:position="' + str(self.pos) + '" ' +
                 'dunnart:direction="' + str(self.direction) + '" ' +
                 'id="' + str(self.xmlid) + '"/>\n')

    def translate(self, xshift, yshift):
        """
        Move the indguide left/right by xshift (-/+) and/or up/down by
        yshift (-/+).

        Parameters:
           xshift - amount to move left(-) or right(+) by
           yshift - amount to move up(-) or down(+) by

        Return value:
           None

        Modifies data members:
           pos
        """
        # Only the coordinate matching the guide's orientation moves.
        if self.direction == DUNNART_GUIDE_TYPE_HORI:
            self.pos += yshift
        else:  # VERT
            self.pos += xshift
class PTSVGDistribution:
    """
    PTSVGDistribution represents the distribution handle for distribution
    constraint.
    """
    def __init__(self, xmlid, direction, sepdistance, position):
        """
        Construct a PTSVGDistribution, given direction, separation
        distance, position and id.

        Parameters:
            xmlid - unique XML identifier for this object
            direction - DUNNART_GUIDE_TYPE_HORI or _VERT
            sepdistance - separation distance
            position - position (x or y depending on HORI or VERT) for handle

        Raises Exceptions:
            ValueError for bad direction
        """
        if direction not in [DUNNART_GUIDE_TYPE_HORI, DUNNART_GUIDE_TYPE_VERT]:
            # str() is required: direction is an int, so concatenating it
            # directly would raise TypeError instead of the intended
            # ValueError.
            raise ValueError('bad direction ' + str(direction))
        self.xmlid = xmlid
        self.direction = direction
        self.sepdistance = sepdistance
        self.position = position

    def write_svg(self, fh):
        """
        Write this distribution to the SVG file

        Parameters:
           fh - open filehandle to write SVG XML text to

        Return value:
           None
        """
        fh.write('  <dunnart:node dunnart:type="distribution" ' +
                 'dunnart:direction="' + str(self.direction) + '" ' +
                 'dunnart:sepDistance="' +
                 str(self.sepdistance) + '" ' +
                 'dunnart:position="' + str(self.position) + '" ' +
                 'id="' + str(self.xmlid) + '"/>\n')

    def translate(self, xshift, yshift):
        """
        Move the handle left/right by xshift (-/+) and/or up/down by
        yshift (-/+).

        Parameters:
           xshift - amount to move left(-) or right(+) by
           yshift - amount to move up(-) or down(+) by

        Return value:
           None

        Modifies data members:
           position
        """
        if self.direction == DUNNART_GUIDE_TYPE_HORI:
            self.position += xshift
        else:  # VERT
            self.position += yshift
class PTSVGDistroConstraint:
    """
    PTSVGDistroConstraint represents a distribution constraint: a common
    separation between a set of indguides. It refers back to its
    PTSVGDistribution, which represents the distribution handle.
    """
    def __init__(self, indguide1, indguide2, distro):
        """
        Construct a PTSVGDistroConstraint given two indguides and a
        distribution.

        Parameters:
           indguide1 - PTSVGIndGuide for one object to align
           indguide2 - PTSVGIndGuide for the other object to align
           distro - PTSVGDistribution to align indguides with
        """
        self.indguide1 = indguide1
        self.indguide2 = indguide2
        self.distro = distro

    def write_svg(self, fh):
        """
        Write this constraint to the SVG file

        Parameters:
           fh - open filehandle to write SVG XML text to

        Return value:
           None
        """
        element = ('  <dunnart:node dunnart:type="constraint" '
                   'isMultiway="1" relType="distribution" '
                   'constraintID="' + str(self.distro.xmlid) + '" '
                   'objOneID="' + str(self.indguide1.xmlid) + '" '
                   'objTwoID="' + str(self.indguide2.xmlid) + '"/>\n')
        fh.write(element)

    def translate(self, xshift, yshift):
        """
        Translation has no meaning for an actual constraint
        """
        return
class PTSVGAlignmentConstraint:
    """
    PTSVGAlignmentConstraint represents an alignment constraint, that is
    a shape is aligned on an indguide.
    """

    def __init__(self, indguide, svgnode, alignpos):
        """
        Construct a PTSVGAlignmentConstraint, given a PTSVGIndGuide object,
        PTSVGNode derived object (for a shape) and an alignment position.

        Parameters:
           indguide - PTSVGIndGuide to align on
           svgnode  - PTSVGNode to align
           alignpos - DUNNART_ALIGN_LEFT/CENTER/RIGHT/TOP/MIDDLE/BOTTOM

        Raises Exceptions:
           ValueError for invalid alignpos
           TypeError for wrong type of indguide or svgnode
        """
        if not isinstance(svgnode, PTSVGNode):
            raise TypeError('wrong type for svgnode')
        if not isinstance(indguide, PTSVGIndGuide):
            # typo fix: message previously read 'wront type for indguide'
            raise TypeError('wrong type for indguide')
        if (alignpos not in [DUNNART_ALIGN_LEFT, DUNNART_ALIGN_CENTER,
                             DUNNART_ALIGN_RIGHT, DUNNART_ALIGN_TOP,
                             DUNNART_ALIGN_MIDDLE, DUNNART_ALIGN_BOTTOM]):
            # str() coercion: alignpos is normally a numeric constant;
            # str + int concatenation would raise TypeError instead of the
            # intended ValueError.
            raise ValueError('invalid alignpos ' + str(alignpos))
        self.indguide = indguide
        self.svgnode = svgnode
        self.alignpos = alignpos

    def write_svg(self, fh):
        """
        Write this constraint to the SVG file.

        Parameters:
           fh - open filehandle to write SVG XML text to
        Return value:
           None
        """
        fh.write(' <dunnart:node dunnart:type="constraint" ' +
                 'isMultiway="1" relType="alignment" ' +
                 'constraintID="' + str(self.indguide.xmlid) +
                 '" ' +
                 'objOneID="' + str(self.svgnode.xmlid) + '" ' +
                 'alignmentPos="' + str(self.alignpos) + '" />\n')

    def translate(self, xshift, yshift):
        """
        Translation has no meaning for an actual constraint; do nothing.
        """
        return
# This is not a constraint but might as well go here
class PTSVGConnector:
    """
    PTSVGConnector represents a connector (line) between shapes.
    """

    def __init__(self, xmlid, src, dest, srcFlags, dstFlags, color,
                 directed = False):
        """
        Create a PTSVGConnector.

        Parameters:
           xmlid - unique XML id for this connector
           src - PTSVGNode connector is FROM
           dest - PTSVGNode connector is TO
           srcFlags - connector flags (for ports, DUNNART_DEFAULT_PORT, etc.)
           dstFlags - connector flags
           color - line color hex RGB string
           directed - (default False) If True, puts arrowhead on dest end
        """
        self.xmlid = xmlid
        self.src = src
        self.dest = dest
        self.srcFlags = srcFlags
        self.dstFlags = dstFlags
        self.color = color
        self.directed = directed
        # The following are built by build_resname_sequence():
        self.issued_discontinuous_warning = False
        self.residue_list = []  # list of Bio.PDB Residues in the coil region
        self.resname_list = []  # list of 3 letter residue names in this coil
                                # region
        self.resid_list = []    # list of string PDB residue sequence numbers
        self.nterm_resname = None  # 3 letter residue name of residue
                                   # immediately N-terminal of this coil
                                   # region, or "" for N-terminus
        self.nterm_resid = None    # PDB residue sequence number of residue
                                   # immediately N-terminal of this coil
                                   # region, or None for N-terminus.
        self.cterm_resname = None  # 3 letter residue name of residue
                                   # immediately C-terminal of this coil
                                   # region, or "" for C-terminus
        self.cterm_resid = None    # PDB residue sequence number of residue
                                   # immediately C-terminal of this coil
                                   # region, or None for C-terminus.

    def get_residue_list(self, pdb_residue_list, pdb_resid_dict):
        """
        Return the list of Bio.PDB Residue objects for the residues in the
        coil region represented by this connector.

        Parameters:
           pdb_residue_list - list of all residues (for all chains) in the
                              protein
           pdb_resid_dict - dict of { (chainid,pdb_resseq) : seqindx }
                            where chainid and pdb_resseq make up
                            the PDB residue identifier, the pdb_resseq
                            being string resnum+icode if any e.g.
                            '60' or '60A', seqindx is the index
                            into sequential list of all residues
                            pdb_residue_list.

        Return value:
           tuple (nterm_residue, cterm_residue, residue_list)
           where
            nterm_residue is the Bio.PDB Residue object of residue immediately
            N-terminal of this coil region, or None if N terminus;
            cterm_residue is the Bio.PDB Residue object of residue immediately
            C-terminal of this coil region, or None if C terminus;
            residue_list is list of Bio.PDB Residue objects of residues in
            this coil region.

        Uses data members (read/write):
           residue_list - used to memoize building of the residue_list
        """
        # Determine the flanking residues and the index range of the coil
        # region.  A terminus pseudo-node means the region runs to the
        # start/end of the whole residue list.
        if isinstance(self.src, PTSVGNodeTerminus):
            nterm_residue = None
            start_indx = 0
        else:
            nterm_residue = self.src.get_residue_list()[-1]
            start_indx = pdb_resid_dict[(self.src.chainid,
                                         self.src.get_end_res_seq())] + 1
        if isinstance(self.dest, PTSVGNodeTerminus):
            cterm_residue = None
            end_indx = len(pdb_residue_list) - 1
        else:
            cterm_residue = self.dest.get_residue_list()[0]
            end_indx = pdb_resid_dict[(self.dest.chainid,
                                       self.dest.get_start_res_seq())] - 1
        # Bug fix: the cached value used to be assigned to a misspelled
        # throwaway variable ('resisdue_list') and the slice was then
        # recomputed unconditionally, so the memoization never took effect.
        if self.residue_list:  # memoization: use if already computed
            residue_list = self.residue_list
        else:
            residue_list = pdb_residue_list[start_indx : end_indx + 1]
            self.residue_list = residue_list
        return (nterm_residue, cterm_residue, residue_list)

    def build_resname_sequence(self, pdb_residue_list, pdb_resid_dict):
        """
        Build list of (3 letter) residue names in sequence for the residues
        in this node (SSE), and a matching list of PDB residue sequence
        numbers.

        Parameters:
           pdb_residue_list - list of all residues (for all chains) in the
                              protein
           pdb_resid_dict - dict of { (chainid,pdb_resseq) : seqindx }
                            where chainid and pdb_resseq make up
                            the PDB residue identifier, the pdb_resseq
                            being string resnum+icode if any e.g.
                            '60' or '60A', seqindx is the index
                            into sequential list of all residues
                            pdb_residue_list.

        Return value: None
        Uses data members (write): resname_list, resid_list,
                                   nterm_resname, nterm_resid,
                                   cterm_resname, cterm_resid
        """
        (nterm_residue, cterm_residue, residue_list) =\
                  self.get_residue_list(pdb_residue_list, pdb_resid_dict)
        self.resname_list = [residue.get_resname() for residue in residue_list]
        # id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
        self.resid_list = [str(residue.get_id()[1]) +
                           char_if_not_blank(residue.get_id()[2])
                           for residue in residue_list]
        if nterm_residue:
            self.nterm_resid = nterm_residue.get_id()[1]
            self.nterm_resname = nterm_residue.get_resname()
        else:  # this coil region starts at the N terminus
            self.nterm_resid = None
            self.nterm_resname = ""
        if cterm_residue:
            self.cterm_resid = cterm_residue.get_id()[1]
            self.cterm_resname = cterm_residue.get_resname()
        else:  # this coil region ends at the C terminus
            self.cterm_resid = None
            self.cterm_resname = ""

    def write_svg(self, fh):
        """
        Write this connector to the SVG file.

        Parameters:
           fh - open filehandle to write SVG XML text to
        Return value:
           None
        """
        if self.directed:
            directed_str = "1"
        else:
            directed_str = "0"
        # put first and last residue sequence numbers at left and right
        # of hovertext string, with residues in sequence in between e.g.
        # "134 ASP LYS ARG 136". For only single residue it will
        # be just like single-residue hovertext in shapes e.g. "ASP 134"
        # and for no residues (connector between two
        # adjacent SSEs with no coil region in between) we will put it like
        # "(134-135)" indicating the two residue sequence numbers it joins.
        # TODO: have per-residue hovertext like helices and strands.
        (residue_names, residue_ids) = get_residue_strings(self.resname_list,
                                                           self.resid_list)
        if len(self.resname_list) == 0:
            # no coil residues: show the flanking residue numbers, using
            # N/C when the region abuts a terminus
            if self.nterm_resid and self.cterm_resid:
                hovertext = '(' + str(self.nterm_resid) + '-' +\
                            str(self.cterm_resid) + ')'
            elif self.cterm_resid:
                hovertext = '(N-' + str(self.cterm_resid) + ')'
            else:
                hovertext = '(' + str(self.nterm_resid) + '-C)'
        elif len(self.resname_list) == 1:
            hovertext = self.resname_list[0] + " " + str(self.resid_list[0])
        else:
            hovertext = str(self.resid_list[0]) + " " + residue_names + " " +\
                        str(self.resid_list[-1])
        fh.write(' <dunnart:node id="' + str(self.xmlid) + '" ' +
                 'dunnart:srcID="' + str(self.src.xmlid) + '" ' +
                 'dunnart:dstID="' + str(self.dest.xmlid) + '" ' +
                 'dunnart:srcFlags="' + str(self.srcFlags) + '" ' +
                 'dunnart:dstFlags="' + str(self.dstFlags) + '" ' +
                 'dunnart:directed="' + directed_str + '" ' +
                 'dunnart:lineColour="' + self.color + '" ' +
                 PTGRAPH_NS + ':' + 'residueNames="' +
                 residue_names + '" ' +
                 PTGRAPH_NS + ':' + 'residueSeqNums="' +
                 residue_ids +
                 '" ' +
                 PTGRAPH_NS + ':' + 'hovertext="' + hovertext + '" '
                 'dunnart:type="connAvoidPoly"/>\n')
class PTSVGSeparation:
    """
    PTSVGSeparation represents information about separation between
    indguides, to be used with PTSVGSeparationConstraint.
    """

    def __init__(self, xmlid, direction, sepdistance, position):
        """
        Construct a PTSVGSeparation, given direction, separation
        distance, position and id.

        Parameters:
           xmlid - unique XML identifier for this object
           direction - DUNNART_GUIDE_TYPE_HORI or _VERT
           sepdistance - separation distance
           position - position (x or y depending on HORI or VERT) for handle
        """
        self.xmlid = xmlid
        self.direction = direction
        self.sepdistance = sepdistance
        self.position = position

    def write_svg(self, fh):
        """
        Write this separation to the SVG file.

        Parameters:
           fh - open filehandle to write SVG XML text to
        Return value:
           None
        """
        # Single write of the whole element; byte-identical output to the
        # original chained concatenation.
        node = (' <dunnart:node dunnart:type="separation" '
                'dunnart:direction="' + str(self.direction) + '" '
                'dunnart:sepDistance="' + str(self.sepdistance) + '" '
                'dunnart:position="' + str(self.position) + '" '
                'id="' + str(self.xmlid) + '" '
                'dunnart:equality="1" '
                '/>\n')
        fh.write(node)
##############################################################################
#
# Obsolete code below here. Separation (equality) constraints are no longer
# used in Dunnart (as of 0.15), now distribution constraints can be used
# instead.
#
##############################################################################
class PTSVGSeparationConstraint:
    """
    PTSVGSeparationConstraint represents a separation constraint, that is
    a separation between a pair of indguides.  It refers back to its
    PTSVGSeparation which specifies the separation distance, direction etc.
    """

    def __init__(self, indguide1, indguide2, sep):
        """
        Construct a PTSVGSeparationConstraint given two indguides and a
        separation.

        Parameters:
           indguide1 - PTSVGIndGuide for one object
           indguide2 - PTSVGIndGuide for the other object
           sep       - PTSVGSeparation object specifying the separation
        """
        self.indguide1 = indguide1
        self.indguide2 = indguide2
        self.sep = sep

    def write_svg(self, fh):
        """
        Write this constraint to the SVG file.

        Parameters:
           fh - open filehandle to write SVG XML text to
        Return value:
           None
        """
        node = (' <dunnart:node dunnart:type="constraint" isMultiway="1" '
                'relType="separation" constraintID="' + str(self.sep.xmlid) +
                '" '
                'objOneID="' + str(self.indguide1.xmlid) + '" '
                'objTwoID="' + str(self.indguide2.xmlid) + '" />\n')
        fh.write(node)
|
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..exceptions import ConvergenceWarning
from ..utils import check_array, as_float_array, check_random_state
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ["fastica", "FastICA"]
def _gs_decorrelation(w, W, j):
    """
    Orthogonalize w wrt the first j rows of W (Gram-Schmidt step).

    Parameters
    ----------
    w : ndarray of shape (n,)
        Array to be orthogonalized; modified in place.
    W : ndarray of shape (p, n)
        Null space definition.
    j : int < p
        Number of leading rows of W against which w is orthogonalized.

    Notes
    -----
    Assumes that W is orthogonal.  w is changed in place and also returned.
    """
    # Subtract the projection of w onto the span of the first j rows:
    # w -= (w @ basis.T) @ basis
    basis = W[:j]
    w -= np.linalg.multi_dot([w, basis.T, basis])
    return w
def _sym_decorrelation(W):
    """Symmetric decorrelation, i.e. W <- (W * W.T) ^{-1/2} * W."""
    # Eigendecompose W W^T: eigvals are the squared singular values of W,
    # eigvecs its left singular vectors.
    eigvals, eigvecs = linalg.eigh(np.dot(W, W.T))
    # (W W^T)^{-1/2} = eigvecs * diag(1/sqrt(eigvals)) * eigvecs.T
    scaled = eigvecs * (1.0 / np.sqrt(eigvals))
    return np.linalg.multi_dot([scaled, eigvecs.T, W])
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function.

    Extracts the independent components one at a time, Gram-Schmidt
    decorrelating each new unmixing vector against the rows already found.

    Used internally by FastICA.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    iters_per_component = []

    # j indexes the component currently being extracted.
    for j in range(n_components):
        # Start from the normalized j-th row of the initial guess.
        w = w_init[j, :].copy()
        w /= np.sqrt((w**2).sum())

        for i in range(max_iter):
            gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
            # Fixed-point update, then deflate against earlier components.
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            _gs_decorrelation(w1, W, j)
            w1 /= np.sqrt((w1**2).sum())
            # Converged when |<w1, w>| is (close to) 1.
            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            if lim < tol:
                break

        iters_per_component.append(i + 1)
        W[j, :] = w

    return W, max(iters_per_component)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Updates all rows of the unmixing matrix simultaneously, applying a
    symmetric decorrelation after every iteration.

    Used internally by FastICA -- main loop.
    """
    W = _sym_decorrelation(w_init)
    del w_init
    n_samples = float(X.shape[1])

    converged = False
    for ii in range(max_iter):
        gwtx, g_wtx = g(np.dot(W, X), fun_args)
        W1 = _sym_decorrelation(
            np.dot(gwtx, X.T) / n_samples - g_wtx[:, np.newaxis] * W
        )
        del gwtx, g_wtx
        # builtin max, abs are faster than numpy counter parts.
        lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
        W = W1
        if lim < tol:
            converged = True
            break

    if not converged:
        warnings.warn(
            "FastICA did not converge. Consider increasing "
            "tolerance or the maximum number of iterations.",
            ConvergenceWarning,
        )

    return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
    """G function (and its row-averaged derivative) for fun='logcosh'.

    Parameters
    ----------
    x : ndarray of shape (n_components, n_samples)
        Data; scaled and tanh'd in place.
    fun_args : dict, default=None
        May contain 'alpha'; defaults to 1.0.

    Returns
    -------
    gx : ndarray
        tanh(alpha * x), computed in place on x.
    g_x : ndarray of shape (n_components,)
        Row-wise mean of the derivative alpha * (1 - gx**2).
    """
    # Bug fix: the default fun_args=None previously crashed with
    # AttributeError on fun_args.get(); treat None as an empty dict.
    alpha = (fun_args or {}).get("alpha", 1.0)
    x *= alpha
    gx = np.tanh(x, x)  # apply the tanh inplace
    g_x = np.empty(x.shape[0])
    # XXX compute in chunks to avoid extra allocation
    for i, gx_i in enumerate(gx):  # please don't vectorize.
        g_x[i] = (alpha * (1 - gx_i**2)).mean()
    return gx, g_x
def _exp(x, fun_args):
    """G function (and its averaged derivative) for fun='exp'."""
    # G'(x) = x * exp(-x^2/2);  G''(x) = (1 - x^2) * exp(-x^2/2)
    gauss = np.exp(-(x**2) / 2)
    return x * gauss, ((1 - x**2) * gauss).mean(axis=-1)
def _cube(x, fun_args):
    """G function (and its averaged derivative) for fun='cube'."""
    gx = x**3
    g_x = (3 * x**2).mean(axis=-1)
    return gx, g_x
def fastica(
    X,
    n_components=None,
    *,
    algorithm="parallel",
    whiten="warn",
    fun="logcosh",
    fun_args=None,
    max_iter=200,
    tol=1e-04,
    w_init=None,
    random_state=None,
    return_X_mean=False,
    compute_sources=True,
    return_n_iter=False,
):
    """Perform Fast Independent Component Analysis.

    Functional wrapper: builds a :class:`FastICA` estimator, fits it, and
    unpacks the fitted attributes.  The implementation is based on [1]_.
    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.
    n_components : int, default=None
        Number of components to extract. If None no dimension reduction
        is performed.
    algorithm : {'parallel', 'deflation'}, default='parallel'
        Apply a parallel or deflational FASTICA algorithm.
    whiten : str or bool, default="warn"
        Specify the whitening strategy to use.
        If 'arbitrary-variance' (default), a whitening with variance
        arbitrary is used.
        If 'unit-variance', the whitening matrix is rescaled to ensure that
        each recovered source has unit variance.
        If False, the data is already considered to be whitened, and no
        whitening is performed.
        .. deprecated:: 1.1
            From version 1.3, `whiten='unit-variance'` will be used by
            default. `whiten=True` is deprecated from 1.1 and will raise
            ValueError in 1.3. Use `whiten=arbitrary-variance` instead.
    fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
        The functional form of the G function used in the approximation to
        neg-entropy. Could be either 'logcosh', 'exp', or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in
        the point. The derivative should be averaged along its last
        dimension. Example:
        def my_g(x):
            return x ** 3, np.mean(3 * x ** 2, axis=-1)
    fun_args : dict, default=None
        Arguments to send to the functional form.
        If empty or None and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}
    max_iter : int, default=200
        Maximum number of iterations to perform.
    tol : float, default=1e-04
        A positive scalar giving the tolerance at which the un-mixing
        matrix is considered to have converged.
    w_init : ndarray of shape (n_components, n_components), default=None
        Initial un-mixing array. If None (default) then an array of normal
        r.v.'s is used.
    random_state : int, RandomState instance or None, default=None
        Used to initialize ``w_init`` when not specified, with a normal
        distribution. Pass an int, for reproducible results across
        multiple function calls. See :term:`Glossary <random_state>`.
    return_X_mean : bool, default=False
        If True, X_mean is returned too.
    compute_sources : bool, default=True
        If False, sources are not computed, but only the rotation matrix.
        This can save memory when working with big data. Defaults to True.
    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    K : ndarray of shape (n_components, n_features) or None
        If whiten is 'True', K is the pre-whitening matrix that projects
        data onto the first n_components principal components. If whiten
        is 'False', K is 'None'.
    W : ndarray of shape (n_components, n_components)
        The square matrix that unmixes the data after whitening.
        The mixing matrix is the pseudo-inverse of matrix ``W K``
        if K is not None, else it is the inverse of W.
    S : ndarray of shape (n_samples, n_components) or None
        Estimated source matrix.
    X_mean : ndarray of shape (n_features,)
        The mean over features. Returned only if return_X_mean is True.
    n_iter : int
        If the algorithm is "deflation", n_iter is the maximum number of
        iterations run across all components. Else they are just the
        number of iterations taken to converge. This is returned only
        when return_n_iter is set to `True`.

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing matrix.
    In short ICA attempts to `un-mix' the data by estimating an un-mixing
    matrix W where ``S = W K X.``
    While FastICA was proposed to estimate as many sources as features,
    it is possible to estimate less by setting n_components < n_features.
    In this case K is not a square matrix and the estimated A is the
    pseudo-inverse of ``W K``.
    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed before the
    algorithm is applied. This makes it slightly faster for
    Fortran-ordered input.

    References
    ----------
    .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
           Algorithms and Applications, Neural Networks, 13(4-5), 2000,
           pp. 411-430.
    """
    estimator = FastICA(
        n_components=n_components,
        algorithm=algorithm,
        whiten=whiten,
        fun=fun,
        fun_args=fun_args,
        max_iter=max_iter,
        tol=tol,
        w_init=w_init,
        random_state=random_state,
    )
    sources = estimator._fit(X, compute_sources=compute_sources)

    # Whitening was performed unless whiten resolved to False.
    if estimator._whiten in ("unit-variance", "arbitrary-variance"):
        kernel, X_mean = estimator.whitening_, estimator.mean_
    else:
        kernel = X_mean = None

    returned_values = [kernel, estimator._unmixing, sources]
    if return_X_mean:
        returned_values.append(X_mean)
    if return_n_iter:
        returned_values.append(estimator.n_iter_)
    return returned_values
class FastICA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """FastICA: a fast algorithm for Independent Component Analysis.
    The implementation is based on [1]_.
    Read more in the :ref:`User Guide <ICA>`.
    Parameters
    ----------
    n_components : int, default=None
        Number of components to use. If None is passed, all are used.
    algorithm : {'parallel', 'deflation'}, default='parallel'
        Apply parallel or deflational algorithm for FastICA.
    whiten : str or bool, default="warn"
        Specify the whitening strategy to use.
        If 'arbitrary-variance' (default), a whitening with variance arbitrary is used.
        If 'unit-variance', the whitening matrix is rescaled to ensure that each
        recovered source has unit variance.
        If False, the data is already considered to be whitened, and no
        whitening is performed.
        .. deprecated:: 1.1
            From version 1.3 whiten='unit-variance' will be used by default.
            `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3.
            Use `whiten=arbitrary-variance` instead.
    fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example::
            def my_g(x):
                return x ** 3, (3 * x ** 2).mean(axis=-1)
    fun_args : dict, default=None
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.
    max_iter : int, default=200
        Maximum number of iterations during fit.
    tol : float, default=1e-4
        Tolerance on update at each iteration.
    w_init : ndarray of shape (n_components, n_components), default=None
        The mixing matrix to be used to initialize the algorithm.
    random_state : int, RandomState instance or None, default=None
        Used to initialize ``w_init`` when not specified, with a
        normal distribution. Pass an int, for reproducible results
        across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        The linear operator to apply to the data to get the independent
        sources. This is equal to the unmixing matrix when ``whiten`` is
        False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
        ``whiten`` is True.
    mixing_ : ndarray of shape (n_features, n_components)
        The pseudo-inverse of ``components_``. It is the linear operator
        that maps independent sources to the data.
    mean_ : ndarray of shape(n_features,)
        The mean over features. Only set if `self.whiten` is True.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge.
    whitening_ : ndarray of shape (n_components, n_features)
        Only set if whiten is 'True'. This is the pre-whitening matrix
        that projects data onto the first `n_components` principal components.
    See Also
    --------
    PCA : Principal component analysis (PCA).
    IncrementalPCA : Incremental principal components analysis (IPCA).
    KernelPCA : Kernel Principal component analysis (KPCA).
    MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
    SparsePCA : Sparse Principal Components Analysis (SparsePCA).
    References
    ----------
    .. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
           Algorithms and Applications, Neural Networks, 13(4-5), 2000,
           pp. 411-430.
    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import FastICA
    >>> X, _ = load_digits(return_X_y=True)
    >>> transformer = FastICA(n_components=7,
    ...         random_state=0,
    ...         whiten='unit-variance')
    >>> X_transformed = transformer.fit_transform(X)
    >>> X_transformed.shape
    (1797, 7)
    """
    def __init__(
        self,
        n_components=None,
        *,
        algorithm="parallel",
        whiten="warn",
        fun="logcosh",
        fun_args=None,
        max_iter=200,
        tol=1e-4,
        w_init=None,
        random_state=None,
    ):
        super().__init__()
        # scikit-learn convention: store constructor params unmodified;
        # all validation/resolution happens in _fit.
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state
    def _fit(self, X, compute_sources=False):
        """Fit the model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        compute_sources : bool, default=False
            If False, sources are not computes but only the rotation matrix.
            This can save memory when working with big data. Defaults to False.
        Returns
        -------
        S : ndarray of shape (n_samples, n_components) or None
            Sources matrix. `None` if `compute_sources` is `False`.
        """
        # Resolve the deprecated whiten values ("warn" default and the
        # legacy True) into the string form used internally; self.whiten
        # itself is left untouched per sklearn convention.
        self._whiten = self.whiten
        if self._whiten == "warn":
            warnings.warn(
                "From version 1.3 whiten='unit-variance' will be used by default.",
                FutureWarning,
            )
            self._whiten = "arbitrary-variance"
        if self._whiten is True:
            warnings.warn(
                "From version 1.3 whiten=True should be specified as "
                "whiten='arbitrary-variance' (its current behaviour). This "
                "behavior is deprecated in 1.1 and will raise ValueError in 1.3.",
                FutureWarning,
                stacklevel=2,
            )
            self._whiten = "arbitrary-variance"
        # Work on the transposed data (features x samples); copy only when
        # whitening will modify it in place.
        XT = self._validate_data(
            X, copy=self._whiten, dtype=FLOAT_DTYPES, ensure_min_samples=2
        ).T
        fun_args = {} if self.fun_args is None else self.fun_args
        random_state = check_random_state(self.random_state)
        alpha = fun_args.get("alpha", 1.0)
        if not 1 <= alpha <= 2:
            raise ValueError("alpha must be in [1,2]")
        # Select the contrast (G) function used by the solvers.
        if self.fun == "logcosh":
            g = _logcosh
        elif self.fun == "exp":
            g = _exp
        elif self.fun == "cube":
            g = _cube
        elif callable(self.fun):
            def g(x, fun_args):
                return self.fun(x, **fun_args)
        else:
            exc = ValueError if isinstance(self.fun, str) else TypeError
            raise exc(
                "Unknown function %r;"
                " should be one of 'logcosh', 'exp', 'cube' or callable"
                % self.fun
            )
        n_features, n_samples = XT.shape
        n_components = self.n_components
        # n_components is meaningless without whitening; it is also capped
        # at min(n_samples, n_features).
        if not self._whiten and n_components is not None:
            n_components = None
            warnings.warn("Ignoring n_components with whiten=False.")
        if n_components is None:
            n_components = min(n_samples, n_features)
        if n_components > min(n_samples, n_features):
            n_components = min(n_samples, n_features)
            warnings.warn(
                "n_components is too large: it will be set to %s" % n_components
            )
        if self._whiten:
            # Centering the features of X
            X_mean = XT.mean(axis=-1)
            XT -= X_mean[:, np.newaxis]
            # Whitening and preprocessing by PCA
            u, d, _ = linalg.svd(XT, full_matrices=False, check_finite=False)
            del _
            K = (u / d).T[:n_components]  # see (6.33) p.140
            del u, d
            X1 = np.dot(K, XT)
            # see (13.6) p.267 Here X1 is white and data
            # in X has been projected onto a subspace by PCA
            X1 *= np.sqrt(n_samples)
        else:
            # X must be casted to floats to avoid typing issues with numpy
            # 2.0 and the line below
            X1 = as_float_array(XT, copy=False)  # copy has been taken care of
        # Initial unmixing matrix: user-supplied or random normal.
        w_init = self.w_init
        if w_init is None:
            w_init = np.asarray(
                random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
            )
        else:
            w_init = np.asarray(w_init)
            if w_init.shape != (n_components, n_components):
                raise ValueError(
                    "w_init has invalid shape -- should be %(shape)s"
                    % {"shape": (n_components, n_components)}
                )
        if self.max_iter < 1:
            raise ValueError(
                "max_iter should be greater than 1, got (max_iter={})".format(
                    self.max_iter
                )
            )
        kwargs = {
            "tol": self.tol,
            "g": g,
            "fun_args": fun_args,
            "max_iter": self.max_iter,
            "w_init": w_init,
        }
        # Dispatch to the chosen solver.
        if self.algorithm == "parallel":
            W, n_iter = _ica_par(X1, **kwargs)
        elif self.algorithm == "deflation":
            W, n_iter = _ica_def(X1, **kwargs)
        else:
            raise ValueError(
                "Invalid algorithm: must be either `parallel` or `deflation`."
            )
        del X1
        self.n_iter_ = n_iter
        if compute_sources:
            if self._whiten:
                S = np.linalg.multi_dot([W, K, XT]).T
            else:
                S = np.dot(W, XT).T
        else:
            S = None
        if self._whiten:
            if self._whiten == "unit-variance":
                # Rescale W so each recovered source has unit variance;
                # the sources are needed here even if not requested.
                if not compute_sources:
                    S = np.linalg.multi_dot([W, K, XT]).T
                S_std = np.std(S, axis=0, keepdims=True)
                S /= S_std
                W /= S_std.T
            self.components_ = np.dot(W, K)
            self.mean_ = X_mean
            self.whitening_ = K
        else:
            self.components_ = W
        self.mixing_ = linalg.pinv(self.components_, check_finite=False)
        self._unmixing = W
        return S
    def fit_transform(self, X, y=None):
        """Fit the model and recover the sources from X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Estimated sources obtained by transforming the data with the
            estimated unmixing matrix.
        """
        return self._fit(X, compute_sources=True)
    def fit(self, X, y=None):
        """Fit the model to X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(X, compute_sources=False)
        return self
    def transform(self, X, copy=True):
        """Recover the sources from X (apply the unmixing matrix).
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data to transform, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        copy : bool, default=True
            If False, data passed to fit can be overwritten. Defaults to True.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Estimated sources obtained by transforming the data with the
            estimated unmixing matrix.
        """
        check_is_fitted(self)
        X = self._validate_data(
            X, copy=(copy and self._whiten), dtype=FLOAT_DTYPES, reset=False
        )
        # Centering (mean_) only applies when the model whitened the data.
        if self._whiten:
            X -= self.mean_
        return np.dot(X, self.components_.T)
    def inverse_transform(self, X, copy=True):
        """Transform the sources back to the mixed data (apply mixing matrix).
        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            Sources, where `n_samples` is the number of samples
            and `n_components` is the number of components.
        copy : bool, default=True
            If False, data passed to fit are overwritten. Defaults to True.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_features)
            Reconstructed data obtained with the mixing matrix.
        """
        check_is_fitted(self)
        X = check_array(X, copy=(copy and self._whiten), dtype=FLOAT_DTYPES)
        X = np.dot(X, self.mixing_.T)
        # Undo the centering removed during fit when whitening was used.
        if self._whiten:
            X += self.mean_
        return X
    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.components_.shape[0]
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------
# Fachhochschule Bielefeld
# Ingenieurwissenschaften und Mathematik
# Ingenieurinformatik - Studienarbeit
# Michel Asmus, Marcel Bernauer
# ------------------------------------------------
# project: felix
# servo-class
# ------------------------------------------------
import os
import math
import logging
import sys
logger = logging.getLogger(__name__)
logger.debug('Logging in {0} started.'.format(__name__))
try:
import dynamixel_functions as dynamixel
logger.debug('Imported dynamixel_functions.')
except Exception as e:
logger.critical("Importing dynamixel_functions failed!")
logger.debug(e)
class servo:
    """Driver wrapper for one Dynamixel Pro servo (H42-20-S300-R).

    All instances share a single serial port: the port handle is a class
    attribute (*port_num*) opened once via initialize_port().  Positions
    are expressed in encoder ticks unless stated otherwise.
    """
    # =======================================
    # Public class attributes
    # =======================================
    #TODO: configure debug-structure (servo)
    #TODO: maybe build a dictionary?
    # Control table addresses (Dynamixel Pro register map)
    ADDR_PRO_MAX_POSITION_LIMIT = 36
    ADDR_PRO_MIN_POSITION_LIMIT = 40
    ADDR_PRO_TORQUE_ENABLE = 562
    ADDR_PRO_GOAL_POSITION = 596
    ADDR_PRO_GOAL_TORQUE = 604
    ADDR_PRO_GOAL_VELOCITY = 600
    ADDR_PRO_PRESENT_POSITION = 611
    ADDR_PRO_PRESENT_VELOCITY = 615
    ADDR_PRO_PRESENT_CURRENT = 621
    # Movement values
    TORQUE_ENABLE = 1
    TORQUE_DISABLE = 0
    DXL_MOVING_STATUS_THRESHOLD = 20
    # Protocol version
    PROTOCOL_VERSION = 2
    # Communication values
    COMM_SUCCESS = 0
    COMM_TX_FAIL = -1001
    port_num = -1  # Port handle; set once in 'initialize_port'
    # For Dynamixel H42-20-S300-R
    ticks_per_turn = 303750
    ticks_per_half_turn = ticks_per_turn / 2
    # Set True to get debug-info
    debug = True

    # =======================================
    # Private methods
    # =======================================
    def __init__(self, DXL_ID, BAUDRATE, POS_MIN, POS_MAX, CLOCKWISE, DEVICENAME):
        """Store motor-specific settings.

        Args:
            DXL_ID: int, Dynamixel bus id of this motor.
            BAUDRATE: int, serial baudrate used for the shared port.
            POS_MIN: int, lower position limit in ticks.
            POS_MAX: int, upper position limit in ticks.
            CLOCKWISE: bool, False flips the sign of positions sent/read.
            DEVICENAME: str, serial device path (e.g. '/dev/ttyUSB0').
        """
        #TODO: optimize initialization
        self.ID = DXL_ID
        self.BAUDRATE = BAUDRATE
        self.POS_MIN = POS_MIN
        self.POS_MAX = POS_MAX
        self.CLOCKWISE = CLOCKWISE
        self.DEVICENAME = DEVICENAME

    def _log_comm_result(self):
        """Fetch and log the outcome of the last TxRx transaction.

        Returns:
            bool, True when communication succeeded (a packet error may
            still have been logged), False on a communication failure.
        """
        dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
        dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
        if dxl_comm_result != servo.COMM_SUCCESS:
            logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
            return False
        if dxl_error != 0:
            logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
        return True

    # =======================================
    # Public methods
    # =======================================
    def initialize_port(self):
        """Open the shared serial port and set the baudrate.

        Exits the process on failure (only reported/exited when
        servo.debug is True, matching the original behaviour).
        """
        try:
            servo.port_num = dynamixel.portHandler(self.DEVICENAME)
        except Exception as e:
            logger.critical('Working with dynamixel porthandler failed. Exiting...')
            logger.debug(e)
            quit()
        dynamixel.packetHandler()
        success_open_port = dynamixel.openPort(servo.port_num)
        if servo.debug:
            if success_open_port:
                logger.info("Succeeded to open the port!")
            else:
                logger.critical("Failed to open the port! Exiting...")
                quit()
        if success_open_port:
            success_set_baudrate = dynamixel.setBaudRate(servo.port_num, self.BAUDRATE)
            if servo.debug:
                if success_set_baudrate:
                    logger.info("Succeeded to change the baudrate!")
                else:
                    logger.critical("Failed to change the baudrate! Exiting...")
                    quit()

    def close_port(self):
        """Close communication with the USB-to-Dynamixel adapter."""
        dynamixel.closePort(servo.port_num)

    def enable_torque(self):
        """Activate torque (power consumption for holding position)."""
        dynamixel.write1ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_TORQUE_ENABLE, servo.TORQUE_ENABLE)
        self._log_comm_result()

    def disable_torque(self):
        """Deactivate torque so the motor can be moved by hand."""
        dynamixel.write1ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_TORQUE_ENABLE, servo.TORQUE_DISABLE)
        self._log_comm_result()

    def write_position(self, dxl_goal_position):
        """Move to *dxl_goal_position* (ticks).

        The sign is flipped for counter-clockwise mounted motors before
        transmission; the range warning below therefore checks the
        (possibly flipped) value, as in the original implementation.
        """
        if not self.CLOCKWISE:
            dxl_goal_position = dxl_goal_position * (-1)
        dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_GOAL_POSITION, dxl_goal_position)
        self._log_comm_result()
        # Soft warning only; the servo's own limit registers are the hard net.
        if dxl_goal_position > self.POS_MAX or dxl_goal_position < self.POS_MIN:
            logger.error('Goalposition of Servo {0} out of range!'.format(self.ID))

    def read_present_position(self):
        """Return the present position in ticks (sign-corrected).

        Retries until the read succeeds.  The original implementation
        recursed on failure, which could overflow the stack on a
        persistently bad link; an iterative retry is equivalent.
        """
        while True:
            dxl_present_position = dynamixel.read4ByteTxRx(
                servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                servo.ADDR_PRO_PRESENT_POSITION)
            if self._log_comm_result():
                if not self.CLOCKWISE:
                    dxl_present_position = dxl_present_position * (-1)
                return dxl_present_position

    def write_velocity(self, dxl_goal_velocity):
        """Set the desired velocity of movement."""
        dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_GOAL_VELOCITY, dxl_goal_velocity)
        self._log_comm_result()

    def write_position_limits(self):
        """Write POS_MIN/POS_MAX (ticks) into the servo's limit registers.

        BUGFIX: the success message used to be logged when communication
        FAILED (inverted condition); it is now logged on success only.
        """
        # maximum position
        dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_MAX_POSITION_LIMIT, self.POS_MAX)
        if self._log_comm_result():
            logger.info("successfully changed maximum position")
        # minimum position
        dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
                                 servo.ADDR_PRO_MIN_POSITION_LIMIT, self.POS_MIN)
        if self._log_comm_result():
            logger.info("successfully changed minimum position")

    def tick_to_deg(self, tick):
        """Convert encoder ticks to degrees."""
        return tick * (180 / self.ticks_per_half_turn)

    def deg_to_tick(self, deg):
        """Convert degrees to encoder ticks (truncated toward zero).

        Uses the class constant instead of the former hard-coded 151875,
        keeping it consistent with tick_to_deg().
        """
        return int(float(deg) * (self.ticks_per_half_turn / 180))
|
|
# -*- coding: utf-8 -*-
"""
firebat.agr
~~~~~~~~~~~
Aggregate test results.
"""
import os
import string
import datetime
from BaseHTTPServer import BaseHTTPRequestHandler as rh
import pprint
pp = pprint.PrettyPrinter(indent=4)
import simplejson as json
from simplejson.decoder import JSONDecodeError
# Prefer numpy's mean when available, otherwise fall back to a small
# pure-Python implementation rounded to 2 decimals.
try:
    # BUGFIX: `import numpy.mean as mean` is invalid (numpy.mean is a
    # function, not a module), so the numpy branch could never be taken.
    from numpy import mean
except ImportError:
    # BUGFIX: convert to float BEFORE dividing; this module targets
    # Python 2, where int / int floors and silently truncated the mean.
    mean = lambda n: round(float(sum(n)) / len(n), 2)
from firebat.console.stepper import series_from_schema, schema_format_err
from firebat.console.helpers import exit_err
class PhoutStat(object):
    """Phantom out statistic processor and container.

    Attributes:
        last_epoach: float, last added log line stime stamp.
        def_percentiles: list, default percentiles responce time evaluated by.
        percentiles_serieses: dict, keys - percentiles, values - data serieces.
        resp: dict, key - epoach time stamp, vals - pre-aggregated statistic.
        codes_set: set, uniq HTTP status code from log.
        codes_series: dict, ready to jsonify high chart data series.
        codes_tbl: dict, whole test HTTP status codes statistic.
        errno_set: set, uniq TCP socket errno codes from log.
        errno_series: dict, ready to jsonify high chart data series.
        errno_tbl: dict, whole test TCP socket errno statistic.
        resp_time_vals: list, values for responce time estimate.
        resp_by_times: dict, pre-aggregated according *resp_time_vals* replies.
        responses_num: float, total for whole test responces number.
        http_codes_num: float, total for whole test HTTP status codes number.
        #reply_series: dict, ready to jsonify high chart data series.
        responce_per_second_series: list, rdy .. high chart data series.
        total_tx: float, total for whole test transmited bytes value.
        total_rx: float, total for whole test revived bytes value.
        tx_series: dict, ready to jsonify high chart data series.
        rx_series: dict, ready to jsonify high chart data series.
        rtt_fracts: list, RTT parts provided by Phantom @see
        http://phantom-doc-ru.readthedocs.org/en/latest/analyzing_result_data.html#phout-txt
        rtt_fracts_series: dict, ready to jsonify high chart data series.
    """

    def __init__(self, fire):
        # Time span of the log seen so far (epoch seconds).
        self.first_epoach = None
        self.last_epoach = 0.0
        # Percentiles the response time is evaluated at; counters are
        # totals kept as float so Python 2 division stays non-truncating.
        self.def_percentiles = [100, 99, 98, 95, 90, 85, 80, 75, 50]
        self.percentiles_serieses = {}
        for p in self.def_percentiles:
            self.percentiles_serieses[str(p)] = []
        self.responce_per_second_series = []
        self.resp = {}
        self.codes_set = set()
        self.codes_series = {}
        self.codes_tbl = {}
        self.errno_set = set()
        self.errno_series = {}
        self.errno_tbl = {}
        try:
            #self.time_periods = fire['time_periods']
            self.resp_time_vals = fire['time_periods']
        except KeyError:
            exit_err('Can\'t parse *time_periods* fire attribute, it\'s ' +
                     'necessary!')
        # convert time bounds to milliseconds
        for idx, resp_time_val in enumerate(self.resp_time_vals):
            self.resp_time_vals[idx] = bound_to_ms(str(resp_time_val),
                                                   self.resp_time_vals)
        self.resp_time_vals.sort()
        # Per-time-bucket counters for the "replies by time interval" table.
        self.resp_by_times = {k: {'num': 0, 'percentil': 0} for k in
                              self.resp_time_vals}
        self.responses_num = 0.0
        self.http_codes_num = 0.0
        self.total_tx = 0.0
        self.total_rx = 0.0
        self.tx_series = {'name': 'tx', 'data': [], }
        self.rx_series = {'name': 'rx', 'data': [], }
        # RTT fractions reported by Phantom, see class docstring link.
        self.rtt_fracts = ['con_ms', 'send_ms', 'proc_ms', 'resp_ms']
        self.rtt_fracts_series = {}
        for part in self.rtt_fracts:
            self.rtt_fracts_series[part] = {'name': part, 'data': [], }

    def add_resp(self, line, is_interactive=False):
        '''Process regular log line.

        Args:
            is_interactive: bool, is it running test stats or post facto proc.
            line: str - cvs contains fields:
                * epoch: int, time stamp.
                * rtt: int, request round trip time.
                * http_status: int, responce HTTP status code.
                * errno: str, errno code from TCP socket.
                * req_byte: int, request size in bytes.
                * resp_byte: int, responce size in bytes.
                * con_ms: float, TCP connection establishing time in milliseconds.
                * send_ms: float, request sending time in milliseconds.
                * proc_ms: float, awaiting responce time in milliseconds.
                * resp_ms: float, getting responce time in milliseconds.

        Returns:
            nothing, just update obj attributes
        '''
        l_spltd = line.split()
        # in phantom v.14 line have from 11 to 12 fields, @see:
        # http://phantom-doc-ru.rtfd.org/en/latest/analyzing_result_data.html
        if len(l_spltd) == 12:
            epoch, tag, rtt, con_mcs, send_mcs, proc_mcs, resp_mcs, phantom_exec, \
                req_byte, resp_byte, errno, http_status = l_spltd
        elif len(l_spltd) == 11:
            # No tag field in this line.
            epoch, rtt, con_mcs, send_mcs, proc_mcs, resp_mcs, phantom_exec, \
                req_byte, resp_byte, errno, http_status = l_spltd
        else:
            # Malformed line: silently skipped.
            return None
        epoch = int(epoch.split('.')[0])  # cut out fractional part of epoach
        if epoch != self.last_epoach:
            self.last_epoach = epoch
            # NOTE(review): *is_second_pass* is set here but never
            # initialized in __init__ nor read anywhere in this class --
            # looks vestigial; confirm before relying on it.
            self.is_second_pass = True
        if self.first_epoach == None:
            self.first_epoach = epoch
        # Normalize units: Phantom reports microseconds.
        rtt_ms = int(rtt) / 1000
        http_status = int(http_status)
        req_byte = int(req_byte)
        resp_byte = int(resp_byte)
        con_ms = float(con_mcs) / 1000
        send_ms = float(send_mcs) / 1000
        proc_ms = float(proc_mcs) / 1000
        resp_ms = float(resp_mcs) / 1000
        self.responses_num += 1
        # find out, what time interval current RTT belong to
        resp_time_intervals = self.resp_time_vals[:]
        resp_time_intervals.append(rtt_ms)
        resp_time_intervals.sort()
        idx = resp_time_intervals.index(rtt_ms)  # interval num of current RTT
        try:
            self.resp_by_times[resp_time_intervals[idx + 1]]['num'] += 1
        except IndexError:
            # RTT contains phantom working costs, so total RTT
            # can be bigger than maximum of *time_periods*.
            if idx == len(resp_time_intervals) - 1:
                self.resp_by_times[self.resp_time_vals[-1]]['num'] += 1
            else:
                exit_err('Buggy indx: %s\nperiods: %s in resp' % (idx, resp_time_intervals))
        # Per-epoch bucket; created lazily on first line of the second.
        try:
            self.resp[epoch]['rtt'].append(rtt_ms)
        except KeyError:
            self.resp[epoch] = {
                'percentiles': [],
                'rtt': [],
                'rps': 0,
                'codes': {},
                'errno': {},
            }
            self.resp[epoch]['rtt'].append(rtt_ms)
        if http_status != 0:  # 0 mean transport layer error.
            self.http_codes_num += 1
            # HTTP status codes processing for each req
            try:
                self.resp[epoch]['codes'][http_status] += 1
            except KeyError:
                self.resp[epoch]['codes'][http_status] = 1
                self.codes_set.update([http_status])
            # for all test
            try:
                self.codes_tbl[http_status]['num'] += 1
            except KeyError:
                self.codes_tbl[http_status] = {'num': 1}
        # Socket errno processing for each req
        try:
            self.resp[epoch]['errno'][errno] += 1
        except KeyError:
            self.resp[epoch]['errno'][errno] = 1
            self.errno_set.update([errno, ])
        # for all test
        try:
            self.errno_tbl[errno]['num'] += 1
        except KeyError:
            self.errno_tbl[errno] = {'num': 1}
        # Tx/Rx bytes processing for each req
        try:
            self.resp[epoch]['tx'] += req_byte
            self.resp[epoch]['rx'] += resp_byte
        except KeyError:
            self.resp[epoch]['tx'] = req_byte
            self.resp[epoch]['rx'] = resp_byte
        # rtt fractions for each req; vars() picks the locals con_ms,
        # send_ms, proc_ms, resp_ms computed above by name.
        if not 'rtt_fract' in self.resp[epoch]:
            self.resp[epoch]['rtt_fract'] = {}
        for part in self.rtt_fracts:
            try:
                self.resp[epoch]['rtt_fract'][part].append(vars()[part])
            except KeyError:
                self.resp[epoch]['rtt_fract'][part] = [vars()[part], ]
        if is_interactive:
            self.sum_up()

    #def calc_percentiles(self, scrend_out_stmps=None):
    #def sum_up(self, filtered_stmps=None, realtime=True):
    def sum_up(self, filtered_stmps=None):
        '''Aggregate added responces data.
            * resp time percentiles
            * HTTP status codes
            * Errno codes

        Args:
            filtered_stmps: time stamps will be used in charts.

        Returns:
            nothing, just update obj attributes
        '''
        # agregation pre requirements
        for c in self.codes_set:
            self.codes_series[c] = []
        for e in self.errno_set:
            self.errno_series[e] = []
        for epoch, r in self.resp.iteritems():
            # filter time stamps if necessary.
            if (not filtered_stmps) or (epoch in filtered_stmps):
                # responce time calc
                r['rtt'].sort()
                r['replies_num'] = len(r['rtt'])
                for p in self.def_percentiles:
                    if p == 100:
                        elem_no = -1
                    else:
                        elem_no = int(r['replies_num'] * (p / 100.0))
                    resp_time = r['rtt'][elem_no]
                    # convers resp_time from microseconds to milliseconds
                    self.percentiles_serieses[str(p)].append((epoch, resp_time / 1000))
                self.responce_per_second_series.append((epoch, r['rps']))
                # status codes
                for c in self.codes_set:
                    val = r['codes'].get(c, 0)
                    self.codes_series[c].append((epoch, int(val)))
                # errno
                for e in self.errno_set:
                    try:
                        self.errno_series[e].append((epoch,
                                                     r['errno'].get(e, 0)))
                    except KeyError:
                        self.errno_series[e] = [(epoch, r['errno'].get(e, 0))]
                # tx/rx
                self.tx_series['data'].append((epoch, r['tx']))
                self.rx_series['data'].append((epoch, r['rx']))
                self.total_tx += r['tx']
                self.total_rx += r['rx']
                # rtt parts
                for part in self.rtt_fracts:
                    mean_val = mean(r['rtt_fract'][part])
                    self.rtt_fracts_series[part]['data'].append((epoch,
                                                                 mean_val))

    def tbls_as_dict(self):
        '''Represent tables data as dict.

        Returns:
            result: dict.
        '''
        result = {}
        self.calc_time_period_tbl()
        result['resp_by_times'] = self.resp_by_times
        self.calc_errno_tbl()
        # Attach the human readable strerror message to each errno code.
        for code, value in self.errno_tbl.iteritems():
            value['msg'] = os.strerror(int(code))
        result['errno_tbl'] = self.errno_tbl
        self.calc_codes_tbl()
        # Attach the HTTP reason phrase (may be absent for exotic codes).
        for code, value in self.codes_tbl.iteritems():
            value['msg'] = rh.responses.get(int(code), None)
            if value['msg']:
                value['msg'] = value['msg'][0]
        result['codes_tbl'] = self.codes_tbl
        return result

    def hcds_as_dict(self):
        '''Represent multiple highchart data seriess as dict.

        Returns:
            result: dict.
        '''
        # NOTE(review): the 'errno' key calls get_status_codes_hcds();
        # get_errno_hcds() was probably intended -- confirm with the
        # consumer of this payload.
        result = {
            'errno': self.get_status_codes_hcds(),
            'codes': self.get_status_codes_hcds(),
            'resp_perc': self.get_resp_perc_hcds(),
        }
        return result

    def get_resp_perc_hcds(self):
        '''Make highcharts data series for resp time percentiles chart.

        Returns:
            result: list of dicts
        '''
        resp_perc = []
        # Highest percentile first; keys are stringified ints.
        for key in sorted(self.percentiles_serieses.iterkeys(), key=lambda key: int(key),
                          reverse=True):
            name = key
            resp_perc.append({
                'name': name,
                'data': self.percentiles_serieses[key],
            })
        return resp_perc

    def get_errno_hcds(self):
        '''Make highcharts data series for errno chart.

        Returns:
            result: list of dicts
        '''
        result = []
        for errno_name, series in self.errno_series.iteritems():
            result.append({
                'name': errno_name,
                'data': series,
            })
        return result

    def get_status_codes_hcds(self):
        '''Make highcharts data series for HTTP status codes chart.

        Returns:
            result: list of dicts
        '''
        status_codes_series = []
        for key, val in self.codes_series.iteritems():
            status_codes_series.append({
                'name': key,
                'data': val,
            })
        return status_codes_series

    def calc_time_period_tbl(self):
        # time for period table: per-bucket share plus the cumulative
        # share ('perc_above') of all faster buckets.
        prev = {
            'val': 0,
            'key': 0,
        }
        for key in sorted(self.resp_by_times.iterkeys()):
            self.resp_by_times[key]['percentil'] = round(
                (self.resp_by_times[key]['num'] / self.responses_num) * 100, 2)
            self.resp_by_times[key]['btw'] = '%s -- %s' % (prev['key'], key)
            prev['val'] = round(self.resp_by_times[key]['percentil'] +
                                prev['val'], 2)
            self.resp_by_times[key]['perc_above'] = prev['val']
            prev['key'] = key

    def calc_codes_tbl(self):
        # HTTP status codes table: share of each code among HTTP replies.
        for idx, val in self.codes_tbl.iteritems():
            val['percentil'] = round((val['num'] / self.http_codes_num) * 100,
                                     2)

    def calc_errno_tbl(self):
        # Errno table: share of each errno among all responses.
        for idx, val in self.errno_tbl.iteritems():
            val['percentil'] = round((val['num'] / self.responses_num) * 100,
                                     2)

    # Dead draft kept from the original source:
    #def get_resp_perc_hcds(self):
    #    '''Make highcharts data series for resp time percentiles chart.
    #    Returns:
    #        result: list of dicts
    #    '''
    #    resp_perc = []
    #    #self.series['1'] = self.series.pop('rps')  # to sort dict keys as ints
    #    for key in sorted(self.responce_per_second_series.iterkeys(),
    #                      key=lambda key: int(key), reverse=True):
    #        name = key
    #        resp_perc.append({
    #            'name': name,
    #            'data': self.series[key],
    #        })
    #    return resp_perc
def get_fire(json_path='.fire_up.json'):
    '''Read JSON encoded file with fire dict inside.

    Args:
        json_path: file path

    Returns:
        fire: dict, describes fire(job) options.
    '''
    try:
        with open(json_path, 'r') as fh:
            raw = fh.read()
        return json.loads(raw)
    except IOError as e:
        exit_err('Could not read "%s": %s\n' % (json_path, e))
    except JSONDecodeError as e:
        exit_err('Could not parse fire config file: %s\n%s' % (json_path, e))
def validate_bound(bound):
    '''Check conformity of bound short notation.

    Args:
        bound: str with declared time bound in short notation,
            e.g. "300", "10s" or "2m".

    Returns:
        bool, True if short notation is valid (digits plus 's'/'m' only).
    '''
    # The previous implementation relied on Python-2-only
    # string.maketrans('', '') / two-argument str.translate; a plain
    # membership test is equivalent and version-independent.
    allowed = set(string.digits + 'sm')
    return all(c in allowed for c in bound)
def bound_to_ms(bound, time_periods):
    '''Transfer bound from short notation to milliseconds.

    Args:
        bound: str with declare time bound in short notation
        time_periods: the whole periods list, reported on format errors.

    Returns:
        int, time bound in milliseconds
    '''
    if not validate_bound(bound):
        schema_format_err(time_periods, msg=', Time periods malformed')
    # 's' suffix = seconds, 'm' suffix = minutes, bare number = ms.
    if bound.endswith('m'):
        return int(bound.rstrip('m')) * 60 * 10 ** 3
    if bound.endswith('s'):
        return int(bound.rstrip('s')) * 10 ** 3
    return int(bound)
def calc_expected_rps(fire, started_at=None):
    '''Calculate theoretical request per second sequinse.

    Args:
        fire: dict, current fire(job) options.
        started_at: fallback start time stamp used when *fire* carries
            no 'started_at' attribute.

    Returns:
        result: list of tuples.
    '''
    result = []
    offset = fire.get('started_at', None)
    # BUGFIX: the previous `if not offset and started_at: ... else:
    # exit_err(...)` called exit_err() precisely when the fire dict DID
    # contain 'started_at'.  The fire's own value takes precedence; the
    # argument is only a fallback, and we bail out when neither exists.
    if not offset:
        offset = started_at
    if not offset:
        exit_err('Can\'t parse fire *started_at* attribute, config malformed.')
    offset = int(offset)
    for schema in fire['load']:
        series = series_from_schema(schema, offset)
        result.extend(series)
        # Next schema starts one second after the previous one ends.
        offset = series[-1][0] + 1
    return result
def output_data(stat, calc_load_series, series_path='data_series.js'):
    ''' Write JS file with charts data series inside.

    Args:
        stat: obj, phout_stat instance.
        calc_load_series: list, theoretical rps sequinse.
        series_path: result file path.

    Returns:
        nothing, just write the file.
    '''
    with open(series_path, 'w+') as ds_fh:
        ds_fh.write('rps_series = ' + json.dumps(calc_load_series) + ';\n')
        ds_fh.write('resp_percentiles_series = ' +
                    json.dumps(stat.get_resp_perc_hcds()) + ';\n')
        ds_fh.write('status_codes_series = ' +
                    json.dumps(stat.get_status_codes_hcds()) + ';\n')
        ds_fh.write('errno_series = ' +
                    json.dumps(stat.get_errno_hcds()) + ';\n')
    with open(series_path.replace('.js', '1.js'), 'w+') as ds1_fh:
        ds1_fh.write('rps_series = ' + json.dumps(calc_load_series) + ';\n')
        # BUGFIX: PhoutStat defines no *reply_series* attribute (it only
        # appears commented out in the class docstring), so this line used
        # to raise AttributeError; fall back to the per-second replies
        # series while still honouring a reply_series if one is ever set.
        reply_series = getattr(stat, 'reply_series',
                               stat.responce_per_second_series)
        ds1_fh.write('reply_series = ' + json.dumps(reply_series) + ';\n')
        ds1_fh.write('tx_series = ' + json.dumps(stat.tx_series) + ';\n')
        ds1_fh.write('rx_series = ' + json.dumps(stat.rx_series) + ';\n')
        # rtt parts
        for part in stat.rtt_fracts:
            ds1_fh.write('%s_series = ' % part +
                         json.dumps(stat.rtt_fracts_series[part]) + ';\n')
def get_pages_context(stat, fire):
    ''' Create dict with data to render Web pages templates.

    Args:
        stat: phout_stat class instance.
        fire: dict, fire data.

    Returns:
        ctx: dict, result dict used by Jinja2 template engine.
    '''
    ctx = {}
    ctx['tgt_addr'] = fire.get('addr')
    ctx['src_host'] = fire.get('src_host')
    ctx['load'] = fire['load']
    ctx['tags'] = fire.get('tag')
    # Human readable test time span, derived from fire start and the
    # last log stamp seen by the stat object.
    started_at = datetime.datetime.fromtimestamp(float(fire['started_at']))
    ended_at = datetime.datetime.fromtimestamp(stat.last_epoach)
    ctx['date'] = started_at.strftime('%d %B %Y')
    ctx['from'] = started_at.strftime('%H:%M:%S')
    ctx['to'] = ended_at.strftime('%H:%M:%S')
    ctx['duration'] = str(ended_at - started_at)
    if fire.get('owner') == 'uid':
        ctx['owner'] = fire.get('uid')
    else:
        ctx['owner'] = fire.get('owner')
    # TODO: add to daemon fire update func
    # NOTE(review): duplicate of the 'src_host' assignment above.
    ctx['src_host'] = fire.get('src_host')
    stat.calc_time_period_tbl()
    # NOTE(review): PhoutStat defines no *boundaries* attribute (the
    # aggregated table lives in *resp_by_times*) -- this line likely
    # raises AttributeError on the current class; confirm which stat
    # implementation this legacy path expects.
    ctx['boundaries'] = stat.boundaries
    stat.calc_errno_tbl()
    for code, value in stat.errno_tbl.iteritems():
        value['msg'] = os.strerror(int(code))
    ctx['errno_tbl'] = stat.errno_tbl
    stat.calc_codes_tbl()
    for code, value in stat.codes_tbl.iteritems():
        value['msg'] = rh.responses.get(int(code), None)
        if value['msg']:
            value['msg'] = value['msg'][0]
    ctx['codes_tbl'] = stat.codes_tbl
    return ctx
# NOTE(review): this function looks like a dead legacy entry point:
# it instantiates lowercase *phout_stat* (the class is PhoutStat), calls
# add_resp() with a positional signature the current class no longer has,
# and uses calc_percentiles() / get_calc_load_series(), neither of which
# is defined in this module.  proc_whole_phout() below is the live path.
def process_phout(phout_fh, points_num=200, dst_file='data_series.js',
                  fire_path='.fire_up.json'):
    ''' Read phout fire log, aggregate data, create charts data series.

    Args:
        phout_fh: File object with log data.

    Returns:
        static Web app on file system.
    '''
    fire = get_fire(json_path=fire_path)
    calc_load_series = {  # rps calculated data series
        'name': 'rps',
        'data': get_calc_load_series(fire),
    }
    # get only some points according to points_num value
    if points_num < len(calc_load_series['data']):
        step_size = int(len(calc_load_series['data']) / points_num)
        calc_load_series['data'] = calc_load_series['data'][0::step_size]
    scrend_out_stmps = [el[0] for el in calc_load_series['data']]
    p_stat = phout_stat(fire)
    current_epoch = 0
    for l in phout_fh:
        l_spltd = l.split()
        # in phantom v.14 line have 12 fields, @see:
        # http://phantom-doc-ru.rtfd.org/en/latest/analyzing_result_data.html
        if len(l_spltd) == 11:  # No tag may be paserd in different ways
            l_spltd.insert(1, None)
        if len(l_spltd) != 12:
            print 'Malformed line in phout file: %s' % l
            print l_spltd
        epoch, tag, rtt, con_mcs, send_mcs, proc_mcs, resp_mcs, phantom_exec, \
            req_byte, resp_byte, errno, http_status = l_spltd
        epoch = int(epoch.split('.')[0])  # cut out fractional part of epoach
        if epoch > current_epoch:
            if current_epoch == 0:
                p_stat.first_epoach = epoch
            current_epoch = epoch
            p_stat.last_epoach = epoch
        p_stat.add_resp(int(current_epoch), int(rtt), int(http_status), errno,
                        int(req_byte), int(resp_byte), float(con_mcs) / 1000,
                        float(send_mcs) / 1000, float(proc_mcs) / 1000,
                        float(resp_mcs) / 1000)
    # all phout lines parsed, time to aggregate data to expected metrics
    p_stat.calc_percentiles(scrend_out_stmps)
    output_data(p_stat, calc_load_series, series_path=dst_file)
    return get_pages_context(p_stat, fire)
def proc_whole_phout(fire, points_num=600, oc=None):
    '''Aggregate a complete phout.txt log for *fire* and, when an API
    client *oc* is given, push the aggregated tables and chart series.

    Args:
        fire: dict, fire(job) options; must contain 'wd' and 'load'.
        points_num: int, maximum number of time stamps kept for charts.
        oc: optional API client exposing push_fire_updates().

    Returns:
        (result, msg) tuple from the API push, or a stub when no client.
    '''
    phout_path = '/'.join((fire['wd'], 'phout.txt'))
    stat = PhoutStat(fire)
    with open(phout_path, 'r') as log_fh:
        for log_line in log_fh:
            stat.add_resp(log_line, is_interactive=False)
    # Theoretical rps series; its stamps also decide which points survive
    # the thinning below.
    expected_rps = {
        'name': 'expected_rps',
        'data': calc_expected_rps(fire, started_at=stat.first_epoach),
    }
    # Keep at most *points_num* evenly spaced stamps.
    total_points = len(expected_rps['data'])
    if points_num < total_points:
        step = int(total_points / points_num)
        expected_rps['data'] = expected_rps['data'][::step]
    stat.sum_up(filtered_stmps=[point[0] for point in expected_rps['data']])
    if not oc:
        return True, 'API client missing'
    fire_diff = {
        'id': fire['id'],
        'result': {
            'tbls': stat.tbls_as_dict(),
            'hcds': stat.hcds_as_dict(),
        },
    }
    return oc.push_fire_updates(fire_diff)
|
|
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from weakref import WeakKeyDictionary
from functools import partial
from itertools import count
import ctypes
import numpy as np
import jax
from jax.lib import xla_client as xc
from jax.interpreters import xla
from jax.interpreters import batching
from ... import Atom
from ...native_function import IdxRepTy, ScalarType, RectContArrayType
from ... import api
def primitive(f):
    """Turn a Dex function atom into a callable that binds dex_apply_p."""
    if isinstance(f, Atom):
        return partial(dex_apply_p.bind, func_atom=f)
    raise TypeError("DexPrimitive expects a function atom as an argument")
# Per-atom cache of compiled natives; weak keys let atoms be collected.
compiler_cache = WeakKeyDictionary()

def get_compiled(func_atom):
    """Compile *func_atom* at most once and memoize the result."""
    native = compiler_cache.get(func_atom)
    if native is None:
        native = func_atom.compile()
        compiler_cache[func_atom] = native
    return native
# The JAX primitive through which Dex functions enter tracing.
dex_apply_p = jax.core.Primitive('dex_apply')

@dex_apply_p.def_impl
def dex_call_impl(*args, func_atom):
    """Eager path: fetch the (cached) compiled native and run it."""
    compiled = get_compiled(func_atom)
    return compiled(*args)
# === abstract evaluation / shape inference ===

def dex_call_abstract_eval_with_shape(*args, func_atom):
    """Infer the result aval of a Dex application plus its shape-variable
    bindings.

    Args:
        *args: jax.core.ShapedArray abstract values, one per explicit
            argument of *func_atom*.
        func_atom: the Dex function atom being applied.

    Returns:
        (aval, shape_vars): the single result ShapedArray and a dict
        mapping Dex shape-variable names to concrete sizes inferred
        from *args*.

    Raises:
        RuntimeError: on arity, dtype, rank or shape mismatches.
    """
    # TODO: Make it possible to get the signature without compiling the function
    native_func = get_compiled(func_atom)
    arg_sig = native_func.explicit_argument_signature
    res_sig = native_func.result_signature
    if len(args) != len(arg_sig):
        raise RuntimeError(f"Dex function expects {len(arg_sig)} arguments, but was given {len(args)}")
    if not all(isinstance(arg, jax.core.ShapedArray) for arg in args):
        raise RuntimeError("Cannot perform evaluation of Dex functions without known shapes")
    # Check arguments and infer shape parameters
    shape_vars = {}
    for i, (arg, b) in enumerate(zip(args, arg_sig)):
        expected_dtype = np.dtype(b.type.ctype)
        if arg.dtype != expected_dtype:
            raise RuntimeError(f"dtype mismatch in arg {i}: expected {expected_dtype}, got {arg.dtype}")
        if isinstance(b.type, ScalarType):
            expected_shape = ()
        elif isinstance(b.type, RectContArrayType):
            expected_shape = b.type.shape
        else:
            raise AssertionError("Unhandled case!")
        if len(arg.shape) != len(expected_shape):
            raise RuntimeError(f"rank mismatch in arg {i}: expected {len(expected_shape)}, got {len(arg.shape)}")
        # Symbolic sizes bind on first use (setdefault); later occurrences
        # must agree with the recorded size.
        inferred_shape = tuple(
            size if isinstance(size, int) else shape_vars.setdefault(size, real_size)
            for size, real_size in zip(expected_shape, arg.shape))
        if arg.shape != inferred_shape:
            raise RuntimeError(f"shape mismatch in arg {i}: expected {inferred_shape}, got {arg.shape}")
    # Infer result types
    result_avals = []
    for b in res_sig:
        dtype = np.dtype(b.type.ctype)
        if isinstance(b.type, ScalarType):
            shape = ()
        elif isinstance(b.type, RectContArrayType):
            shape = tuple(shape_vars.get(size, size) for size in b.type.shape)
        else:
            # BUGFIX: an unhandled binder type used to fall through and
            # silently reuse *shape* from the previous iteration (or hit
            # NameError on the first one).  Fail loudly, mirroring the
            # argument loop above.
            raise AssertionError("Unhandled case!")
        result_avals.append(jax.core.ShapedArray(shape, dtype))
    assert len(result_avals) == 1  # TODO: Make dex_call a multiple_results primitive
    return result_avals[0], shape_vars
@dex_apply_p.def_abstract_eval
def dex_call_abstract_eval(*args, **kwargs):
    """Abstract eval rule: only the result aval, shape bindings dropped."""
    aval, _shape_vars = dex_call_abstract_eval_with_shape(*args, **kwargs)
    return aval
# === xla translation ===

# Minimal ctypes bridge to CPython's PyCapsule C API: XLA expects custom
# call targets to be handed over as PyCapsule objects whose name is
# "xla._CUSTOM_CALL_TARGET".
PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.py_object)
PyCapsule_New = ctypes.pythonapi.PyCapsule_New
PyCapsule_New.restype = ctypes.py_object
PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor)

def make_custom_call_target(func_ptr):
    """Wrap a raw function pointer in a PyCapsule XLA will accept."""
    # NULL destructor: the capsule does not own the function pointer.
    return PyCapsule_New(func_ptr, b"xla._CUSTOM_CALL_TARGET", PyCapsule_Destructor(0))
# TODO: Better lifetime management. func_atoms will be quite often created on the fly
# at trace time when different transforms are applied, and I'm pretty sure that
# the XLA executables outlive jaxprs formed by tracing.
custom_call_id = count()    # fresh integer suffix per registered custom call
custom_call_cache = {}      # func_atom -> (custom_call_name, trampoline keep-alive)

def dex_call_cpu_translation(b, *args, func_atom):
    """XLA CPU lowering: emit a CustomCall that trampolines into the
    compiled Dex native for *func_atom*, registering the trampoline with
    XLA on first use.
    """
    xla_shapes = list(map(b.get_shape, args))
    # Re-run shape inference on the concrete operand shapes to get the
    # result aval and the shape-variable bindings.
    result_aval, shape_vars = dex_call_abstract_eval_with_shape(
        *(jax.core.ShapedArray(xshape.dimensions(), xshape.numpy_dtype())
          for xshape in xla_shapes),
        func_atom=func_atom)
    result_xshape = xc.Shape.array_shape(result_aval.dtype, result_aval.shape)
    custom_call = custom_call_cache.get(func_atom, None)
    native = get_compiled(func_atom)
    if custom_call is None:
        assert len(args) == len(native.explicit_argument_signature)
        assert 1 == len(native.result_signature)
        # C signature XLA invokes: (result buffer, array of operand buffers).
        custom_call_ctype = ctypes.CFUNCTYPE(None,
                                             ctypes.c_void_p,
                                             ctypes.POINTER(ctypes.c_void_p * len(args)))
        @custom_call_ctype
        def trampoline(result_ptr, arg_ptr_array):
            # NOTE(review): *shape_vars* is captured from the trace that
            # first registered this custom call; a cached entry reused
            # with different dynamic shapes would replay the old
            # bindings -- confirm this cannot happen.
            name_to_cval = {name: IdxRepTy(value) for name, value in shape_vars.items()}
            # Cast each raw XLA buffer to the ctype the Dex native expects.
            for binder, ptr in zip(native.explicit_argument_signature, arg_ptr_array.contents):
                if isinstance(binder.type, ScalarType):
                    cval = ctypes.cast(ptr, ctypes.POINTER(binder.type.arg_ctype)).contents
                elif isinstance(binder.type, RectContArrayType):
                    cval = ctypes.cast(ptr, binder.type.arg_ctype)
                else:
                    raise AssertionError("Unexpected binder type")
                name_to_cval[binder.name] = cval
            result_binder = native.result_signature[0]
            name_to_cval[result_binder.name] = ctypes.cast(result_ptr, result_binder.type.ref_ctype)
            native.callable(*(name_to_cval[name] for name in native.ccall_signature))
        trampoline_addr = ctypes.c_void_p.from_param(trampoline)
        custom_call_name = f"dex_custom_call{next(custom_call_id)}".encode('ascii')
        xc.register_custom_call_target(custom_call_name,
                                       make_custom_call_target(trampoline_addr))
        # Caching the trampoline also keeps the ctypes closure alive so
        # the function pointer handed to XLA stays valid.
        custom_call_cache[func_atom] = (custom_call_name, trampoline)
        # TODO: Unregister custom calls at some point?
    else:
        custom_call_name, *_ = custom_call
    return xc.ops.CustomCall(b, custom_call_name, operands=args, shape=result_xshape)

jax.interpreters.xla.backend_specific_translations['cpu'][dex_apply_p] = dex_call_cpu_translation
# === batching ===

def dex_call_batched(batched_args, batched_dims, func_atom):
    """Batching function for dex primitives.

    Args:
      batched_args: The possibly-batched arguments.
      batched_dims: A sequence of the same length as `batched_args`, where each
        entry indicates the batching axis of the corresponding entry to `args`,
        or None if that argument should not be batched. Not all entries can be
        None.

    Returns:
      2-tuple containing the result of the batched function, and the result axis
      which was batched, which is always zero.
    """
    module = func_atom.module.copy()
    # Move axes so that we only have to deal with the zero axis being batched.
    uniform_batched_args = [
        batching.moveaxis(arg, bd, 0) if bd is not batching.not_mapped else arg
        for arg, bd in zip(batched_args, batched_dims)
    ]
    # This assumes not all entries in batched_dims are None.
    batch_size = next(
        arg.shape[0] for arg, bd in zip(uniform_batched_args, batched_dims)
        if bd is not batching.not_mapped)
    # Add the current function atom as a variable in the context, so that we can
    # use it to apply batching.
    func_name = func_atom.name
    assert func_name is not None
    # Only index into the arguments which are batched. `i` is the index used for
    # the Dex for loop constructor.
    batched_fn_params = [
        f"x{param_idx}" if dim is batching.not_mapped else f"x{param_idx}.i"
        for param_idx, dim in enumerate(batched_dims)
    ]
    # This is the actual batching expression: a Dex lambda over the
    # original arguments whose body is a `for` loop of *batch_size*
    # applications of the (named) original function atom.
    batched_fn = module.eval(
        r"\ " + " ".join(f"x{i}" for i in range(len(batched_args))) + ". "
        + f"for i:(Fin {batch_size}). {func_name} "
        + " ".join(batched_fn_params))
    # Wrap the batched atom back into a JAX-callable primitive; the
    # batched axis of the result is always axis 0 by construction.
    return primitive(batched_fn)(*uniform_batched_args), 0

batching.primitive_batchers[dex_apply_p] = dex_call_batched
# === jvp / linearize ===
def dex_call_jvp(arg_values, arg_tangents, func_atom):
  """Evaluates the function output at arg_values, and the linearized function
  (linearized about arg_values) at arg_tangents.

  Args:
    arg_values: A tuple of arguments.
    arg_tangents: A tuple with the tangents of the arguments. The tuple has the
      same length as the arg_values. Some of the tangents may also be the
      special value ad.Zero to specify a zero tangent.
    func_atom: Function atom to linearize. The result type of this function
      atom must be a single array type.

  Returns:
    A pair of the primal output and the tangent.
  """
  # The JVP construction below only supports a single (array) result.
  assert len(func_atom.compile().result_signature) == 1
  num_args = len(arg_values)
  module = func_atom.module.copy()

  # Helper functions to build strings of primal and tangent inputs.
  def arg_string(prefix):
    return " ".join(f"{prefix}{i}" for i in range(num_args))

  def tuple_string(prefix):
    return "(" + ", ".join(f"{prefix}{i}" for i in range(num_args)) + ")"

  # Add the current function atom as a variable in the context, so that we can
  # use it to apply batching.
  jax_func_name = func_atom.name
  assert jax_func_name is not None
  # `linearize` only seems to work properly for functions which take a single
  # input argument, so we uncurry `func_atom` to make it into this form. The
  # evaluated string for three function arguments should look like:
  # ```
  # \ (x0, x1, x2). jax_func x0 x1 x2
  # ```
  uncurried = module.eval(
      f"\\ {tuple_string('x')}. {jax_func_name} {arg_string('x')}")
  jax_func_uncurried_name = uncurried.name
  assert jax_func_uncurried_name is not None
  # We create separate primitives for the primal and tangent evaluations, since
  # we only want to apply tranposition to the tangent evaluation function.
  #
  # Here we write out the tangent evaluation expression in pointful style.
  # The evaluated string for three function arguments should look like:
  # ```
  # \ x0 x1 x2 u0 u1 u2.
  #   linearized = linearize jax_func_uncurried (x0, x1, x2)
  #   snd linearized (u0 u1 u2)
  # ```
  evaluate_linearized = module.eval(
      f"\\ {arg_string('x')} {arg_string('u')}." +
      f"\n linearized = linearize {jax_func_uncurried_name} {tuple_string('x')}" +
      f"\n snd linearized {tuple_string('u')}")
  # Materialize jax.ad.Zero values into actual arrays of zeros.
  # TODO: Make the handling of Zeros more efficient by omitting them from the
  # linearize expression. This would avoid having to create these zero
  # arguments, although it might make constructing the transpose expression
  # more fiddly.
  tangents_no_zeros = [
      jax.lax.zeros_like_array(arg) if type(tan) is jax.ad.Zero else tan
      for arg, tan in zip(arg_values, arg_tangents)
  ]
  # Primal is a plain application of `func_atom`; tangent applies the
  # linearized atom to primals followed by (materialized) tangents.
  return (
      primitive(func_atom)(*arg_values),
      primitive(evaluate_linearized)(*arg_values, *tangents_no_zeros),
  )


jax.interpreters.ad.primitive_jvps[dex_apply_p] = dex_call_jvp
# === transpose ===

# Alias to avoid confusion around the overloading of "primal": JAX marks the
# inputs we must transpose with respect to as "undefined primals".
_is_linear_input = jax.ad.is_undefined_primal


def dex_call_evaluate_linearized_transpose(cotangents, *args, func_atom):
  """Evaluates the transpose of a linearized function atom.

  Args:
    cotangents: Cotangent value(s) for the output of `func_atom`.
    *args: `num_primals` primal inputs followed by `num_primals` tangent
      inputs. Tangents we must transpose w.r.t. are `UndefinedPrimal`s.
    func_atom: The `evaluate_linearized` atom produced in `dex_call_jvp`.

  Returns:
    A tuple of the same length as `args`, containing the cotangent for each
    linear tangent input and None for every primal/constant input.
  """
  # `func_atom` is assumed to be of the form of `evaluate_linearized` from
  # `dex_call_jvp`, applied to a some function atom, called `f`, say.
  # Concretely, if `f` has three primal arguments, `func_atom` should look like:
  # ```
  # \ x0 x1 x2 u0 u1 u2.
  #   intermediate_linearized = linearize f (x0, x1, x2)
  #   snd intermediate_linearized (u0 u1 u2)
  # ```
  # In particular, its arguments are assumed to be `num_primals` primal inputs,
  # followed by `num_primals` tangent inputs.
  assert len(args) % 2 == 0
  num_primals = len(args) // 2
  module = func_atom.module.copy()
  primals, tangents = args[:num_primals], args[num_primals:]

  # Helper functions to build strings of primal and tangent inputs.
  def arg_string(prefix, index_set):
    return " ".join(f"{prefix}{i}" for i in index_set)

  def tuple_string(prefix, index_set):
    return "(" + ", ".join(f"{prefix}{i}" for i in index_set) + ")"

  # JAX uses `UndefinedPrimal` instances to mark input variables which the
  # function needs to be transposed with respect to, and (consequently) for
  # which no concrete values are available. `_is_linear_input` tests if the
  # input is such an instance.
  #
  # `func_atom` is only guaranteed to be linear in its tangent inputs, so we
  # check here that we're not expected to transpose it with respect to any
  # primal inputs. JAX *should* take care of this automatically, but this
  # mechanism is somewhat poorly documented so its worth double checking.
  if any(_is_linear_input(p) for p in primals):
    raise RuntimeError("Primal inputs to transpose primitive are undefined.")
  # Add `func_atom` as a variable `linearized` in the context.
  linearized_name = func_atom.name
  assert linearized_name is not None
  # Form lists of the indices in `tangents` which correspond to linear inputs
  # (which we are expected to transpose w.r.t.) and constant inputs (which we
  # are not). The constant inputs will be exactly the arrays of zeros which are
  # instantiated in the JVP in response to a `Zero` argument.
  tangent_input_indices = [
      i for i, t in enumerate(tangents) if _is_linear_input(t)
  ]
  tangent_constant_indices = [
      i for i, t in enumerate(tangents) if not _is_linear_input(t)
  ]
  # In this case, there are no cotangents to output. Not sure if JAX would skip
  # calling this function in this case or not.
  if len(tangent_input_indices) == 0:
    return (None,) * len(args)
  # Form a lambda which partially evaluates `linearized` at the constant primal
  # and constant tangent values, with the remaining arguments (i.e. the linear
  # input tangents) combined into a tuple, and then transpose the lambda.
  #
  # For a three-input primal function with constant input for the tangent
  # parameter at index 1, the evaluated string should look like:
  # ```
  # \ x0 x1 x2 u1 ct.
  #   transposeLinear (\(t0, t2). linearized x0 x1 x2 t0 u1 t2) ct
  # ```
  # - The `x` variables are the (constant) inputs to the primal function. These
  #   should always be supplied by JAX.
  # - The `u` variables are the constant tangent inputs, i.e. those which JAX
  #   does not need us to include in the transpose.
  # - The `t` variables are the linear inputs which we are transposing with
  #   respect to. These are tangent inputs to `linearized`.
  # x0 x1 x2 u1 ct
  transposed_atom_params = (
      arg_string("x", range(num_primals)) + " " +
      arg_string("u", tangent_constant_indices) + " ct")
  # (t0, t2)
  linear_lambda_params = tuple_string("t", tangent_input_indices)
  # t0 u1 t2
  # NOTE: use the `_is_linear_input` alias here for consistency with the index
  # partitioning above (it is the same predicate as jax.ad.is_undefined_primal).
  linearized_tangent_inputs = (" ".join(
      f"t{i}" if _is_linear_input(t) else f"u{i}"
      for i, t in enumerate(tangents)))
  # x0 x1 x2 t0 u1 t2
  linearized_inputs = (
      arg_string("x", range(num_primals)) + " " + linearized_tangent_inputs)
  # \ x0 x1 x2 u1 ct.
  #   transposeLinear (\(t0, t2). linearized x0 x1 x2 t0 u1 t2) ct
  # (The "\\ " below is an escaped backslash: the emitted Dex source begins
  # with a plain "\ " lambda; previously this was an invalid "\ " escape.)
  transposed = module.eval(
      f"\\ {transposed_atom_params}. transposeLinear " +
      f"(\\ {linear_lambda_params}. {linearized_name} {linearized_inputs}) ct"
  )
  # Tuple of cotangents relating to linear tangent inputs. In the given
  # example, this would be a tuple of the two cotangents relating to inputs 0
  # and 2.
  resulting_cotangents = primitive(transposed)(
      *primals, *[tangents[i] for i in tangent_constant_indices], cotangents)
  # If there is only one resulting cotangent, we need to make it into a tuple
  # so we can still zip over it.
  if len(tangent_input_indices) == 1:
    resulting_cotangents = (resulting_cotangents,)
  # Pack the output with `None`s where the inputs are constants, as required by
  # JAX.
  result = [None] * len(args)
  for ct_idx, ct in zip(tangent_input_indices, resulting_cotangents):
    result[num_primals + ct_idx] = ct
  return tuple(result)


jax.interpreters.ad.primitive_transposes[dex_apply_p] = dex_call_evaluate_linearized_transpose
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to sleep after a failed/empty RPC exchange before retrying.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per getwork; retuned in Miner.iterate()
# so one scan takes roughly settings['scantime'] seconds.
MAX_NONCE = 1000000L
# Key/value configuration parsed from the config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2)."""

    # JSON-RPC request id counter. NOTE(review): `self.OBJID += 1` in rpc()
    # creates an instance attribute shadowing this class attribute, so ids are
    # per-instance in practice.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, precomputed once.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Persistent connection; 30 second timeout, no strict mode.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the 'result' field or None/error."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): on error the error *object* is returned, not raised;
        # callers must distinguish it from a normal result themselves.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Convenience wrapper for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Convenience wrapper for the 'getwork' RPC (data submits a solution)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value.

    The former `0xffffffffL` literal used Python-2-only syntax; the mask is
    numerically identical without the `L` suffix and is valid in both
    Python 2 and Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    lowest = (x) << 24
    low = ((x) << 8) & 0x00ff0000
    high = ((x) >> 8) & 0x0000ff00
    highest = (x) >> 24
    return uint32(lowest | low | high | highest)
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of a buffer in place, word by word.

    Python 2 only: operates on `str` buffers whose length is assumed to be a
    multiple of 4 (struct.unpack raises otherwise).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # '@I' = native-order unsigned 32-bit int.
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in a buffer (bytes within each
    word are left untouched)."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single-process CPU miner: fetches work over RPC, scans nonces, submits
    solutions. Python 2 only (xrange, long, str.decode('hex'), print stmts)."""

    def __init__(self, id):
        # Numeric id used only for log messages.
        self.id = id
        # Nonces to scan per getwork; retuned each iteration (see iterate()).
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against one unit of work.

        Returns (hashes_done, nonce_bin) where nonce_bin is the winning
        little-endian nonce bytes, or None if no solution was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (saves rehashing per nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                # Passed the quick 32-bit test but not the full target.
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work hex and submit via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars [152:160) of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One fetch/scan/submit cycle; retunes max_nonce to match scantime."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Scale next scan so it takes about settings['scantime'] seconds,
        # clamped below 2^32 (nonce is a 32-bit field).
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the node and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run a single Miner's work loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file: '#' comment lines skipped, 'key = value' kept.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for any missing settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 35894
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Normalize types: config values arrive as strings.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # One OS process per mining "thread" (avoids the GIL for CPU-bound work).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import math
import re
from chaco.axis import PlotAxis
from enable.component_editor import ComponentEditor
from enable.label import Label
# ============= standard library imports ========================
from numpy import array, float64
from traits.api import HasTraits
from traitsui.api import View, UItem
# ============= local library imports ==========================
# http://stackoverflow.com/questions/2358890/python-lexical-analysis-and-tokenization
# http://effbot.org/zone/xml-scanner.htm
xml = re.compile(
r"""
<([/?!]?\w+) # 1. tags
|&(\#?\w+); # 2. entities
|([^<>&'\"=\s]+) # 3. text strings (no special characters)
|(\s+) # 4. whitespace
|(.) # 5. special characters
""",
re.VERBOSE,
)
def tokenize(text):
    """Yield markup tokens from *text*, suppressing bare '>' characters."""
    scanner = xml.scanner(text)
    match = scanner.match()
    while match:
        token = match.group(match.lastindex)
        if token != ">":
            yield token
        match = scanner.match()
def clean(text):
    """Return *text* with sup/sub markup tokens stripped out."""
    markup = ("sup", "/sup", "sub", "/sub")
    return "".join(tok for tok in tokenize(text) if tok not in markup)
class MPlotAxis(PlotAxis):
    """PlotAxis variant whose title is rendered with MLLabel, enabling
    <sup>/<sub> markup in axis titles."""

    def clone(self, ax):
        """Copy the visual configuration of another axis onto this one.

        Attributes missing on the source axis are silently skipped.
        """
        for attr in (
            "mapper",
            "origin",
            "title_font",
            "title_spacing",
            "title_color",
            "tick_weight",
            "tick_color",
            "tick_label_font",
            "tick_label_color",
            "tick_label_rotate_angle",
            "tick_label_alignment",
            "tick_label_margin",
            "tick_label_offset",
            "tick_label_position",
            "tick_label_formatter",
            "tick_in",
            "tick_out",
            "tick_visible",
            "tick_interval",
            "tick_generator",
            "orientation",
            "axis_line_visible",
            "axis_line_color",
            "axis_line_weight",
            "axis_line_style",
            "small_haxis_style",
            "ensure_labels_bounded",
            "ensure_ticks_bounded",
            "bgcolor",
            "use_draw_order",
            "component",
            "resizable",
            "tag",
            "use",
        ):
            try:
                setattr(self, attr, getattr(ax, attr))
            except AttributeError:
                pass

    def _draw_title(self, gc, label=None, axis_offset=None):
        """Draw the axis title using a markup-aware MLLabel.

        Largely mirrors chaco's PlotAxis._draw_title, substituting MLLabel
        for the plain Label so markup in `self.title` is honored.
        """
        if label is None:
            title_label = MLLabel(
                text=self.title,
                font=self.title_font,
                color=self.title_color,
                rotate_angle=self.title_angle,
                orientation=self.orientation,
            )
        else:
            title_label = label
        # get the _rotated_ bounding box of the label
        tl_bounds = array(title_label.get_bounding_box(gc), float64)
        text_center_to_corner = -tl_bounds / 2.0
        # which axis are we moving away from the axis line along?
        axis_index = self._major_axis.argmin()
        if self.title_spacing != "auto":
            axis_offset = self.title_spacing
        # NOTE(review): when title_spacing != "auto" the branch above already
        # set axis_offset, so this only fires for a truthy "auto" spacing with
        # no explicit offset — presumably intentional, but worth confirming
        # against chaco's original _draw_title.
        if (self.title_spacing) and (axis_offset is None):
            if not self.ticklabel_cache:
                axis_offset = 25
            else:
                # Push the title past the widest/tallest tick label.
                axis_offset = (
                    max([l._bounding_box[axis_index] for l in self.ticklabel_cache])
                    * 1.3
                )
        offset = (self._origin_point + self._end_axis_point) / 2
        axis_dist = self.tick_out + tl_bounds[axis_index] / 2.0 + axis_offset
        offset -= self._inside_vector * axis_dist
        offset += text_center_to_corner
        # Translate, draw, translate back (gc state otherwise unchanged).
        gc.translate_ctm(*offset)
        title_label.draw(gc)
        gc.translate_ctm(*(-offset))
        return
class MLLabel(Label):
    """Enable Label subclass that renders <sup>…</sup>/<sub>…</sub> markup by
    shifting and shrinking the affected text runs."""

    # List of (offset, text) pairs: offset is +1 for superscript, -1 for
    # subscript, 0 for normal text.
    _text_positions = None
    # mltext = Str
    # Memoized total rendered width (invalidated when text changes).
    _cached_text_width = None

    def _text_changed(self):
        # Traits change handler: recompute layout when the text is replaced.
        self._cached_text_width = None
        self._calculate_text_positions()

    def _calculate_text_positions(self):
        """Tokenize self.text into (vertical-offset, text) runs."""
        texts = []
        offset = 0
        for ti in tokenize(self.text):
            if ti == "sup":
                offset = 1
            elif ti == "sub":
                offset = -1
            elif ti in ("/sup", "/sub"):
                offset = 0
            else:
                texts.append((offset, ti))
        self._text_positions = texts

    def _calculate_text_width(self, gc):
        """Return the true rendered width, accounting for the smaller
        sup/sub font; memoized in _cached_text_width."""
        ofont = self.font
        sfont = self.font.copy()
        # Scripted runs are drawn at 95% of the base font size.
        sfont.size = int(sfont.size * 0.95)
        suph = int(ofont.size * 0.5)
        subh = -int(ofont.size * 0.3)
        s = 0
        mh = 0
        if self._cached_text_width is None:
            for offset, text in self._text_positions:
                with gc:
                    if offset == 1:
                        gc.translate_ctm(0, suph)
                        gc.set_font(sfont)
                    elif offset == -1:
                        gc.set_font(sfont)
                        gc.translate_ctm(0, subh)
                    else:
                        gc.set_font(ofont)
                    w, h, _, _ = gc.get_full_text_extent(text)
                    s += w
            self._cached_text_width = s
        return self._cached_text_width

    def _draw_mainlayer(self, gc, view_bounds=None, mode="normal"):
        """Main draw entry point: dispatch to horizontal or vertical layout."""
        self._calculate_text_width(gc)
        self._calc_line_positions(gc)
        with gc:
            gc.translate_ctm(*self.position)
            gc.set_font(self.font)
            gc.set_fill_color(self.color_)
            poss = self._text_positions
            if self.orientation in ("top", "bottom"):
                self._draw_horizontal(gc, poss)
            else:
                self._draw_vertical(gc, poss)

    def _draw_vertical(self, gc, poss):
        # Rotate 90 degrees and reuse the horizontal layout.
        bb = self._bounding_box
        gc.translate_ctm(bb[1] - 2, 0)
        gc.rotate_ctm(math.radians(90))
        self._draw_horizontal(gc, poss)

    def _draw_horizontal(self, gc, poss):
        """Draw the (offset, text) runs left to right, shifting sup/sub runs."""
        ofont = self.font
        sfont = self.font.copy()
        sfont.size = int(sfont.size * 0.95)
        suph = int(ofont.size * 0.5)
        subh = -int(ofont.size * 0.3)
        # need to correct for the difference between the enable.Label's calculated width and the actual
        # width. Label calculates the width without the markup so its greater than the real width.
        w = self._calculate_text_width(gc)
        ow = self._bounding_box[0]
        gc.translate_ctm((ow - w) / 2, 0)
        x = 0
        for offset, text in poss:
            with gc:
                if offset == 1:
                    gc.translate_ctm(0, suph)
                    gc.set_font(sfont)
                elif offset == -1:
                    gc.set_font(sfont)
                    gc.translate_ctm(0, subh)
                else:
                    gc.set_font(ofont)
                w, h, _, _ = gc.get_full_text_extent(text)
                gc.set_text_position(x, 0)
                gc.show_text(text)
                x += w
class Demo(HasTraits):
    """Minimal traits demo window exposing a single 'plot' component."""

    def traits_view(self):
        editor = ComponentEditor()
        return View(UItem("plot", editor=editor), resizable=True)
if __name__ == "__main__":
    # Manual smoke test: build a stacked graph and replace both axes of the
    # first plot with markup-aware MPlotAxis instances.
    # m = MLLabel()
    # m.text = '<sup>40</sup>Ar'
    # d = Demo()
    # d.plot = Plot()
    # d.plot.padding_left = 80
    # d.plot.data = ArrayPlotData()
    # d.plot.data['x'] = [1, 2, 3, 4]
    # d.plot.data['y'] = [1, 2, 3, 4]
    # d.plot.plot(('x', 'y'))
    from pychron.graph.stacked_graph import StackedGraph

    g = StackedGraph()
    plot = g.new_plot(padding_left=100, padding_bottom=100)
    # Configure the stock x axis, then clone its settings onto an MPlotAxis
    # whose title uses <sup> markup.
    xa = plot.x_axis
    xa.title_color = "red"
    xa.title = "sasfas"
    nxa = MPlotAxis()
    nxa.title = "<sup>39</sup>Ar/<sup>40</sup>Ar"
    # nxa.title = '39Ar/40Ar'
    nxa.clone(xa)
    # Same for the y axis, with a larger title font.
    ya = plot.y_axis
    ya.title_color = "red"
    ya.title = "sasfas"
    ya.title_font = "modern 36"
    nya = MPlotAxis()
    nya.title = "<sup>39</sup>Ar/<sup>40</Ar"
    # nya.title = '39Ar/40Ar'
    nya.clone(ya)
    plot.x_axis = nxa
    plot.y_axis = nya
    # Second stacked plot keeps plain axes for comparison.
    plot = g.new_plot(padding_left=100, padding_bottom=100)
    plot.y_axis.title = "Foo"
    plot.y_axis.title_font = "modern 18"
    g.configure_traits()
# ============= EOF =============================================
|
|
"""
mosaic.py
A simple python script that creates a mosaic from an input image.
Dependencies:
Python Imaging Library, available at http://www.pythonware.com/products/pil/
Summary of methods:
print_fn(s) prints s to the terminal only when the verbose option is selected
gcd(a,b) returns the greatest common divisor of a and b
max_color(img) returns the most frequent color of img
average_value(img) returns the average R,G,B values of an img
square_crop(img) crops img into the largest possible square; the crop is centered
center_crop(img, resolution) crops img into the largest possible rectangle with aspect ratio resolution; the crop is centered
build_chest(directory) returns a computed dictionary with key=feature of img and val = directory location of img
nearest_neighbor(img, directory) finds the image in directory most similar to img based on feature
vector_error(v,u) returns the linear difference (sum) of two vectors u and v
mosaic(input_image, image_stash, resolution, thumbnail_size, func, num_of_images) does the magic
cleanup(input_image, directory) cleans up after the magic
Copyright (c) 2010, Sameep Tandon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Sameep Tandon nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Sameep Tandon BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
I feel legit using a Berkeley Software License. GO BEARS!
"""
from PIL import Image # instead for "import Image" !
import math
import os
import sys
# Positive infinity (1e1000 overflows the float range); used as the initial
# "no match yet" error in nearest_neighbor().
inf = 1e1000
# Global verbosity flag; set True by the -v option in main().
verbose = False
def print_fn(s):
    """
    prints diagnostic messages if the verbose option has been enabled
    @s: string s to print
    """
    # Reading the module-level flag needs no `global` declaration.
    if verbose:
        print(s)
def gcd(a, b):
    """
    Returns the greatest common divisor of two numbers, a and b
    (iterative Euclidean algorithm)
    @a: input number 1
    @b: input number 2
    """
    while b:
        remainder = a % b
        a = b
        b = remainder
    return a
def max_color(img):
    """
    Returns the most frequent color used in the img
    @img: An image object
    """
    # getcolors yields (count, color) pairs; max compares counts first,
    # so the second element of the maximum is the most frequent color.
    pixel_count = img.size[0] * img.size[1]
    return max(img.getcolors(pixel_count))[1]
def average_value (img, lowerbound=(0,0), upperbound=None):
    """
    Returns the average (R,G,B) of the image
    @img: an Image object
    @lowerbound: optional parameter; (min_x, min_y) tuple of pixels
    @upperbound: optional parameter; (max_x, max_y) tuple of pixels
    """
    min_x, min_y = lowerbound
    if upperbound == None:
        upperbound = img.size
    max_x, max_y = upperbound
    img_width, img_height = img.size
    # Validate the crop window before touching pixel data.
    if min_x < 0 or min_y < 0 or max_x > img_width or max_y > img_height or max_x < min_x or max_y < min_y:
        print_fn ("Warning: Bad input; dumping data below.")
        print_fn ("img_width, img_height = " + str(img.size))
        print_fn ("min_x, min_y = " + str(lowerbound))
        print_fn ("max_x, max_y = " + str(upperbound))
        sys.exit(2)
        # NOTE(review): unreachable — sys.exit raises before this returns.
        return None
    r = 0
    g = 0
    b = 0
    count = 0
    # load() exposes direct pixel access; assumes an RGB image (3-tuples).
    pix = img.load()
    for x in range(min_x, max_x):
        for y in range(min_y, max_y):
            temp_r, temp_g, temp_b = pix[x,y]
            r += temp_r
            g += temp_g
            b += temp_b
            count += 1
    return ( float(r) / float(count), float(g) / float(count), float(b) / float(count) )
def square_crop(img):
    """
    Returns a square crop (with square at center) of an image
    @img: the input img file
    """
    unit_square = (1, 1)
    return center_crop(img, unit_square)
def center_crop (img, resolution, constrained_aspect_ratio=1):
    """
    Returns an image file that has been cropped to be proportional to the resolution such that the crop is done in
    the center
    @img: the input img file (a path string, opened here)
    @resolution: a tuple (x,y) for which the resulting image will be proportional to
    @constrained_aspect_ratio: an indicator stating whether the aspect ratio OF THE RESOLUTION must be constrained

    NOTE(review): relies on Python 2 integer division throughout (`/` on ints
    floors); under Python 3 these would need `//`.
    """
    try:
        im = Image.open(img)
    except IOError:
        print_fn ("Could not open input image file at " + img)
        raise IOError
    # Reduce the resolution to its lowest terms, e.g. (100, 50) -> (2, 1).
    x,y = ( resolution[0] / gcd(resolution[0], resolution[1]), resolution[1] / gcd(resolution[0],resolution[1]) )
    im_width, im_height = im.size
    box_x = 0
    box_y = 0
    if constrained_aspect_ratio:
        # Grow the crop box in aspect-ratio steps until it no longer fits.
        while box_x+x <= im_width and box_y+y <= im_height:
            box_x += x
            box_y += y
        width_to_crop = im_width - box_x
        height_to_crop = im_height - box_y
    else:
        # Only require each dimension to be a multiple of the resolution.
        width_to_crop = im_width % resolution[0]
        height_to_crop = im_height % resolution[1]
    # Split the trim evenly between both sides (extra pixel goes to the left/top).
    im = im.crop( (width_to_crop / 2 + (width_to_crop % 2), height_to_crop / 2 + (height_to_crop % 2), im_width - width_to_crop / 2, im_height - height_to_crop / 2) )
    return im
def build_chest(directory, chest, func=max_color, thumbnail_size=(50,50), num_of_images=None):
    """
    Returns a dictionary with key = func(img), value = img
    @directory: a directory of images to build the mosaic out of
    @chest: dict accumulator, mutated in place (key = feature, value = thumbnail path)
    @func: optional parameter; classification method used. average_value or max_color are viable choices
    @thumbnail_size: size of each image in the mosaic
    @num_of_images: max number of images to use

    Fix: the num_of_images cap was documented and decremented but never
    checked, so every image was always processed; the loop now stops once the
    cap is exhausted.
    """
    targetDir = directory + "/"
    # NOTE(review): taking split('/')[0] pins the temp dir to the first path
    # component even during recursion — presumably intentional so all
    # thumbnails land in one place; confirm for absolute paths.
    tmpDir = targetDir.split('/')[0] + "/temp/"
    # Recurse into subdirectories first.
    for file in os.listdir(targetDir):
        if os.path.isdir(targetDir + "/" + file):
            build_chest(targetDir + "/" + file, chest, func, thumbnail_size, num_of_images)
    if not os.path.isdir(tmpDir):
        os.mkdir(tmpDir)
    if num_of_images == None:
        num_of_images = len(os.listdir(targetDir))
    for file in os.listdir(targetDir):
        if num_of_images <= 0:
            # Honor the caller's cap on how many images to ingest.
            break
        if not os.path.isdir(targetDir + "/" + file):
            try:
                # Crop/shrink to a thumbnail, save it, and key it by its feature.
                im_transform = center_crop(targetDir + file, thumbnail_size)
                im_transform.thumbnail(thumbnail_size, Image.ANTIALIAS)
                print_fn ("Creating file " + tmpDir + os.path.splitext(file)[0] + ".thumbnail.jpg")
                im_transform.save(tmpDir + os.path.splitext(file)[0] + ".thumbnail.jpg", "JPEG")
                im_transform = Image.open(tmpDir + os.path.splitext(file)[0] + ".thumbnail.jpg")
                key = func(im_transform) # func has been chosen in main() : it's either a max or an average on the colors in the thumbnail.
                chest[key] = tmpDir + os.path.splitext(file)[0] + ".thumbnail.jpg"
                num_of_images -= 1
            except IOError:
                print_fn (file + " is not an image file; Skipping it")
    return chest
def nearest_neighbor(img, chest, func=max_color, chest_keys=None):
    """
    Returns an image in the chest that is closest in color to img, removing
    it from the chest so each thumbnail is used at most once.
    @img: img to classify
    @chest: a dictionary with key = func(img), value = img
    @func: optional parameter; classification method used. average_value or max_color are viable choices
    @chest_keys: optional parameter; all the keys of chest, used to save computation time
    """
    min_error = inf
    argmin_error = None
    img_val = func(img)
    min_key = None
    for key in chest.keys():
        # Fix: compute the error once per key (it was previously evaluated
        # twice, doubling the cost of the dominant inner loop).
        err = vector_error(img_val, key)
        if err < min_error:
            min_error = err
            argmin_error = chest[key]
            min_key = key
    # Consume the chosen thumbnail so it is not reused.
    del chest[min_key]
    return argmin_error
def vector_error(v, u):
    """
    Returns the magnitude of the difference vector (L1 / sum of absolute
    component differences)
    @v: vector v, represented as an iterable object in order (tuple or list)
    @u: vector u, same length as v
    """
    return sum(math.fabs(v[i] - u[i]) for i in range(len(v)))
def mosaic (input_image, image_stash, resolution=(25,25), thumbnail_size=(50,50), func=average_value, num_of_images=None):
    """
    Saves an image file that is a mosaic of input_image.
    @input_image: the location of the input image
    @image_stash: the directory of all the possible images to put in the mosaic
    @resolution: a tuple (x,y) of size rectangles to break the input image into
    @thumbnail_size: the size of each thumbnail image in the mosaic
    @func: the classifier function to use
    @num_of_images: the number of images to use in the mosaic

    NOTE(review): uses Python 2 integer division (`/`) for tile counts; under
    Python 3 range() would receive floats.
    """
    # Crop the input so it divides evenly into resolution-sized tiles.
    im = center_crop (input_image, resolution, False)
    chest = { }
    build_chest(image_stash, chest, func, thumbnail_size, num_of_images)
    chest_keys = chest.keys( )
    im_width, im_height = im.size
    # Round-trip through a temp JPEG so subsequent transforms read from disk.
    im.save(os.getcwd() + "/" + os.path.splitext(input_image)[0] + ".tmp.jpg", "JPEG")
    im = Image.open(os.getcwd() + "/" + os.path.splitext(input_image)[0] + ".tmp.jpg")
    # Output canvas: one thumbnail per input tile.
    mos_size = ((im_width / resolution[0]) * thumbnail_size[0], (im_height / resolution[1]) * thumbnail_size[1])
    mos = Image.new(im.mode, mos_size, (30, 20, 255))
    for x in range( im_width / resolution[0] ):
        print_fn (str(x+1) + " of " + str( im_width / resolution[0] ) + " columns")
        for y in range (im_height / resolution[1] ):
            # Extract the (x, y) tile from the input image...
            start_x = x * resolution[0]
            start_y = y * resolution[1]
            end_x = start_x + resolution[0]
            end_y = start_y + resolution[1]
            box = (start_x, start_y, end_x, end_y)
            query = im.transform(resolution, Image.EXTENT,box)
            # ...find its best-matching thumbnail (consumed from the chest)...
            reply = Image.open( nearest_neighbor(query, chest, func, chest_keys) )
            # ...and paste it at the corresponding mosaic position.
            start_x = x * thumbnail_size[0]
            start_y = y * thumbnail_size[1]
            end_x = start_x + thumbnail_size[0]
            end_y = start_y + thumbnail_size[1]
            box = (start_x, start_y, end_x, end_y)
            mos.paste(reply, box)
    mos.save(os.getcwd() + "/" + os.path.splitext(input_image)[0] + ".mosaic.jpg", "JPEG")
    cleanup( input_image, image_stash )
def cleanup(input_image, directory):
    """
    Cleans up the mess: deletes the temporary working JPEG and the thumbnail
    directory created by build_chest().
    @input_image: filename of the input image
    @directory: the directory where the mess was made
    """
    print_fn ("Initializing cleanup procedure. Deleting tmp files")
    targetDir = directory + "/"
    tmpDir = targetDir + "temp/"
    os.remove(os.getcwd() + "/" + os.path.splitext(input_image)[0] + ".tmp.jpg")
    for file in os.listdir(tmpDir):
        print_fn ("Removing file " + tmpDir + file)
        os.remove(tmpDir + file)
    # removedirs also prunes now-empty parent directories.
    os.removedirs(tmpDir)
    print_fn ("Cleanup complete.")
def main():
    """Parse command-line options and run the mosaic builder."""
    from optparse import OptionParser
    usage = "usage: %prog -i [input image] -s [directory of images] -r [x] [y] -t [x] [y]\n"
    usage += "Optional arguments -n, -a, -v"
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--input", dest="input_image", help="Input Image File")
    parser.add_option("-s", "--stash", dest="image_stash", help="Directory of images")
    parser.add_option("-r", "--resolution", dest="resolution", help="Size of tile to inspect at in input image", nargs=2)
    parser.add_option("-t", "--thumbnail", dest="thumbnail", help="Size of tile to write in output image", nargs=2)
    parser.add_option("-n", "--numImages", dest="number_of_images", help="Number of images to look at in stash")
    parser.add_option("-a", "--averageValue", dest="func", help="Average Value Classifier; instead of default MAX_COLOR. Average Value is better, but slower", action="store_true")
    parser.add_option("-v", "--verbose", dest="verbose", help="Verbose option; Prints diagnostic messages", action="store_true")
    (options, args) = parser.parse_args()
    # All four of input, stash, resolution, and thumbnail are required.
    if not options.input_image or not options.image_stash or not options.resolution or not options.thumbnail or not len(options.resolution)==2 or not len(options.thumbnail) == 2:
        print("Incorrect Usage; please see python mosaic.py --help")
        sys.exit(2)
    if options.verbose:
        # Enable diagnostic printing in print_fn().
        global verbose
        verbose = True
    try:
        input_image = options.input_image
        image_stash = options.image_stash
        resolution = ( int(options.resolution[0]), int(options.resolution[1]) )
        thumbnail_size = ( int(options.thumbnail[0]), int(options.thumbnail[1]) )
        func = max_color
        if options.func:
            func = average_value
    except:
        print("Incorrect Usage; please see python mosaic.py --help")
        sys.exit(2)
    if options.number_of_images:
        number_of_images = int(options.number_of_images)
        mosaic( input_image, image_stash, resolution, thumbnail_size, func, number_of_images )
    else:
        mosaic( input_image, image_stash, resolution, thumbnail_size, func)
    sys.exit()


if __name__ == "__main__":
    main()
|
|
from __future__ import with_statement #NEW ADD from http://stackoverflow.com/questions/12681315/google-app-engine-base64-photo-save-from-ios-application
import mimetypes
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.files.uploadedfile import UploadedFile
from django.core.files.uploadhandler import FileUploadHandler, \
StopFutureHandlers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.utils.encoding import smart_str, force_unicode
from google.appengine.api import files
from google.appengine.api.images import get_serving_url, NotImageError
from google.appengine.ext.blobstore import BlobInfo, BlobKey, delete, \
create_upload_url, BLOB_KEY_HEADER, BLOB_RANGE_HEADER, BlobReader
def prepare_upload(request, url, **kwargs):
    """Return (upload_url, params) for posting a file to the Blobstore."""
    upload_url = create_upload_url(url)
    return upload_url, {}
def serve_file(request, file, save_as, content_type, **kwargs):
    """Build an HttpResponse that asks App Engine's Blobstore to stream *file*.

    Raises ValueError when the file does not carry a blobstore_info.
    """
    blob_info = None
    inner = getattr(file, 'file', None)
    if inner is not None and hasattr(inner, 'blobstore_info'):
        blob_info = inner.blobstore_info
    elif hasattr(file, 'blobstore_info'):
        blob_info = file.blobstore_info
    if blob_info is None:
        raise ValueError("The provided file can't be served via the "
                         "Google App Engine Blobstore.")
    response = HttpResponse(content_type=content_type)
    # App Engine replaces the response body with the blob named here.
    response[BLOB_KEY_HEADER] = str(blob_info.key())
    response['Accept-Ranges'] = 'bytes'
    requested_range = request.META.get('HTTP_RANGE')
    if requested_range is not None:
        response[BLOB_RANGE_HEADER] = requested_range
    if save_as:
        response['Content-Disposition'] = smart_str(
            u'attachment; filename=%s' % save_as)
    if file.size is not None:
        response['Content-Length'] = file.size
    return response
class BlobstoreStorage(Storage):
    """Google App Engine Blobstore storage backend.

    Stored names have the form "<blob key>/<original name>"; _get_key()
    relies on that layout to recover the BlobKey.
    """

    def _open(self, name, mode='rb'):
        # Wrap the blob in a read-only Django File (see BlobstoreFile below).
        return BlobstoreFile(name, mode, self)

    def _save(self, name, content):
        # Normalize Windows separators; stored names always use '/'.
        name = name.replace('\\', '/')
        if hasattr(content, 'file') and \
           hasattr(content.file, 'blobstore_info'):
            # Already backed by a blob (e.g. BlobstoreUploadedFile wrapper).
            data = content.file.blobstore_info
        elif hasattr(content, 'blobstore_info'):
            data = content.blobstore_info
        elif isinstance(content, File):
            # Plain Django File: stream it into a new blob via the App Engine
            # Files API (create -> append in 'a' mode -> finalize), then look
            # up the resulting key. The order of these calls matters.
            guessed_type = mimetypes.guess_type(name)[0]
            file_name = files.blobstore.create(mime_type=guessed_type or 'application/octet-stream',
                                               _blobinfo_uploaded_filename=name)
            with files.open(file_name, 'a') as f:
                for chunk in content.chunks():
                    f.write(chunk)
            files.finalize(file_name)
            data = files.blobstore.get_blob_key(file_name)
        else:
            raise ValueError("The App Engine storage backend only supports "
                             "BlobstoreFile instances or File instances.")
        if isinstance(data, (BlobInfo, BlobKey)):
            # We change the file name to the BlobKey's str() value.
            if isinstance(data, BlobInfo):
                data = data.key()
            return '%s/%s' % (data, name.lstrip('/'))
        else:
            raise ValueError("The App Engine Blobstore only supports "
                             "BlobInfo values. Data can't be uploaded "
                             "directly. You have to use the file upload "
                             "handler.")

    def delete(self, name):
        # Deletes the underlying blob itself.
        delete(self._get_key(name))

    def exists(self, name):
        return self._get_blobinfo(name) is not None

    def size(self, name):
        return self._get_blobinfo(name).size

    def url(self, name):
        # Only images get serving URLs; other blobs have no public URL.
        try:
            return get_serving_url(self._get_blobinfo(name))
        except NotImageError:
            return None

    def created_time(self, name):
        return self._get_blobinfo(name).creation

    def get_valid_name(self, name):
        return force_unicode(name).strip().replace('\\', '/')

    def get_available_name(self, name):
        # Collisions are impossible: _save() prefixes names with a fresh
        # blob key, so no uniquifying suffix is needed here.
        return name.replace('\\', '/')

    def _get_key(self, name):
        # Names are "<blob key>/<original name>"; the key is the first part.
        return BlobKey(name.split('/', 1)[0])

    def _get_blobinfo(self, name):
        return BlobInfo.get(self._get_key(name))
class BlobstoreFile(File):
    """Read-only Django File wrapper around a stored Blobstore blob."""

    def __init__(self, name, mode, storage):
        self._mode = mode
        self._storage = storage
        self.name = name
        self.blobstore_info = storage._get_blobinfo(name)

    @property
    def size(self):
        """Blob size in bytes, as recorded by the Blobstore."""
        return self.blobstore_info.size

    def write(self, content):
        # Blobs are immutable once finalized; this wrapper is read-only.
        raise NotImplementedError()

    @property
    def file(self):
        # Open a BlobReader lazily, the first time the stream is needed.
        if not hasattr(self, '_file'):
            self._file = BlobReader(self.blobstore_info.key())
        return self._file
class BlobstoreFileUploadHandler(FileUploadHandler):
    """
    File upload handler for the Google App Engine Blobstore.

    Becomes active only when the upload carries a 'blob-key' header,
    meaning the Blobstore has already stored the bytes for us.
    """

    def new_file(self, *args, **kwargs):
        super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
        key = self.content_type_extra.get('blob-key')
        self.active = key is not None
        if not self.active:
            return
        self.blobkey = BlobKey(key)
        # The data already lives in the Blobstore; no other handler needed.
        raise StopFutureHandlers()

    def receive_data_chunk(self, raw_data, start):
        """Pass chunks through untouched when inactive; swallow them otherwise."""
        if not self.active:
            return raw_data

    def file_complete(self, file_size):
        """Return the uploaded-file wrapper if this handler was activated."""
        if not self.active:
            return None
        return BlobstoreUploadedFile(
            blobinfo=BlobInfo(self.blobkey),
            charset=self.charset)
class BlobstoreUploadedFile(UploadedFile):
    """
    An uploaded file whose payload lives in the Blobstore rather than
    in memory or on local disk.
    """

    def __init__(self, blobinfo, charset):
        reader = BlobReader(blobinfo.key())
        super(BlobstoreUploadedFile, self).__init__(
            reader, blobinfo.filename,
            blobinfo.content_type, blobinfo.size, charset)
        self.blobstore_info = blobinfo

    def open(self, mode=None):
        # The underlying BlobReader is always open; nothing to do.
        pass

    def chunks(self, chunk_size=1024 * 128):
        """Yield successive pieces of at most chunk_size bytes."""
        self.file.seek(0)
        while True:
            piece = self.read(chunk_size)
            if not piece:
                return
            yield piece

    def multiple_chunks(self, chunk_size=1024 * 128):
        # Blobs may be arbitrarily large, so always advertise chunked reads.
        return True
|
|
import os
import errno
import json
import urllib
import urllib2
import socket
import csv
from urlparse import urlparse
from zipfile import ZipFile
from osgeo import ogr, osr
ogr.UseExceptions()
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('oa')
def mkdirsp(path):
    """Create *path* and any missing parents, like `mkdir -p`.

    Silently succeeds when the directory already exists; re-raises any
    other OSError (permissions, a file in the way, ...).
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists" when it really is a directory.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
class ExtractionError(Exception):
    # Raised by extraction tasks when a source download fails
    # (connection errors, timeouts, unparseable responses).
    pass
class ExtractionTask(object):
    """Factory/base class for tasks that download raw source data.

    Subclasses implement extract(source_key, source_urls) and return the
    list of local file paths they produced.
    """

    @classmethod
    def from_type_string(clz, type_string):
        """Return the extraction task matching a source's 'type' value.

        Raises KeyError for unrecognized types.
        """
        kind = type_string.lower()
        if kind in ('http', 'ftp'):
            # Both plain HTTP and FTP downloads go through urllib2.
            return Urllib2ExtractionTask()
        elif kind == 'esri':
            return EsriRestExtractionTask()
        else:
            raise KeyError("I don't know how to extract for type {}".format(type_string))

    def extract(self, source_key, source_urls):
        """Download source_urls for source_key; return local file paths.

        BUG FIX: the abstract signature was extract(self, source_urls),
        which did not match the (source_key, source_urls) signature that
        every subclass implements and every caller uses.
        """
        raise NotImplementedError()
class Urllib2ExtractionTask(ExtractionTask):
    """Download plain HTTP/FTP source URLs with urllib2."""

    USER_AGENT = 'openaddresses-extract/1.0 (https://github.com/openaddresses/openaddresses)'
    CHUNK = 16 * 1024
    logger = logger.getChild('urllib2')

    def extract(self, source_key, source_urls):
        """Fetch each URL into ./workdir/<source_key>/http; return the paths.

        Files that already exist are reused without re-downloading.
        Raises ExtractionError when a URL cannot be reached.
        """
        downloaded = []
        target_dir = os.path.join('.', 'workdir', source_key, 'http')
        mkdirsp(target_dir)
        for url in source_urls:
            # Name the local file after the last URL path segment.
            destination = os.path.join(target_dir, url.split('/')[-1])
            if os.path.exists(destination):
                downloaded.append(destination)
                self.logger.debug("File exists %s", destination)
                continue
            self.logger.debug("Requesting %s", url)
            try:
                request = urllib2.Request(url, headers={'User-Agent': self.USER_AGENT})
                response = urllib2.urlopen(request)
            except urllib2.URLError as e:
                raise ExtractionError("Could not connect to URL", e)
            total = 0
            with open(destination, 'wb') as out:
                while True:
                    block = response.read(self.CHUNK)
                    total += len(block)
                    if not block:
                        break
                    out.write(block)
            downloaded.append(destination)
            self.logger.info("Downloaded %s bytes for file %s", total, destination)
        return downloaded
class EsriRestExtractionTask(ExtractionTask):
    """Page through an ESRI REST feature service and save it as GeoJSON."""

    USER_AGENT = 'openaddresses-extract/1.0 (https://github.com/openaddresses/openaddresses)'
    # BUG FIX: this logger was getChild('urllib2'), copy-pasted from
    # Urllib2ExtractionTask; name the child after this task instead.
    logger = logger.getChild('esri')

    def convert_esrijson_to_geojson(self, geom_type, esri_feature):
        """Convert one EsriJSON feature dict to a GeoJSON Feature dict.

        Supports point, multipoint and polygon geometries; raises
        KeyError for any other geometry type.
        """
        if geom_type == 'esriGeometryPoint':
            geometry = {
                "type": "Point",
                "coordinates": [
                    esri_feature['geometry']['x'],
                    esri_feature['geometry']['y']
                ]
            }
        elif geom_type == 'esriGeometryMultipoint':
            geometry = {
                "type": "MultiPoint",
                "coordinates": [
                    [geom[0], geom[1]] for geom in esri_feature['geometry']['points']
                ]
            }
        elif geom_type == 'esriGeometryPolygon':
            geometry = {
                "type": "Polygon",
                "coordinates": [
                    [
                        [geom[0], geom[1]] for geom in ring
                    ] for ring in esri_feature['geometry']['rings']
                ]
            }
        else:
            raise KeyError("Don't know how to convert esri geometry type {}".format(geom_type))
        return {
            "type": "Feature",
            "properties": esri_feature.get('attributes'),
            "geometry": geometry
        }

    def extract(self, source_key, source_urls):
        """Query each endpoint into ./workdir/<source_key>/esri as GeoJSON.

        Pages through the service 500 objectids at a time. Returns the
        list of output file paths.

        BUG FIX: previously a `finally: f.truncate()` ran on every loop
        iteration (a no-op at end-of-file) and a failed download left a
        partial .json behind, which the os.path.exists() check would then
        treat as a complete download on the next run. The partial file is
        now removed before the error propagates.
        """
        output_files = []
        download_path = os.path.join('.', 'workdir', source_key, 'esri')
        mkdirsp(download_path)
        for source_url in source_urls:
            size = 0
            parts = urlparse(source_url)
            file_path = os.path.join(download_path, parts.netloc + '.json')
            if os.path.exists(file_path):
                output_files.append(file_path)
                self.logger.debug("File exists %s", file_path)
                continue
            try:
                with open(file_path, 'w') as f:
                    f.write('{\n"type": "FeatureCollection",\n"features": [\n')
                    start = 0
                    width = 500
                    while True:
                        query_url = source_url + '/query'
                        query_args = urllib.urlencode({
                            'where': 'objectid >= {} and objectid < {}'.format(start, (start + width)),
                            'geometryPrecision': 7,
                            'returnGeometry': True,
                            'outSR': 4326,
                            'outFields': '*',
                            'f': 'JSON',
                        })
                        query_url += '?' + query_args
                        self.logger.debug("Requesting %s", query_url)
                        headers = {'User-Agent': self.USER_AGENT}
                        try:
                            req = urllib2.Request(query_url, headers=headers)
                            resp = urllib2.urlopen(req, timeout=10)
                            data = json.load(resp)
                        except urllib2.URLError as e:
                            raise ExtractionError("Could not connect to URL", e)
                        except socket.timeout as e:
                            raise ExtractionError("Timeout when connecting to URL", e)
                        except ValueError as e:
                            raise ExtractionError("Could not parse JSON", e)
                        error = data.get('error')
                        if error:
                            raise ExtractionError("Problem querying ESRI dataset: %s", error['message'])
                        geometry_type = data.get('geometryType')
                        features = data.get('features')
                        f.write(',\n'.join([
                            json.dumps(self.convert_esrijson_to_geojson(geometry_type, feature)) for feature in features
                        ]))
                        size += len(features)
                        if len(features) == 0:
                            # An empty page means we've read everything.
                            break
                        else:
                            f.write(',\n')
                        start += width
                    f.write('\n]\n}\n')
            except ExtractionError:
                # Remove the partial file so a later run re-downloads it.
                try:
                    os.remove(file_path)
                except OSError:
                    pass
                raise
            self.logger.info("Downloaded %s ESRI features for file %s", size, file_path)
            output_files.append(file_path)
        return output_files
class DecompressionError(Exception):
    # Raised when a downloaded archive cannot be expanded.
    pass
class DecompressionTask(object):
    """Factory/base class for expanding downloaded source files."""

    @classmethod
    def from_type_string(clz, type_string):
        """Return the decompression task for a source's 'compression' value.

        None means the source is not compressed. Raises KeyError for
        unknown compression types.
        """
        if type_string is None:
            # BUG FIX: was `type_string == None`; identity comparison is
            # the correct way to test for None.
            return NoopDecompressTask()
        elif type_string.lower() == 'zip':
            return ZipDecompressTask()
        else:
            raise KeyError("I don't know how to decompress for type {}".format(type_string))

    def decompress(self, source_key, source_paths):
        """Expand source_paths for source_key; return resulting file paths.

        BUG FIX: the abstract method was named extract(self, source_paths),
        but every subclass and caller uses decompress(source_key,
        source_paths); the base class now declares the real contract.
        """
        raise NotImplementedError()
class NoopDecompressTask(DecompressionTask):
    """Pass-through task for sources that are not compressed."""
    def decompress(self, source_key, source_paths):
        # Nothing to expand; hand back the downloaded paths unchanged.
        return source_paths
class ZipDecompressTask(DecompressionTask):
    """Expand zip archives into the source's workdir."""

    logger = logger.getChild('unzip')

    def decompress(self, source_key, source_paths):
        """Extract every member of every archive; return the extracted paths."""
        extracted = []
        destination = os.path.join('.', 'workdir', source_key, 'unzipped')
        mkdirsp(destination)
        for archive_path in source_paths:
            with ZipFile(archive_path, 'r') as archive:
                for member in archive.namelist():
                    member_path = archive.extract(member, destination)
                    self.logger.debug("Expanded file %s", member_path)
                    extracted.append(member_path)
        return extracted
class ConvertToCsvTask(object):
    """Convert OGR-readable files (.shp/.json/.csv/.kml) to CSV with a
    WGS84 'centroid' WKT column appended to the source attributes."""
    logger = logger.getChild('convert')
    known_types = ('.shp', '.json', '.csv', '.kml')

    def convert(self, source_key, source_paths):
        """Convert each known file into ./workdir/<source_key>/converted/*.csv.

        Unknown extensions are skipped; existing outputs are reused.
        Returns the list of CSV paths produced (or reused).
        """
        output_files = []
        convert_path = os.path.join('.', 'workdir', source_key, 'converted')
        mkdirsp(convert_path)
        for source_path in source_paths:
            filename = os.path.basename(source_path)
            basename, ext = os.path.splitext(filename)
            file_path = os.path.join(convert_path, basename + '.csv')
            if ext not in self.known_types:
                self.logger.debug("Skipping %s because I don't know how to convert it", source_path)
                continue
            if os.path.exists(file_path):
                output_files.append(file_path)
                self.logger.debug("File exists %s", file_path)
                continue
            # Open read-only (flag 0) and take the first (only) layer.
            in_datasource = ogr.Open(source_path, 0)
            in_layer = in_datasource.GetLayer()
            inSpatialRef = in_layer.GetSpatialRef()
            self.logger.info("Converting a layer to CSV: %s", in_layer)
            in_layer_defn = in_layer.GetLayerDefn()
            # Output columns: every source field, plus a computed centroid.
            out_fieldnames = []
            for i in range(0, in_layer_defn.GetFieldCount()):
                field_defn = in_layer_defn.GetFieldDefn(i)
                out_fieldnames.append(field_defn.GetName())
            out_fieldnames.append('centroid')
            # Reproject all geometries to WGS84 (EPSG:4326).
            outSpatialRef = osr.SpatialReference()
            outSpatialRef.ImportFromEPSG(4326)
            coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
            with open(file_path, 'w') as f:
                writer = csv.DictWriter(f, fieldnames=out_fieldnames)
                writer.writeheader()
                in_feature = in_layer.GetNextFeature()
                while in_feature:
                    row = dict()
                    for i in range(0, in_layer_defn.GetFieldCount()):
                        field_defn = in_layer_defn.GetFieldDefn(i)
                        row[field_defn.GetNameRef()] = in_feature.GetField(i)
                    geom = in_feature.GetGeometryRef()
                    # Transform in place, then record the centroid as WKT.
                    geom.Transform(coordTransform)
                    row['centroid'] = geom.Centroid().ExportToWkt()
                    writer.writerow(row)
                    # Destroy() releases the OGR feature before fetching the
                    # next one; the call order here is significant for OGR.
                    in_feature.Destroy()
                    in_feature = in_layer.GetNextFeature()
            in_datasource.Destroy()
            output_files.append(file_path)
        return output_files
class ConformTask(object):
    """Map converted CSV fields to OpenAddresses field names."""
    logger = logger.getChild('conform')

    def conform(self, source_key, source_paths):
        # NOTE(review): this method only defines a local `skip` helper and
        # returns None, yet the module-level driver below calls both
        # conform(...) (using its None return as `filenames`) and a bare
        # skip(...) which is NOT visible at module scope -- that call would
        # raise NameError. This looks unfinished; confirm intent before
        # relying on it.
        def skip(k):
            # Process only sources whose key starts with 'us'.
            if not k.startswith('us'):
                return True
# Module-level driver: walk the openaddresses source descriptions and run the
# extract -> decompress -> convert -> conform pipeline for each JSON source.
# NOTE(review): skip() is defined inside ConformTask.conform() above, not at
# module scope, so the skip(source_key) call below would raise NameError as
# written -- confirm where skip() was meant to live. The hard-coded home
# directory path also ties this script to one machine.
for dirpath, dirnames, filenames in os.walk('/Users/iandees/Workspace/addresses/addresses/sources'):
    for f in filenames:
        if f.endswith('.json'):
            with open(os.path.join(dirpath, f), 'r') as sf:
                source_data = json.load(sf)
            # Source key is the JSON filename without its '.json' suffix.
            source_key = f[:-5]
            if skip(source_key): continue
            # Download the source data
            source_type = source_data.get('type')
            if not source_type:
                logger.error("Source %s does not have a type set", source_key)
                continue
            try:
                task = ExtractionTask.from_type_string(source_type)
                data_urls = source_data.get('data')
                # 'data' may be a single URL or a list of URLs.
                if not isinstance(data_urls, list):
                    data_urls = [data_urls]
                filenames = task.extract(source_key, data_urls)
            except KeyError as e:
                print "Don't know how to process {}: {}".format(f, e)
                continue
            except ExtractionError as e:
                print "Problem extracting {}: {}".format(f, e)
                continue
            # Decompress the downloaded file
            try:
                task = DecompressionTask.from_type_string(source_data.get('compression'))
                filenames = task.decompress(source_key, filenames)
            except KeyError:
                print "Don't know how to decompress {}".format(f)
                continue
            # Convert the source data to a CSV
            task = ConvertToCsvTask()
            filenames = task.convert(source_key, filenames)
            # Now we map the fields in the CSV to OpenAddresses field names
            task = ConformTask()
            filenames = task.conform(source_key, filenames)
            print filenames
|
|
from __future__ import print_function
import sys
import sqlparse
from sqlparse.sql import Comparison, Identifier, Where
from .parseutils import last_word, extract_tables, find_prev_keyword
from .special import parse_special_command
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
def suggest_type(full_text, text_before_cursor):
    """Takes the full_text that is typed so far and also the text before the
    cursor to suggest completion type and scope.
    Returns a tuple with a type of entity ('table', 'column' etc) and a scope.
    A scope for a column category will be a list of tables.
    """
    word_before_cursor = last_word(text_before_cursor,
                                   include='many_punctuations')
    identifier = None
    # If we've partially typed a word then word_before_cursor won't be an empty
    # string. In that case we want to remove the partially typed string before
    # sending it to the sqlparser. Otherwise the last token will always be the
    # partially typed string which renders the smart completion useless because
    # it will always return the list of keywords as completion.
    if word_before_cursor:
        if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\':
            # Open function call or a special (backslash) command: keep the
            # partial word, it carries structure the parser needs.
            parsed = sqlparse.parse(text_before_cursor)
        else:
            parsed = sqlparse.parse(
                text_before_cursor[:-len(word_before_cursor)])
            # word_before_cursor may include a schema qualification, like
            # "schema_name.partial_name" or "schema_name.", so parse it
            # separately
            p = sqlparse.parse(word_before_cursor)[0]
            if p.tokens and isinstance(p.tokens[0], Identifier):
                identifier = p.tokens[0]
    else:
        parsed = sqlparse.parse(text_before_cursor)
    if len(parsed) > 1:
        # Multiple statements being edited -- isolate the current one by
        # cumulatively summing statement lengths to find the one that bounds the
        # current position
        current_pos = len(text_before_cursor)
        stmt_start, stmt_end = 0, 0
        for statement in parsed:
            # NOTE(review): to_unicode() is an old sqlparse API (newer
            # releases use str(statement)) -- confirm the pinned version.
            stmt_len = len(statement.to_unicode())
            stmt_start, stmt_end = stmt_end, stmt_end + stmt_len
            if stmt_end >= current_pos:
                text_before_cursor = full_text[stmt_start:current_pos]
                full_text = full_text[stmt_start:]
                break
    elif parsed:
        # A single statement
        statement = parsed[0]
    else:
        # The empty string
        statement = None
    # Check for special commands and handle those separately
    if statement:
        # Be careful here because trivial whitespace is parsed as a statement,
        # but the statement won't have a first token
        tok1 = statement.token_first()
        if tok1 and tok1.value == '\\':
            return suggest_special(text_before_cursor)
    # Falls back to '' (keyword suggestions) when there is no last token.
    last_token = statement and statement.token_prev(len(statement.tokens)) or ''
    return suggest_based_on_last_token(last_token, text_before_cursor,
                                       full_text, identifier)
def suggest_special(text):
    """Map a special (backslash) command to completion suggestions.

    Returns a list of suggestion dicts such as {'type': 'database'}.
    """
    text = text.lstrip()
    cmd, arg = parse_special_command(text)
    if cmd == text:
        # Trying to complete the special command itself
        return [{'type': 'special'}]
    if cmd in ('\\u', '\\r'):
        return [{'type': 'database'}]
    if cmd in ('\\T',):
        # BUG FIX: this was `cmd in ('\\T')` -- ('\\T') is just a string,
        # so the test was substring membership and also matched cmd == '\\'.
        return [{'type': 'table_format'}]
    if cmd in ('\\f', '\\fs', '\\fd'):
        return [{'type': 'favoritequery'}]
    if cmd in ('\\dt',):
        return [
            {'type': 'table', 'schema': []},
            {'type': 'view', 'schema': []},
            {'type': 'schema'},
        ]
    return [{'type': 'keyword'}, {'type': 'special'}]
def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier):
    """Return completion suggestions appropriate after *token*.

    token may be a plain string keyword, a sqlparse token, or '' for
    "no previous token". Returns a list of suggestion dicts (each with a
    'type' key and type-specific scope keys).
    """
    if isinstance(token, string_types):
        token_v = token.lower()
    elif isinstance(token, Comparison):
        # If 'token' is a Comparison type such as
        # 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling
        # token.value on the comparison type will only return the lhs of the
        # comparison. In this case a.id. So we need to do token.tokens to get
        # both sides of the comparison and pick the last token out of that
        # list.
        token_v = token.tokens[-1].value.lower()
    elif isinstance(token, Where):
        # sqlparse groups all tokens from the where clause into a single token
        # list. This means that token.value may be something like
        # 'where foo > 5 and '. We need to look "inside" token.tokens to handle
        # suggestions in complicated where clauses correctly
        prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
        return suggest_based_on_last_token(prev_keyword, text_before_cursor,
                                           full_text, identifier)
    else:
        token_v = token.value.lower()
    if not token:
        return [{'type': 'keyword'}, {'type': 'special'}]
    elif token_v.endswith('('):
        p = sqlparse.parse(text_before_cursor)[0]
        if p.tokens and isinstance(p.tokens[-1], Where):
            # Four possibilities:
            #  1 - Parenthesized clause like "WHERE foo AND ("
            #        Suggest columns/functions
            #  2 - Function call like "WHERE foo("
            #        Suggest columns/functions
            #  3 - Subquery expression like "WHERE EXISTS ("
            #        Suggest keywords, in order to do a subquery
            #  4 - Subquery OR array comparison like "WHERE foo = ANY("
            #        Suggest columns/functions AND keywords. (If we wanted to be
            #        really fancy, we could suggest only array-typed columns)
            column_suggestions = suggest_based_on_last_token('where',
                text_before_cursor, full_text, identifier)
            # Check for a subquery expression (cases 3 & 4)
            where = p.tokens[-1]
            prev_tok = where.token_prev(len(where.tokens) - 1)
            if isinstance(prev_tok, Comparison):
                # e.g. "SELECT foo FROM bar WHERE foo = ANY("
                prev_tok = prev_tok.tokens[-1]
            prev_tok = prev_tok.value.lower()
            if prev_tok == 'exists':
                return [{'type': 'keyword'}]
            else:
                return column_suggestions
        # Get the token before the parens
        prev_tok = p.token_prev(len(p.tokens) - 1)
        if prev_tok and prev_tok.value and prev_tok.value.lower() == 'using':
            # tbl1 INNER JOIN tbl2 USING (col1, col2)
            tables = extract_tables(full_text)
            # suggest columns that are present in more than one table
            return [{'type': 'column', 'tables': tables, 'drop_unique': True}]
        elif p.token_first().value.lower() == 'select':
            # If the lparen is preceeded by a space chances are we're about to
            # do a sub-select.
            if last_word(text_before_cursor,
                         'all_punctuations').startswith('('):
                return [{'type': 'keyword'}]
        elif p.token_first().value.lower() == 'show':
            return [{'type': 'show'}]
        # We're probably in a function argument list
        return [{'type': 'column', 'tables': extract_tables(full_text)}]
    elif token_v in ('set', 'by', 'distinct'):
        return [{'type': 'column', 'tables': extract_tables(full_text)}]
    elif token_v in ('show',):
        # BUG FIX: was `token_v in ('show')` -- a substring test on the
        # string 'show', not one-element tuple membership.
        return [{'type': 'show'}]
    elif token_v in ('to',):
        p = sqlparse.parse(text_before_cursor)[0]
        if p.token_first().value.lower() == 'change':
            return [{'type': 'change'}]
        else:
            return [{'type': 'user'}]
    elif token_v in ('user', 'for'):
        return [{'type': 'user'}]
    elif token_v in ('select', 'where', 'having'):
        # Check for a table alias or schema qualification
        parent = (identifier and identifier.get_parent_name()) or []
        if parent:
            tables = extract_tables(full_text)
            tables = [t for t in tables if identifies(parent, *t)]
            return [{'type': 'column', 'tables': tables},
                    {'type': 'table', 'schema': parent},
                    {'type': 'view', 'schema': parent},
                    {'type': 'function', 'schema': parent}]
        else:
            return [{'type': 'column', 'tables': extract_tables(full_text)},
                    {'type': 'function', 'schema': []},
                    {'type': 'keyword'}]
    elif (token_v.endswith('join') and token.is_keyword) or (token_v in
            ('copy', 'from', 'update', 'into', 'describe', 'truncate',
             'desc', 'explain')):
        schema = (identifier and identifier.get_parent_name()) or []
        # Suggest tables from either the currently-selected schema or the
        # public schema if no schema has been specified
        suggest = [{'type': 'table', 'schema': schema}]
        if not schema:
            # Suggest schemas
            suggest.insert(0, {'type': 'schema'})
        # Only tables can be TRUNCATED, otherwise suggest views
        if token_v != 'truncate':
            suggest.append({'type': 'view', 'schema': schema})
        return suggest
    elif token_v in ('table', 'view', 'function'):
        # E.g. 'DROP FUNCTION <funcname>', 'ALTER TABLE <tablname>'
        rel_type = token_v
        schema = (identifier and identifier.get_parent_name()) or []
        if schema:
            return [{'type': rel_type, 'schema': schema}]
        else:
            return [{'type': 'schema'}, {'type': rel_type, 'schema': []}]
    elif token_v == 'on':
        tables = extract_tables(full_text)  # [(schema, table, alias), ...]
        parent = (identifier and identifier.get_parent_name()) or []
        if parent:
            # "ON parent.<suggestion>"
            # parent can be either a schema name or table alias
            tables = [t for t in tables if identifies(parent, *t)]
            return [{'type': 'column', 'tables': tables},
                    {'type': 'table', 'schema': parent},
                    {'type': 'view', 'schema': parent},
                    {'type': 'function', 'schema': parent}]
        else:
            # ON <suggestion>
            # Use table alias if there is one, otherwise the table name
            aliases = [t[2] or t[1] for t in tables]
            suggest = [{'type': 'alias', 'aliases': aliases}]
            # The lists of 'aliases' could be empty if we're trying to complete
            # a GRANT query. eg: GRANT SELECT, INSERT ON <tab>
            # In that case we just suggest all tables.
            if not aliases:
                suggest.append({'type': 'table', 'schema': parent})
            return suggest
    elif token_v in ('use', 'database', 'template', 'connect'):
        # "\c <db", "use <db>", "DROP DATABASE <db>",
        # "CREATE DATABASE <newdb> WITH TEMPLATE <db>"
        return [{'type': 'database'}]
    elif token_v == 'tableformat':
        return [{'type': 'table_format'}]
    elif token_v.endswith(',') or token_v in ['=', 'and', 'or']:
        prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
        if prev_keyword:
            return suggest_based_on_last_token(
                prev_keyword, text_before_cursor, full_text, identifier)
        else:
            return []
    else:
        return [{'type': 'keyword'}]
def identifies(id, schema, table, alias):
    """Return truthy when *id* refers to the table described by
    (schema, table, alias): it matches the alias, the bare table name,
    or the schema-qualified "schema.table" form."""
    if id == alias or id == table:
        return True
    return schema and (id == schema + '.' + table)
|
|
""" Python script for local testing (compatible with both Python 2 and Python 3)
Disclaimer: this is a way to test your solutions, but it is NOT the real judging
system. The judging system behavior might be different.
"""
from __future__ import print_function
import random
import subprocess
import sys
USAGE_MSG = """
Usage:
Linux and Mac users:
From your terminal, run
python testing_tool.py command_to_run_your_script_or_executable
Note that command_to_run_your_script_or_executable is read as a list of
arguments, so you should NOT wrap it with quotation marks.
Examples:
C++, after compilation:
python testing_tool.py ./my_binary
Python:
python testing_tool.py python my_code.py
Java, after compilation:
python testing_tool.py java my_main_class_name
See https://code.google.com/codejam/resources/faq#languages for how we compile
and run your solution in the language of your choice.
Windows users:
Follow the instructions for Linux and Mac users if you are familiar with
terminal tools on Windows. Otherwise, please be advised that this script might
not work with Python 2 (it works with Python 3). In addition, if you cannot
pass arguments to Python, you will need to modify the "cmd = sys.argv[1:]"
line below.
"""
# Right now, there are 3 test cases with the minimum prepared area A in each
# test case being 10. We encourage you to modify LIST_OF_A for more thorough
# testing. Note that A[0] is the A given for the first test case, A[1] is for
# the second test case, etc. In real judging, A is the same for all test cases
# within the same test set.
LIST_OF_A = [10, 20, 200]
NUM_TEST_CASES = len(LIST_OF_A)
# You can set PRINT_INTERACTION_HISTORY to True to print out the interaction
# history between your code and the judge.
PRINT_INTERACTION_HISTORY = False
"""Helper functions"""
def JudgePrint(p, s):
  """Send one line of judge output to the solution's stdin.

  Also echoes the interaction to the console when
  PRINT_INTERACTION_HISTORY is enabled.
  """
  print(s, file=p.stdin)
  # Flush immediately -- the solution blocks waiting for this line.
  p.stdin.flush()
  if PRINT_INTERACTION_HISTORY:
    print("Judge prints:", s)
def PrintSubprocessResults(p):
  """Report the solution's exit status and anything it wrote to stderr."""
  print("Your code finishes with exit status {}.".format(p.returncode))
  captured = p.stderr.read()
  if not captured:
    print("Your code doesn't have stderr output.")
  else:
    print("The stderr output of your code is:")
    sys.stdout.write(captured)
def WaitForSubprocess(p):
  """Block until the solution exits, then dump its status and stderr."""
  if p.poll() is None:
    print("Waiting for your code to finish...")
  # wait() returns immediately when the process has already exited.
  p.wait()
  PrintSubprocessResults(p)
def CheckSubprocessExit(p, case_id):
  """Abort the tester if the solution died in the middle of a test case."""
  if p.poll() is None:
    # Still running -- nothing to do.
    return
  print("Your code exited early, in the middle of Case #{}.".format(case_id))
  PrintSubprocessResults(p)
  sys.exit(-1)
def WrongAnswerExit(p, case_id, error_msg):
  """Fail the given case: notify the solution with '-1 -1', then exit."""
  print("Case #{} failed: {}".format(case_id, error_msg))
  try:
    JudgePrint(p, "-1 -1")
  except IOError:
    # The solution may already have exited and closed its stdin.
    print("Failed to print -1 -1 because your code finished already.")
  WaitForSubprocess(p)
  sys.exit(-1)
"""Main function begins"""
# Retrieve the command to run your code from the arguments.
# If you cannot pass arguments to Python when running this testing tool, please
# replace sys.argv[1:] with the command list to run your code.
# e.g. C++ users: cmd = ["./my_binary"]
# Python users: cmd = [sys.executable, "my_code.py"]
# Java users: cmd = ["java", "my_main_class_name"]
cmd = sys.argv[1:]
assert cmd, "There should be at least one argument." + USAGE_MSG
if (cmd[0] == "-h") or (cmd[0] == "-help") or (cmd[0] == "--h") or (
cmd[0] == "--help"):
print(USAGE_MSG)
sys.exit(0)
# Run your code in a separate process. You can debug your code by printing to
# stderr inside your code, or adding print statements in this testing tool.
# Note that your stderr output will be printed by this testing tool only after
# your code finishes, e.g. if your code hangs, you wouldn't get your stderr
# output.
try:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
universal_newlines=True)
except Exception as e:
print("Failed to start running your code. Error:")
print(e)
sys.exit(-1)
JudgePrint(p, NUM_TEST_CASES)
for test_case_id in range(1, NUM_TEST_CASES + 1):
if PRINT_INTERACTION_HISTORY:
print("Test Case #{}:".format(test_case_id))
# Different test case has different seed.
random.seed(test_case_id)
A = LIST_OF_A[test_case_id - 1]
JudgePrint(p, A)
test_case_passed = False
random.seed(test_case_id)
field = set()
prepared_cells_count = 0
northmost = None
for _ in range(1000):
# Detect whether the subprocess has finished running.
CheckSubprocessExit(p, test_case_id)
user_input = None
try:
user_input = p.stdout.readline()
i, j = map(int, user_input.split())
except:
# Note that your code might finish after the first CheckSubprocessExit
# check above but before the readline(), so we will need to again check
# whether your code has finished.
CheckSubprocessExit(p, test_case_id)
exit_msg = ""
if user_input == "":
exit_msg = (
"Read an empty string as opposed to 2 integers for cell location. "
"This might happen because your code exited early, or printed an "
"extra newline character.")
elif user_input is None:
exit_msg = (
"Unable to read the cell location. This might happen because your "
"code exited early, printed an extra new line character, or did "
"not print the output correctly.")
else:
exit_msg = (
"Failed to read the cell location. Expected two integers ending "
"with one newline character. Read \"{}\" (quotes added to isolate "
"output of your program) instead.".format(user_input))
WrongAnswerExit(p, test_case_id, exit_msg)
if PRINT_INTERACTION_HISTORY:
print("Judge reads:", user_input.rstrip())
if (i <= 1) or (i >= 1000) or (j <= 1) or (j >= 1000):
WrongAnswerExit(p, test_case_id, "Your input is out of range [2, 999].")
prepared_i = random.randint(i - 1, i + 1)
prepared_j = random.randint(j - 1, j + 1)
if not (prepared_i, prepared_j) in field:
if northmost is None:
northmost = prepared_i
southmost = prepared_i
westmost = prepared_j
eastmost = prepared_j
else:
northmost = min(prepared_i, northmost)
southmost = max(prepared_i, southmost)
westmost = min(prepared_j, westmost)
eastmost = max(prepared_j, eastmost)
field.add((prepared_i, prepared_j))
prepared_cells_count += 1
if (prepared_cells_count >=
A) and (prepared_cells_count == (southmost - northmost + 1) *
(eastmost - westmost + 1)):
JudgePrint(p, "0 0")
test_case_passed = True
break
JudgePrint(p, "{} {}".format(prepared_i, prepared_j))
if not test_case_passed:
WrongAnswerExit(p, test_case_id,
"Failed to prepare the rectangle within 1000 tries.")
extra_output = p.stdout.readline()
WaitForSubprocess(p)
if extra_output == "":
print("Congratulations! All test cases passed :)")
else:
print("Wrong Answer because of extra output:")
sys.stdout.write(extra_output)
sys.exit(-1)
|
|
from sklearn.externals import joblib
__author__ = 'Aaron J. Masino'
import time, os
from sklearn import linear_model, svm, tree
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import confusion_matrix
import pandas as pd
import numpy as np
from numpy.random import RandomState
from learn import wrangle, printers
from nlp import util
import learn.sklearn_extensions as sklx
from nltk.corpus import stopwords
from learn.metrics import PerformanceMetrics
from functools import reduce
def load_report(path):
    """Return the entire text of the report file at *path*.

    Replaces a readlines()+reduce string concatenation (quadratic in the
    number of lines) with a single read(), and uses a context manager so
    the handle is closed even if reading fails.
    """
    with open(path, 'r') as f:
        return f.read()
def concatenate(d1, d2):
    """Return a merged copy of dicts d1 and d2 (d2 wins on key collisions).

    Neither input is modified.
    """
    merged = dict(d1)
    merged.update(d2)
    return merged
# custom stop-word list that keeps a few negation/position words
# NOTE: the original bound a bare filter(...) object here.  Under
# Python 3 that is a one-shot iterator: the first membership scan in
# text_preprocessor exhausts it, and every later `x not in
# english_stopwords` test silently succeeds.  A frozenset is reusable,
# behavior-compatible for membership tests, and gives O(1) lookups.
english_stopwords = frozenset(
    w for w in stopwords.words('english') if w not in ('no', 'not', 'under'))
def text_preprocessor(text):
    """Normalise radiology-report text for vectorization.

    Masks digits, numerals, and units via the nlp.util helpers,
    lowercases the tokens, drops stop words and single-character tokens,
    Porter-stems the remainder, and joins them back into one string.
    NOTE: the returned string starts with a single space (matching the
    original reduce-based join).
    """
    cleaned = util.replace_digits(text)
    cleaned = util.replace_numerals(cleaned)
    cleaned = util.replace_units(cleaned)
    lowered = [w.lower() for w in util.words(cleaned)]
    kept = [t for t in lowered
            if t not in english_stopwords and len(t) >= 2]
    joined = ""
    for stem in util.porter_stem(kept):
        # Mirrors reduce('{0} {1}'.format, ...) -- note the leading space.
        joined = '{0} {1}'.format(joined, stem)
    return joined
def analyze_classifiers(region_key, classifiers, x_train, y_train, x_test, y_test, out_file, preprocessor=text_preprocessor):
    """Grid-search every classifier in *classifiers* for one ear region.

    *classifiers* maps a display name to a 4-tuple:
    (estimator, use_sparse_array, use_binary_features, grid_params).
    Results are appended to *out_file* via the printers helpers.
    """
    printers.printsf('{0}Analysis for {1} ear region{0}'.format(40*'-', region_key), out_file)
    for clf_name, spec in classifiers.items():
        estimator = spec[0]       # the classifier
        use_sparse = spec[1]      # keep the sparse document-term matrix
        use_binary = spec[2]      # binary features (to support NB)
        grid_params = spec[3]
        vectorizer = CountVectorizer(input='content', decode_error='ignore',
                                     preprocessor=preprocessor, binary=use_binary)
        if use_sparse:
            pipeline = Pipeline(steps=[('vect', vectorizer), ('clf', estimator)])
        else:
            # Dense-input estimators need the sparse matrix converted first.
            pipeline = Pipeline(steps=[('vect', vectorizer),
                                       ('sa', sklx.SparseToArray()),
                                       ('clf', estimator)])
        search = sklx.grid_analysis(pipeline, grid_params, x_train, y_train)
        printers.print_grid_search_results(search, clf_name, out_file, x_test, y_test)
if __name__ == '__main__':
    # --- run configuration flags ---
    use_finding_impression_only = True
    analyze_baseline = True
    analyze_all_classifiers = False
    # static parameters
    kfolds = 5  # NOTE(review): not referenced below -- presumably consumed inside sklx.grid_analysis; confirm
    seed = 987654321
    # set the numpy random seed so results are reproducible
    # NOTE(review): uses the literal rather than `seed`; same value today, but keep them in sync
    rs = RandomState(987654321)
    # set common path variables
    label_file = './data/input/SDS_PV2_combined/SDS_PV2_class_labels.txt'
    report_path = './data/input/SDS_PV2_combined/{0}'
    file_suffix = ''
    if use_finding_impression_only:
        report_path = report_path.format('reports_single_find_impr')
        file_suffix = '_fi.txt'
    else:
        report_path = report_path.format('reports_single')
        file_suffix = '.txt'
    output_path = './data/output/{0}'
    standard_out_file = output_path.format('SDS_PV2_results_.txt')
    # read data
    label_data = pd.read_csv(label_file)
    # columns 2..5 hold the per-region class labels
    region_keys = label_data.columns[2:6]
    miss_labeled_file = output_path.format('SDS_PV2_missed_.txt')
    # make sure the output directory exists before any writes
    if not os.path.exists(os.path.dirname(standard_out_file)):
        os.makedirs(os.path.dirname(standard_out_file))
    # timestamp this run in the results file (append mode, no echo)
    now = time.localtime()
    (printers.
     printsf('{6}{0}-{1}-{2} {3}:{4}:{5}{6}'.
             format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec, 40*'-'),
             standard_out_file, 'a',False))
    # partition the data (80/20 split stratified on the document label)
    # NOTE(review): `partion` is the helper's actual (misspelled) name in learn.wrangle
    pos_cases, neg_cases = wrangle.partion(label_data['doc_norm']==1, label_data, ratios=[0.8,0.2])
    train_mask = np.concatenate((pos_cases[0], neg_cases[0]))
    test_mask = np.concatenate((pos_cases[1], neg_cases[1]))
    rs.shuffle(train_mask)
    rs.shuffle(test_mask)
    train_labels = label_data.iloc[train_mask]
    test_labels = label_data.iloc[test_mask]
    # print partition stats
    printers.printsf('{0}Data Partition Stats{0}'.format(40*'-'), standard_out_file)
    printers.print_data_stats(train_labels['doc_norm'], test_labels['doc_norm'], '{0}Document{0}'.format(40*'-'),standard_out_file)
    for key in region_keys:
        printers.print_data_stats(train_labels[key], test_labels[key],
                                  '{0}{1}{0}'.format(40*'-', key),standard_out_file)
    # read in the text reports (one file per patient id)
    train_reports = [load_report('{0}/{1}{2}'.format(report_path, pid, file_suffix)) for pid in train_labels['pid']]
    test_reports = [load_report('{0}/{1}{2}'.format(report_path, pid, file_suffix)) for pid in test_labels['pid']]
    #------------------------------ BASELINE ANALYSIS -----------------------------------------------------------------
    # Single fixed pipeline (unigram counts -> logistic regression), one
    # fit per region, with miss-classified cases and confusion matrices
    # written to the output files.
    if analyze_baseline:
        clf = linear_model.LogisticRegression(C=1000)
        #clf = BernoulliNB(alpha=1.0, binarize=None, fit_prior=True, class_prior=None)
        usa = True #use sparse array, should be false for NB classifier
        binary_features = False #should be true for NB classifier
        apply_text_preprocessing = False
        tpp = None
        if apply_text_preprocessing: tpp = text_preprocessor
        #start with no regulariztion, unigrams and no text preprocessing
        vectorizer = CountVectorizer(input='content', decode_error='ignore', analyzer='word',
                                     preprocessor=tpp, ngram_range=(1,1), stop_words=None, lowercase=False,
                                     binary=binary_features)
        pipeline = (Pipeline(steps=[('vect', vectorizer),('clf',clf)]) if usa
                    else Pipeline(steps=[('vect', vectorizer),('sa',sklx.SparseToArray()),('clf',clf)]))
        for key in region_keys:
            printers.printsf("{1} Performance for {0} region {1}".format(key,40*'-'), standard_out_file)
            y_train = train_labels[key]
            y_test = test_labels[key]
            pipeline.fit(train_reports, y_train)
            y_test_predicted = pipeline.predict(test_reports)
            pm = PerformanceMetrics(y_test, y_test_predicted)
            printers.printsfPerformanceMetrics(pm, standard_out_file)
            # print miss classified examples
            missed_labels = test_labels[y_test!=y_test_predicted]
            missed_pids = missed_labels['pid']
            missed_correct_class = missed_labels[key]
            missed = reduce(lambda x,y: x+'{0}\t{1}\n'.format(y[0],y[1]), zip(missed_pids, missed_correct_class),'')
            printers.printsf('{1}MISSED EXAMPLES REGION {0}{1}\n'.format(key, 40*'#'), miss_labeled_file)
            printers.printsf(missed, miss_labeled_file)
            #print confusion matrix
            cm = confusion_matrix(y_test,y_test_predicted)
            printers.printTwoClassConfusion(cm, standard_out_file)
    #------------------------------- CROSS VALIDATION ANALYSIS ALL CLASSIFIERS ----------------------------------------
    # Exhaustive grid search over several estimator families; each entry is
    # (estimator, use_sparse_array, use_binary_features, param_grid).
    if analyze_all_classifiers:
        # classifiers and parameters to consider for each region
        feature_parameters = {
            'vect__binary':(False, True),
            'vect__ngram_range': ((1,1),(1,2),(1,3)),
            'vect__analyzer' : ('word', 'char_wb')}
        nb_feature_parameters = {'vect__ngram_range': ((1,1),(1,2),(1,3)),
                                 'vect__analyzer' : ('word', 'char_wb')}
        use_spare_array = True
        use_binary_features = True
        classifiers = ({
            'logistic_regression':(linear_model.LogisticRegression(),
                                   use_spare_array,
                                   not use_binary_features,
                                   concatenate(feature_parameters, {'clf__C': [1/x for x in [0.01, 0.1, 0.3, 1.0, 3.0, 10.0]]})),
            'svm_linear':(svm.LinearSVC(tol=1e-6),
                          use_spare_array,
                          not use_binary_features,
                          concatenate(feature_parameters, {'clf__C': [1/x for x in [0.01, 0.1, 0.3, 1.0, 3.0, 10.0]]})),
            'svm_gaussian':(svm.SVC(tol=1e-6, kernel='rbf'),
                            use_spare_array,
                            not use_binary_features,
                            concatenate(feature_parameters, {'clf__gamma': [.01, .03, 0.1],
                                                             'clf__C': [1/x for x in [0.01, 0.1, 0.3, 1.0, 3.0, 10.0]]})),
            'decision_tree':(tree.DecisionTreeClassifier(criterion='entropy', random_state=RandomState(seed)),
                             not use_spare_array,
                             not use_binary_features,
                             concatenate(feature_parameters,{'clf__max_depth': [2, 3, 4, 5, 6, 7 , 8, 9, 10, 15, 20]})),
            'random_forest':(RandomForestClassifier(criterion='entropy', random_state=RandomState(seed)),
                             not use_spare_array,
                             not use_binary_features,
                             concatenate(feature_parameters,{'clf__max_depth': [2, 3, 4, 5],
                                                             'clf__n_estimators': [5, 25, 50, 100, 150, 200]})),
            'naive_bayes':(BernoulliNB(alpha=1.0, binarize=None, fit_prior=True, class_prior=None),
                           use_spare_array,
                           use_binary_features,
                           {'vect__ngram_range':((1,1),(1,2),(1,3)),
                            'vect__analyzer':('word', 'char_wb')})
        })
        #analyze model performance for classifiers X regions
        #WARNING: This may run for hours (or even days) depending on the number of classifiers
        #and parameters considered
        #for key in region_keys:
        for key in ['mastoid']:
            y_train = train_labels[key]
            y_test = test_labels[key]
            analyze_classifiers(key, classifiers, train_reports, y_train, test_reports, y_test, standard_out_file)
|
|
from __future__ import absolute_import, print_function
import random
import socket
import string
import sys
import time
import unittest2 as unittest
import warnings
import weakref
from nose import SkipTest
from kombu import Connection
from kombu import Exchange, Queue
from kombu.five import range
if sys.version_info >= (2, 5):
from hashlib import sha256 as _digest
else:
from sha import new as _digest # noqa
def _nobuf(x):
    """Return the items of *x* as a list, coercing any Python 2
    ``buffer`` objects to ``str`` so they compare/print as plain text."""
    converted = []
    for item in x:
        converted.append(str(item) if isinstance(item, buffer) else item)
    return converted
def consumeN(conn, consumer, n=1, timeout=30):
    """Drain events on *conn* until *n* messages are received by *consumer*.

    :param conn: the Connection whose events are drained.
    :param consumer: the Consumer whose callbacks are temporarily replaced.
    :param n: number of messages to wait for.
    :param timeout: seconds of idle waiting before socket.timeout is raised.
    :returns: list of the decoded message payloads, in arrival order.
    """
    messages = []

    def callback(message_data, message):
        # Collect the payload and ack so the broker can discard the message.
        messages.append(message_data)
        message.ack()

    # Temporarily install our collecting callback; restored before return.
    prev, consumer.callbacks = consumer.callbacks, [callback]
    consumer.consume()
    seconds = 0
    while True:
        try:
            conn.drain_events(timeout=1)
        except socket.timeout:
            seconds += 1
            msg = 'Received %s/%s messages. %s seconds passed.' % (
                len(messages), n, seconds)
            if seconds >= timeout:
                raise socket.timeout(msg)
            if seconds > 1:
                print(msg)
        if len(messages) >= n:
            break
    consumer.cancel()
    # BUG FIX: the original assigned to `consumer.callback` (no trailing
    # "s"), so the saved callbacks were never restored and the temporary
    # collector stayed installed on the consumer.
    consumer.callbacks = prev
    return messages
class TransportCase(unittest.TestCase):
    """Reusable integration-test base for kombu transports.

    A subclass sets :attr:`transport` (plus any credentials/options); each
    test first calls :meth:`verify_alive` and skips itself when the broker
    could not be reached, so the suite degrades gracefully on machines
    where a given broker is not running.
    """
    # Transport name passed to kombu.Connection; None disables the case.
    transport = None
    # Queue/exchange name prefix so concurrent runs do not collide.
    prefix = None
    sep = '.'
    # Optional broker credentials (applied in get_connection).
    userid = None
    password = None
    # Max polling iterations in test_basic_get before giving up.
    event_loop_max = 100
    connection_options = {}
    # Some transports do not guarantee FIFO delivery; True silences the warning.
    suppress_disorder_warning = False
    # Whether queue_purge returns exact per-call message counts.
    reliable_purge = True
    connected = False
    skip_test_reason = None
    # Upper bound (bytes) for the large-message test; None means uncapped.
    message_size_limit = None
    def before_connect(self):
        """Hook called before connecting; raise SkipTest to skip the case."""
        pass
    def after_connect(self, connection):
        """Hook called with the freshly established connection."""
        pass
    def setUp(self):
        # Only attempt a connection when a transport is configured;
        # failures are recorded (not raised) so tests can self-skip.
        if self.transport:
            try:
                self.before_connect()
            except SkipTest as exc:
                self.skip_test_reason = str(exc)
            else:
                self.do_connect()
            self.exchange = Exchange(self.prefix, 'direct')
            self.queue = Queue(self.prefix, self.exchange, self.prefix)
    def purge(self, names):
        """Repeatedly purge each queue in *names*; return total removed."""
        chan = self.connection.channel()
        total = 0
        for queue in names:
            while 1:
                # ensure the queue is completely empty
                purged = chan.queue_purge(queue=queue)
                if not purged:
                    break
                total += purged
        chan.close()
        return total
    def get_connection(self, **options):
        """Build a Connection for this transport, injecting credentials."""
        if self.userid:
            options.setdefault('userid', self.userid)
        if self.password:
            options.setdefault('password', self.password)
        return Connection(transport=self.transport, **options)
    def do_connect(self):
        """Connect to the broker; record the failure reason instead of raising."""
        self.connection = self.get_connection(**self.connection_options)
        try:
            self.connection.connect()
            self.after_connect(self.connection)
        except self.connection.connection_errors:
            self.skip_test_reason = '%s transport cannot connect' % (
                self.transport, )
        else:
            self.connected = True
    def verify_alive(self):
        """Skip the current test when the broker is unreachable.

        Returns True when the connection is usable; returns None when no
        transport is configured (callers treat that as "do nothing").
        """
        if self.transport:
            if not self.connected:
                raise SkipTest(self.skip_test_reason)
            return True
    def purge_consumer(self, consumer):
        """Purge every queue the *consumer* is bound to."""
        return self.purge([queue.name for queue in consumer.queues])
    def test_produce__consume(self):
        """Round-trip a single message through the default queue."""
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        consumer = chan1.Consumer(self.queue)
        self.purge_consumer(consumer)
        producer = chan1.Producer(self.exchange)
        producer.publish({'foo': 'bar'}, routing_key=self.prefix)
        message = consumeN(self.connection, consumer)
        self.assertDictEqual(message[0], {'foo': 'bar'})
        chan1.close()
        self.purge([self.queue.name])
    def test_purge(self):
        """Publish 10 messages and verify purge accounting."""
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        consumer = chan1.Consumer(self.queue)
        self.purge_consumer(consumer)
        producer = chan1.Producer(self.exchange)
        for i in range(10):
            producer.publish({'foo': 'bar'}, routing_key=self.prefix)
        if self.reliable_purge:
            self.assertEqual(consumer.purge(), 10)
            self.assertEqual(consumer.purge(), 0)
        else:
            # Transport only gives approximate counts: purge until at
            # least 9 of the 10 messages are confirmed gone.
            purged = 0
            while purged < 9:
                purged += self.purge_consumer(consumer)
    def _digest(self, data):
        # Delegates to the module-level _digest (sha256, or sha on <2.5).
        return _digest(data).hexdigest()
    def test_produce__consume_large_messages(
            self, bytes=1048576, n=10,
            charset=string.punctuation + string.letters + string.digits):
        """Round-trip *n* large random payloads and verify content/order."""
        if not self.verify_alive():
            return
        # Clamp payload size to the transport's limit when one is set.
        bytes = min(x for x in [bytes, self.message_size_limit] if x)
        messages = [''.join(random.choice(charset)
                            for j in range(bytes)) + '--%s' % n
                    for i in range(n)]
        digests = []
        chan1 = self.connection.channel()
        consumer = chan1.Consumer(self.queue)
        self.purge_consumer(consumer)
        producer = chan1.Producer(self.exchange)
        for i, message in enumerate(messages):
            producer.publish({'text': message,
                              'i': i}, routing_key=self.prefix)
            digests.append(self._digest(message))
        received = [(msg['i'], msg['text'])
                    for msg in consumeN(self.connection, consumer, n)]
        self.assertEqual(len(received), n)
        ordering = [i for i, _ in received]
        # Out-of-order delivery is only a warning, not a failure.
        if ordering != list(range(n)) and not self.suppress_disorder_warning:
            warnings.warn(
                '%s did not deliver messages in FIFO order: %r' % (
                    self.transport, ordering))
        for i, text in received:
            if text != messages[i]:
                raise AssertionError('%i: %r is not %r' % (
                    i, text[-100:], messages[i][-100:]))
            self.assertEqual(self._digest(text), digests[i])
        chan1.close()
        self.purge([self.queue.name])
    def P(self, rest):
        """Prefix helper: join *rest* onto self.prefix with self.sep."""
        return '%s%s%s' % (self.prefix, self.sep, rest)
    def test_produce__consume_multiple(self):
        """Publish to three queues and consume all three from one consumer."""
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        producer = chan1.Producer(self.exchange)
        b1 = Queue(self.P('b1'), self.exchange, 'b1')(chan1)
        b2 = Queue(self.P('b2'), self.exchange, 'b2')(chan1)
        b3 = Queue(self.P('b3'), self.exchange, 'b3')(chan1)
        [q.declare() for q in (b1, b2, b3)]
        self.purge([b1.name, b2.name, b3.name])
        producer.publish('b1', routing_key='b1')
        producer.publish('b2', routing_key='b2')
        producer.publish('b3', routing_key='b3')
        chan1.close()
        chan2 = self.connection.channel()
        consumer = chan2.Consumer([b1, b2, b3])
        messages = consumeN(self.connection, consumer, 3)
        self.assertItemsEqual(_nobuf(messages), ['b1', 'b2', 'b3'])
        chan2.close()
        self.purge([self.P('b1'), self.P('b2'), self.P('b3')])
    def test_timeout(self):
        """drain_events on an empty queue must raise socket.timeout."""
        if not self.verify_alive():
            return
        chan = self.connection.channel()
        self.purge([self.queue.name])
        consumer = chan.Consumer(self.queue)
        self.assertRaises(
            socket.timeout, self.connection.drain_events, timeout=0.3,
        )
        consumer.cancel()
        chan.close()
    def test_basic_get(self):
        """Synchronous get: poll until the published message appears."""
        if not self.verify_alive():
            return
        chan1 = self.connection.channel()
        producer = chan1.Producer(self.exchange)
        chan2 = self.connection.channel()
        queue = Queue(self.P('basic_get'), self.exchange, 'basic_get')
        queue = queue(chan2)
        queue.declare()
        producer.publish({'basic.get': 'this'}, routing_key='basic_get')
        chan1.close()
        for i in range(self.event_loop_max):
            m = queue.get()
            if m:
                break
            time.sleep(0.1)
        self.assertEqual(m.payload, {'basic.get': 'this'})
        self.purge([queue.name])
        chan2.close()
    def test_cyclic_reference_transport(self):
        """A closed connection's transport must be garbage-collectable."""
        if not self.verify_alive():
            return
        def _createref():
            conn = self.get_connection()
            conn.transport
            conn.close()
            return weakref.ref(conn)
        self.assertIsNone(_createref()())
    def test_cyclic_reference_connection(self):
        """A closed connection must not keep itself alive via cycles."""
        if not self.verify_alive():
            return
        def _createref():
            conn = self.get_connection()
            conn.connect()
            conn.close()
            return weakref.ref(conn)
        self.assertIsNone(_createref()())
    def test_cyclic_reference_channel(self):
        """Closed channels must not keep themselves alive via cycles."""
        if not self.verify_alive():
            return
        def _createref():
            conn = self.get_connection()
            conn.connect()
            chanrefs = []
            try:
                for i in range(100):
                    channel = conn.channel()
                    chanrefs.append(weakref.ref(channel))
                    channel.close()
            finally:
                conn.close()
            return chanrefs
        for chanref in _createref():
            self.assertIsNone(chanref())
    def tearDown(self):
        if self.transport and self.connected:
            self.connection.close()
|
|
# -*- coding: utf-8 -*-
#FracFocus Scraper
import pprint
from datetime import datetime, date, timedelta
from urlparse import urlsplit, urljoin
from urlparse import parse_qs
from cStringIO import StringIO
from itertools import izip_longest
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import FormRequest
from scrapy.http import Request, Response, TextResponse
from scrapy.shell import inspect_response
from scrapy import log
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join
from BeautifulSoup import BeautifulSoup
from nrc.NrcBot import NrcBot
from nrc.items import FracFocusScrape
from nrc.database import NrcDatabase
#nextpage:
#'ctl00$MainContent$DocumentList1$GridView1$ctl01$ButtonNext': 'Next Page',
#Sort by Job Date:
#ctl00$MainContent$ScriptManager1:ctl00$MainContent$UpdatePanel2|ctl00$MainContent$DocumentList1$GridView1
#__EVENTTARGET:ctl00$MainContent$DocumentList1$GridView1
#__EVENTARGUMENT:Sort$JobDate
class FracFocusScraper(NrcBot):
    """Scrapy spider that scrapes well records from the FracFocus
    disclosure site's ASP.NET WebForms search UI.

    The site is driven by ASP.NET partial postbacks (UpdatePanel/
    ScriptManager), so every navigation step re-posts hidden form state
    (__VIEWSTATE, __EVENTVALIDATION) extracted from the previous partial
    response.  Two entry modes: search by a single API number, or walk
    each state's results sorted by job date.
    """
    name = "FracFocusScraper"
    allowed_domains = None
    # allowed_domains = ["hydraulicfracturingdisclosure.org"]
    # base_url = "http://www.hydraulicfracturingdisclosure.org/fracfocusfind/Default.aspx"
    base_url = "http://www.fracfocusdata.org/fracfocusfind/Default.aspx"
    # job_item_limit = 7 # maximum total items to process in one job execution
    # Canned form fields for requesting the county list of a state
    # (ASP.NET partial-postback parameters).
    get_counties_form_data = {
        'ctl00$MainContent$ScriptManager1': 'ctl00$MainContent$DocumentFilter1$UpdatePanel1|ctl00$MainContent$DocumentFilter1$cboStateList',
        '__EVENTTARGET': 'ctl00$MainContent$DocumentFilter1$cboStateList',
        '__ASYNCPOST': 'true',
        'ctl00$MainContent$DocumentFilter1$tbAPINo' : '__-___-_____',
        '__EVENTARGUMENT': '',
        '__LASTFOCUS': '',
        'ctl00$MainContent$DocumentFilter1$cboCountyList': 'Choose a State First'
    }
    def __init__ (self, **kwargs):
        """Accept an optional `api` kwarg (single-well mode); a `state`
        kwarg limits the job to one task item."""
        self.api = kwargs.get('api',None)
        if kwargs.has_key('state'):
            FracFocusScraper.job_item_limit = 1
        NrcBot.__init__(self, **kwargs)
    def process_items (self):
        """Entry point: single-API mode when self.api is set, otherwise
        defer to NrcBot's task-driven iteration."""
        if self.api:
            request = Request(self.base_url, callback=self.search_by_api, dont_filter=True, errback=self.error_callback)
            request.meta['api'] = self.api
            # One cookie jar per search so session state does not leak.
            request.meta['cookiejar'] = 'FracFocusScraper:%s' % self.api
            self.log('** Scraping API %s' % self.api, log.INFO)
            yield request
        else:
            for item in NrcBot.process_items (self):
                yield item
    def process_item (self, task):
        """Kick off a per-state search for one task row, then mark it done."""
        request = Request(self.base_url, callback=self.search_by_state, dont_filter=True, errback=self.error_callback)
        request.meta['state'] = task['state']
        request.meta['cookiejar'] = 'FracFocusScraper:%s' % task['state']
        request.meta['task_id'] = task['task_id']
        self.log('** Scraping State %s' % task['state'], log.INFO)
        yield request
        self.log('** Marking item %s complete' % task['task_id'], log.INFO)
        self.item_completed (task['task_id'])
    def search_by_state (self, response):
        """Post the search form filtered by state, then sort by job date."""
        search_params = {
            'ctl00$MainContent$DocumentFilter1$cboStateList': response.meta['state'],
        }
        yield self.create_search_request(response, search_params=search_params, callback=self.sort_by_job_date)
    def search_by_api (self, response):
        """Post the search form filtered by a single API well number."""
        search_params = {
            'ctl00$MainContent$DocumentFilter1$tbAPINo':response.meta['api'],
        }
        yield self.create_search_request(response, search_params=search_params, callback=self.scrape_and_next)
    def create_search_request (self, response, search_params, callback):
        """Build the initial SEARCH postback, merging *search_params* into
        the fixed ScriptManager fields and the anti-bot client state."""
        formdata={
            'ctl00$MainContent$ScriptManager1': 'ctl00$MainContent$DocumentFilter1$UpdatePanel1|ctl00$MainContent$DocumentFilter1$btnSearch',
            'ctl00$MainContent$DocumentFilter1$btnSearch' : 'SEARCH',
            '__ASYNCPOST': 'true',
            'ctl00$MainContent$NoBot1$NoBot1_NoBotExtender_ClientState': self.extract_NoBot_ClientState (response)
        }
        formdata.update(search_params)
        request = FormRequest.from_response(response,
                                            formdata=formdata,
                                            dont_click=True,
                                            callback=callback,
                                            errback=self.error_callback)
        # Keep the full page around: partial postbacks only return the
        # changed panels, which update_response() splices back in later.
        request.meta['full_response'] = response
        request.meta['num_pages'] = 2 # not used
        request.meta['cookiejar'] = response.meta['cookiejar']
        self.log('%s Getting intial page' % response.meta['cookiejar'], log.INFO)
        # self._print_form_request(request)
        return request
    def sort_by_job_date(self, response):
        """Re-post the grid with Sort$JobDate so newest jobs come first."""
        meta = response.meta
        response_parts = self.response2dict (response)
        response = self.update_response(response.meta['full_response'], response_parts)
        formdata = {
            'ctl00$MainContent$ScriptManager1':'ctl00$MainContent$UpdatePanel2|ctl00$MainContent$DocumentList1$GridView1',
            '__EVENTTARGET':'ctl00$MainContent$DocumentList1$GridView1',
            '__EVENTARGUMENT':'Sort$JobDate',
            }
        request = self.create_request(response,response_parts, formdata, self.scrape_and_next)
        if request:
            request.meta['num_pages'] = meta['num_pages']
            request.meta['cookiejar'] = response.meta['cookiejar']
            self.log('%s Sorting by Job Date' % response.meta['cookiejar'], log.INFO)
            yield request
    def scrape_and_next(self, response):
        """Scrape the current results page, then follow the Next button
        while one is present (recursing back into this callback)."""
        response_parts = self.response2dict (response)
        num_pages = response.meta['num_pages']
        response = self.update_response(response.meta['full_response'], response_parts)
        # scrape page and goto next
        for item in self.scrape_content_items(response):
            yield item
        # Check to see if there is a next page button present
        if response.body.find ('ctl00$MainContent$DocumentList1$GridView1$ctl01$ButtonNext') >= 0 and num_pages > 1:
            # navigate to next page
            formdata = {
                'ctl00$MainContent$ScriptManager1': 'ctl00$MainContent$UpdatePanel2|ctl00$MainContent$DocumentList1$GridView1$ctl01$ButtonNext',
                'ctl00$MainContent$DocumentList1$GridView1$ctl01$ButtonNext': 'Next Page',
                }
            request = self.create_request(response,response_parts, formdata, self.scrape_and_next)
            # if num_pages > 1:
            #     request.meta['num_pages'] = num_pages - 1
            #     yield request
            if request:
                request.meta['num_pages'] = num_pages
                request.meta['cookiejar'] = response.meta['cookiejar']
                yield request
        # print response_parts['updatePanel.MainContent_UpdatePanel1']
    def response2dict (self, response):
        """Parse an ASP.NET partial-postback body.

        The body is pipe-delimited 4-tuples (length|type|id|content);
        returns a dict mapping 'type|id' -> content.
        """
        parsed = {}
        params = izip_longest(*[iter(response.body.split ('|'))]*4, fillvalue=None)
        for p in params:
            if p[3]:
                parsed ['%s|%s'%(p[1].strip(),p[2].strip())] = p[3].strip()
        return parsed
    # integrate new div content from the response into the original page
    def update_response (self, response, response_parts):
        """Splice each 'updatePanel' fragment into the matching div of the
        cached full page, returning a rebuilt Response."""
        soup = BeautifulSoup(response.body)
        # print response.body
        update_divs = []
        for key,content in response_parts.items():
            # print "_%s_" % key
            action,div_id = key.split('|')
            if 'updatePanel' == action:
                div=soup.find(id=div_id)
                if div:
                    update_divs.append ((div, content))
        for update in update_divs:
            update[0].clear ()
            update[0].append(BeautifulSoup(update[1]))
        return response.replace (body=str(soup))
    # create a new form request
    def create_request (self, response, response_parts, formdata, callback):
        """Build a follow-up postback carrying forward the hidden
        __VIEWSTATE/__EVENTVALIDATION state; returns None on failure."""
        fd = formdata
        fd['__VIEWSTATE'] = response_parts.get('hiddenField|__VIEWSTATE')
        fd['__EVENTVALIDATION'] = response_parts.get('hiddenField|__EVENTVALIDATION')
        fd['__ASYNCPOST'] = 'true'
        # defensive check for 'None' entries
        formdata = dict([(k,'') if v is None else (k,v)
                         for k,v in formdata.items()])
        # create new form request; log exceptions
        try:
            request = FormRequest.from_response(
                response=response,
                formdata=formdata,
                dont_click=True,
                callback=callback,
                errback=self.error_callback)
        except Exception as e:
            self.log('FracFocusScraper.create_request: %s\n\tformdata:%s'
                     % (e, formdata), log.ERROR)
            return None
        request.meta['full_response'] = response
        # self._print_form_request(request)
        return request
    # generator that will parse items from a content page and yield items
    def scrape_content_items (self, response):
        """Yield FracFocusScrape items from the result grid, skipping rows
        already present in the database; updates crawler stats."""
        hxs = HtmlXPathSelector(response)
        stats = self.crawler.stats
        page_num = hxs.select ('//*[@id="MainContent_DocumentList1_GridView1_PageCurrent"]/@value').extract()
        if page_num:
            page_num = page_num[0]
            self.log('%s Scraping page %s' % (response.meta['cookiejar'], page_num), log.INFO)
        else:
            self.log('%s No page number found' % (response.meta['cookiejar']), log.WARNING)
        stats.inc_value ('_pages', spider=self)
        reports = hxs.select ('//table[@id="MainContent_DocumentList1_GridView1"]//tr')
        for report in reports:
            l = XPathItemLoader(FracFocusScrape(), report)
            # Truncate state/county to the 20-char database column width.
            l.state_in = lambda slist: [s[:20] for s in slist]
            l.county_in = lambda slist: [s[:20] for s in slist]
            for name, params in FracFocusScrape.fields.items():
                l.add_xpath(name, params['xpath'])
            item = l.load_item()
            if item.get('api'):
                if self.db.itemExists(item):
                    stats.inc_value ('_existing_count', spider=self)
                else:
                    stats.inc_value ('_new_count', spider=self)
                    # print item['operator']
                    yield item
        if not stats.get_value('_existing_count') and not stats.get_value('_new_count'):
            self.log('%s No records found' % (response.meta['cookiejar']), log.WARNING)
    def extract_NoBot_ClientState (self, response):
        """Solve the NoBot challenge: the expected client state is the
        product of the hidden sample panel's height and width."""
        match = re.search(u'<div id="MainContent_NoBot1_NoBotSamplePanel" style="height:(\d+)px;width:(\d+)px;visibility:hidden;position:absolute;">', response.body)
        if match:
            return str(int(match.group(1)) * int(match.group(2)))
        return None
    def _print_form_data (self, response):
        """Debug helper: dump form controls of *response*.
        NOTE(review): `ParseFile` is not imported in this file (presumably
        mechanize/ClientForm.ParseFile) -- this helper would raise
        NameError if called; confirm before use."""
        forms = ParseFile(StringIO(response.body), response.url,
                          encoding=getattr(response, 'encoding', 'utf-8'), backwards_compat=False)
        for f in forms:
            for c in f.controls:
                print "%s: %s" % (c.name, c.value[:100])
    def _print_response_parts (self, response_parts):
        """Debug helper: dump parsed partial-postback parts (truncated)."""
        print "########"
        for key, content in response_parts.items():
            print ("%s: %s") % (key, content[:100])
    def _print_form_request (self, form_request):
        """Debug helper: dump multipart form fields of a request (truncated)."""
        print "********"
        headers = dict(re.findall('Content-Disposition: form-data; name="([^"]*)"\r\n\r\n(.*)', form_request.body))
        for key, content in headers.items():
            print ("%s: %s") % (key, content[:100])
    def item_stored(self, item, id):
        """Post-store hook: queue a FracFocusReport follow-up task."""
        self.db.setBotTaskStatus(id, 'FracFocusReport', self.status_new)
        pass
|
|
#! coding:utf-8
# python2 requires: pip install futures
import atexit
from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor,
as_completed)
from concurrent.futures._base import (CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED, PENDING, RUNNING,
CancelledError, Error, Executor, Future,
TimeoutError)
from concurrent.futures.thread import _threads_queues, _WorkItem
from functools import wraps
from logging import getLogger
from threading import Thread, Timer
from time import sleep
from time import time as time_time
from weakref import WeakSet
from requests import PreparedRequest, RequestException, Session
from requests.adapters import HTTPAdapter
from urllib3 import disable_warnings
from .configs import Config
from .exceptions import FailureException, ValidationError
from .frequency_controller.sync_tools import Frequency
from .versions import PY2, PY3
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
if PY3:
from concurrent.futures.process import BrokenProcessPool
__all__ = [
"Pool", "ProcessPool", "NewFuture", "Async", "threads",
"get_results_generator", "run_after_async", "tPool", "get", "post",
"options", "delete", "put", "head", "patch", "request", "disable_warnings",
"Workshop"
]
logger = getLogger("torequests")
def _abandon_all_tasks():
    """Forget every worker-thread queue so interpreter shutdown does not
    join outstanding pool threads; without this the main thread would
    block at exit until all queued work finished."""
    _threads_queues.clear()
def ensure_waiting_for_threads():
    """atexit hook: honour Config.wait_futures_before_exiting by dropping
    the worker-thread queues at interpreter shutdown."""
    if Config.wait_futures_before_exiting:
        _abandon_all_tasks()


atexit.register(ensure_waiting_for_threads)
class NewExecutorPoolMixin(Executor):
    """Shared helpers for pools whose submit() returns NewFuture objects."""

    def async_func(self, function):
        """Decorator: make *function* return a NewFuture via self.submit."""

        @wraps(function)
        def wrapped(*args, **kwargs):
            return self.submit(function, *args, **kwargs)

        return wrapped

    def close(self, wait=True):
        """Alias of shutdown()."""
        return self.shutdown(wait=wait)

    def _get_cpu_count(self):
        """Best-effort CPU count; logs the error and returns None on failure."""
        try:
            from multiprocessing import cpu_count

            return cpu_count()
        except Exception as e:
            logger.error("_get_cpu_count failed for %s" % e)

    @property
    def x(self):
        """Block until every tracked future is done; return their results."""
        return self.wait_futures_done(list(self._all_futures))

    def wait_futures_done(self, tasks=None):
        """Collect future.x values in completion order (not submit order),
        stopping early when self._timeout elapses."""
        pending = tasks or self._all_futures
        finished = []
        try:
            for future in as_completed(pending, timeout=self._timeout):
                finished.append(future.x)
        except TimeoutError:
            pass
        return finished
class Pool(ThreadPoolExecutor, NewExecutorPoolMixin):
    """Let ThreadPoolExecutor use NewFuture instead of origin concurrent.futures.Future.
    WARNING: NewFutures in Pool will not block main thread without NewFuture.x.
    Basic Usage::
        from torequests.main import Pool
        import time
        pool = Pool()
        def use_submit(i):
            time.sleep(i)
            result = 'use_submit: %s' % i
            print(result)
            return result
        @pool.async_func
        def use_decorator(i):
            time.sleep(i)
            result = 'use_decorator: %s' % i
            print(result)
            return result
        tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)
                ] + [use_decorator(i) for i in (2, 1, 0)]
        # pool.x can be ignore
        pool.x
        results = [i.x for i in tasks]
        print(results)
        # use_submit: 0
        # use_decorator: 0
        # use_submit: 1
        # use_decorator: 1
        # use_submit: 2
        # use_decorator: 2
        # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
    """
    def __init__(self,
                 n=None,
                 timeout=None,
                 default_callback=None,
                 catch_exception=True,
                 *args,
                 **kwargs):
        # Accept either `n` or the standard `max_workers` keyword.
        n = n or kwargs.pop("max_workers", None)
        if PY2 and n is None:
            # python2 n!=None
            n = (self._get_cpu_count() or 1) * 5
        super(Pool, self).__init__(n, *args, **kwargs)
        #: set the default timeout
        self._timeout = timeout
        #: set the default_callback if not set single task's callback
        self.default_callback = default_callback
        #: WeakSet of _all_futures for self.x
        self._all_futures = WeakSet()
        #: catch_exception=True will not raise exceptions, return object FailureException(exception)
        self.catch_exception = catch_exception
    @property
    def all_tasks(self):
        """Keep the same api for dummy, return self._all_futures actually"""
        return self._all_futures
    def submit(self, func, *args, **kwargs):
        """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
        # NOTE(review): re-implements ThreadPoolExecutor.submit against the
        # private attrs (_shutdown_lock, _shutdown, _work_queue,
        # _adjust_thread_count) so the queued future is a NewFuture;
        # sensitive to stdlib internals changing across Python versions.
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError("cannot schedule new futures after shutdown")
            # Per-task callback overrides the pool-wide default_callback.
            callback = kwargs.pop("callback", self.default_callback)
            future = NewFuture(
                self._timeout,
                args,
                kwargs,
                callback=callback,
                catch_exception=self.catch_exception,
            )
            w = _WorkItem(future, func, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            # Track weakly so finished futures can be garbage-collected.
            self._all_futures.add(future)
            return future
class ProcessPool(ProcessPoolExecutor, NewExecutorPoolMixin):
    """Simple ProcessPool covered ProcessPoolExecutor.
    ::
        from torequests.main import ProcessPool
        import time
        pool = ProcessPool()
        def use_submit(i):
            time.sleep(i)
            result = 'use_submit: %s' % i
            print(result)
            return result
        def main():
            tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)]
            # pool.x can be ignore
            pool.x
            results = [i.x for i in tasks]
            print(results)
        if __name__ == '__main__':
            main()
        # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0']
        # use_submit: 0
        # use_submit: 1
        # use_submit: 2
    """
    def __init__(self,
                 n=None,
                 timeout=None,
                 default_callback=None,
                 catch_exception=True,
                 *args,
                 **kwargs):
        # Accept either `n` or the standard `max_workers` keyword.
        n = n or kwargs.pop("max_workers", None)
        if PY2 and n is None:
            # python2 n!=None
            n = self._get_cpu_count() or 1
        super(ProcessPool, self).__init__(n, *args, **kwargs)
        # Default timeout applied to every submitted NewFuture.
        self._timeout = timeout
        # Pool-wide callback used when a task sets none of its own.
        self.default_callback = default_callback
        # WeakSet so finished futures can be garbage-collected.
        self._all_futures = WeakSet()
        # True: wrap raised exceptions in FailureException instead of raising.
        self.catch_exception = catch_exception
    def submit(self, func, *args, **kwargs):
        """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
        # NOTE(review): re-implements ProcessPoolExecutor.submit against the
        # private attrs (_broken, _pending_work_items, _work_ids,
        # _queue_count, _result_queue, _start_queue_management_thread) so a
        # NewFuture is enqueued; sensitive to stdlib internals changing
        # across Python versions.
        with self._shutdown_lock:
            if PY3 and self._broken:
                raise BrokenProcessPool(
                    "A child process terminated "
                    "abruptly, the process pool is not usable anymore")
            if self._shutdown_thread:
                raise RuntimeError("cannot schedule new futures after shutdown")
            callback = kwargs.pop("callback", self.default_callback)
            future = NewFuture(
                self._timeout,
                args,
                kwargs,
                callback=callback,
                catch_exception=self.catch_exception,
            )
            w = _WorkItem(future, func, args, kwargs)
            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)
            self._start_queue_management_thread()
            if PY2:
                self._adjust_process_count()
            self._all_futures.add(future)
            return future
    def async_func(self, *args):
        """Decorator mode not support for ProcessPool for _pickle.PicklingError."""
        raise NotImplementedError
class NewFuture(Future):
    """Add `.x` attribute and timeout args for original Future class
    WARNING: Future thread will not stop running until function finished or pid killed.
    :attr cx: blocking until the task finish and return the callback_result.
    :attr x: blocking until the task finish and return the value as `coro` returned.
    :attr task_start_time: timestamp when the task start up.
    :attr task_end_time: timestamp when the task end up.
    :attr task_cost_time: seconds of task costs.
    :param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
    """
    if PY3:
        # Python 3 only: make the future awaitable in asyncio code.
        from ._py3_patch import _new_future_await
        __await__ = _new_future_await
    def __init__(self,
                 timeout=None,
                 args=None,
                 kwargs=None,
                 callback=None,
                 catch_exception=True):
        super(NewFuture, self).__init__()
        self._timeout = timeout
        self._args = args or ()
        self._kwargs = kwargs or {}
        self._callback_result = None
        self.catch_exception = catch_exception
        self.task_start_time = time_time()
        self.task_end_time = 0
        self.task_cost_time = 0
        # Callbacks passed by the user, as opposed to internal ones added
        # via add_done_callback by other machinery.
        self._user_callbacks = set()
        if callback:
            if not isinstance(callback, (list, tuple)):
                callback = [callback]
            for fn in callback:
                self.add_done_callback(fn)
                self._user_callbacks.add(fn)
    def __getattr__(self, name):
        # Unknown attributes are proxied to the (blocking) result, so a
        # NewFuture can be used as if it were the value it produces.
        return getattr(self.x, name)
    def _invoke_callbacks(self):
        """Record the task_end_time & task_cost_time, set result for self._callback_result."""
        self.task_end_time = time_time()
        self.task_cost_time = self.task_end_time - self.task_start_time
        with self._condition:
            for callback in self._done_callbacks:
                try:
                    result = callback(self)
                    # Only user-supplied callbacks feed callback_result.
                    if callback in self._user_callbacks:
                        self._callback_result = result
                except Exception as e:
                    logger.error("exception calling callback for %s" % e)
            # Wake any thread blocked in `.x` waiting on this condition.
            self._condition.notify_all()
    @property
    def _callbacks(self):
        """Keep same api for NewTask."""
        return self._done_callbacks
    @property
    def cx(self):
        """Block the main thead until future finish, return the future.callback_result."""
        return self.callback_result
    @property
    def callback_result(self):
        """Block the main thead until future finish, return the future.callback_result."""
        if self._state in [PENDING, RUNNING]:
            self.x
        if self._user_callbacks:
            return self._callback_result
        else:
            return self.x
    @property
    def x(self):
        """Block the main thead until future finish, return the future.result()."""
        with self._condition:
            result = None
            if not self.done():
                # Wait up to self._timeout for _invoke_callbacks to notify.
                self._condition.wait(self._timeout)
            if not self.done():
                # timeout
                # NOTE(review): marks the future itself failed on timeout;
                # the worker thread keeps running (see class WARNING).
                self.set_exception(TimeoutError())
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # cancelled
                result = CancelledError()
            elif self._state == FINISHED:
                # finished
                if self._exception:
                    result = self._exception
                else:
                    result = self._result
            if isinstance(result, Exception):
                # catch_exception wraps instead of raising, so callers can
                # inspect FailureException(result) without try/except.
                if self.catch_exception:
                    result = FailureException(result)
                    return result
                else:
                    raise result
            return result
def Async(f, n=None, timeout=None):
    """Wrap a callable so every call is submitted to a thread pool.

    Equivalent to ``threads(n, timeout)(f)``: calling the wrapped function
    schedules ``f`` on a ``Pool`` and immediately returns a ``NewFuture``
    whose ``.x`` blocks for the result.

    :param f: the callable to make asynchronous.
    :param n: thread pool size (``None`` uses the Pool default).
    :param timeout: per-task timeout forwarded to the pool.

    Example::

        new_use_submit = Async(use_submit)
        futures = [new_use_submit(i) for i in (2, 1, 0)]
        results = [fut.x for fut in futures]
    """
    decorate = threads(n=n, timeout=timeout)
    return decorate(f)
def threads(n=None, timeout=None):
    """Decorator factory: run the decorated function inside a ``Pool`` of
    ``n`` worker threads and return ``NewFuture`` objects instead of raw
    results. Same semantics as :func:`Async`.
    """
    pool = Pool(n, timeout)
    return pool.async_func
def get_results_generator(future_list, timeout=None, sort_by_completed=False):
    """Yield ``future.x`` for every future, optionally in completion order.

    :param future_list: iterable of futures exposing an ``.x`` attribute.
    :param timeout: overall timeout forwarded to ``as_completed`` when
        ``sort_by_completed`` is true.
    :param sort_by_completed: yield results as tasks complete instead of
        in the given order.

    Stops silently (generator just ends) on ``TimeoutError``.
    """
    # NOTE: written without `yield from` to stay Python 2 compatible.
    if sort_by_completed:
        ordered = as_completed(future_list, timeout=timeout)
    else:
        ordered = iter(future_list)
    try:
        for fut in ordered:
            yield fut.x
    except TimeoutError:
        return
def run_after_async(seconds, func, *args, **kwargs):
    """Schedule ``func(*args, **kwargs)`` to run after ``seconds`` without
    blocking the caller; returns the started daemon :class:`threading.Timer`
    so it can be cancelled.
    """
    timer = Timer(seconds, func, args=args, kwargs=kwargs)
    # Daemonize so a pending timer never keeps the interpreter alive.
    timer.daemon = True
    timer.start()
    return timer
class FailedRequest(PreparedRequest):
    """A :class:`PreparedRequest` reconstructed from the kwargs of a failed
    ``tPool`` request, kept so the call can be inspected or retried later.
    """

    # Only these kwargs are valid for PreparedRequest.prepare().
    allow_keys = {
        "method",
        "url",
        "headers",
        "files",
        "data",
        "params",
        "auth",
        "cookies",
        "hooks",
        "json",
    }

    def __init__(self, **kwargs):
        # Keep the raw kwargs so tPool.request can replay the exact call.
        self.kwargs = kwargs
        prepare_kwargs = {
            key: kwargs[key] for key in kwargs if key in self.allow_keys
        }
        super(FailedRequest, self).__init__()
        self.prepare(**prepare_kwargs)
class tPool(object):
    """Async wrapper for requests.
    :param n: thread pool size for concurrent limit.
    :param interval: time.sleep(interval) after each task finished.
    :param timeout: timeout for each task.result(timeout). But it will not shutdown the raw function.
    :param session: individually given an available requests.Session instance if necessary.
    :param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
    :param default_callback: default_callback for tasks which not set callback param.
    Usage::
        from torequests.main import tPool
        from torequests.logs import print_info
        trequests = tPool(2, 1)
        test_url = 'http://p.3.cn'
        ss = [
            trequests.get(
                test_url,
                retry=2,
                callback=lambda x: (len(x.content), print_info(len(x.content))))
            for i in range(3)
        ]
        # or [i.x for i in ss]
        trequests.x
        ss = [i.cx for i in ss]
        print_info(ss)
        # [2020-02-11 11:36:33] temp_code.py(10): 612
        # [2020-02-11 11:36:33] temp_code.py(10): 612
        # [2020-02-11 11:36:34] temp_code.py(10): 612
        # [2020-02-11 11:36:34] temp_code.py(16): [(612, None), (612, None), (612, None)]
    """
    def __init__(
            self,
            n=None,
            interval=0,
            timeout=None,
            session=None,
            catch_exception=True,
            default_callback=None,
            retry_exceptions=(RequestException, Error),
    ):
        self.pool = Pool(n, timeout)
        self.session = session if session else Session()
        self.n = n or 10
        # adapt the concurrent limit: size the connection pool to match
        # the thread pool so workers never starve for connections.
        custom_adapter = HTTPAdapter(pool_connections=self.n,
                                     pool_maxsize=self.n)
        self.session.mount("http://", custom_adapter)
        self.session.mount("https://", custom_adapter)
        self.interval = interval
        self.catch_exception = catch_exception
        self.default_callback = default_callback
        # Frequency(n, interval) is used as a context manager in _request —
        # presumably a rate limiter; confirm against the Frequency class.
        self.frequency = Frequency(self.n, self.interval)
        # Only these exception types trigger a retry in _request.
        self.retry_exceptions = retry_exceptions
    @property
    def all_tasks(self):
        """Return self.pool._all_futures"""
        return self.pool._all_futures
    @property
    def x(self):
        """Return self.pool.x"""
        return self.pool.x
    def close(self, wait=False):
        """Close session, shutdown pool."""
        self.session.close()
        self.pool.shutdown(wait=wait)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def __del__(self):
        # NOTE(review): assumes __init__ completed; if construction failed
        # early, self.session/self.pool may not exist — confirm acceptable.
        self.close()
    def _request(self,
                 method,
                 url,
                 retry=0,
                 response_validator=None,
                 retry_interval=0,
                 **kwargs):
        """Synchronous worker body: perform the request with retry support.

        Runs inside a pool thread (see request()). Returns the Response,
        or a FailureException (or raises it when catch_exception is False)
        after all retries are exhausted.
        """
        if not url:
            raise ValueError("url should not be null, but given: %s" % url)
        # Fold method/url into kwargs so session.request(**kwargs) works and
        # FailedRequest can replay the same call later.
        kwargs["url"] = url
        kwargs["method"] = method
        # non-official request args
        referer_info = kwargs.pop("referer_info", None)
        encoding = kwargs.pop("encoding", None)
        error = Exception()
        for _ in range(retry + 1):
            with self.frequency:
                try:
                    resp = self.session.request(**kwargs)
                    if encoding:
                        resp.encoding = encoding
                    logger.debug("%s done, %s" % (url, kwargs))
                    resp.referer_info = referer_info
                    # NOTE(review): ValidationError is retried only if it is a
                    # subclass of one of self.retry_exceptions — confirm.
                    if response_validator and not response_validator(resp):
                        raise ValidationError(response_validator.__name__)
                    return resp
                except self.retry_exceptions as e:
                    error = e
                    logger.debug(
                        "Retry %s for the %s time, Exception: %r . kwargs= %s" %
                        (url, _ + 1, e, kwargs))
                    if retry_interval:
                        sleep(retry_interval)
                    continue
        # for unofficial request args: restore them onto kwargs so the
        # FailedRequest below records the complete original call.
        kwargs["retry"] = retry
        if referer_info:
            kwargs["referer_info"] = referer_info
        if encoding:
            kwargs["encoding"] = encoding
        logger.debug("Retry %s times failed again: %s." % (retry, error))
        failure = FailureException(error)
        failure.request = FailedRequest(**kwargs)
        if self.catch_exception:
            return failure
        else:
            raise failure
    def request(self,
                method,
                url,
                callback=None,
                retry=0,
                response_validator=None,
                **kwargs):
        """Similar to `requests.request`, but return as NewFuture."""
        return self.pool.submit(self._request,
                                method=method,
                                url=url,
                                retry=retry,
                                response_validator=response_validator,
                                callback=callback or self.default_callback,
                                **kwargs)
    def get(self,
            url,
            params=None,
            callback=None,
            retry=0,
            response_validator=None,
            **kwargs):
        """Similar to `requests.get`, but return as NewFuture."""
        kwargs.setdefault("allow_redirects", True)
        return self.request("get",
                            url=url,
                            params=params,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def post(self,
             url,
             data=None,
             json=None,
             callback=None,
             retry=0,
             response_validator=None,
             **kwargs):
        """Similar to `requests.post`, but return as NewFuture."""
        return self.request("post",
                            url=url,
                            data=data,
                            json=json,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def delete(self,
               url,
               callback=None,
               retry=0,
               response_validator=None,
               **kwargs):
        """Similar to `requests.delete`, but return as NewFuture."""
        return self.request("delete",
                            url=url,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def put(self,
            url,
            data=None,
            callback=None,
            retry=0,
            response_validator=None,
            **kwargs):
        """Similar to `requests.put`, but return as NewFuture."""
        return self.request("put",
                            url=url,
                            data=data,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def head(self,
             url,
             callback=None,
             retry=0,
             response_validator=None,
             allow_redirects=False,
             **kwargs):
        """Similar to `requests.head`, but return as NewFuture.

        Unlike get/options, redirects are NOT followed by default."""
        kwargs['allow_redirects'] = allow_redirects
        return self.request("head",
                            url=url,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def options(self,
                url,
                callback=None,
                retry=0,
                response_validator=None,
                **kwargs):
        """Similar to `requests.options`, but return as NewFuture."""
        kwargs.setdefault("allow_redirects", True)
        return self.request("options",
                            url=url,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
    def patch(self,
              url,
              callback=None,
              retry=0,
              response_validator=None,
              **kwargs):
        """Similar to `requests.patch`, but return as NewFuture."""
        return self.request("patch",
                            url=url,
                            callback=callback,
                            retry=retry,
                            response_validator=response_validator,
                            **kwargs)
def get(url,
        params=None,
        callback=None,
        retry=0,
        response_validator=None,
        **kwargs):
    """One-shot asynchronous GET using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(params=params,
                  callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().get(url, **kwargs)
def post(url,
         data=None,
         json=None,
         callback=None,
         retry=0,
         response_validator=None,
         **kwargs):
    """One-shot asynchronous POST using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(data=data,
                  json=json,
                  callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().post(url, **kwargs)
def delete(url, callback=None, retry=0, response_validator=None, **kwargs):
    """One-shot asynchronous DELETE using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().delete(url, **kwargs)
def put(url,
        data=None,
        callback=None,
        retry=0,
        response_validator=None,
        **kwargs):
    """One-shot asynchronous PUT using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(data=data,
                  callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().put(url, **kwargs)
def head(url, callback=None, retry=0, response_validator=None, **kwargs):
    """One-shot asynchronous HEAD using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().head(url, **kwargs)
def options(url, callback=None, retry=0, response_validator=None, **kwargs):
    """One-shot asynchronous OPTIONS using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().options(url, **kwargs)
def patch(url, callback=None, retry=0, response_validator=None, **kwargs):
    """One-shot asynchronous PATCH using a fresh ``tPool``; returns a NewFuture."""
    kwargs.update(callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().patch(url, **kwargs)
def request(method,
            url,
            callback=None,
            retry=0,
            response_validator=None,
            **kwargs):
    """One-shot asynchronous request with an arbitrary HTTP method using a
    fresh ``tPool``; returns a NewFuture."""
    kwargs.update(callback=callback,
                  retry=retry,
                  response_validator=response_validator)
    return tPool().request(method, url, **kwargs)
class Workshop:
    """Simple solution for the producer-consumer problem.

    WARNING: callback should have its own timeout to avoid blocking too long.
    Demo::
        import time
        from torequests.main import Workshop
        def callback(todo, worker_arg):
            time.sleep(todo)
            if worker_arg == 'worker1':
                return None
            return [todo, worker_arg]
        fc = Workshop(range(1, 5), ['worker1', 'worker2', 'worker3'], callback)
        for i in fc.get_result_as_completed():
            print(i)
        # [2, 'worker2']
        # [3, 'worker3']
        # [1, 'worker2']
        # [4, 'worker3']
        for i in fc.get_result_as_sequence():
            print(i)
        # [1, 'worker3']
        # [2, 'worker3']
        # [3, 'worker3']
        # [4, 'worker2']
    """
    def __init__(self,
                 todo_args,
                 worker_args,
                 callback,
                 timeout=None,
                 wait_empty_secs=1,
                 handle_exceptions=(),
                 max_failure=None,
                 fail_returned=None):
        """
        :param todo_args: args to be sent to callback
        :type todo_args: List[Any]
        :param worker_args: args for launching worker threads, you can use like [worker1, worker1, worker1] for concurrent workers
        :type worker_args: List[Any]
        :param callback: callback to consume the todo_arg from queue, handle args like callback(todo_arg, worker_arg)
        :type callback: Callable
        :param timeout: timeout for worker running, defaults to None
        :type timeout: [float, int], optional
        :param wait_empty_secs: seconds to sleep while queue is Empty, defaults to 1
        :type wait_empty_secs: float, optional
        :param handle_exceptions: ignore Exceptions raised from callback, defaults to ()
        :type handle_exceptions: Tuple[Exception], optional
        :param max_failure: stop worker while failing too many times, defaults to None
        :type max_failure: int, optional
        :param fail_returned: a value returned from callback equal to this is treated as a failure, defaults to None
        :type fail_returned: Any, optional
        """
        self.q = Queue()
        self.futures = self.init_futures(todo_args)
        self.worker_args = worker_args
        self.callback = callback
        # float('inf') means "no timeout" for the comparison in worker().
        self.timeout = timeout or float('inf')
        self.wait_empty_secs = wait_empty_secs
        self.result = None
        self.handle_exceptions = handle_exceptions
        self.max_failure = float('inf') if max_failure is None else max_failure
        self.fail_returned = fail_returned
        self._done = False
        # Sentinel object; workers break when they pull it off the queue.
        # NOTE(review): nothing in this class visibly enqueues it (see the
        # no-op at the end of worker()) — verify shutdown actually works.
        self._done_signal = object()
    def init_futures(self, todo_args):
        # One Future per todo arg; the arg itself is stashed on the future.
        futures = []
        for arg in todo_args:
            f = Future()
            f.arg = arg
            futures.append(f)
            self.q.put(f)
        return futures
    def run(self, as_completed=False):
        """run until all tasks finished"""
        if as_completed:
            return list(self.get_result_as_completed())
        return list(self.get_result_as_sequence())
    def get_result_as_sequence(self):
        """return a generator of results with same sequence as self.todo_args"""
        self.start_workers()
        for f in self.futures:
            yield f.result()
    def get_result_as_completed(self):
        """return a generator of results as completed sequence"""
        self.start_workers()
        for f in as_completed(self.futures):
            yield f.result()
    @property
    def done(self):
        # Cached once true: all futures resolved.
        self._done = self._done or all((f.done() for f in self.futures))
        return self._done
    def worker(self, worker_arg):
        """Worker loop: pull futures from the queue, run the callback, and
        set results; re-queue the future on failure."""
        fails = 0
        start_time = time_time()
        while time_time(
        ) - start_time < self.timeout and fails <= self.max_failure:
            try:
                f = self.q.get(timeout=self.wait_empty_secs)
                if f is self._done_signal:
                    break
            # NOTE(review): queue.Queue.get raises queue.Empty on timeout,
            # not TimeoutError — unless TimeoutError is bound to queue.Empty
            # at import time (imports not visible here), this handler never
            # fires and Empty propagates. Verify the module's imports.
            except TimeoutError:
                if self.done:
                    break
                fails += 1
                continue
            try:
                result = self.callback(f.arg, worker_arg)
            except self.handle_exceptions as err:
                logger.error(
                    'Raised {err!r}, worker_arg: {worker_arg}, todo_arg: {arg}'.
                    format_map(
                        dict(err=err,
                             worker_arg=repr(worker_arg)[:100],
                             arg=repr(f.arg)[:100])))
                result = self.fail_returned
            # Equality (not identity) check: any result == fail_returned is
            # treated as a failure and the todo is re-queued for another try.
            if result == self.fail_returned:
                self.q.put(f)
                fails += 1
                sleep(self.wait_empty_secs)
                continue
            else:
                f.set_result(result)
                if fails > 0:
                    fails -= 1
        # NOTE(review): no-op — the bound method is referenced, never called.
        # Probably intended self.q.put_nowait(self._done_signal) so sibling
        # workers also stop; confirm before changing.
        self.q.put_nowait
    def start_workers(self):
        # Launch one daemon thread per worker_arg; daemonized so stuck
        # workers never block interpreter exit.
        self._done = False
        for worker_arg in self.worker_args:
            t = Thread(target=self.worker, args=(worker_arg,))
            t.daemon = True
            t.start()
|
|
"""
*****
Pajek
*****
Read graphs in Pajek format.
This implementation handles directed and undirected graphs including
those with self loops and parallel edges.
Format
------
See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
for format information.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2008-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import is_string_like,_get_fh,make_str
__all__ = ['read_pajek', 'parse_pajek', 'generate_pajek', 'write_pajek']
def generate_pajek(G):
    """Generate lines in Pajek graph format.

    Parameters
    ----------
    G : graph
       A Networkx graph

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    name = 'NetworkX' if G.name == '' else G.name
    yield '*network %s' % name
    # --- vertices section: one line per node (id, label, x, y, shape) ---
    yield '*vertices %s' % (G.order())
    nodes = G.nodes()
    # Map each node to a 1-based integer id; an explicit 'id' attribute on
    # the node overrides the positional numbering.
    nodenumber = dict(zip(nodes, range(1, len(nodes) + 1)))
    for n in nodes:
        attrs = G.node.get(n, {})
        x = attrs.get('x', 0.0)
        y = attrs.get('y', 0.0)
        id = int(attrs.get('id', nodenumber[n]))
        nodenumber[n] = id
        shape = attrs.get('shape', 'ellipse')
        yield ' '.join(map(make_str, (id, n, x, y, shape)))
        for k, v in attrs.items():
            yield '%s %s ' % (k, v)
    # --- edges section: '*arcs' for directed graphs, '*edges' otherwise ---
    yield '*arcs' if G.is_directed() else '*edges'
    for u, v, edgedata in G.edges(data=True):
        d = edgedata.copy()
        value = d.pop('weight', 1.0)  # use 1 as default edge value
        yield ' '.join(map(make_str, (nodenumber[u], nodenumber[v], value)))
        for k, v in d.items():
            if is_string_like(v):
                # add quotes to any values with a blank space
                if " " in v:
                    v = "\"%s\"" % v
            yield '%s %s ' % (k, v)
def write_pajek(G, path, encoding='UTF-8'):
    """Write graph in Pajek format to path.

    Parameters
    ----------
    G : graph
       A Networkx graph
    path : file or string
       File or filename to write.
       Filenames ending in .gz or .bz2 will be compressed.
    encoding : string, optional
       Text encoding used for the output bytes (default 'UTF-8').

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    fh = _get_fh(path, 'wb')
    # Each generated line is newline-terminated and encoded before writing.
    for line in generate_pajek(G):
        fh.write((line + '\n').encode(encoding))
def read_pajek(path, encoding='UTF-8'):
    """Read graph in Pajek format from path.

    Parameters
    ----------
    path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be uncompressed.
    encoding : string, optional
       Text encoding used to decode the file bytes (default 'UTF-8').

    Returns
    -------
    G : NetworkX MultiGraph or MultiDiGraph.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")
    >>> G=nx.read_pajek("test.net")

    To create a Graph instead of a MultiGraph use

    >>> G1=nx.Graph(G)

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    fh = _get_fh(path, 'rb')
    # Decode lazily so large files are not read into memory at once.
    decoded_lines = (raw_line.decode(encoding) for raw_line in fh)
    return parse_pajek(decoded_lines)
def parse_pajek(lines):
    """Parse Pajek format graph from string or iterable.

    Parameters
    ----------
    lines : string or iterable
       Data in Pajek format.

    Returns
    -------
    G : NetworkX graph (MultiDiGraph, or MultiGraph if an ``*edges``
        section is present)

    See Also
    --------
    read_pajek()
    """
    import shlex
    multigraph=False
    # Accept a whole document as a single string as well as line iterables.
    if is_string_like(lines): lines=iter(lines.split('\n'))
    lines = iter([line.rstrip('\n') for line in lines])
    G=nx.MultiDiGraph() # are multiedges allowed in Pajek? assume yes
    directed=True # assume this is a directed network for now
    # NOTE: an iterator is always truthy, so `while lines` never ends on its
    # own — the loop terminates via the bare-except break when next() raises
    # StopIteration at EOF.
    while lines:
        try:
            l=next(lines)
        except: #EOF
            break
        if l.lower().startswith("*network"):
            # NOTE(review): split() assumes the network name has no spaces;
            # "*network a b" would raise ValueError here — confirm intended.
            label,name=l.split()
            G.name=name
        if l.lower().startswith("*vertices"):
            nodelabels={}
            l,nnodes=l.split()
            for i in range(int(nnodes)):
                # shlex.split honors quoted labels containing spaces.
                splitline=shlex.split(str(next(lines)))
                id,label=splitline[0:2]
                G.add_node(label)
                nodelabels[id]=label
                G.node[label]={'id':id}
                try:
                    # Optional x, y, shape columns after id and label.
                    x,y,shape=splitline[2:5]
                    G.node[label].update({'x':float(x),
                                          'y':float(y),
                                          'shape':shape})
                except:
                    # Fewer than 5 columns (or non-float x/y): skip silently.
                    pass
                # Remaining columns are key/value attribute pairs.
                extra_attr=zip(splitline[5::2],splitline[6::2])
                G.node[label].update(extra_attr)
        if l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
            if l.lower().startswith("*edge"):
                # switch from multi digraph to multi graph
                G=nx.MultiGraph(G)
            # This inner loop consumes the rest of `lines`, so the outer
            # while loop finishes right after it.
            for l in lines:
                splitline=shlex.split(str(l))
                ui,vi=splitline[0:2]
                u=nodelabels.get(ui,ui)
                v=nodelabels.get(vi,vi)
                # parse the data attached to this edge and put in a dictionary
                edge_data={}
                try:
                    # there should always be a single value on the edge?
                    w=splitline[2:3]
                    edge_data.update({'weight':float(w[0])})
                except:
                    pass
                    # if there isn't, just assign a 1
                    # edge_data.update({'value':1})
                extra_attr=zip(splitline[3::2],splitline[4::2])
                edge_data.update(extra_attr)
                if G.has_edge(u,v):
                    multigraph=True
                G.add_edge(u,v,**edge_data)
    # if not multigraph: # use Graph/DiGraph if no parallel edges
    #     if G.is_directed():
    #         G=nx.DiGraph(G)
    #     else:
    #         G=nx.Graph(G)
    return G
# fixture for nose tests
def teardown_module(module):
import os
os.unlink('test.net')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StructureTest(test.TestCase, parameterized.TestCase):
  """Tests for `structure.Structure`: flat types/shapes, structural
  compatibility checks, tensor-list round trips, and conversion from the
  legacy (types, shapes, classes) representation."""
  # NOTE(mrry): The arguments must be lifted into lambdas because otherwise they
  # will be executed before the (eager- or graph-mode) test environment has been
  # set up.
  # pylint: disable=g-long-lambda,protected-access
  @parameterized.parameters(
      (lambda: constant_op.constant(37.0), structure.TensorStructure,
       [dtypes.float32], [[]]),
      (lambda: sparse_tensor.SparseTensor(
          indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
       structure.SparseTensorStructure, [dtypes.variant], [[3]]),
      (lambda: (constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
       structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
      (lambda: {
          "a": constant_op.constant(37.0),
          "b": constant_op.constant([1, 2, 3])
      }, structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
      (lambda: {
          "a": constant_op.constant(37.0),
          "b": (sparse_tensor.SparseTensor(
              indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
                sparse_tensor.SparseTensor(
                    indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
      }, structure.NestedStructure,
       [dtypes.float32, dtypes.variant, dtypes.variant], [[], [3], [3]]))
  def testFlatStructure(self, value_fn, expected_structure, expected_types,
                        expected_shapes):
    """from_value yields the expected Structure class, flat dtypes/shapes."""
    value = value_fn()
    s = structure.Structure.from_value(value)
    self.assertIsInstance(s, expected_structure)
    self.assertEqual(expected_types, s._flat_types)
    self.assertEqual(expected_shapes, s._flat_shapes)
  @parameterized.parameters(
      (lambda: constant_op.constant(37.0), lambda: [
          constant_op.constant(38.0),
          array_ops.placeholder(dtypes.float32),
          variables.Variable(100.0), 42.0,
          np.array(42.0, dtype=np.float32)
      ], lambda: [constant_op.constant([1.0, 2.0]), constant_op.constant(37)]),
      (lambda: sparse_tensor.SparseTensor(
          indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
       lambda: [
           sparse_tensor.SparseTensor(
               indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
           sparse_tensor.SparseTensorValue(
               indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
           array_ops.sparse_placeholder(dtype=dtypes.int32),
           array_ops.sparse_placeholder(dtype=dtypes.int32, shape=[None, None])
       ], lambda: [
           constant_op.constant(37, shape=[4, 5]),
           sparse_tensor.SparseTensor(
               indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
           array_ops.sparse_placeholder(
               dtype=dtypes.int32, shape=[None, None, None]),
           sparse_tensor.SparseTensor(
               indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
       ]),
      (lambda: {
          "a": constant_op.constant(37.0),
          "b": constant_op.constant([1, 2, 3])
      }, lambda: [{
          "a": constant_op.constant(15.0),
          "b": constant_op.constant([4, 5, 6])
      }], lambda: [{
          "a": constant_op.constant(15.0),
          "b": constant_op.constant([4, 5, 6, 7])
      }, {
          "a": constant_op.constant(15),
          "b": constant_op.constant([4, 5, 6])
      }, {
          "a":
              constant_op.constant(15),
          "b":
              sparse_tensor.SparseTensor(
                  indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
      }, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
  )
  def testIsCompatibleWithStructure(
      self, original_value_fn, compatible_values_fn, incompatible_values_fn):
    """is_compatible_with accepts/rejects the expected value structures."""
    original_value = original_value_fn()
    compatible_values = compatible_values_fn()
    incompatible_values = incompatible_values_fn()
    s = structure.Structure.from_value(original_value)
    for compatible_value in compatible_values:
      self.assertTrue(
          s.is_compatible_with(
              structure.Structure.from_value(compatible_value)))
    for incompatible_value in incompatible_values:
      self.assertFalse(
          s.is_compatible_with(
              structure.Structure.from_value(incompatible_value)))
  @parameterized.parameters(
      (lambda: constant_op.constant(37.0),),
      (lambda: sparse_tensor.SparseTensor(
          indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),),
      (lambda: {"a": constant_op.constant(37.0),
                "b": constant_op.constant([1, 2, 3])},),
      (lambda: {"a": constant_op.constant(37.0),
                "b": (sparse_tensor.SparseTensor(
                    indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
                      sparse_tensor.SparseTensor(
                          indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
               },),
  )
  def testRoundTripConversion(self, value_fn):
    """_to_tensor_list followed by _from_tensor_list preserves the value."""
    value = value_fn()
    s = structure.Structure.from_value(value)
    before = self.evaluate(value)
    after = self.evaluate(s._from_tensor_list(s._to_tensor_list(value)))
    flat_before = nest.flatten(before)
    flat_after = nest.flatten(after)
    for b, a in zip(flat_before, flat_after):
      if isinstance(b, sparse_tensor.SparseTensorValue):
        self.assertAllEqual(b.indices, a.indices)
        self.assertAllEqual(b.values, a.values)
        self.assertAllEqual(b.dense_shape, a.dense_shape)
      else:
        self.assertAllEqual(b, a)
  # pylint: enable=g-long-lambda
  def testIncompatibleStructure(self):
    """Flatten/restructure across mismatched structures raises clearly."""
    # Define three mutually incompatible values/structures, and assert that:
    # 1. Using one structure to flatten a value with an incompatible structure
    #    fails.
    # 2. Using one structure to restructre a flattened value with an
    #    incompatible structure fails.
    value_tensor = constant_op.constant(42.0)
    s_tensor = structure.Structure.from_value(value_tensor)
    flat_tensor = s_tensor._to_tensor_list(value_tensor)
    value_sparse_tensor = sparse_tensor.SparseTensor(
        indices=[[0, 0]], values=[1], dense_shape=[1, 1])
    s_sparse_tensor = structure.Structure.from_value(value_sparse_tensor)
    flat_sparse_tensor = s_sparse_tensor._to_tensor_list(value_sparse_tensor)
    value_nest = {
        "a": constant_op.constant(37.0),
        "b": constant_op.constant([1, 2, 3])
    }
    s_nest = structure.Structure.from_value(value_nest)
    flat_nest = s_nest._to_tensor_list(value_nest)
    with self.assertRaisesRegexp(
        ValueError, r"SparseTensor.* is not convertible to a tensor with "
        r"dtype.*float32.* and shape \(\)"):
      s_tensor._to_tensor_list(value_sparse_tensor)
    with self.assertRaisesRegexp(
        ValueError, r"Value \{.*\} is not convertible to a tensor with "
        r"dtype.*float32.* and shape \(\)"):
      s_tensor._to_tensor_list(value_nest)
    with self.assertRaisesRegexp(TypeError, "Input must be a SparseTensor"):
      s_sparse_tensor._to_tensor_list(value_tensor)
    with self.assertRaisesRegexp(TypeError, "Input must be a SparseTensor"):
      s_sparse_tensor._to_tensor_list(value_nest)
    with self.assertRaisesRegexp(
        ValueError, "Tensor.* not compatible with the nested structure "
        ".*TensorStructure.*TensorStructure"):
      s_nest._to_tensor_list(value_tensor)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensor.* not compatible with the nested structure "
        ".*TensorStructure.*TensorStructure"):
      s_nest._to_tensor_list(value_sparse_tensor)
    with self.assertRaisesRegexp(
        ValueError, r"Cannot convert.*with dtype.*float32.* and shape \(\)"):
      s_tensor._from_tensor_list(flat_sparse_tensor)
    with self.assertRaisesRegexp(
        ValueError, "TensorStructure corresponds to a single tf.Tensor."):
      s_tensor._from_tensor_list(flat_nest)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensorStructure corresponds to a single tf.variant "
        "vector of length 3."):
      s_sparse_tensor._from_tensor_list(flat_tensor)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensorStructure corresponds to a single tf.variant "
        "vector of length 3."):
      s_sparse_tensor._from_tensor_list(flat_nest)
    with self.assertRaisesRegexp(
        ValueError, "Expected 2 flat values in NestedStructure but got 1."):
      s_nest._from_tensor_list(flat_tensor)
    with self.assertRaisesRegexp(
        ValueError, "Expected 2 flat values in NestedStructure but got 1."):
      s_nest._from_tensor_list(flat_sparse_tensor)
  def testIncompatibleNestedStructure(self):
    """Same as testIncompatibleStructure but for nested structures."""
    # Define three mutually incompatible nested values/structures, and assert
    # that:
    # 1. Using one structure to flatten a value with an incompatible structure
    #    fails.
    # 2. Using one structure to restructre a flattened value with an
    #    incompatible structure fails.
    value_0 = {
        "a": constant_op.constant(37.0),
        "b": constant_op.constant([1, 2, 3])
    }
    s_0 = structure.Structure.from_value(value_0)
    flat_s_0 = s_0._to_tensor_list(value_0)
    # `value_1` has compatible nested structure with `value_0`, but different
    # classes.
    value_1 = {
        "a":
            constant_op.constant(37.0),
        "b":
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[1], dense_shape=[1, 1])
    }
    s_1 = structure.Structure.from_value(value_1)
    flat_s_1 = s_1._to_tensor_list(value_1)
    # `value_2` has incompatible nested structure with `value_0` and `value_1`.
    value_2 = {
        "a":
            constant_op.constant(37.0),
        "b": (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
              sparse_tensor.SparseTensor(
                  indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
    }
    s_2 = structure.Structure.from_value(value_2)
    flat_s_2 = s_2._to_tensor_list(value_2)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensor.* not compatible with the nested structure "
        ".*TensorStructure"):
      s_0._to_tensor_list(value_1)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensor.*SparseTensor.* not compatible with the "
        "nested structure .*TensorStructure"):
      s_0._to_tensor_list(value_2)
    with self.assertRaisesRegexp(
        ValueError, "Tensor.* not compatible with the nested structure "
        ".*SparseTensorStructure"):
      s_1._to_tensor_list(value_0)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensor.*SparseTensor.* not compatible with the "
        "nested structure .*TensorStructure"):
      s_0._to_tensor_list(value_2)
    # NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
    # needs to account for "a" coming before or after "b". It might be worth
    # adding a deterministic repr for these error messages (among other
    # improvements).
    with self.assertRaisesRegexp(
        ValueError, "Tensor.*Tensor.* not compatible with the nested structure "
        ".*(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
        "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"):
      s_2._to_tensor_list(value_0)
    with self.assertRaisesRegexp(
        ValueError, "(Tensor.*SparseTensor|SparseTensor.*Tensor).* "
        "not compatible with the nested structure .*"
        "(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
        "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"):
      s_2._to_tensor_list(value_1)
    with self.assertRaisesRegexp(
        ValueError, r"Cannot convert.*with dtype.*int32.* and shape \(3,\)"):
      s_0._from_tensor_list(flat_s_1)
    with self.assertRaisesRegexp(
        ValueError, "Expected 2 flat values in NestedStructure but got 3."):
      s_0._from_tensor_list(flat_s_2)
    with self.assertRaisesRegexp(
        ValueError, "SparseTensorStructure corresponds to a single tf.variant "
        "vector of length 3."):
      s_1._from_tensor_list(flat_s_0)
    with self.assertRaisesRegexp(
        ValueError, "Expected 2 flat values in NestedStructure but got 3."):
      s_1._from_tensor_list(flat_s_2)
    with self.assertRaisesRegexp(
        ValueError, "Expected 3 flat values in NestedStructure but got 2."):
      s_2._from_tensor_list(flat_s_0)
    with self.assertRaisesRegexp(
        ValueError, "Expected 3 flat values in NestedStructure but got 2."):
      s_2._from_tensor_list(flat_s_1)
  @parameterized.named_parameters(
      ("Tensor", dtypes.float32, tensor_shape.scalar(), ops.Tensor,
       structure.TensorStructure(dtypes.float32, [])),
      ("SparseTensor", dtypes.int32, tensor_shape.matrix(2, 2),
       sparse_tensor.SparseTensor,
       structure.SparseTensorStructure(dtypes.int32, [2, 2])),
      ("Nest",
       {"a": dtypes.float32, "b": (dtypes.int32, dtypes.string)},
       {"a": tensor_shape.scalar(),
        "b": (tensor_shape.matrix(2, 2), tensor_shape.scalar())},
       {"a": ops.Tensor, "b": (sparse_tensor.SparseTensor, ops.Tensor)},
       structure.NestedStructure({
           "a": structure.TensorStructure(dtypes.float32, []),
           "b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
                 structure.TensorStructure(dtypes.string, []))})),
  )
  def testFromLegacyStructure(self, output_types, output_shapes, output_classes,
                              expected_structure):
    """_from_legacy_structure builds a Structure compatible with the expected one."""
    actual_structure = structure.Structure._from_legacy_structure(
        output_types, output_shapes, output_classes)
    self.assertTrue(expected_structure.is_compatible_with(actual_structure))
    self.assertTrue(actual_structure.is_compatible_with(expected_structure))
# Standard test entry point: run all test cases defined in this module.
if __name__ == "__main__":
  test.main()
|
|
import fnmatch
import os
import shutil
import yaml
from chandra_suli.logging_system import get_logger
from chandra_suli.sanitize_filename import sanitize_filename
from chandra_suli.work_within_directory import work_within_directory
# Module-level logger used by all data-package operations below.
logger = get_logger("DataPackage")
# Name of the YAML index file stored inside each package directory; it maps
# tags to relative paths and descriptions, and holds the read-only flag.
_index_file = "index.yml"
def _check_directory(directory):
    """Sanitize *directory* and verify that it exists and is a directory.

    :param directory: path to check
    :return: the sanitized path
    :raises IOError: if the path does not exist or is not a directory
    """
    sanitized_directory = sanitize_filename(directory)
    # Raise real exceptions instead of using assert: assertions are stripped
    # when Python runs with -O, which would silently skip this validation.
    if not os.path.exists(sanitized_directory):
        raise IOError("Directory %s does not exist" % sanitized_directory)
    if not os.path.isdir(sanitized_directory):
        raise IOError("The file %s is not a directory" % sanitized_directory)
    return sanitized_directory
class File(object):
    """A single file tracked by a data package.

    Wraps an on-disk path (sanitized at construction time) together with a
    human-readable description, and offers helpers to relocate or duplicate
    the file.
    """

    def __init__(self, filename, description):
        self._filename = sanitize_filename(filename)
        self._description = description
        message = ("Something went wrong when creating File instance. "
                   "File %s does not exists!" % self._filename)
        assert os.path.exists(self._filename), message

    @property
    def filename(self):
        """Current on-disk path of the file."""
        return self._filename

    @property
    def description(self):
        """Human-readable description of the file."""
        return self._description

    def _check_consistency(self):
        # The path is cached at construction; if the file was moved behind
        # our back the instance is stale, so fail loudly.
        assert os.path.exists(self._filename), "The file %s was moved independently, bad idea!" % self._filename

    def move_to(self, new_directory):
        """
        Move the file to a new location (a new directory)

        :param new_directory: destination directory (must exist)
        :return: this same instance, now pointing at the new path
        """
        self._check_consistency()
        destination = _check_directory(new_directory)
        shutil.move(self._filename, destination)
        moved_path = os.path.join(destination, os.path.basename(self._filename))
        assert os.path.exists(moved_path), "Could not move %s to %s" % (self._filename, moved_path)
        self._filename = moved_path
        return self

    def copy_to(self, new_directory):
        """Copy the file into *new_directory* and return a new File for the copy."""
        self._check_consistency()
        destination = _check_directory(new_directory)
        shutil.copy(self._filename, destination)
        copied_path = os.path.join(destination, os.path.basename(self._filename))
        assert os.path.exists(copied_path), "Could not copy %s to %s" % (self._filename, copied_path)
        return File(copied_path, self._description)
class DataPackage(object):
    """A directory of files indexed by tag.

    A package is a directory holding the tracked files plus a YAML index
    file (index.yml) mapping every tag to a relative path and a description;
    the index also stores a read-only flag.
    """

    def __init__(self, directory, create=False):
        """
        Open (or create) a data package.

        :param directory: path of the package directory
        :param create: if True, create the directory and an empty index when
                       the directory does not exist yet
        :raises IOError: if the directory is missing and create is False
        """

        self._directory = sanitize_filename(directory)

        if os.path.exists(self._directory) and os.path.isdir(self._directory):

            logger.debug("Accessing data in %s" % self._directory)

            with work_within_directory(self._directory):

                # Access the index file
                assert os.path.exists(_index_file), "Cannot find index file in %s" % self._directory

                self._load_status()

                self._check_consistency()

        else:

            if create:

                # Create directory
                os.makedirs(self._directory)

                # Create an empty index file
                with work_within_directory(self._directory):

                    # By default the package is read-write
                    self._status = {'read_only': False, 'index': {}}

                    self._save_status()

                logger.info("Datapackage in %s has been created" % self._directory)

            else:

                raise IOError("Directory %s does not exist or is not a directory" % self._directory)

    @property
    def location(self):
        # Path of the package directory.
        return self._directory

    def has(self, tag):
        """
        Returns whether the package contains the file corresponding to the provided tag, or not

        :param tag:
        :return: True or False
        """

        self._load_status()

        return tag in self._status['index']

    def _set_readonly(self, read_only):

        read_only = bool(read_only)

        self._status['read_only'] = read_only

        self._save_status()

    def _get_readonly(self):

        return self._status['read_only']

    read_only = property(_get_readonly, _set_readonly, doc="Set or get the read-only status of the package")

    @property
    def _status_file(self):
        # Absolute path of the YAML index file.
        return os.path.abspath(os.path.join(self._directory, _index_file))

    def _get_abs_path(self, tag):
        # NOTE: this reloads the index from disk, discarding any unsaved
        # in-memory changes to self._status.
        self._load_status()

        return os.path.abspath(os.path.join(self._directory, self._status['index'][tag]['path']))

    def _save_status(self):

        # Save the dictionary to the dictionary file
        with open(self._status_file, "w+") as f:

            yaml.dump(self._status, f)

    def _load_status(self):

        # Read the dictionary
        with open(self._status_file, "r") as f:

            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects. The index is written by this module,
            # but consider yaml.safe_load if packages may come from untrusted
            # sources.
            self._status = yaml.load(f)

    def _check_consistency(self):
        """Verify that every file listed in the index exists on disk.

        :raises IOError: if an indexed file is missing
        """

        self._load_status()

        # Check that all files described in the dictionary exist
        with work_within_directory(self._directory):

            for tag in self._status['index'].keys():

                path = self._status['index'][tag]['path']

                if not os.path.exists(path):

                    abspath = os.path.abspath(path)

                    raise IOError("File %s is contained in the index, but does not exists in %s" % (path, abspath))

    def clear(self):
        """
        Remove all files from the data package (careful!)

        :return: None
        """

        if self.read_only:

            raise RuntimeError("Trying to modifying a read-only package")

        # This also reloads the index from disk
        self._check_consistency()

        # BUGFIX: _get_abs_path reloads the index from disk, which used to
        # undo the pops performed in previous loop iterations, so the saved
        # index kept stale entries pointing to deleted files. Resolve all
        # paths first, then delete the files and persist an empty index.
        paths = [self._get_abs_path(tag) for tag in list(self._status['index'].keys())]

        for path in paths:

            os.remove(path)

        self._status['index'] = {}

        self._save_status()

    def store(self, tag, filename, description, force=False, move=False):
        """
        Store a file in the package

        :param tag: unique name under which the file is registered
        :param filename: path of the file to store
        :param description: human-readable description of the file
        :param force: if True, silently overwrite an existing tag
        :param move: if True, move the file into the package instead of copying it
        :return: None
        """

        self._load_status()

        if self.read_only:

            raise RuntimeError("Trying to modifying a read-only package")

        if tag in self._status['index'] and not force:

            raise RuntimeError("Cannot store file with tag %s, because the tag is already present in the package. "
                               "Use .update()." % tag)

        # Create the instance of a File
        orig_file = File(filename, description)

        # Move the file inside the package
        if move:

            new_file = orig_file.move_to(self._directory)

        else:

            new_file = orig_file.copy_to(self._directory)

        # Register it in the dictionary (using a relative path)
        relative_path = os.path.relpath(new_file.filename, self._directory)

        self._status['index'][tag] = {'path': relative_path, 'description': orig_file.description}

        # Save to the index file
        self._save_status()

    def update(self, tag, filename):
        """
        Update a file which is already in the package

        :param tag: tag of the packaged file to replace
        :param filename: path of the replacement file (must have the same basename)
        :return: None
        """

        self._load_status()

        if self.read_only:

            raise RuntimeError("Trying to modifying a read-only package")

        assert tag in self._status['index'], "Cannot update file with tag %s, it does not exist in the package" % tag

        # Assure that the filename is the same
        name1 = os.path.basename(filename)

        name2 = os.path.basename(self._status['index'][tag]['path'])

        if name1 != name2:

            # BUGFIX: the packaged file (name2) is the one being updated and
            # the provided file (name1) is the replacement; the operands were
            # swapped in the original message.
            raise RuntimeError("You cannot update the file %s with tag %s with the file %s "
                               "which has a different name" % (name2, tag, name1))

        # Move old file to a temporary location
        temp_backup = os.path.join(self._directory, name1 + '.bak')

        path = self._get_abs_path(tag)

        shutil.move(path, temp_backup)

        # Store new one
        try:

            self.store(tag, filename, self._status['index'][tag]['description'], force=True)

        except:

            # Move back the temp file (and re-raise: no error is swallowed)
            shutil.move(temp_backup, path)

            logger.error("Could not update file with tag %s, could not store the new file. "
                         "The old file has been restored." % tag)

            raise

        else:

            # If we are here the store has worked out fine, remove the temp file
            os.remove(temp_backup)

    def get(self, tag, dest_dir=None):
        """
        Retrieve a file by tag from the data package

        :param tag:
        :param dest_dir: if None, use current workdir, otherwise use the one provided, as destination dir. for the file
        :return: a File instance
        """

        self._load_status()

        assert tag in self._status['index'], "Tag %s does not exists in data package: \n%s" % (tag, self)

        item = self._status['index'][tag]

        abs_path = self._get_abs_path(tag)

        this_file = File(abs_path, item['description'])

        if dest_dir is not None:

            dest = sanitize_filename(dest_dir)

        else:

            dest = os.getcwd()

        out_file = this_file.copy_to(dest)

        return out_file

    def copy_to(self, new_directory):
        """
        Copy the entire data package to another directory

        :param new_directory: destination path. The package will be moved, with its name, inside this directory,
        which must already exist
        :return: the instance of the new package
        """

        # Persist the in-memory status so the copy includes the latest index
        self._save_status()

        directory = _check_directory(new_directory)

        package_name = os.path.split(self._directory)[-1]

        destination = os.path.join(directory, package_name)

        shutil.copytree(self._directory, destination)

        return DataPackage(destination)

    def __repr__(self):

        self._load_status()

        # Local renamed so it no longer shadows the builtin repr()
        representation = ""

        representation += "Data package in %s" % self._directory

        if self._status['read_only']:

            representation += " (read only)\n"

        else:

            representation += " (read/write)\n"

        for tag in self._status['index']:

            representation += "* %s: %s\n" % (tag, self._status['index'][tag]['path'])

        return representation

    def find_all(self, pattern):
        """
        Returns all tags matching the pattern

        :param pattern: a pattern like "ccd_*" (unix-style wildcards)
        :return: list of tags matching the pattern
        """

        # Refresh the index from disk first, consistently with has() and get()
        self._load_status()

        tags = fnmatch.filter(self._status['index'].keys(), pattern)

        return tags
|
|
'''
Tests for fileinput module.
Nick Mathewson
'''
import os
import sys
import re
import fileinput
import collections
import builtins
import unittest
try:
import bz2
except ImportError:
bz2 = None
try:
import gzip
except ImportError:
gzip = None
from io import StringIO
from fileinput import FileInput, hook_encoded
from test.support import verbose, TESTFN, run_unittest
from test.support import unlink as safe_unlink
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'):    # opening in text mode is the default
    """Write *lines* to temp file number *i* and return the temp file's name.

    :param i: integer suffix appended to TESTFN to build the file name
    :param lines: iterable of strings (or bytes objects when *mode* is binary)
    :param mode: mode passed to open(); text mode by default
    :return: the name of the file written
    """
    name = TESTFN + str(i)
    # "with" guarantees the handle is closed even if a write fails, instead
    # of leaking it as the previous open()/close() pair did.
    with open(name, mode) as f:
        f.writelines(lines)
    return name
def remove_tempfiles(*names):
    """Best-effort removal of the given temp files; falsy names are skipped."""
    for name in filter(None, names):
        safe_unlink(name)
class BufferSizesTests(unittest.TestCase):
    # Exercises FileInput over four temp files, once with the default buffer
    # size and once with a tiny one (bufsize=30).

    def test_buffer_sizes(self):
        # First, run the tests with default and teeny buffer size.
        for round, bs in (0, 0), (1, 30):
            t1 = t2 = t3 = t4 = None
            try:
                t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
                t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
                t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
                t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
                self.buffer_size_test(t1, t2, t3, t4, bs, round)
            finally:
                remove_tempfiles(t1, t2, t3, t4)

    def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        # Runs six numbered sub-scenarios over the four temp files; `start`
        # keeps the verbose scenario numbers unique across rounds.
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
        start = 1 + round*6
        if verbose:
            print('%s. Simple iteration (bs=%s)' % (start+0, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        lines = list(fi)
        fi.close()
        # 15 + 10 + 5 + 1 lines in total across the four files.
        self.assertEqual(len(lines), 31)
        self.assertEqual(lines[4], 'Line 5 of file 1\n')
        self.assertEqual(lines[30], 'Line 1 of file 4\n')
        self.assertEqual(fi.lineno(), 31)
        self.assertEqual(fi.filename(), t4)
        if verbose:
            print('%s. Status variables (bs=%s)' % (start+1, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        s = "x"
        # Advance to a known line in the second file, then check the
        # bookkeeping accessors against that position.
        while s and s != 'Line 6 of file 2\n':
            s = fi.readline()
        self.assertEqual(fi.filename(), t2)
        self.assertEqual(fi.lineno(), 21)
        self.assertEqual(fi.filelineno(), 6)
        self.assertFalse(fi.isfirstline())
        self.assertFalse(fi.isstdin())
        if verbose:
            print('%s. Nextfile (bs=%s)' % (start+2, bs))
        fi.nextfile()
        self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
        self.assertEqual(fi.lineno(), 22)
        fi.close()
        if verbose:
            print('%s. Stdin (bs=%s)' % (start+3, bs))
        fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
        savestdin = sys.stdin
        try:
            # '-' in the file list means read from (the replaced) stdin.
            sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
            lines = list(fi)
            self.assertEqual(len(lines), 33)
            self.assertEqual(lines[32], 'Line 2 of stdin\n')
            self.assertEqual(fi.filename(), '<stdin>')
            fi.nextfile()
        finally:
            sys.stdin = savestdin
        if verbose:
            print('%s. Boundary conditions (bs=%s)' % (start+4, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        # Before any read, and right after nextfile(), nothing is open yet.
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        fi.nextfile()
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        if verbose:
            print('%s. Inplace (bs=%s)' % (start+5, bs))
        savestdout = sys.stdout
        try:
            # In inplace mode, print() output is redirected into the files,
            # so this loop rewrites every line uppercased.
            fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
            for line in fi:
                line = line[:-1].upper()
                print(line)
            fi.close()
        finally:
            sys.stdout = savestdout
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        for line in fi:
            # Verify the in-place rewrite: uppercased text, newline intact,
            # and per-file line numbers still consistent.
            self.assertEqual(line[-1], '\n')
            m = pat.match(line[:-1])
            self.assertNotEqual(m, None)
            self.assertEqual(int(m.group(1)), fi.filelineno())
        fi.close()
class UnconditionallyRaise:
    """Callable that always raises *exception_type*, recording that it ran.

    Used to monkey-patch functions (e.g. os.unlink) so tests can both force
    an error and verify the patched function was actually reached.
    """

    def __init__(self, exception_type):
        self.exception_type = exception_type
        self.invoked = False

    def __call__(self, *args, **kwargs):
        # Mark the call before raising so callers can assert we were hit.
        self.invoked = True
        raise self.exception_type()
class FileInputTests(unittest.TestCase):
    """Tests of FileInput itself: construction, iteration, hooks and
    error handling."""

    def test_zero_byte_files(self):
        """Empty files are transparently skipped while reading."""
        t1 = t2 = t3 = t4 = None
        try:
            t1 = writeTmp(1, [""])
            t2 = writeTmp(2, [""])
            t3 = writeTmp(3, ["The only line there is.\n"])
            t4 = writeTmp(4, [""])
            fi = FileInput(files=(t1, t2, t3, t4))
            line = fi.readline()
            self.assertEqual(line, 'The only line there is.\n')
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 1)
            self.assertEqual(fi.filename(), t3)
            line = fi.readline()
            self.assertFalse(line)
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 0)
            self.assertEqual(fi.filename(), t4)
            fi.close()
        finally:
            remove_tempfiles(t1, t2, t3, t4)

    def test_files_that_dont_end_with_newline(self):
        """A missing trailing newline must not merge the last line of one
        file with the first line of the next."""
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            fi = FileInput(files=(t1, t2))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
        finally:
            remove_tempfiles(t1, t2)

    ## def test_unicode_filenames(self):
    ##    # XXX A unicode string is always returned by writeTmp.
    ##    #     So is this needed?
    ##    try:
    ##        t1 = writeTmp(1, ["A\nB"])
    ##        encoding = sys.getfilesystemencoding()
    ##        if encoding is None:
    ##            encoding = 'ascii'
    ##        fi = FileInput(files=str(t1, encoding))
    ##        lines = list(fi)
    ##        self.assertEqual(lines, ["A\n", "B"])
    ##    finally:
    ##        remove_tempfiles(t1)

    def test_fileno(self):
        """fileno() returns the underlying descriptor while a file is open
        and -1 when no file is currently open."""
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB"])
            t2 = writeTmp(2, ["C\nD"])
            fi = FileInput(files=(t1, t2))
            self.assertEqual(fi.fileno(), -1)
            line = next(fi)  # was "line =next( fi)" -- PEP 8 spacing fix
            self.assertNotEqual(fi.fileno(), -1)
            fi.nextfile()
            self.assertEqual(fi.fileno(), -1)
            line = list(fi)
            self.assertEqual(fi.fileno(), -1)
        finally:
            remove_tempfiles(t1, t2)

    def test_opening_mode(self):
        """Only read modes are accepted; "U" gives universal newlines."""
        try:
            # invalid mode, should raise ValueError
            fi = FileInput(mode="w")
            self.fail("FileInput should reject invalid mode argument")
        except ValueError:
            pass
        t1 = None
        try:
            # try opening in universal newline mode
            t1 = writeTmp(1, [b"A\nB\r\nC\rD"], mode="wb")
            fi = FileInput(files=t1, mode="U")
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
        finally:
            remove_tempfiles(t1)

    def test_file_opening_hook(self):
        """openhook must be callable, is incompatible with inplace mode,
        and is actually used to open each file."""
        try:
            # cannot use openhook and inplace mode
            fi = FileInput(inplace=1, openhook=lambda f, m: None)
            self.fail("FileInput should raise if both inplace "
                      "and openhook arguments are given")
        except ValueError:
            pass
        try:
            fi = FileInput(openhook=1)
            self.fail("FileInput should check openhook for being callable")
        except ValueError:
            pass

        class CustomOpenHook:
            def __init__(self):
                self.invoked = False
            def __call__(self, *args):
                self.invoked = True
                return open(*args)

        t = writeTmp(1, ["\n"])
        self.addCleanup(remove_tempfiles, t)
        custom_open_hook = CustomOpenHook()
        with FileInput([t], openhook=custom_open_hook) as fi:
            fi.readline()
        self.assertTrue(custom_open_hook.invoked, "openhook not invoked")

    def test_context_manager(self):
        """Leaving the with-block closes the input (empty _files tuple)."""
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            with FileInput(files=(t1, t2)) as fi:
                lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
            self.assertEqual(fi._files, ())
        finally:
            remove_tempfiles(t1, t2)

    def test_close_on_exception(self):
        """The input is closed even when the with-block raises."""
        try:
            t1 = writeTmp(1, [""])
            with FileInput(files=t1) as fi:
                raise IOError
        except IOError:
            self.assertEqual(fi._files, ())
        finally:
            remove_tempfiles(t1)

    def test_empty_files_list_specified_to_constructor(self):
        """An empty files list falls back to stdin ('-')."""
        with FileInput(files=[]) as fi:
            self.assertEqual(fi._files, ('-',))

    def test__getitem__(self):
        """Tests invoking FileInput.__getitem__() with the current
        line number"""
        t = writeTmp(1, ["line1\n", "line2\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            retval1 = fi[0]
            self.assertEqual(retval1, "line1\n")
            retval2 = fi[1]
            self.assertEqual(retval2, "line2\n")

    def test__getitem__invalid_key(self):
        """Tests invoking FileInput.__getitem__() with an index unequal to
        the line number"""
        t = writeTmp(1, ["line1\n", "line2\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            with self.assertRaises(RuntimeError) as cm:
                fi[1]
        self.assertEqual(cm.exception.args, ("accessing lines out of order",))

    def test__getitem__eof(self):
        """Tests invoking FileInput.__getitem__() with the line number but at
        end-of-input"""
        t = writeTmp(1, [])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            with self.assertRaises(IndexError) as cm:
                fi[0]
        self.assertEqual(cm.exception.args, ("end of input reached",))

    def test_nextfile_oserror_deleting_backup(self):
        """Tests invoking FileInput.nextfile() when the attempt to delete
        the backup file would raise OSError.  This error is expected to be
        silently ignored"""
        os_unlink_orig = os.unlink
        os_unlink_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                next(fi)  # make sure the file is opened
                os.unlink = os_unlink_replacement
                fi.nextfile()
        finally:
            os.unlink = os_unlink_orig
        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(os_unlink_replacement.invoked,
                        "os.unlink() was not invoked")

    def test_readline_os_fstat_raises_OSError(self):
        """Tests invoking FileInput.readline() when os.fstat() raises OSError.
        This exception should be silently discarded."""
        os_fstat_orig = os.fstat
        os_fstat_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                os.fstat = os_fstat_replacement
                fi.readline()
        finally:
            os.fstat = os_fstat_orig
        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(os_fstat_replacement.invoked,
                        "os.fstat() was not invoked")

    @unittest.skipIf(not hasattr(os, "chmod"), "os.chmod does not exist")
    def test_readline_os_chmod_raises_OSError(self):
        """Tests invoking FileInput.readline() when os.chmod() raises OSError.
        This exception should be silently discarded."""
        os_chmod_orig = os.chmod
        os_chmod_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                os.chmod = os_chmod_replacement
                fi.readline()
        finally:
            os.chmod = os_chmod_orig
        # sanity check to make sure that our test scenario was actually hit
        # BUGFIX: the message previously named os.fstat() (copy-paste error).
        self.assertTrue(os_chmod_replacement.invoked,
                        "os.chmod() was not invoked")

    def test_fileno_when_ValueError_raised(self):
        """fileno() returns -1 when the underlying file object's fileno()
        raises ValueError."""
        class FilenoRaisesValueError(UnconditionallyRaise):
            def __init__(self):
                UnconditionallyRaise.__init__(self, ValueError)
            def fileno(self):
                self.__call__()

        unconditionally_raise_ValueError = FilenoRaisesValueError()
        t = writeTmp(1, ["\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            file_backup = fi._file
            try:
                fi._file = unconditionally_raise_ValueError
                result = fi.fileno()
            finally:
                fi._file = file_backup  # make sure the file gets cleaned up
        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(unconditionally_raise_ValueError.invoked,
                        "_file.fileno() was not invoked")
        self.assertEqual(result, -1, "fileno() should return -1")
class MockFileInput:
    """A class that mocks out fileinput.FileInput for use during unit tests"""

    def __init__(self, files=None, inplace=False, backup="", bufsize=0,
                 mode="r", openhook=None):
        self.files = files
        self.inplace = inplace
        self.backup = backup
        self.bufsize = bufsize
        self.mode = mode
        self.openhook = openhook
        self._file = None
        # How many times each method has been called, keyed by method name.
        self.invocation_counts = collections.defaultdict(lambda: 0)
        # Canned return value per method name; a KeyError on an unset name
        # mirrors an unexpected call in a test.
        self.return_values = {}

    def _delegate(self, method_name):
        # Record the call, then hand back the canned return value (raising
        # KeyError when none was configured, just like direct dict access).
        self.invocation_counts[method_name] += 1
        return self.return_values[method_name]

    def close(self):
        # close() only records the call; it has no return value.
        self.invocation_counts["close"] += 1

    def nextfile(self):
        return self._delegate("nextfile")

    def filename(self):
        return self._delegate("filename")

    def lineno(self):
        return self._delegate("lineno")

    def filelineno(self):
        return self._delegate("filelineno")

    def fileno(self):
        return self._delegate("fileno")

    def isfirstline(self):
        return self._delegate("isfirstline")

    def isstdin(self):
        return self._delegate("isstdin")
class BaseFileInputGlobalMethodsTest(unittest.TestCase):
    """Shared fixture for testing fileinput's module-level functions.

    Swaps fileinput.FileInput for MockFileInput around each test and
    restores both the class and the module's _state afterwards.
    """

    def setUp(self):
        # Remember the real module state so tearDown can restore it.
        self._orig_state = fileinput._state
        self._orig_FileInput = fileinput.FileInput
        fileinput.FileInput = MockFileInput

    def tearDown(self):
        fileinput.FileInput = self._orig_FileInput
        fileinput._state = self._orig_state

    def assertExactlyOneInvocation(self, mock_file_input, method_name):
        counts = mock_file_input.invocation_counts
        # The named method must have run exactly once...
        self.assertEqual(counts[method_name], 1, method_name)
        # ...and no other method may have been recorded at all.
        self.assertEqual(len(counts), 1)
class Test_fileinput_input(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.input()"""

    def test_state_is_not_None_and_state_file_is_not_None(self):
        """input() must refuse to start while another input() is active
        (fileinput._state exists and its _file is open): it raises
        RuntimeError and leaves fileinput._state untouched."""
        mock = MockFileInput()
        mock._file = object()
        fileinput._state = mock
        with self.assertRaises(RuntimeError) as cm:
            fileinput.input()
        self.assertEqual(("input() already active",), cm.exception.args)
        self.assertIs(mock, fileinput._state, "fileinput._state")

    def test_state_is_not_None_and_state_file_is_None(self):
        """A leftover, exhausted state (_file is None) must not block a
        fresh call to input()."""
        mock = MockFileInput()
        mock._file = None
        fileinput._state = mock
        self.do_test_call_input()

    def test_state_is_None(self):
        """With no previous state at all, input() creates a new FileInput
        and installs it as fileinput._state."""
        fileinput._state = None
        self.do_test_call_input()

    def do_test_call_input(self):
        """Call fileinput.input() with a distinct sentinel for every
        parameter and verify each one reaches FileInput.__init__()
        unmodified (FileInput is monkey-patched to MockFileInput by
        setUp())."""
        sentinels = dict(files=object(), inplace=object(), backup=object(),
                         bufsize=object(), mode=object(), openhook=object())
        result = fileinput.input(**sentinels)
        # ensure fileinput._state was set to the returned object
        self.assertIs(result, fileinput._state, "fileinput._state")
        # every argument must have been passed through untouched
        for attr_name, sentinel in sentinels.items():
            self.assertIs(sentinel, getattr(result, attr_name), attr_name)
class Test_fileinput_close(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.close()"""

    def test_state_is_None(self):
        """fileinput.close() is a no-op when there is no active state."""
        fileinput._state = None
        fileinput.close()
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """fileinput.close() closes the active state exactly once and
        then clears fileinput._state."""
        mock = MockFileInput()
        fileinput._state = mock
        fileinput.close()
        self.assertExactlyOneInvocation(mock, "close")
        self.assertIsNone(fileinput._state)
class Test_fileinput_nextfile(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.nextfile()"""

    def test_state_is_None(self):
        """Without an active input(), nextfile() raises RuntimeError with
        a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.nextfile()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), nextfile() delegates to the state
        object exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["nextfile"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.nextfile(), expected)
        self.assertExactlyOneInvocation(mock, "nextfile")
        self.assertIs(fileinput._state, mock)
class Test_fileinput_filename(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.filename()"""

    def test_state_is_None(self):
        """Without an active input(), filename() raises RuntimeError with
        a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.filename()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), filename() delegates to the state
        object exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["filename"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.filename(), expected)
        self.assertExactlyOneInvocation(mock, "filename")
        self.assertIs(fileinput._state, mock)
class Test_fileinput_lineno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.lineno()"""

    def test_state_is_None(self):
        """Without an active input(), lineno() raises RuntimeError with
        a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.lineno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), lineno() delegates to the state object
        exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["lineno"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.lineno(), expected)
        self.assertExactlyOneInvocation(mock, "lineno")
        self.assertIs(fileinput._state, mock)
class Test_fileinput_filelineno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.filelineno()"""

    def test_state_is_None(self):
        """Without an active input(), filelineno() raises RuntimeError
        with a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.filelineno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), filelineno() delegates to the state
        object exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["filelineno"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.filelineno(), expected)
        self.assertExactlyOneInvocation(mock, "filelineno")
        self.assertIs(fileinput._state, mock)
class Test_fileinput_fileno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.fileno()"""

    def test_state_is_None(self):
        """Tests fileinput.fileno() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.fileno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.fileno() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.fileno() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        fileno_retval = object()
        instance = MockFileInput()
        instance.return_values["fileno"] = fileno_retval
        # NOTE: the redundant `instance.fileno_retval = fileno_retval`
        # assignment (unused by MockFileInput, and absent from all sibling
        # test classes) was removed for consistency.
        fileinput._state = instance
        retval = fileinput.fileno()
        self.assertExactlyOneInvocation(instance, "fileno")
        self.assertIs(retval, fileno_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_isfirstline(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.isfirstline()"""

    def test_state_is_None(self):
        """Without an active input(), isfirstline() raises RuntimeError
        with a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.isfirstline()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), isfirstline() delegates to the state
        object exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["isfirstline"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.isfirstline(), expected)
        self.assertExactlyOneInvocation(mock, "isfirstline")
        self.assertIs(fileinput._state, mock)
class Test_fileinput_isstdin(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.isstdin()"""

    def test_state_is_None(self):
        """Without an active input(), isstdin() raises RuntimeError with
        a meaningful message and leaves fileinput._state untouched."""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.isstdin()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """With an active input(), isstdin() delegates to the state object
        exactly once, passes its return value through, and keeps
        fileinput._state pointing at the same object."""
        expected = object()
        mock = MockFileInput()
        mock.return_values["isstdin"] = expected
        fileinput._state = mock
        self.assertIs(fileinput.isstdin(), expected)
        self.assertExactlyOneInvocation(mock, "isstdin")
        self.assertIs(fileinput._state, mock)
class InvocationRecorder:
    """Callable stand-in that counts calls and remembers the last arguments.

    After each call, ``invocation_count`` holds the total number of calls
    and ``last_invocation`` holds the most recent ``(args, kwargs)`` pair.
    """
    def __init__(self):
        self.invocation_count = 0
    def __call__(self, *args, **kwargs):
        self.last_invocation = (args, kwargs)
        self.invocation_count += 1
class Test_hook_compressed(unittest.TestCase):
    """Unit tests for fileinput.hook_compressed()"""
    def setUp(self):
        # Recording fake that stands in for the various open() callables.
        self.fake_open = InvocationRecorder()
    def test_empty_string(self):
        self.do_test_use_builtin_open("", 1)
    def test_no_ext(self):
        self.do_test_use_builtin_open("abcd", 2)
    @unittest.skipUnless(gzip, "Requires gzip and zlib")
    def test_gz_ext_fake(self):
        # A lowercase .gz extension must be routed to gzip.open.
        saved_open = gzip.open
        gzip.open = self.fake_open
        try:
            fileinput.hook_compressed("test.gz", 3)
        finally:
            gzip.open = saved_open
        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation, (("test.gz", 3), {}))
    @unittest.skipUnless(bz2, "Requires bz2")
    def test_bz2_ext_fake(self):
        # A lowercase .bz2 extension must be routed to bz2.BZ2File.
        saved_open = bz2.BZ2File
        bz2.BZ2File = self.fake_open
        try:
            fileinput.hook_compressed("test.bz2", 4)
        finally:
            bz2.BZ2File = saved_open
        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation, (("test.bz2", 4), {}))
    def test_blah_ext(self):
        self.do_test_use_builtin_open("abcd.blah", 5)
    def test_gz_ext_builtin(self):
        # Extension matching is case-sensitive: ".Gz" goes to builtin open.
        self.do_test_use_builtin_open("abcd.Gz", 6)
    def test_bz2_ext_builtin(self):
        self.do_test_use_builtin_open("abcd.Bz2", 7)
    def do_test_use_builtin_open(self, filename, mode):
        """Assert hook_compressed(filename, mode) delegates to builtin open."""
        saved_open = self.replace_builtin_open(self.fake_open)
        try:
            fileinput.hook_compressed(filename, mode)
        finally:
            self.replace_builtin_open(saved_open)
        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation,
                         ((filename, mode), {}))
    @staticmethod
    def replace_builtin_open(new_open_func):
        """Swap builtins.open for *new_open_func*; return the previous one."""
        previous = builtins.open
        builtins.open = new_open_func
        return previous
class Test_hook_encoded(unittest.TestCase):
    """Unit tests for fileinput.hook_encoded()"""
    def test(self):
        # hook_encoded(encoding) must return an opener that forwards
        # (filename, mode) positionally and the encoding as a keyword.
        encoding = object()
        opener = fileinput.hook_encoded(encoding)
        fake_open = InvocationRecorder()
        saved_open = builtins.open
        builtins.open = fake_open
        try:
            filename = object()
            mode = object()
            opener(filename, mode)
        finally:
            builtins.open = saved_open
        self.assertEqual(fake_open.invocation_count, 1)
        args, kwargs = fake_open.last_invocation
        self.assertIs(args[0], filename)
        self.assertIs(args[1], mode)
        self.assertIs(kwargs.pop('encoding'), encoding)
        self.assertFalse(kwargs)
def test_main():
    # Run every test case defined in this module under the shared
    # regrtest-style runner.
    run_unittest(
        BufferSizesTests,
        FileInputTests,
        Test_fileinput_input,
        Test_fileinput_close,
        Test_fileinput_nextfile,
        Test_fileinput_filename,
        Test_fileinput_lineno,
        Test_fileinput_filelineno,
        Test_fileinput_fileno,
        Test_fileinput_isfirstline,
        Test_fileinput_isstdin,
        Test_hook_compressed,
        Test_hook_encoded,
    )
if __name__ == "__main__":
    test_main()
|
|
import xml.etree.cElementTree as ElementTree
import requests
# Maps the friendly snake_case method names exposed by NameSilo.__getattr__
# to the camelCase operation names used in the NameSilo API URL path.
NAMESILO_OPERATIONS = {
    'add_account_funds': 'addAccountFunds',
    'add_auto_renew': 'addAutoRenewal',
    'add_contact': 'contactAdd',
    'add_dns_record': 'dnsAddRecord',
    'add_email_forward': 'configureEmailForward',
    'add_portfolio': 'portfolioAdd',
    'add_privacy': 'addPrivacy',
    'add_registered_nameserver': 'addRegisteredNameServer',
    'associate_contact': 'contactDomainAssociate',
    'associate_portfolio': 'portfolioDomainAssociate',
    'change_nameservers': 'changeNameServers',
    'check_register_availability': 'checkRegisterAvailability',
    'check_transfer_availability': 'checkTransferAvailability',
    'check_transfer_status': 'checkTransferStatus',
    'delete_dns_record': 'dnsDeleteRecord',
    'delete_portfolio': 'portfolioDelete',
    'delete_registered_nameserver': 'deleteRegisteredNameServer',
    'forward_domain': 'domainForward',
    'get_account_balance': 'getAccountBalance',
    'get_auth_code': 'retrieveAuthCode',
    'get_domain_info': 'getDomainInfo',
    'list_contacts': 'contactList',
    'list_dns_records': 'dnsListRecords',
    'list_domains': 'listDomains',
    'list_email_forwards': 'listEmailForwards',
    'list_portfolios': 'portfolioList',
    'list_registered_nameservers': 'listRegisteredNameServers',
    'lock_domain': 'domainLock',
    'register_domain': 'registerDomain',
    'renew_domain': 'renewDomain',
    'remove_auto_renewal': 'removeAutoRenewal',
    'remove_email_forward': 'deleteEmailForward',
    'remove_privacy': 'removePrivacy',
    # BUG FIX: was 'ransferDomain' (missing leading 't').
    'transfer_domain': 'transferDomain',
    'unlock_domain': 'domainUnlock',
    'update_contact': 'contactUpdate',
    'update_dns_record': 'dnsUpdateRecord',
    # BUG FIX: was 'portfoliopdate' (missing 'U').
    'update_portfolio': 'portfolioUpdate',
    'update_registered_nameserver': 'modifyRegisteredNameServer'
}
class NameSiloError(Exception):
    """Base class for NameSilo errors."""
    pass
class HTTPSNotUsed(NameSiloError):
    """Raised if request is made without HTTPS (API code 101)."""
    pass
class NoVersionSpecified(NameSiloError):
    """Raised if no version is specified in the request (API code 102)."""
    pass
class InvalidAPIVersion(NameSiloError):
    """Raised if the API version specified is invalid (API code 103)."""
    pass
class NoTypeSpecified(NameSiloError):
    """Raised if no type is specified in request (API code 104)."""
    pass
class InvalidAPIType(NameSiloError):
    """Raised if API type is invalid (API code 105)."""
    pass
class NoOperationSpecified(NameSiloError):
    """Raised if no operation is specified in request (API code 106)."""
    pass
# NOTE(review): class name is missing its leading "M" (MissingAPIParameters);
# kept as-is for backward compatibility.  It is not referenced by
# NAMESILO_ERRORS (code 108 maps to MissingOperationParameters below).
class issingAPIParameters(NameSiloError):
    """Raised if there are missing parameters for the specified operation."""
    pass
class InvalidAPIOperation(NameSiloError):
    """Raised if the API operation is invalid (API code 107)."""
    pass
class MissingOperationParameters(NameSiloError):
    """Raised if parameters are missing from the API operation (API code 108)."""
    pass
class NoAPIKeySpecified(NameSiloError):
    """Raised if no API key is specified for request (API code 109)."""
    pass
class InvalidAPIKey(NameSiloError):
    """Raised if the API key is invalid (API code 110)."""
    pass
class InvalidUser(NameSiloError):
    """Raised if user associated with API key is invalid (API code 111)."""
    pass
class APINotAvailableToSubAccounts(NameSiloError):
    """Raised if the API is not available to sub-accounts (API code 112)."""
    pass
# NOTE(review): name breaks the CamelCase convention (should be
# InvalidIPAddress); kept as-is for backward compatibility.
class invalidIPAddress(NameSiloError):
    """Raised if the requesting IP address is invalid (API code 113)."""
    pass
class InvalidDomainSyntax(NameSiloError):
    """Raised if the domain name syntax is invalid (API code 114)."""
    pass
class CentralRegistryNotResponding(NameSiloError):
    """Raised if the central registry is not responding (API code 115)."""
    pass
class InvalidSandboxAccount(NameSiloError):
    """Raised for an invalid sandbox account (API code 116)."""
    pass
class CreditCardProfileDoesNotExist(NameSiloError):
    """Raised if the credit card profile does not exist (API code 117)."""
    pass
class UnverifiedCreditCardProfile(NameSiloError):
    """Raised if the credit card profile is unverified (API code 118)."""
    pass
class InsufficientAccountFunds(NameSiloError):
    """Raised if account funds are insufficient (API code 119)."""
    pass
# NOTE(review): name is oddly capitalised (should be APIKeyNotPassedAsGET);
# kept as-is for backward compatibility.
class ApIKeyNotPassedasGet(NameSiloError):
    """Raised if the API key was not passed as a GET parameter (API code 120)."""
    pass
class DomainNotActive(NameSiloError):
    """Raised if the domain is not active (API code 200)."""
    pass
# NOTE(review): misspelled (should be InternalSystemError); kept as-is.
class InteralSystemError(NameSiloError):
    """Raised on an internal system error (API code 201)."""
    pass
class DomainAlreadyAutoRenew(NameSiloError):
    """Raised if the domain is already set to auto-renew (API code 250)."""
    pass
# NOTE(review): misspelled (should be DomainAlreadyNotAutoRenew); kept as-is.
class DomainAlreadyNotAutoReview(NameSiloError):
    """Raised if the domain is already not set to auto-renew (API code 251)."""
    pass
class DomainAlreadyLocked(NameSiloError):
    """Raised if the domain is already locked (API code 252)."""
    pass
class DomainAlreadyUnlocked(NameSiloError):
    """Raised if the domain is already unlocked (API code 253)."""
    pass
class NameserverUpdateError(NameSiloError):
    """Raised if the nameserver update failed (API code 254)."""
    pass
class DomainAlreadyPrivate(NameSiloError):
    """Raised if the domain is already private (API code 255)."""
    pass
class DomainAlreadyNotPrivate(NameSiloError):
    """Raised if the domain is already not private (API code 256)."""
    pass
class ProcessingError(NameSiloError):
    """Raised on a processing error (API code 261)."""
    pass
class DomainAlreadyActive(NameSiloError):
    """Raised if the domain is already active (API code 262)."""
    pass
class InvalidNumberOfYears(NameSiloError):
    """Raised if the number of years is invalid (API code 263)."""
    pass
class DomainRenewalError(NameSiloError):
    """Raised if the domain renewal failed (API code 264)."""
    pass
class DomainTransferError(NameSiloError):
    """Raised if the domain transfer failed (API code 265)."""
    pass
class DomainTransferDoesNotExist(NameSiloError):
    """Raised if the domain transfer does not exist (API code 266)."""
    pass
class InvalidDomainName(NameSiloError):
    """Raised if the domain name is invalid (API code 267)."""
    pass
class DNSModificationError(NameSiloError):
    """Raised if the DNS modification failed (API code 280)."""
    pass
# Maps NameSilo API reply codes (as strings) to the exception class raised
# for that code.  1xx codes are request/authentication problems; 2xx codes
# are operation-level failures.  Code '210' has no dedicated class and maps
# to the generic NameSiloError base.
NAMESILO_ERRORS = {
    '101': HTTPSNotUsed,
    '102': NoVersionSpecified,
    '103': InvalidAPIVersion,
    '104': NoTypeSpecified,
    '105': InvalidAPIType,
    '106': NoOperationSpecified,
    '107': InvalidAPIOperation,
    '108': MissingOperationParameters,
    '109': NoAPIKeySpecified,
    '110': InvalidAPIKey,
    '111': InvalidUser,
    '112': APINotAvailableToSubAccounts,
    '113': invalidIPAddress,
    '114': InvalidDomainSyntax,
    '115': CentralRegistryNotResponding,
    '116': InvalidSandboxAccount,
    '117': CreditCardProfileDoesNotExist,
    '118': UnverifiedCreditCardProfile,
    '119': InsufficientAccountFunds,
    '120': ApIKeyNotPassedasGet,
    '200': DomainNotActive,
    '201': InteralSystemError,
    '210': NameSiloError,
    '250': DomainAlreadyAutoRenew,
    '251': DomainAlreadyNotAutoReview,
    '252': DomainAlreadyLocked,
    '253': DomainAlreadyUnlocked,
    '254': NameserverUpdateError,
    '255': DomainAlreadyPrivate,
    '256': DomainAlreadyNotPrivate,
    '261': ProcessingError,
    '262': DomainAlreadyActive,
    '263': InvalidNumberOfYears,
    '264': DomainRenewalError,
    '265': DomainTransferError,
    '266': DomainTransferDoesNotExist,
    '267': InvalidDomainName,
    '280': DNSModificationError,
}
class NameSilo(object):
    """Thin client for the NameSilo domain-management HTTP API.

    Every key of ``NAMESILO_OPERATIONS`` is exposed as a method, e.g.
    ``NameSilo(key).list_domains(domain='example.com')``; keyword arguments
    become query-string parameters of the underlying GET request.
    """
    LIVE_BASE_URL = 'https://www.namesilo.com/api/'
    SANDBOX_BASE_URL = 'http://sandbox.namesilo.com/api/'
    VERSION = '1'
    RESPONSE_TYPE = 'xml'
    def __init__(self, api_key, live=False):
        """api_key: NameSilo API key.  live: use production, else sandbox."""
        self.api_key = api_key
        self.base_url = self.LIVE_BASE_URL if live else self.SANDBOX_BASE_URL
    def __getattr__(self, name):
        # Dynamically expose the friendly operation names as methods.
        if name in NAMESILO_OPERATIONS:
            def handle_request(**kwargs):
                return self.request(name, **kwargs)
            return handle_request
        # BUG FIX: ``object`` defines no __getattr__, so the original
        # ``super(NameSilo, self).__getattr__(name)`` itself failed with a
        # confusing "'super' object has no attribute '__getattr__'" error.
        # Raise the conventional AttributeError for the missing name instead.
        raise AttributeError(name)
    def request(self, operation, **kwargs):
        """Perform one API GET for *operation* and return the parsed reply.

        Raises the NameSiloError subclass mapped to the API's error code,
        and requests.HTTPError on transport-level failure.
        """
        operation = NAMESILO_OPERATIONS.get(operation, operation)
        kwargs.update(version=self.VERSION, type=self.RESPONSE_TYPE,
                      key=self.api_key)
        r = requests.get(self.base_url + operation, params=kwargs)
        r.raise_for_status()
        root = ElementTree.XML(r.text)
        response = XmlDictConfig(root)
        reply = response.get('reply')
        reply = self.format_reply(reply)
        self.handle_error(reply)
        return reply
    def handle_error(self, reply):
        """Raise the exception mapped to the reply's 'code', if any."""
        code = reply.get('code')
        if code in NAMESILO_ERRORS:
            error = NAMESILO_ERRORS[code]
            raise error(reply.get('detail'))
    def format_reply(self, reply):
        """Recursively normalize 'Yes'/'No'/'N/A' string leaves in *reply*
        to True/False/None; other values are left untouched."""
        for k, v in reply.items():
            if isinstance(v, dict):
                reply[k] = self.format_reply(v)
            elif isinstance(v, str):
                # BUG FIX: only call .lower() on strings.  Leaves parsed from
                # empty XML elements can be None and previously crashed here.
                if v.lower() == 'yes':
                    reply[k] = True
                elif v.lower() == 'no':
                    reply[k] = False
                elif v.lower() == 'n/a':
                    reply[k] = None
        return reply
class XmlListConfig(list):
    """List built from sibling XML elements that share a common tag.

    Children with sub-elements become nested XmlDictConfig/XmlListConfig
    values; childless elements contribute their stripped text (if any).
    """
    def __init__(self, elements):
        for child in elements:
            if child:
                first_tag = child[0].tag
                # A single sub-element, or differing sibling tags: dict-like.
                if len(child) == 1 or first_tag != child[1].tag:
                    self.append(XmlDictConfig(child))
                # Repeated sibling tags: list-like.
                elif first_tag == child[1].tag:
                    self.append(XmlListConfig(child))
            elif child.text:
                stripped = child.text.strip()
                if stripped:
                    self.append(stripped)
class XmlDictConfig(dict):
    '''
    Dictionary built recursively from an ElementTree element: attributes
    become key/value pairs, child elements become nested dicts/lists/text.

    Example usage:
    >>> tree = ElementTree.parse('your_file.xml')
    >>> root = tree.getroot()
    >>> xmldict = XmlDictConfig(root)
    Or, if you want to use an XML string:
    >>> root = ElementTree.XML(xml_string)
    >>> xmldict = XmlDictConfig(root)
    And then use xmldict for what it is... a dict.
    '''
    def __init__(self, parent_element):
        # Attributes of the parent element become top-level entries.
        if parent_element.items():
            self.update(dict(parent_element.items()))
        for element in parent_element:
            if element:
                # treat like dict - we assume that if the first two tags
                # in a series are different, then they are all different.
                if len(element) == 1 or element[0].tag != element[1].tag:
                    aDict = XmlDictConfig(element)
                # treat like list - we assume that if the first two tags
                # in a series are the same, then the rest are the same.
                else:
                    # here, we put the list in dictionary; the key is the
                    # tag name the list elements all share in common, and
                    # the value is the list itself
                    aDict = {element[0].tag: XmlListConfig(element)}
                # if the tag has attributes, add those to the dict
                if element.items():
                    aDict.update(dict(element.items()))
                # Repeated tags at this level are collected into a two-item
                # list (NOTE(review): a third occurrence nests further —
                # [[first, second], third] — rather than extending the list).
                if element.tag in self:
                    temporary = self[element.tag]
                    self[element.tag] = [temporary, aDict]
                else:
                    self.update({element.tag: aDict})
            # this assumes that if you've got an attribute in a tag,
            # you won't be having any text. This may or may not be a
            # good idea -- time will tell. It works for the way we are
            # currently doing XML configuration files...
            elif element.items():
                self.update({element.tag: dict(element.items())})
            # finally, if there are no child tags and no attributes, extract
            # the text (may be None for an empty element)
            else:
                self.update({element.tag: element.text})
|
|
# Copyright (C) 2014-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module handles download commands for the dx command-line client.
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import collections
import os
import subprocess
import sys
import tempfile
import warnings
import logging
import dxpy
from ..utils.resolver import (resolve_existing_path, get_first_pos_of_char, is_project_explicit,
object_exists_in_project, is_jbor_str)
from ..exceptions import err_exit
from . import try_call
from dxpy.utils.printing import (fill)
from dxpy.utils import pathmatch
# Check if a program (wget, curl, etc.) is on the path, and
# can be called.
def _which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# Calculate the md5 checksum for [filename], and raise
# an exception if the checksum is wrong.
def _verify(filename, md5digest):
    """Checksum *filename* with the md5sum tool and err_exit() on mismatch.

    md5digest is the expected hex digest as a string.
    """
    md5sum_exe = _which("md5sum")
    if md5sum_exe is None:
        err_exit("md5sum is not installed on this system")
    cmd = [md5sum_exe, "-b", filename]
    try:
        print("Calculating checksum")
        cmd_out = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        err_exit("Failed to run md5sum: " + str(cmd))
    # BUG FIX: check_output returns bytes on Python 3; decode before
    # splitting, otherwise the digest comparison below compares bytes to a
    # str (always unequal) and the error-message concatenation raises.
    line = cmd_out.decode("utf-8").strip().split()
    if len(line) != 2:
        err_exit("md5sum returned weird results: " + str(line))
    actual_md5 = line[0]
    if actual_md5 != md5digest:
        err_exit("Checksum doesn't match " + actual_md5 + " expected:" + md5digest)
    print("Checksum correct")
def download_one_file(project, file_desc, dest_filename, args):
    """Download a single closed file object described by *file_desc* to
    *dest_filename*; non-file or non-closed objects are skipped with a
    message on stderr.  Exits via err_exit() on failure."""
    if not args.overwrite and os.path.exists(dest_filename):
        err_exit(fill('Error: path "' + dest_filename + '" already exists but -f/--overwrite was not set'))
    if file_desc['class'] != 'file':
        print("Skipping non-file data object {name} ({id})".format(**file_desc), file=sys.stderr)
        return
    if file_desc['state'] != 'closed':
        print("Skipping file {name} ({id}) because it is not closed".format(**file_desc), file=sys.stderr)
        return
    # args may not carry show_progress (e.g. when called programmatically).
    show_progress = getattr(args, 'show_progress', False)
    try:
        dxpy.download_dxfile(
            file_desc['id'],
            dest_filename,
            show_progress=show_progress,
            project=project,
            describe_output=file_desc)
        return
    except:
        err_exit()
def do_debug(msg):
    """Emit *msg* through the root logger at DEBUG level."""
    logging.debug(msg)
# dest_filename = local file where downloaded file will go
# src_filename = name of parquet file or folder being downloaded from database
def download_one_database_file(project, database_desc, dest_filename, src_filename, file_status, args):
    """Download one file (or parquet folder) named *src_filename* from a
    database object to the local path *dest_filename*.  Exits via
    err_exit() on failure; skips non-database objects with a message."""
    do_debug("download.py#download_one_database_file - src_filename = {}".format(src_filename))
    if file_status is not None:
        do_debug("download.py#download_one_database_file - file_status = {}".format(file_status))
    if not args.overwrite and os.path.exists(dest_filename):
        err_exit(fill('Error: path "' + dest_filename + '" already exists but -f/--overwrite was not set'))
    if database_desc['class'] != 'database':
        print("Skipping non-database data object {name} ({id})".format(**database_desc), file=sys.stderr)
        return
    # args may not carry show_progress (e.g. when called programmatically).
    show_progress = getattr(args, 'show_progress', False)
    try:
        dxpy.download_dxdatabasefile(
            database_desc['id'],
            dest_filename,
            src_filename,
            file_status,
            show_progress=show_progress,
            project=project,
            describe_output=database_desc)
    except:
        err_exit()
def _ensure_local_dir(d):
if not os.path.isdir(d):
if os.path.exists(d):
err_exit(fill('Error: path "' + d + '" already exists and is not a directory'))
os.makedirs(d)
def _is_glob(path):
    """True when *path* contains a glob wildcard ('*' or '?').

    Delegates to get_first_pos_of_char, which returns -1 when the character
    is absent (presumably ignoring escaped occurrences — see resolver).
    """
    return any(get_first_pos_of_char(wildcard, path) > -1 for wildcard in ('*', '?'))
def _rel2abs(path, project):
    """Resolve *path* to an absolute platform path within *project*.

    Paths that are already absolute, or that belong to a project other than
    the current workspace, are taken as-is; otherwise the CLI working
    directory is prepended.  Returns (abs_path, strip_prefix) where
    strip_prefix is the parent folder to strip when mirroring locally.
    """
    if path.startswith('/') or dxpy.WORKSPACE_ID != project:
        abs_path = path
    else:
        cli_wd = dxpy.config.get('DX_CLI_WD', u'/')
        abs_path = os.path.join(cli_wd, path)
    strip_prefix = os.path.dirname(abs_path.rstrip('/'))
    # Normalise a trailing slash, but keep the root '/' intact.
    if len(abs_path) > 1:
        abs_path = abs_path.rstrip('/')
    return abs_path, strip_prefix
def _download_files(files, destdir, args, dest_filename=None):
    """Download every described file in *files* (a project -> list-of-results
    mapping).  Each file goes to *dest_filename* when given, otherwise into
    *destdir* under its platform name (with '/' escaped as '%2F')."""
    for project, results in files.items():
        for result in results:
            desc = result['describe']
            target = dest_filename or os.path.join(destdir, desc['name'].replace('/', '%2F'))
            download_one_file(project, desc, target, args)
def _download_folders(folders, destdir, args):
    """Recursively download every folder in *folders* (a project ->
    [(folder, strip_prefix), ...] mapping) into *destdir*.  Requires
    -r/--recursive; exits via err_exit() otherwise or on failure."""
    show_progress = getattr(args, 'show_progress', False)
    for project, folder_list in folders.items():
        for folder, strip_prefix in folder_list:
            if not args.recursive:
                err_exit('Error: "' + folder + '" is a folder but the -r/--recursive option was not given')
            assert(folder.startswith(strip_prefix))
            folder_destdir = os.path.join(destdir, folder[len(strip_prefix):].lstrip('/'))
            try:
                dxpy.download_folder(project, folder_destdir, folder=folder, overwrite=args.overwrite,
                                     show_progress=show_progress)
            except:
                err_exit()
# Main entry point.
def download(args):
    """Entry point for 'dx download': resolve each path in args.paths to
    matching files and/or folders, then download them to --output (or the
    current directory).  Exits via err_exit() on resolution failures,
    file/folder name conflicts, or an invalid --output destination.
    """
    folders_to_get, files_to_get, count = collections.defaultdict(list), collections.defaultdict(list), 0
    foldernames, filenames = [], []
    for path in args.paths:
        # Attempt to resolve name. If --all is given or the path looks like a glob, download all matches.
        # Otherwise, the resolver will display a picker (or error out if there is no tty to display to).
        resolver_kwargs = {'allow_empty_string': False}
        if args.all or _is_glob(path):
            resolver_kwargs.update({'allow_mult': True, 'all_mult': True})
        # include "parts" and a few additional fields in the description so that
        # we don't have to call a separate describe method downstream
        resolver_kwargs.update({"describe": {"parts": True,
                                             "size": True,
                                             "drive": True,
                                             "md5": True}})
        project, folderpath, matching_files = try_call(resolve_existing_path, path, **resolver_kwargs)
        # Normalise the resolver's result to a (possibly empty) list.
        if matching_files is None:
            matching_files = []
        elif not isinstance(matching_files, list):
            matching_files = [matching_files]
        # TODO: this could also be returned as metadata by resolve_path since
        # resolve_path knows these things in some circumstances
        path_has_explicit_proj = is_project_explicit(path) or is_jbor_str(path)
        if is_jbor_str(path):
            assert len(matching_files) == 1
            project = matching_files[0]["describe"]["project"]
        matching_folders = []
        # project may be none if path is an ID and there is no project context
        # NOTE(review): strip_prefix is only bound inside this branch; the
        # generator expressions below reference it lazily, which is safe
        # only because matching_folders stays empty when project is None.
        if project is not None:
            colon_pos = get_first_pos_of_char(":", path)
            if colon_pos >= 0:
                path = path[colon_pos + 1:]
            abs_path, strip_prefix = _rel2abs(path, project)
            parent_folder = os.path.dirname(abs_path)
            folder_listing = dxpy.list_subfolders(project, parent_folder, recurse=False)
            matching_folders = pathmatch.filter(folder_listing, abs_path)
            if '/' in matching_folders and len(matching_folders) > 1:
                # The list of subfolders is {'/', '/A', '/B'}.
                # Remove '/', otherwise we will download everything twice.
                matching_folders.remove('/')
        if len(matching_files) == 0 and len(matching_folders) == 0:
            err_exit(fill('Error: {path} is neither a file nor a folder name'.format(path=path)))
        # If the user did not explicitly provide the project, don't pass any
        # project parameter to the API call but continue with the download.
        if not path_has_explicit_proj:
            project = dxpy.DXFile.NO_PROJECT_HINT
        # If the user explicitly provided the project and it doesn't contain
        # the files, don't allow the download.
        # For speed's sake, skip this check (i.e. one API call) if the user
        # passed the lightweight argument
        #
        # If length of matching_files is 0 then we're only downloading folders
        # so skip this logic since the files will be verified in the API call.
        if not args.lightweight \
                and len(matching_files) > 0 \
                and path_has_explicit_proj \
                and not any(object_exists_in_project(f['describe']['id'], project) for f in matching_files):
            err_exit(fill('Error: specified project does not contain specified file object'))
        files_to_get[project].extend(matching_files)
        folders_to_get[project].extend(((f, strip_prefix) for f in matching_folders))
        count += len(matching_files) + len(matching_folders)
        filenames.extend(f["describe"]["name"] for f in matching_files)
        foldernames.extend(f[len(strip_prefix):].lstrip('/') for f in matching_folders)
    # A local name cannot be both a downloaded file and a downloaded folder.
    if len(filenames) > 0 and len(foldernames) > 0:
        name_conflicts = set(filenames) & set(foldernames)
        if len(name_conflicts) > 0:
            msg = "Error: The following paths are both file and folder names, and " \
                  "cannot be downloaded to the same destination: "
            msg += ", ".join(sorted(name_conflicts))
            err_exit(fill(msg))
    # Work out the destination directory and (optional) single-file name.
    if args.output is None:
        destdir, dest_filename = os.getcwd(), None
    elif count > 1:
        if not os.path.exists(args.output):
            err_exit(fill("Error: When downloading multiple objects, --output must be an existing directory"))
        destdir, dest_filename = args.output, None
    elif os.path.isdir(args.output):
        destdir, dest_filename = args.output, None
    elif args.output.endswith('/'):
        err_exit(fill("Error: {path} could not be found".format(path=args.output)))
    else:
        destdir, dest_filename = os.getcwd(), args.output
    _download_folders(folders_to_get, destdir, args)
    _download_files(files_to_get, destdir, args, dest_filename=dest_filename)
|
|
#!/usr/bin/env python
"Representation of link standoff annotation and measures over it"
from collections import Sequence, defaultdict
import operator
try:
    # Python 2: use the lazy dict view and iterator variants.
    keys = dict.viewkeys
    import itertools
    filter = itertools.ifilter
except Exception:
    # Py3k: dict.keys() is already a view and the builtin filter is lazy.
    keys = dict.keys
class Annotation(object):
__slots__ = ['docid', 'start', 'end', 'candidates', 'is_first']
def __init__(self, docid, start, end, candidates=[]):
self.docid = docid
self.start = start
self.end = end
self.candidates = candidates
def __str__(self):
return unicode(self)
def __unicode__(self):
return u'{}\t{}\t{}\t{}'.format(
self.docid,
self.start,
self.end,
u'\t'.join([unicode(c) for c in self.candidates])
)
def __repr__(self):
return 'Annotation({!r}, {!r}, {!r}, {!r})'.format(self.docid, self.start, self.end, self.candidates)
def __cmp__(self, other):
assert isinstance(other, Annotation)
return cmp((self.start, -self.end), (other.start, -other.end))
def compare_spans(self, other):
assert self.start <= self.end, 'End is before start: {!r}'.format(self)
assert other.start <= other.end, 'End is before start: {!r}'.format(self)
if self.docid != other.docid:
return 'different documents'
if self.start > other.end or self.end < other.start:
return 'non-overlapping'
elif self.start == other.start and self.end == other.end:
return 'duplicate'
elif self.start < other.start and self.end >= other.end:
return 'nested'
elif self.start >= other.start and self.end < other.end:
return 'nested'
else:
return 'crossing'
# Getters
@property
def span(self):
return (self.docid, self.start, self.end)
@property
def link(self):
"Return top candidate"
if self.candidates:
return self.candidates[0]
@property
def eid(self):
"Return link KB ID or NIL cluster ID (default cluster ID is None)"
if self.link is not None:
return self.link.id
@property
def kbid(self):
"Return link KB ID or None"
if self.is_linked:
return self.link.id
@property
def score(self):
"Return link score or None"
if self.is_linked:
return self.link.score
@property
def type(self):
"Return link type or None"
if self.link:
return self.link.type
@property
def is_nil(self):
if self.eid is None:
return True
if self.eid.startswith('NIL'):
return True
return False
@property
def is_linked(self):
return not self.is_nil
# Parsing methods
@classmethod
def from_string(cls, s):
docid, start, end, candidates = None, None, None, []
cols = s.rstrip('\n\t').split('\t', 3)
if len(cols) < 3:
raise SyntaxError('Annotation must have at least 3 columns. Got {!r}'.format(s))
if len(cols) >= 3:
docid = cols[0]
start = int(cols[1])
end = int(cols[2])
if len(cols) == 4:
candidates = sorted(Candidate.from_string(cols[3]), reverse=True)
return Annotation(docid, start, end, candidates)
class Candidate(object):
    """A single KB link candidate: an id plus optional score and type."""
    __slots__ = ['id', 'score', 'type']
    def __init__(self, id, score=None, type=None):
        self.id = id
        self.score = score
        self.type = type
    def __str__(self):
        return unicode(self)
    def __unicode__(self):
        # Tab-separated 'id<TAB>score<TAB>type'; empty fields when unset.
        return u'{}\t{}\t{}'.format(self.id,
                                    self.score or '',
                                    self.type or '')
    def __repr__(self):
        return '<{!r}>'.format(self.id)
    def __cmp__(self, other):
        # Py2-only ordering by score; used to rank candidates best-first.
        assert isinstance(other, Candidate)
        return cmp(self.score, other.score)
    # Parsing methods
    @classmethod
    def from_string(cls, s):
        """Yield Candidate objects parsed from a tab-separated field."""
        cols = s.rstrip('\t').split('\t')
        if len(cols) == 1:
            # link includes id only
            yield cls(cols[0])
        elif len(cols) == 2:
            # link includes id and score
            yield cls(cols[0], float(cols[1]))
        elif len(cols[3:]) % 3 == 0:
            # >=1 (id, score, type) candidate tuples
            # (len(cols[3:]) % 3 equals len(cols) % 3, so this accepts any
            # column count that is a multiple of 3.)
            for i in xrange(0, len(cols), 3):
                id, score, type = cols[i:i+3]
                yield cls(id, float(score), type)
        else:
            # undefined format
            raise SyntaxError('Need id, score and type when >1 candidates')
class Measure(object):
    """One evaluation configuration: the mention key fields to compare on,
    an optional annotation filter, and an aggregation scheme."""
    __slots__ = ['key', 'filter', 'filter_fn', 'agg']
    def __init__(self, key, filter=None, agg='sets-micro'):
        """
        key : list of fields for mention comparison
        filter : a function or attribute name to select evaluated annotations
        agg : [work in progress]
        """
        if not isinstance(key, Sequence):
            raise TypeError('key should be a list or tuple')
        self.key = tuple(key)
        self.filter = filter
        # Normalise a string filter (e.g. 'is_linked') into an attrgetter;
        # self.filter keeps the original value for __str__/__repr__.
        if filter is not None and not callable(filter):
            assert isinstance(filter, str)
            filter = operator.attrgetter(filter)
        self.filter_fn = filter
        self.agg = agg
    def __str__(self):
        # Serialised as 'agg:filter:field1+field2' (parsed by from_string).
        return '{}:{}:{}'.format(self.agg, self.filter, '+'.join(self.key))
    @classmethod
    def from_string(cls, s):
        # Inverse of __str__; an empty or 'None' filter field means no filter.
        if s.count(':') != 2:
            raise ValueError('Expected 2 colons in {!r}'.format(s))
        a, f, k = s.split(':')
        if f in ('', 'None'):
            f = None
        return cls(k.split('+'), f, a)
    def __repr__(self):
        return ('{0.__class__.__name__}('
                '{0.key!r}, {0.filter!r}, {0.agg!r})'.format(self))
    # Aggregations that compare flat mention sets; anything else is treated
    # as a clustering (coreference) metric name.  # TODO: 'sets-macro'
    NON_CLUSTERING_AGG = ('sets-micro',)
    @property
    def is_clustering(self):
        return self.agg not in self.NON_CLUSTERING_AGG
    def build_index(self, annotations):
        # Map each (filtered) annotation's key tuple to the annotation.
        if isinstance(annotations, dict):
            # assume already built
            return annotations
        # TODO: caching
        if self.filter is not None:
            annotations = filter(self.filter_fn, annotations)
        key = self.key
        return {tuple(getattr(ann, field) for field in key): ann
                for ann in annotations}
    def build_clusters(self, annotations):
        # Group (filtered) annotation key tuples by entity/cluster id (eid).
        if isinstance(annotations, dict):
            # assume already built
            return annotations
        # TODO: caching
        # TODO: can reuse build_index for small efficiency loss
        if self.filter is not None:
            annotations = filter(self.filter_fn, annotations)
        key = self.key
        out = defaultdict(set)
        for ann in annotations:
            out[ann.eid].add(tuple(getattr(ann, field) for field in key))
        out.default_factory = None  # disable defaulting
        return out
    def count_matches(self, system, gold):
        """Return (tp, fp, fn) counts over exact key matches."""
        if self.is_clustering:
            raise ValueError('count_matches is inappropriate '
                             'for {}'.format(self.agg))
        gold_index = self.build_index(gold)
        pred_index = self.build_index(system)
        tp = len(keys(gold_index) & keys(pred_index))
        fn = len(gold_index) - tp
        fp = len(pred_index) - tp
        return tp, fp, fn
    def get_matches(self, system, gold):
        """ Assesses the match between sets of annotations
        Returns three lists of items:
        * tp [(item, other_item), ...]
        * fp [(None, other_item), ...]
        * fn [(item, None), ...]
        """
        if self.is_clustering:
            raise ValueError('get_matches is inappropriate '
                             'for {}'.format(self.agg))
        gold_index = self.build_index(gold)
        pred_index = self.build_index(system)
        gold_keys = keys(gold_index)
        pred_keys = keys(pred_index)
        shared = gold_keys & pred_keys
        tp = [(gold_index[k], pred_index[k]) for k in shared]
        fp = [(None, pred_index[k]) for k in pred_keys - shared]
        fn = [(gold_index[k], None) for k in gold_keys - shared]
        return tp, fp, fn
    def count_clustering(self, system, gold):
        """Apply the coref_metrics function named by self.agg; returns its
        (p_num, p_den, r_num, r_den) tuple."""
        from . import coref_metrics
        if not self.is_clustering:
            raise ValueError('evaluate_clustering is inappropriate '
                             'for {}'.format(self.agg))
        try:
            fn = getattr(coref_metrics, self.agg)
        except AttributeError:
            raise ValueError('Invalid aggregation: {!r}'.format(self.agg))
        if not callable(fn):
            raise ValueError('Invalid aggregation: {!r}'.format(self.agg))
        gold_clusters = self.build_clusters(gold)
        pred_clusters = self.build_clusters(system)
        return fn(gold_clusters, pred_clusters)
    def contingency(self, system, gold):
        """Return (ptp, fp, rtp, fn); precision- and recall-side true
        positives coincide for non-clustering measures."""
        if self.is_clustering:
            p_num, p_den, r_num, r_den = self.count_clustering(system, gold)
            ptp = p_num
            fp = p_den - p_num
            rtp = r_num
            fn = r_den - r_num
            return ptp, fp, rtp, fn
        else:
            tp, fp, fn = self.count_matches(system, gold)
            return tp, fp, tp, fn
    def docs_to_contingency(self, system, gold):
        """Flatten per-document annotation lists and compute contingency."""
        return self.contingency([a for doc in system for a in doc.annotations],
                                [a for doc in gold for a in doc.annotations])
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class RNNCellTest(test.TestCase):
def testCoupledInputForgetGateLSTMCell(self):
with self.test_session() as sess:
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
expected_output = np.array(
[[0.121753, 0.121753],
[0.103349, 0.103349],
[0.100178, 0.100178]],
dtype=np.float32)
expected_state = np.array(
[[0.137523, 0.137523, 0.121753, 0.121753],
[0.105450, 0.105450, 0.103349, 0.103349],
[0.100742, 0.100742, 0.100178, 0.100178]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
output, state = rnn_cell.CoupledInputForgetGateLSTMCell(
num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, state_size))
})
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.test_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) // frequency_skip + 1
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size * num_shifts])
output, state = rnn_cell.TimeFreqLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
  def testGridLSTMCell(self):
    """Smoke-tests GridLSTMCell with a single frequency block.

    Only output/state shapes and sensitivity to the input are checked; the
    numeric values were never pinned down.
    """
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      # Number of frequency shifts the feature window makes over the input.
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.GridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = constant_op.constant(
            np.array(
                [[1., 1., 1., 1.],
                 [2., 2., 2., 2.],
                 [3., 3., 3., 3.]],
                dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        # One (c, h) pair per frequency shift.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(
              float(
                  np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
                                  .state_f00_b00_c[i, :]))) > 1e-6)
  def testGridLSTMCellWithFrequencyBlocks(self):
    """Smoke-tests GridLSTMCell with two explicit frequency blocks.

    Uses start/end frequency index lists instead of a derived shift count;
    only shapes and input sensitivity are checked.
    """
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      feature_size = 2
      frequency_skip = 1
      num_frequency_blocks = [1, 1]
      total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
      start_freqindex_list = [0, 2]
      end_freqindex_list = [2, 4]
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.GridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=num_frequency_blocks,
            start_freqindex_list=start_freqindex_list,
            end_freqindex_list=end_freqindex_list,
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = constant_op.constant(
            np.array(
                [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
                dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        # One (c, h) pair per block.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * total_blocks))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape,
                         (batch_size, num_units * total_blocks * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(
              float(
                  np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
                                  .state_f00_b00_c[i, :]))) > 1e-6)
  def testGridLstmCellWithCoupledInputForgetGates(self):
    """Pins GridLSTMCell values for both tuple and concatenated states.

    Runs the same graph with state_is_tuple False and True and checks both
    forms against the same frozen expected values.
    """
    num_units = 2
    batch_size = 3
    input_size = 4
    feature_size = 2
    frequency_skip = 1
    # Number of frequency shifts the feature window makes over the input.
    num_shifts = int((input_size - feature_size) / frequency_skip + 1)
    expected_output = np.array(
        [[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
          0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
         [0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
          0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
         [0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
          0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
        dtype=np.float32)
    expected_state = np.array(
        [[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
          0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
         [0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
          0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
         [0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
          0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
        dtype=np.float32)
    for state_is_tuple in [False, True]:
      with self.test_session() as sess:
        with variable_scope.variable_scope(
            "state_is_tuple" + str(state_is_tuple),
            initializer=init_ops.constant_initializer(0.5)):
          cell = rnn_cell.GridLSTMCell(
              num_units=num_units,
              feature_size=feature_size,
              frequency_skip=frequency_skip,
              forget_bias=1.0,
              num_frequency_blocks=[num_shifts],
              couple_input_forget_gates=True,
              state_is_tuple=state_is_tuple)
          inputs = constant_op.constant(
              np.array([[1., 1., 1., 1.],
                        [2., 2., 2., 2.],
                        [3., 3., 3., 3.]],
                       dtype=np.float32),
              dtype=dtypes.float32)
          if state_is_tuple:
            state_value = constant_op.constant(
                0.1 * np.ones(
                    (batch_size, num_units), dtype=np.float32),
                dtype=dtypes.float32)
            # One (c, h) pair per frequency shift.
            init_state = cell.state_tuple_type(
                *([state_value, state_value] * num_shifts))
          else:
            init_state = constant_op.constant(
                0.1 * np.ones(
                    (batch_size, num_units * num_shifts * 2), dtype=np.float32),
                dtype=dtypes.float32)
          output, state = cell(inputs, init_state)
          sess.run([variables.global_variables_initializer()])
          res = sess.run([output, state])
          # This is a smoke test: Only making sure expected values not change.
          self.assertEqual(len(res), 2)
          self.assertAllClose(res[0], expected_output)
          if not state_is_tuple:
            self.assertAllClose(res[1], expected_state)
          else:
            # There should be num_shifts * 2 states in the tuple.
            self.assertEqual(len(res[1]), num_shifts * 2)
            # Checking the shape of each state to be batch_size * num_units
            for ss in res[1]:
              self.assertEqual(ss.shape[0], batch_size)
              self.assertEqual(ss.shape[1], num_units)
            self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCell(self):
    """Pins BidirectionalGridLSTMCell values with shared fwd/bwd weights."""
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      # Number of frequency shifts the feature window makes over the input.
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
            0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
            0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
            0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
            0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
            0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
            0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
          dtype=np.float32)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.BidirectionalGridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts])
        inputs = constant_op.constant(
            np.array([[1.0, 1.1, 1.2, 1.3],
                      [2.0, 2.1, 2.2, 2.3],
                      [3.0, 3.1, 3.2, 3.3]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        # One (c, h) pair per shift, for forward and backward directions.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCellWithSliceOffset(self):
    """Pins BidirectionalGridLSTMCell values with backward_slice_offset=1."""
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      # Number of frequency shifts the feature window makes over the input.
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
            0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
            0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
            0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
            0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
            0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
            0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
          dtype=np.float32)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.BidirectionalGridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            backward_slice_offset=1)
        inputs = constant_op.constant(
            np.array([[1.0, 1.1, 1.2, 1.3],
                      [2.0, 2.1, 2.2, 2.3],
                      [3.0, 3.1, 3.2, 3.3]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        # One (c, h) pair per shift, for forward and backward directions.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(TypeError,
"The parameter cell is not RNNCell."):
rnn_cell.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
rnn_cell.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
rnn_cell.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with ops.Graph().as_default():
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
rnn_cell.AttentionCellWrapper(lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = array_ops.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = array_ops.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = array_ops.zeros(
[batch_size, input_size], dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(),
[batch_size, num_units])
self.assertEquals(state[0][1].get_shape(),
[batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEquals(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[math_ops.reduce_sum(math_ops.abs(x)) for x in tensors])
sess.run(variables.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, num_units], dtype=np.float32),
dtype=dtypes.float32)
attn_state_zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, attn_length * num_units], dtype=np.float32),
dtype=dtypes.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = constant_op.constant(
0.1 * np.ones(
[
batch_size,
num_units * 2 + num_units + attn_length * num_units
],
dtype=np.float32),
dtype=dtypes.float32)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
sess.run(variables.global_variables_initializer())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
  def _testAttentionCellWrapperCorrectResult(self):
    """Pins exact AttentionCellWrapper values (disabled: leading underscore).

    Relies on a legacy weight-sharing trick that stashes and restores the
    RNNCells' private `_scope` between the two loop iterations, so the
    second iteration reuses the first iteration's variables.
    """
    num_units = 4
    attn_length = 6
    batch_size = 2
    expected_output = np.array(
        [[1.068372, 0.45496, -0.678277, 0.340538],
         [1.018088, 0.378983, -0.572179, 0.268591]],
        dtype=np.float32)
    expected_state = np.array(
        [[0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
          0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
          0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
          0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
          0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
          0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
          0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
          0.51843399],
         [0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
          0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
          0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
          0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
          0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
          0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
          0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
          0.70582712]],
        dtype=np.float32)
    seed = 12345
    random_seed.set_random_seed(seed)
    rnn_scope = None
    for state_is_tuple in [False, True]:
      with session.Session() as sess:
        with variable_scope.variable_scope(
            "state_is_tuple", reuse=state_is_tuple,
            initializer=init_ops.glorot_uniform_initializer()):
          lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
              num_units, state_is_tuple=state_is_tuple)
          cell = rnn_cell.AttentionCellWrapper(
              lstm_cell, attn_length, state_is_tuple=state_is_tuple)
          # This is legacy behavior to preserve the test. Weight
          # sharing no longer works by creating a new RNNCell in the
          # same variable scope; so here we restore the scope of the
          # RNNCells after the first use below.
          if rnn_scope is not None:
            (cell._scope, lstm_cell._scope) = rnn_scope  # pylint: disable=protected-access,unpacking-non-sequence
          # Seeded uniforms: despite the names, the state is not zero.
          zeros1 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
          zeros2 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
          zeros3 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
          attn_state_zeros = random_ops.random_uniform(
              (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
          zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
          if not state_is_tuple:
            zero_state = array_ops.concat([
                zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
            ], 1)
          inputs = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
          output, state = cell(inputs, zero_state)
          # This is legacy behavior to preserve the test. Weight
          # sharing no longer works by creating a new RNNCell in the
          # same variable scope; so here we store the scope of the
          # first RNNCell for reuse above.
          if rnn_scope is None:
            rnn_scope = (cell._scope, lstm_cell._scope)  # pylint: disable=protected-access
          if state_is_tuple:
            state = array_ops.concat(
                [state[0][0], state[0][1], state[1], state[2]], 1)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(sess.run(output), expected_output)
          self.assertAllClose(sess.run(state), expected_state)
  def testNASCell(self):
    """Smoke-tests NASCell against frozen reference output and state."""
    num_units = 6
    batch_size = 3
    expected_output = np.array([[0.576751, 0.576751, 0.576751, 0.576751,
                                 0.576751, 0.576751],
                                [0.618936, 0.618936, 0.618936, 0.618936,
                                 0.618936, 0.618936],
                                [0.627393, 0.627393, 0.627393, 0.627393,
                                 0.627393, 0.627393]])
    expected_state = np.array([[0.71579772, 0.71579772, 0.71579772, 0.71579772,
                                0.71579772, 0.71579772, 0.57675087, 0.57675087,
                                0.57675087, 0.57675087, 0.57675087, 0.57675087],
                               [0.78041625, 0.78041625, 0.78041625, 0.78041625,
                                0.78041625, 0.78041625, 0.6189357, 0.6189357,
                                0.61893570, 0.6189357, 0.6189357, 0.6189357],
                               [0.79457647, 0.79457647, 0.79457647, 0.79457647,
                                0.79457653, 0.79457653, 0.62739348, 0.62739348,
                                0.62739348, 0.62739348, 0.62739348, 0.62739348]
                              ])
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "nas_test",
          initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.NASCell(
            num_units=num_units)
        inputs = constant_op.constant(
            np.array([[1., 1., 1., 1.],
                      [2., 2., 2., 2.],
                      [3., 3., 3., 3.]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        init_state = core_rnn_cell_impl.LSTMStateTuple(state_value,
                                                       state_value)
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        # This is a smoke test: Only making sure expected values not change.
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_output)
        # There should be 2 states in the tuple.
        self.assertEqual(len(res[1]), 2)
        # Checking the shape of each state to be batch_size * num_units
        new_c, new_h = res[1]
        self.assertEqual(new_c.shape[0], batch_size)
        self.assertEqual(new_c.shape[1], num_units)
        self.assertEqual(new_h.shape[0], batch_size)
        self.assertEqual(new_h.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testNASCellProj(self):
    """Smoke-tests NASCell with an output projection (num_proj < num_units)."""
    num_units = 6
    batch_size = 3
    num_proj = 5
    expected_output = np.array([[1.697418, 1.697418, 1.697418, 1.697418,
                                 1.697418],
                                [1.840037, 1.840037, 1.840037, 1.840037,
                                 1.840037],
                                [1.873985, 1.873985, 1.873985, 1.873985,
                                 1.873985]])
    expected_state = np.array([[0.69855207, 0.69855207, 0.69855207, 0.69855207,
                                0.69855207, 0.69855207, 1.69741797, 1.69741797,
                                1.69741797, 1.69741797, 1.69741797],
                               [0.77073824, 0.77073824, 0.77073824, 0.77073824,
                                0.77073824, 0.77073824, 1.84003687, 1.84003687,
                                1.84003687, 1.84003687, 1.84003687],
                               [0.78973997, 0.78973997, 0.78973997, 0.78973997,
                                0.78973997, 0.78973997, 1.87398517, 1.87398517,
                                1.87398517, 1.87398517, 1.87398517]])
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "nas_proj_test",
          initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.NASCell(
            num_units=num_units,
            num_proj=num_proj)
        inputs = constant_op.constant(
            np.array([[1., 1., 1., 1.],
                      [2., 2., 2., 2.],
                      [3., 3., 3., 3.]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        # c keeps num_units columns; h is projected down to num_proj.
        state_value_c = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        state_value_h = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_proj), dtype=np.float32),
            dtype=dtypes.float32)
        init_state = core_rnn_cell_impl.LSTMStateTuple(state_value_c,
                                                       state_value_h)
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        # This is a smoke test: Only making sure expected values not change.
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_output)
        # There should be 2 states in the tuple.
        self.assertEqual(len(res[1]), 2)
        # Checking the shape of each state to be batch_size * num_units
        new_c, new_h = res[1]
        self.assertEqual(new_c.shape[0], batch_size)
        self.assertEqual(new_c.shape[1], num_units)
        self.assertEqual(new_h.shape[0], batch_size)
        self.assertEqual(new_h.shape[1], num_proj)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testUGRNNCell(self):
num_units = 2
batch_size = 3
expected_state_and_output = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"ugrnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = rnn_cell.UGRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_state_and_output)
self.assertAllClose(res[1], expected_state_and_output)
def testIntersectionRNNCell(self):
num_units = 2
batch_size = 3
expected_state = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
expected_output = np.array(
[[2.00431061, 2.00431061],
[4.00060606, 4.00060606],
[6.00008249, 6.00008249]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"intersection_rnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = rnn_cell.IntersectionRNNCell(num_units=num_units,
num_in_proj=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testIntersectionRNNCellFailure(self):
num_units = 2
batch_size = 3
cell = rnn_cell.IntersectionRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, "Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init."):
cell(inputs, init_state)
  def testPhasedLSTMCell(self):
    """Smoke-tests PhasedLSTMCell against frozen c/h state values.

    PhasedLSTMCell takes a (time, inputs) tuple: `t` is the per-example
    timestamp tensor driving the cell's time gate.
    """
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      expected_state_c = np.array(
          [[2.954548e-01, 8.354891e-04],
           [2.834632e-01, 8.158963e-01],
           [2.291694e-01, 1.325745e-04]],
          dtype=np.float32)
      expected_state_h = np.array(
          [[2.116566e-01, 5.985238e-04],
           [2.137760e-01, 6.153145e-01],
           [1.742966e-01, 1.008306e-04]],
          dtype=np.float32)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        # Timestamps are float64; one scalar time per batch row.
        t = array_ops.zeros([batch_size, 1], dtype=dtypes.float64)
        x = array_ops.zeros([batch_size, input_size])
        c0 = array_ops.zeros([batch_size, 2])
        h0 = array_ops.zeros([batch_size, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        output, state = rnn_cell.PhasedLSTMCell(num_units=num_units)((t, x),
                                                                     state0)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state], {
            t.name:
                np.array([[1.], [2.], [3.]]),
            x.name:
                np.array([[1., 1., 1., 1.],
                          [2., 2., 2., 2.],
                          [3., 3., 3., 3.]]),
        })
        # This is a smoke test, making sure expected values are unchanged.
        self.assertEqual(len(res), 2)
        # The cell's output is its hidden state h.
        self.assertAllClose(res[0], res[1].h)
        self.assertAllClose(res[1].c, expected_state_c)
        self.assertAllClose(res[1].h, expected_state_h)
def testHighwayWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"base_cell", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = core_rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
with variable_scope.variable_scope(
"hw_cell", initializer=init_ops.constant_initializer(0.5)):
hw_cell = rnn_cell.HighwayWrapper(
core_rnn_cell_impl.GRUCell(3), carry_bias_init=-100.0)
g_res, m_new_res = hw_cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# As carry_bias_init is very negative, the carry gate is 'open' and the
# transform gate is 'closed'. This means the output equals the input.
self.assertAllClose(res[1], res[0])
# States are left untouched
self.assertAllClose(res[2], res[3])
class LayerNormBasicLSTMCellTest(test.TestCase):
# NOTE: all the values in the current test case have been calculated.
  def testBasicLSTMCell(self):
    """LayerNormBasicLSTMCell matches precomputed values.

    Covers a two-layer MultiRNNCell stack, then a single cell whose input
    size (3) differs from num_units (2).
    """
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        state = (state0, state1)
        single_cell = lambda: rnn_cell.LayerNormBasicLSTMCell(2)
        cell = core_rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(2)])
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_state0_c = np.array([[-1.0, 1.0]])
        expected_state0_h = np.array([[-0.38079708, 0.38079708]])
        expected_state1_c = np.array([[-1.0, 1.0]])
        expected_state1_h = np.array([[-0.38079708, 0.38079708]])
        actual_h = res[0]
        actual_state0_c = res[1][0].c
        actual_state0_h = res[1][0].h
        actual_state1_c = res[1][1].c
        actual_state1_h = res[1][1].h
        self.assertAllClose(actual_h, expected_h, 1e-5)
        self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
        self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
        self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
        self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros(
            [1, 3])  # Test BasicLSTMCell with input_size != num_units.
        c = array_ops.zeros([1, 2])
        h = array_ops.zeros([1, 2])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(2)
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1., 1.]]),
            c.name: 0.1 * np.asarray([[0, 1]]),
            h.name: 0.1 * np.asarray([[2, 3]]),
        })
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_c = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c, 1e-5)
        self.assertAllClose(res[1].h, expected_h, 1e-5)
  def testBasicLSTMCellWithStateTuple(self):
    """Stacked LayerNormBasicLSTMCells fed with LSTMStateTuple states.

    Builds a 2-layer MultiRNNCell, runs one step, and compares the output
    and both per-layer states against precomputed golden values.
    """
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        cell = core_rnn_cell_impl.MultiRNNCell(
            [rnn_cell.LayerNormBasicLSTMCell(2) for _ in range(2)])
        h, (s0, s1) = cell(x, (state0, state1))
        sess.run([variables.global_variables_initializer()])
        # Feed the placeholders by tensor name with fixed inputs so the
        # run is deterministic and comparable to the golden values below.
        res = sess.run([h, s0, s1], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })
        # Precomputed golden values.
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_h0 = np.array([[-0.38079708, 0.38079708]])
        expected_c0 = np.array([[-1.0, 1.0]])
        expected_h1 = np.array([[-0.38079708, 0.38079708]])
        expected_c1 = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 3)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c0, 1e-5)
        self.assertAllClose(res[1].h, expected_h0, 1e-5)
        self.assertAllClose(res[2].c, expected_c1, 1e-5)
        self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithDropout(self):
def _is_close(x, y, digits=4):
delta = x - y
return delta < 10**(-digits)
def _is_close_in(x, items, digits=4):
for i in items:
if _is_close(x, i, digits):
return True
return False
keep_prob = 0.5
c_high = 2.9998924946
c_low = 0.999983298578
h_low = 0.761552567265
h_high = 0.995008519604
num_units = 5
allowed_low = [2, 3]
with self.test_session() as sess:
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(1)):
x = array_ops.zeros([1, 5])
c = array_ops.zeros([1, 5])
h = array_ops.zeros([1, 5])
state = core_rnn_cell_impl.LSTMStateTuple(c, h)
cell = rnn_cell.LayerNormBasicLSTMCell(
num_units, layer_norm=False, dropout_keep_prob=keep_prob)
g, s = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x.name: np.ones([1, 5]),
c.name: np.ones([1, 5]),
h.name: np.ones([1, 5]),
})
# Since the returned tensors are of size [1,n]
# get the first component right now.
actual_h = res[0][0]
actual_state_c = res[1].c[0]
actual_state_h = res[1].h[0]
# For each item in `c` (the cell inner state) check that
# it is equal to one of the allowed values `c_high` (not
# dropped out) or `c_low` (dropped out) and verify that the
# corresponding item in `h` (the cell activation) is coherent.
# Count the dropped activations and check that their number is
# coherent with the dropout probability.
dropped_count = 0
self.assertTrue((actual_h == actual_state_h).all())
for citem, hitem in zip(actual_state_c, actual_state_h):
self.assertTrue(_is_close_in(citem, [c_low, c_high]))
if _is_close(citem, c_low):
self.assertTrue(_is_close(hitem, h_low))
dropped_count += 1
elif _is_close(citem, c_high):
self.assertTrue(_is_close(hitem, h_high))
self.assertIn(dropped_count, allowed_low)
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
                                num_layers, max_time, compiled):
  """Build a multi-layer LSTM `dynamic_rnn` graph plus its gradients.

  Args:
    batch_size: batch dimension of the random input variable.
    num_units: number of units per LSTM layer.
    input_depth: feature dimension of the input.
    num_layers: number of stacked LSTM layers.
    max_time: number of time steps (inputs are time-major).
    compiled: if True, wrap each LSTM cell in `rnn_cell.CompiledWrapper`
      so it runs under XLA compilation.

  Returns:
    A dict with keys "outputs", "final_state" (flattened), "outputs_grad"
    and "final_state_grad" holding the corresponding graph tensors.
  """
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
    # Fixed seeds make the graph reproducible across compiled /
    # non-compiled runs (callers compare the two numerically).
    inputs = variable_scope.get_variable(
        "inputs", initializer=random_ops.random_uniform(
            (max_time, batch_size, input_depth), seed=1))
    maybe_xla = lambda c: rnn_cell.CompiledWrapper(c) if compiled else c
    cell = core_rnn_cell_impl.MultiRNNCell(
        [maybe_xla(core_rnn_cell_impl.LSTMCell(num_units))
         for _ in range(num_layers)])
    initial_state = cell.zero_state(
        batch_size=batch_size, dtype=dtypes.float32)
    outputs, final_state = rnn.dynamic_rnn(
        cell=cell, inputs=inputs, initial_state=initial_state,
        time_major=True)
    flat_final_state = nest.flatten(final_state)
    trainable_variables = variables.trainable_variables()
    # Gradients of the outputs and of the final state w.r.t. all
    # trainable variables, the inputs and the initial state.
    outputs_grad = gradients_impl.gradients(
        [outputs],
        trainable_variables + [inputs] + nest.flatten(initial_state))
    final_state_grad = gradients_impl.gradients(
        flat_final_state,
        trainable_variables + [inputs] + nest.flatten(initial_state))
    return {"outputs": outputs,
            "final_state": flat_final_state,
            "outputs_grad": outputs_grad,
            "final_state_grad": final_state_grad}
class CompiledWrapperTest(test.TestCase):
  """Tests for `rnn_cell.CompiledWrapper` (XLA-compiled RNN cells)."""
  def testMultiRNNCellWithLSTMCellAndXLA(self):
    """Compiled and uncompiled multi-layer LSTMs must agree numerically."""
    # TODO(b/34735319): Don't run this test if XLA is not available.
    batch_size = 16
    num_units = 32
    input_depth = 12
    num_layers = 2
    max_time = 20
    atol = 1e-5  # absolute tolerance for every comparison below
    # Use the same seed for both graphs so variable inits match.
    random_seed.set_random_seed(1234)
    with self.test_session(graph=ops.Graph()) as sess:
      xla_ops = _create_multi_lstm_cell_ops(
          batch_size=batch_size, num_units=num_units,
          input_depth=input_depth, num_layers=num_layers,
          max_time=max_time,
          compiled=True)
      sess.run([variables.global_variables_initializer()])
      xla_results = sess.run(xla_ops)
    random_seed.set_random_seed(1234)
    with self.test_session(graph=ops.Graph()) as sess:
      non_xla_ops = _create_multi_lstm_cell_ops(
          batch_size=batch_size, num_units=num_units,
          input_depth=input_depth, num_layers=num_layers,
          max_time=max_time,
          compiled=False)
      sess.run([variables.global_variables_initializer()])
      non_xla_results = sess.run(non_xla_ops)
    # Outputs, final states, and all gradients must match within atol.
    self.assertAllClose(
        non_xla_results["outputs"], xla_results["outputs"], atol=atol)
    for xla_value, non_xla_value in zip(
        xla_results["final_state"], non_xla_results["final_state"]):
      self.assertAllClose(xla_value, non_xla_value, atol=atol)
    for xla_g, non_xla_g in zip(
        xla_results["outputs_grad"], non_xla_results["outputs_grad"]):
      self.assertAllClose(xla_g, non_xla_g, atol=atol)
    for xla_g, non_xla_g in zip(
        xla_results["final_state_grad"], non_xla_results["final_state_grad"]):
      self.assertAllClose(xla_g, non_xla_g, atol=atol)
  def testMultiRNNCellWithStateTuple(self):
    """MultiRNNCell with state_is_tuple=True must reject non-tuple state."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        m_bad = array_ops.zeros([1, 4])
        m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
        # Test incorrectness of state
        with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
          core_rnn_cell_impl.MultiRNNCell(
              [core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
              state_is_tuple=True)(x, m_bad)
        _, ml = core_rnn_cell_impl.MultiRNNCell(
            [core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
            state_is_tuple=True)(x, m_good)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(ml, {
            x.name: np.array([[1., 1.]]),
            m_good[0].name: np.array([[0.1, 0.1]]),
            m_good[1].name: np.array([[0.1, 0.1]])
        })
        # The numbers in results were not calculated, this is just a
        # smoke test. However, these numbers should match those of
        # the test testMultiRNNCell.
        self.assertAllClose(res[0], [[0.175991, 0.175991]])
        self.assertAllClose(res[1], [[0.13248, 0.13248]])
class BenchmarkLSTMCellXLA(test.Benchmark):
  """Benchmarks `dynamic_rnn` with a multi-layer LSTM, with/without XLA."""
  def benchmarkDynamicRNNWithMultiLSTMCell(self):
    """Sweep thread limits, device, sizes and compilation; print a table."""
    num_layers = 3
    max_time = 50
    print("benchmarkDynamicRNNWithMultiLSTMCell")
    print("\t" +
          "\t".join(["inter_th", "intra_th",
                     "batch_size", "num_units", "input_depth", "device",
                     "compiled", "wall_time"]))
    warmup_run = True
    # Cartesian product over all benchmark configurations.
    for (threads,
         device,
         num_units,
         batch_size,
         input_depth,
         compiled) in itertools.product(
             [{"inter": 0, "intra": 0}, {"inter": 1, "intra": 4}],
             ["cpu", "gpu"],
             [32, 512],
             [1, 32, 256],
             [32, 512],
             [False, True]):
      if threads["inter"] != 0:
        # We only care about testing inter/intra op limitations on
        # CPU with small batch size, to mimic embedded devices.
        if device != "cpu" or batch_size != 1:
          continue
      # Skip large CPU batches to keep the benchmark tractable.
      if device == "cpu" and batch_size > 32:
        continue
      random_seed.set_random_seed(1234)
      config = config_pb2.ConfigProto(
          inter_op_parallelism_threads=threads["inter"],
          intra_op_parallelism_threads=threads["intra"],
          allow_soft_placement=False)
      with session.Session(config=config, graph=ops.Graph()) as sess:
        with ops.device("/%s:0" % device):
          ops_dict = _create_multi_lstm_cell_ops(
              batch_size=batch_size, num_units=num_units,
              input_depth=input_depth, num_layers=num_layers,
              max_time=max_time,
              compiled=compiled)
        sess.run([variables.global_variables_initializer()])
        all_ops = nest.flatten(ops_dict.values())
        all_ops_group = control_flow_ops.group(*all_ops)
        name_suffix = (
            "inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d"
            "_device_%s_xla_%s" % (
                threads["inter"], threads["intra"],
                batch_size, num_units, input_depth, device, compiled))
        if warmup_run:
          # The very first run warms everything up; its timing is ignored.
          self.run_op_benchmark(
              sess, all_ops_group, min_iters=30, name="ignore_warmup")
          warmup_run = False
        benchmark_results = self.run_op_benchmark(
            sess, all_ops_group, min_iters=50,
            name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix)
        print("\t" +
              "\t".join(["%s" % x for x in [
                  threads["inter"], threads["intra"],
                  batch_size, num_units, input_depth, device, compiled,
                  benchmark_results["wall_time"]]]))
# Run the TensorFlow test (and benchmark) suite when executed directly.
if __name__ == "__main__":
  test.main()
|
|
########
# Copyright (c) 2014 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import mock
from uuid import uuid4
from time import sleep
from dateutil import parser as date_parser
from cloudify.models_states import VisibilityState
from manager_rest.storage import models
from manager_rest.config import instance
from manager_rest.amqp_manager import AMQPManager
from manager_rest.utils import get_formatted_timestamp
from manager_rest.test.base_test import BaseServerTestCase
from amqp_postgres.postgres_publisher import BATCH_DELAY, DBLogEventPublisher
LOG_MESSAGE = 'cloudify-logs'
EVENT_MESSAGE = 'cloudify-events-topic'
class TestAMQPPostgres(BaseServerTestCase):
    """Integration tests for DBLogEventPublisher: AMQP messages -> DB rows."""
    def setUp(self):
        super(TestAMQPPostgres, self).setUp()
        # The AMQP connection is never used directly by these tests.
        self._mock_amqp_conn = mock.Mock()
        self.db_publisher = DBLogEventPublisher(
            self.server_configuration, self._mock_amqp_conn)
        self.db_publisher.start()
    def publish_messages(self, messages):
        """Feed (message, message_type) pairs to the publisher and wait."""
        for message, message_type in messages:
            self.db_publisher.process(message, message_type, 0)
        # The messages are dumped to the DB every BATCH_DELAY seconds, so
        # we should wait before trying to query SQL
        sleep(BATCH_DELAY * 2)
    def test_insert(self):
        """A published event and log each end up as exactly one DB row."""
        execution_id = str(uuid4())
        self._create_execution(execution_id)
        log = self._get_log(execution_id)
        event = self._get_event(execution_id)
        self.publish_messages([
            (event, EVENT_MESSAGE),
            (log, LOG_MESSAGE)
        ])
        db_log = self._get_db_element(models.Log)
        db_event = self._get_db_element(models.Event)
        self._assert_log(log, db_log)
        self._assert_event(event, db_event)
    def test_missing_execution(self):
        """Logs of a deleted execution are dropped; other logs still stored."""
        execution_id = str(uuid4())
        self._create_execution(execution_id)
        execution_id_2 = str(uuid4())
        self._create_execution(execution_id_2)
        # insert a log for execution 1 so that the execution gets cached
        log = self._get_log(execution_id)
        self.publish_messages([
            (log, LOG_MESSAGE)
        ])
        db_log = self._get_db_element(models.Log)
        self._assert_log(log, db_log)
        # delete execution 1, and insert logs for both execution 1 and 2
        # 1 was deleted, so the log will be lost, but we still expect that
        # the log for execution 2 will be stored
        self._delete_execution(execution_id)
        log = self._get_log(execution_id)
        log_2 = self._get_log(execution_id_2)
        self.publish_messages([
            (log, LOG_MESSAGE),
            (log_2, LOG_MESSAGE)
        ])
        execution_2_logs = self.sm.list(
            models.Log, filters={'execution_id': execution_id_2})
        self.assertEqual(len(execution_2_logs), 1)
        self._assert_log(log_2, execution_2_logs[0])
    @staticmethod
    def _get_amqp_manager():
        # Build an AMQPManager from the server configuration singleton.
        return AMQPManager(
            host=instance.amqp_management_host,
            username=instance.amqp_username,
            password=instance.amqp_password,
            cadata=instance.amqp_ca,
        )
    def _create_execution(self, execution_id):
        """Store a terminated Execution owned by the admin user/tenant."""
        admin_user = self.sm.get(models.User, 0)
        default_tenant = self.sm.get(models.Tenant, 0)
        new_execution = models.Execution(
            id=execution_id,
            status='terminated',
            created_at=get_formatted_timestamp(),
            workflow_id='test',
            error='',
            parameters={},
            is_system_workflow=False
        )
        new_execution.creator = admin_user
        new_execution.tenant = default_tenant
        self.sm.put(new_execution)
    def _delete_execution(self, execution_id):
        """Remove the Execution row with the given id."""
        execution = self.sm.get(models.Execution, execution_id)
        self.sm.delete(execution)
    def _get_db_element(self, model):
        """Return the single stored `model` row, asserting there is one."""
        items = self.sm.list(model)
        self.assertEqual(len(items), 1)
        return items[0]
    def _assert_timestamp(self, elem):
        timestamp = date_parser.parse(elem.timestamp)
        reported_timestamp = date_parser.parse(elem.reported_timestamp)
        # timestamp comes from `postgres_publisher` when creating the new
        # element, while `reported_timestamp` comes from the message object,
        # which should be created beforehand
        self.assertGreaterEqual(timestamp, reported_timestamp)
    def _assert_log(self, log, db_log):
        """Check that the stored Log row matches the published message."""
        self._assert_timestamp(db_log)
        self.assertEqual(db_log.message, log['message']['text'])
        self.assertEqual(db_log.message_code, None)
        self.assertEqual(db_log.logger, log['logger'])
        self.assertEqual(db_log.level, log['level'])
        self.assertEqual(db_log.operation, log['context']['operation'])
        self.assertEqual(db_log.node_id, log['context']['node_id'])
        self.assertEqual(db_log.execution.id, log['context']['execution_id'])
        self.assertEqual(db_log.creator.id, 0)
        self.assertEqual(db_log.tenant.id, 0)
        self.assertEqual(db_log.reported_timestamp, log['timestamp'])
        self.assertEqual(db_log.visibility, VisibilityState.TENANT)
    def _assert_event(self, event, db_event):
        """Check that the stored Event row matches the published message."""
        self._assert_timestamp(db_event)
        self.assertEqual(db_event.message, event['message']['text'])
        self.assertEqual(db_event.message_code, None)
        self.assertEqual(db_event.event_type, event['event_type'])
        self.assertEqual(db_event.error_causes, None)
        self.assertEqual(db_event.operation, None)
        self.assertEqual(db_event.node_id, None)
        self.assertEqual(db_event.execution.id,
                         event['context']['execution_id'])
        self.assertEqual(db_event.creator.id, 0)
        self.assertEqual(db_event.tenant.id, 0)
        self.assertEqual(db_event.reported_timestamp, event['timestamp'])
        self.assertEqual(db_event.visibility, VisibilityState.TENANT)
    @staticmethod
    def _get_log(execution_id, message='Test log'):
        """Build a raw log message dict shaped like an agent publishes it."""
        return {
            'context': {
                'execution_id': execution_id,
                'node_id': 'vm_7j36my',
                'operation': 'cloudify.interfaces.cloudify_agent.create',
            },
            'level': 'debug',
            'logger': 'ctx.a13973d5-3866-4054-baa1-479e242fff75',
            'message': {
                'text': message
            },
            'timestamp': get_formatted_timestamp()
        }
    @staticmethod
    def _get_event(execution_id,
                   message="Starting 'install' workflow execution"):
        """Build a raw event message dict shaped like a workflow publishes."""
        return {
            'message': {
                'text': message,
                'arguments': None
            },
            'event_type': 'workflow_started',
            'context': {
                'execution_id': execution_id,
            },
            'timestamp': get_formatted_timestamp()
        }
|
|
import sys
import threading
import io
import csv
import itertools
import concurrent.futures
from collections import OrderedDict, namedtuple
from math import isnan
import numpy
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtGui import (QIdentityProxyModel, QTableView, QItemSelectionModel,
QItemSelection)
from PyQt4.QtCore import Qt, QMetaObject, QModelIndex, QT_VERSION
from PyQt4.QtCore import pyqtSlot as Slot
import Orange.data
from Orange.data.storage import Storage
from Orange.data.table import Table
from Orange.data.sql.table import SqlTable
from Orange.statistics import basic_stats
from Orange.widgets import widget, gui
from Orange.widgets.settings import (Setting, ContextSetting,
DomainContextHandler)
from Orange.widgets.utils import datacaching
from Orange.widgets.utils.itemmodels import TableModel
class RichTableDecorator(QIdentityProxyModel):
    """A proxy model for a TableModel with some bells and whistles
    (adds support for gui.BarRole, include variable labels and icons
    in the header)
    """
    #: Rich header data flags.
    Name, Labels, Icon = 1, 2, 4
    def __init__(self, source, parent=None):
        super().__init__(parent)
        # Bitwise OR of the flags above; controls header rendering.
        self._header_flags = RichTableDecorator.Name
        self._labels = []
        self._continuous = []
        self.setSourceModel(source)
    @property
    def source(self):
        # The underlying data table of the source model (or None).
        return getattr(self.sourceModel(), "source", None)
    @property
    def vars(self):
        # The source model's variables (one per column).
        return getattr(self.sourceModel(), "vars", [])
    def setSourceModel(self, source):
        if source is not None and \
                not isinstance(source, TableModel):
            raise TypeError()
        if source is not None:
            # Cache per-column continuity for fast lookup in data().
            self._continuous = [var.is_continuous for var in source.vars]
            labels = []
            for var in source.vars:
                if isinstance(var, Orange.data.Variable):
                    labels.extend(var.attributes.keys())
            # Only expose non-private labels (not starting with "_").
            self._labels = list(sorted(
                {label for label in labels if not label.startswith("_")}))
        else:
            self._continuous = []
            self._labels = []
        super().setSourceModel(source)
    def data(self, index, role=Qt.DisplayRole,
             # for faster local lookup
             _BarRole=gui.TableBarItem.BarRole):
        if role == _BarRole and self._continuous[index.column()]:
            val = super().data(index, TableModel.ValueRole)
            if val is None or isnan(val):
                return None
            # Normalize the value into [0, 1] with the variable's stats.
            dist = super().data(index, TableModel.VariableStatsRole)
            if dist is not None and dist.max > dist.min:
                return (val - dist.min) / (dist.max - dist.min)
            else:
                return None
        elif role == Qt.TextAlignmentRole and self._continuous[index.column()]:
            return Qt.AlignRight | Qt.AlignVCenter
        else:
            return super().data(index, role)
    def headerData(self, section, orientation, role):
        if self.sourceModel() is None:
            return None
        # NOTE: Always use `self.sourceModel().headerData(...)` and not
        # super().headerData(...). The latter does not work for zero length
        # source models
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            var = self.sourceModel().headerData(
                section, orientation, TableModel.VariableRole)
            if var is None:
                return self.sourceModel().headerData(
                    section, orientation, Qt.DisplayRole)
            lines = []
            if self._header_flags & RichTableDecorator.Name:
                lines.append(var.name)
            if self._header_flags & RichTableDecorator.Labels:
                lines.extend(str(var.attributes.get(label, ""))
                             for label in self._labels)
            return "\n".join(lines)
        elif orientation == Qt.Horizontal and role == Qt.DecorationRole and \
                self._header_flags & RichTableDecorator.Icon:
            var = self.sourceModel().headerData(
                section, orientation, TableModel.VariableRole)
            if var is not None:
                return gui.attributeIconDict[var]
            else:
                return None
        else:
            return self.sourceModel().headerData(section, orientation, role)
    def setRichHeaderFlags(self, flags):
        """Set which header parts (Name/Labels/Icon) are displayed."""
        if flags != self._header_flags:
            self._header_flags = flags
            self.headerDataChanged.emit(
                Qt.Horizontal, 0, self.columnCount() - 1)
    def richHeaderFlags(self):
        return self._header_flags
    if QT_VERSION < 0xFFFFFF:  # TODO: change when QTBUG-44143 is fixed
        def sort(self, column, order):
            # Preempt the layout change notification
            self.layoutAboutToBeChanged.emit()
            # Block signals to suppress repeated layout[AboutToBe]Changed
            # TODO: Are any other signals emitted during a sort?
            self.blockSignals(True)
            try:
                rval = self.sourceModel().sort(column, order)
            finally:
                self.blockSignals(False)
            # Tidy up.
            self.layoutChanged.emit()
            return rval
    else:
        def sort(self, column, order):
            return self.sourceModel().sort(column, order)
class TableSliceProxy(QIdentityProxyModel):
    """Identity proxy exposing only a contiguous slice of the source rows."""
    def __init__(self, parent=None, rowSlice=slice(0, -1), **kwargs):
        # NOTE(review): the default slice(0, -1) excludes the source's last
        # row (slice.indices(count) -> stop == count - 1); callers in this
        # file always pass an explicit slice — confirm before relying on
        # the default.
        super().__init__(parent, **kwargs)
        self.__rowslice = rowSlice
    def setRowSlice(self, rowslice):
        """Set the visible row slice; must be contiguous (stride 1)."""
        if rowslice.step is not None and rowslice.step != 1:
            raise ValueError("invalid stride")
        if self.__rowslice != rowslice:
            self.beginResetModel()
            self.__rowslice = rowslice
            self.endResetModel()
    def setSourceModel(self, model):
        super().setSourceModel(model)
    def mapToSource(self, proxyindex):
        source = self.sourceModel()
        if source is None or not proxyindex.isValid():
            return QModelIndex()
        # Shift the proxy row by the slice offset.
        source_row = proxyindex.row() + self.__rowslice.start
        assert 0 <= source_row < source.rowCount()
        return source.createIndex(
            source_row, proxyindex.column(), proxyindex.internalPointer())
    def mapFromSource(self, sourceindex):
        source = self.sourceModel()
        if source is None or not sourceindex.isValid():
            return QModelIndex()
        proxy_row = sourceindex.row() - self.__rowslice.start
        assert 0 <= proxy_row < self.rowCount()
        return self.createIndex(
            proxy_row, sourceindex.column(), sourceindex.internalPointer())
    def rowCount(self, parent=QModelIndex()):
        if parent.isValid():
            return 0
        total = super().rowCount()
        start, stop, step = self.__rowslice.indices(total)
        assert step == 1
        return stop - start
class BlockSelectionModel(QItemSelectionModel):
    """
    Item selection model ensuring the selection maintains a simple block
    like structure.
    e.g.
    [a b] c [d e]
    [f g] h [i j]
    is allowed but this is not
    [a] b c d e
    [f g] h [i j]
    I.e. select the Cartesian product of row and column indices.
    """
    def __init__(self, model, parent=None, selectBlocks=True, **kwargs):
        super().__init__(model, parent, **kwargs)
        self.__selectBlocks = selectBlocks
    def select(self, selection, flags):
        """Reimplemented."""
        if isinstance(selection, QModelIndex):
            selection = QtGui.QItemSelection(selection, selection)
        model = self.model()
        # Rows/columns already selected before this call.
        indexes = self.selectedIndexes()
        rows = set(ind.row() for ind in indexes)
        cols = set(ind.column() for ind in indexes)
        if flags & QItemSelectionModel.Select and \
                not flags & QItemSelectionModel.Clear and self.__selectBlocks:
            indexes = selection.indexes()
            sel_rows = set(ind.row() for ind in indexes).union(rows)
            sel_cols = set(ind.column() for ind in indexes).union(cols)
            # Rebuild the selection as the full Cartesian product of the
            # (old + new) row set and column set.
            selection = QtGui.QItemSelection()
            for r_start, r_end in ranges(sorted(sel_rows)):
                for c_start, c_end in ranges(sorted(sel_cols)):
                    top_left = model.index(r_start, c_start)
                    bottom_right = model.index(r_end - 1, c_end - 1)
                    selection.select(top_left, bottom_right)
        elif self.__selectBlocks and flags & QItemSelectionModel.Deselect:
            indexes = selection.indexes()
            def to_ranges(indices):
                return list(range(*r) for r in ranges(indices))
            selected_rows = to_ranges(sorted(rows))
            selected_cols = to_ranges(sorted(cols))
            # NOTE(review): `ranges` expects sorted input but these sets
            # are passed unsorted — verify whether this relies on small-int
            # set iteration order.
            desel_rows = to_ranges(set(ind.row() for ind in indexes))
            desel_cols = to_ranges(set(ind.column() for ind in indexes))
            selection = QtGui.QItemSelection()
            # deselection extended vertically
            for row_range, col_range in \
                    itertools.product(selected_rows, desel_cols):
                selection.select(
                    model.index(row_range.start, col_range.start),
                    model.index(row_range.stop - 1, col_range.stop - 1)
                )
            # deselection extended horizontally
            for row_range, col_range in \
                    itertools.product(desel_rows, selected_cols):
                selection.select(
                    model.index(row_range.start, col_range.start),
                    model.index(row_range.stop - 1, col_range.stop - 1)
                )
        QItemSelectionModel.select(self, selection, flags)
    def selectBlocks(self):
        """Is the block selection in effect."""
        return self.__selectBlocks
    def setSelectBlocks(self, state):
        """Set the block selection state.
        If set to False, the selection model behaves as the base
        QItemSelectionModel
        """
        self.__selectBlocks = state
def ranges(indices):
    """Group consecutive indices into ``(start, stop)`` half-open ranges.

    >>> list(ranges([1, 2, 3, 5, 3, 4]))
    [(1, 4), (5, 6), (3, 5)]
    """
    # Consecutive values share a constant (value - position) difference,
    # so grouping by that key splits the sequence into runs.
    runs = itertools.groupby(enumerate(indices),
                             key=lambda pair: pair[1] - pair[0])
    for _, members in runs:
        members = list(members)
        first = members[0][1]
        last = members[-1][1]
        yield first, last + 1
def table_selection_to_mime_data(table):
    """Return the current selection in a QTableView as a QMimeData object.

    The selection is encoded as CSV ("text/csv") and tab-separated values
    ("text/tab-separated-values" and "text/plain").  The previous docstring
    claimed the data was copied to the clipboard; this function only builds
    and returns the mime payload — the caller is responsible for that.
    """
    lines = table_selection_to_list(table)
    # Renamed from `csv`/`tsv`: the old locals shadowed the module-level
    # `csv` import inside this function.
    csv_text = lines_to_csv_string(lines, dialect="excel")
    tsv_text = lines_to_csv_string(lines, dialect="excel-tab")
    mime = QtCore.QMimeData()
    mime.setData("text/csv", QtCore.QByteArray(csv_text))
    mime.setData("text/tab-separated-values", QtCore.QByteArray(tsv_text))
    mime.setData("text/plain", QtCore.QByteArray(tsv_text))
    return mime
def lines_to_csv_string(lines, dialect="excel"):
    """Serialize `lines` (an iterable of rows) to a CSV-formatted string
    using the given csv `dialect`.
    """
    buffer = io.StringIO()
    csv.writer(buffer, dialect=dialect).writerows(lines)
    return buffer.getvalue()
def table_selection_to_list(table):
    """Return the selected cells of a QTableView as rows of display strings.

    Rows and columns are ordered ascending; every (row, column) pair in the
    bounding Cartesian product of the selection is included.
    """
    model = table.model()
    selected = table.selectedIndexes()
    row_order = sorted({index.row() for index in selected})
    col_order = sorted({index.column() for index in selected})
    # TODO: use style item delegate displayText?
    return [
        [str(model.index(row, col).data(Qt.DisplayRole)) for col in col_order]
        for row in row_order
    ]
# Bookkeeping record for one widget input: the input channel id, the data
# table, its summary, and the QTableView displaying it.
TableSlot = namedtuple("TableSlot", ["input_id", "table", "summary", "view"])
class OWDataTable(widget.OWWidget):
    name = "Data Table"
    description = "View data set in a spreadsheet."
    icon = "icons/Table.svg"
    priority = 100
    # Multiple data tables may be connected; each gets its own tab.
    inputs = [("Data", Table, "set_dataset", widget.Multiple)]
    outputs = [("Selected Data", Table, widget.Default),
               ("Other Data", Table)]
    # Persistent user settings.
    show_distributions = Setting(False)
    dist_color_RGB = Setting((220, 220, 220, 255))
    show_attribute_labels = Setting(True)
    select_rows = Setting(True)
    auto_commit = Setting(True)
    color_by_class = Setting(True)
    settingsHandler = DomainContextHandler(
        match_values=DomainContextHandler.MATCH_VALUES_ALL)
    # Per-domain (context) selection state.
    selected_rows = ContextSetting([])
    selected_cols = ContextSetting([])
    def __init__(self):
        super().__init__()
        # One TableSlot per connected input, keyed by input id.
        self.inputs = OrderedDict()
        self.dist_color = QtGui.QColor(*self.dist_color_RGB)
        # Info box summarizing the current data set.
        info_box = gui.widgetBox(self.controlArea, "Info")
        self.info_ex = gui.widgetLabel(info_box, 'No data on input.', )
        self.info_ex.setWordWrap(True)
        self.info_attr = gui.widgetLabel(info_box, ' ')
        self.info_attr.setWordWrap(True)
        self.info_class = gui.widgetLabel(info_box, ' ')
        self.info_class.setWordWrap(True)
        self.info_meta = gui.widgetLabel(info_box, ' ')
        self.info_meta.setWordWrap(True)
        gui.separator(info_box)
        gui.button(info_box, self, "Restore Original Order",
                   callback=self.restore_order,
                   tooltip="Show rows in the original order",
                   autoDefault=False)
        info_box.setMinimumWidth(200)
        gui.separator(self.controlArea)
        # Variable-display options.
        box = gui.widgetBox(self.controlArea, "Variables")
        self.c_show_attribute_labels = gui.checkBox(
            box, self, "show_attribute_labels",
            "Show variable labels (if present)",
            callback=self._on_show_variable_labels_changed)
        gui.checkBox(box, self, "show_distributions",
                     'Visualize continuous values',
                     callback=self._on_distribution_color_changed)
        gui.checkBox(box, self, "color_by_class", 'Color by instance classes',
                     callback=self._on_distribution_color_changed)
        # Selection options.
        box = gui.widgetBox(self.controlArea, "Selection")
        gui.checkBox(box, self, "select_rows", "Select full rows",
                     callback=self._on_select_rows_changed)
        gui.rubber(self.controlArea)
        gui.auto_commit(self.controlArea, self, "auto_commit",
                        "Send Selected Rows", "Auto send is on")
        # GUI with tabs
        self.tabs = gui.tabWidget(self.mainArea)
        self.tabs.currentChanged.connect(self._on_current_tab_changed)
        copy = QtGui.QAction("Copy", self, shortcut=QtGui.QKeySequence.Copy,
                             triggered=self.copy)
        self.addAction(copy)
def sizeHint(self):
return QtCore.QSize(800, 500)
    def set_dataset(self, data, tid=None):
        """Set the input dataset."""
        self.closeContext()
        if data is not None:
            if tid in self.inputs:
                # update existing input slot
                slot = self.inputs[tid]
                view = slot.view
                # reset the (header) view state.
                view.setModel(None)
                view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder)
            else:
                # Create a fresh view (and tab) for a new input id.
                view = QTableView()
                view.setSortingEnabled(True)
                view.setHorizontalScrollMode(QTableView.ScrollPerPixel)
                if self.select_rows:
                    view.setSelectionBehavior(QTableView.SelectRows)
                header = view.horizontalHeader()
                header.setMovable(True)
                header.setClickable(True)
                header.setSortIndicatorShown(True)
                header.setSortIndicator(-1, Qt.AscendingOrder)
                # QHeaderView does not 'reset' the model sort column,
                # because there is no guaranty (requirement) that the
                # models understand the -1 sort column.
                def sort_reset(index, order):
                    if view.model() is not None and index == -1:
                        view.model().sort(index, order)
                header.sortIndicatorChanged.connect(sort_reset)
            view.dataset = data
            self.tabs.addTab(view, getattr(data, "name", "Data"))
            self._setup_table_view(view, data)
            slot = TableSlot(tid, data, table_summary(data), view)
            view._input_slot = slot
            self.inputs[tid] = slot
            self.tabs.setCurrentIndex(self.tabs.indexOf(view))
            self.set_info(slot.summary)
            # Summary length may be computed asynchronously; refresh the
            # info box once it resolves.
            if isinstance(slot.summary.len, concurrent.futures.Future):
                def update(f):
                    QMetaObject.invokeMethod(
                        self, "_update_info", Qt.QueuedConnection)
                slot.summary.len.add_done_callback(update)
        elif tid in self.inputs:
            # Input disconnected: tear down its slot, view and tab.
            slot = self.inputs.pop(tid)
            view = slot.view
            view.hide()
            view.deleteLater()
            self.tabs.removeTab(self.tabs.indexOf(view))
            current = self.tabs.currentWidget()
            if current is not None:
                self.set_info(current._input_slot.summary)
        # Hide the tab bar when there is at most one table shown.
        self.tabs.tabBar().setVisible(self.tabs.count() > 1)
        self.selected_rows = []
        self.selected_cols = []
        self.openContext(data)
        self.set_selection()
    def _setup_table_view(self, view, data):
        """Setup the `view` (QTableView) with `data` (Orange.data.Table)
        """
        if data is None:
            view.setModel(None)
            return
        datamodel = TableModel(data)
        datamodel = RichTableDecorator(datamodel)
        rowcount = data.approx_len()
        if self.color_by_class and data.domain.has_discrete_class:
            color_schema = [
                QtGui.QColor(*c) for c in data.domain.class_var.colors]
        else:
            color_schema = None
        if self.show_distributions:
            view.setItemDelegate(
                gui.TableBarItem(
                    self, color=self.dist_color, color_schema=color_schema)
            )
        else:
            view.setItemDelegate(QtGui.QStyledItemDelegate(self))
        # Enable/disable view sorting based on data's type
        view.setSortingEnabled(is_sortable(data))
        header = view.horizontalHeader()
        header.setClickable(is_sortable(data))
        header.setSortIndicatorShown(is_sortable(data))
        view.setModel(datamodel)
        vheader = view.verticalHeader()
        option = view.viewOptions()
        size = view.style().sizeFromContents(
            QtGui.QStyle.CT_ItemViewItem, option,
            QtCore.QSize(20, 20), view)
        vheader.setDefaultSectionSize(size.height() + 2)
        vheader.setMinimumSectionSize(5)
        vheader.setResizeMode(QtGui.QHeaderView.Fixed)
        # Limit the number of rows displayed in the QTableView
        # (workaround for QTBUG-18490 / QTBUG-28631)
        maxrows = (2 ** 31 - 1) // (vheader.defaultSectionSize() + 2)
        if rowcount > maxrows:
            sliceproxy = TableSliceProxy(
                parent=view, rowSlice=slice(0, maxrows))
            sliceproxy.setSourceModel(datamodel)
            # First reset the view (without this the header view retains
            # it's state - at this point invalid/broken)
            view.setModel(None)
            view.setModel(sliceproxy)
            assert view.model().rowCount() <= maxrows
            assert vheader.sectionSize(0) > 1 or datamodel.rowCount() == 0
        # update the header (attribute names)
        self._update_variable_labels(view)
        selmodel = BlockSelectionModel(
            view.model(), parent=view, selectBlocks=not self.select_rows)
        view.setSelectionModel(selmodel)
        view.selectionModel().selectionChanged.connect(self.update_selection)
    #noinspection PyBroadException
    def set_corner_text(self, table, text):
        """Set table corner text."""
        # As this is an ugly hack, do everything in
        # try - except blocks, as it may stop working in newer Qt.
        # First call: locate the corner button, install a custom paint
        # event filter on it, and cache it on the table.  A failed attempt
        # is also cached (table.btnfailed) so it is not retried.
        if not hasattr(table, "btn") and not hasattr(table, "btnfailed"):
            try:
                btn = table.findChild(QtGui.QAbstractButton)
                class efc(QtCore.QObject):
                    def eventFilter(self, o, e):
                        if (isinstance(o, QtGui.QAbstractButton) and
                                e.type() == QtCore.QEvent.Paint):
                            # paint by hand (borrowed from QTableCornerButton)
                            btn = o
                            opt = QtGui.QStyleOptionHeader()
                            opt.init(btn)
                            state = QtGui.QStyle.State_None
                            if btn.isEnabled():
                                state |= QtGui.QStyle.State_Enabled
                            if btn.isActiveWindow():
                                state |= QtGui.QStyle.State_Active
                            if btn.isDown():
                                state |= QtGui.QStyle.State_Sunken
                            opt.state = state
                            opt.rect = btn.rect()
                            opt.text = btn.text()
                            opt.position = \
                                QtGui.QStyleOptionHeader.OnlyOneSection
                            painter = QtGui.QStylePainter(btn)
                            painter.drawControl(QtGui.QStyle.CE_Header, opt)
                            return True  # eat event
                        return False
                table.efc = efc()
                btn.installEventFilter(table.efc)
                table.btn = btn
                if sys.platform == "darwin":
                    btn.setAttribute(Qt.WA_MacSmallSize)
            except Exception:
                table.btnfailed = True
        # Subsequent calls: update the cached button's text and widen the
        # vertical header so the text fits.
        if hasattr(table, "btn"):
            try:
                btn = table.btn
                btn.setText(text)
                opt = QtGui.QStyleOptionHeader()
                opt.text = btn.text()
                s = btn.style().sizeFromContents(
                    QtGui.QStyle.CT_HeaderSection,
                    opt, QtCore.QSize(),
                    btn).expandedTo(QtGui.QApplication.globalStrut())
                if s.isValid():
                    table.verticalHeader().setMinimumWidth(s.width())
            except Exception:
                pass
def _on_current_tab_changed(self, index):
"""Update the info box on current tab change"""
view = self.tabs.widget(index)
if view is not None and view.model() is not None:
self.set_info(view._input_slot.summary)
else:
self.set_info(None)
def _update_variable_labels(self, view):
"Update the variable labels visibility for `view`"
model = view.model()
if isinstance(model, TableSliceProxy):
model = model.sourceModel()
if self.show_attribute_labels:
model.setRichHeaderFlags(
RichTableDecorator.Labels | RichTableDecorator.Name)
labelnames = set()
for a in model.source.domain:
labelnames.update(a.attributes.keys())
labelnames = sorted(
[label for label in labelnames if not label.startswith("_")])
self.set_corner_text(view, "\n".join([""] + labelnames))
else:
model.setRichHeaderFlags(RichTableDecorator.Name)
self.set_corner_text(view, "")
def _on_show_variable_labels_changed(self):
"""The variable labels (var.attribues) visibility was changed."""
for slot in self.inputs.values():
self._update_variable_labels(slot.view)
def _on_distribution_color_changed(self):
for ti in range(self.tabs.count()):
widget = self.tabs.widget(ti)
model = widget.model()
while isinstance(model, QtGui.QAbstractProxyModel):
model = model.sourceModel()
data = model.source
class_var = data.domain.class_var
if self.color_by_class and class_var and class_var.is_discrete:
color_schema = [QtGui.QColor(*c) for c in class_var.colors]
else:
color_schema = None
if self.show_distributions:
delegate = gui.TableBarItem(self, color=self.dist_color,
color_schema=color_schema)
else:
delegate = QtGui.QStyledItemDelegate(self)
widget.setItemDelegate(delegate)
tab = self.tabs.currentWidget()
if tab:
tab.reset()
def _on_select_rows_changed(self):
for slot in self.inputs.values():
selection_model = slot.view.selectionModel()
selection_model.setSelectBlocks(not self.select_rows)
if self.select_rows:
slot.view.setSelectionBehavior(QTableView.SelectRows)
# Expand the current selection to full row selection.
selection_model.select(
selection_model.selection(),
QItemSelectionModel.Select | QItemSelectionModel.Rows
)
else:
slot.view.setSelectionBehavior(QTableView.SelectItems)
def restore_order(self):
"""Restore the original data order of the current view."""
table = self.tabs.currentWidget()
if table is not None:
table.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder)
def set_info(self, summary):
if summary is None:
self.info_ex.setText("No data on input.")
self.info_attr.setText("")
self.info_class.setText("")
self.info_meta.setText("")
else:
info_len, info_attr, info_class, info_meta = \
format_summary(summary)
self.info_ex.setText(info_len)
self.info_attr.setText(info_attr)
self.info_class.setText(info_class)
self.info_meta.setText(info_meta)
@Slot()
def _update_info(self):
current = self.tabs.currentWidget()
if current is not None and current.model() is not None:
self.set_info(current._input_slot.summary)
def update_selection(self, *_):
self.commit()
def set_selection(self):
if len(self.selected_rows) and len(self.selected_cols):
view = self.tabs.currentWidget()
model = view.model()
if model.rowCount() <= self.selected_rows[-1] or \
model.columnCount() <= self.selected_cols[-1]:
return
selection = QItemSelection()
rowranges = list(ranges(self.selected_rows))
colranges = list(ranges(self.selected_cols))
for rowstart, rowend in rowranges:
for colstart, colend in colranges:
selection.append(
QtGui.QItemSelectionRange(
view.model().index(rowstart, colstart),
view.model().index(rowend - 1, colend - 1)
)
)
view.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
def get_selection(self, view):
"""
Return the selected row and column indices of the selection in view.
"""
selection = view.selectionModel().selection()
model = view.model()
# map through the proxies into input table.
while isinstance(model, QtGui.QAbstractProxyModel):
selection = model.mapSelectionToSource(selection)
model = model.sourceModel()
assert isinstance(model, TableModel)
indexes = selection.indexes()
rows = list(set(ind.row() for ind in indexes))
# map the rows through the applied sorting (if any)
rows = sorted(model.mapToTableRows(rows))
cols = sorted(set(ind.column() for ind in indexes))
return rows, cols
@staticmethod
def _get_model(view):
model = view.model()
while isinstance(model, QtGui.QAbstractProxyModel):
model = model.sourceModel()
return model
    def commit(self):
        """
        Commit/send the current selected row/column selection.

        Emits "Selected Data" (the selected subset) and "Other Data" (the
        complement). When only some columns are selected, both outputs are
        restricted to a domain built from those columns.
        """
        selected_data = other_data = None
        view = self.tabs.currentWidget()
        if view and view.model() is not None:
            model = self._get_model(view)
            table = model.source  # The input data table
            # Selections of individual instances are not implemented
            # for SqlTables
            if isinstance(table, SqlTable):
                # Send None on both channels and stop.
                self.send("Selected Data", selected_data)
                self.send("Other Data", other_data)
                return
            rowsel, colsel = self.get_selection(view)
            # Persist the selection so it can be restored (set_selection).
            self.selected_rows, self.selected_cols = rowsel, colsel

            def select(data, rows, domain):
                """
                Select the data subset with specified rows and domain subsets.
                If either rows or domain is None they mean select all.
                """
                if rows is not None and domain is not None:
                    return data.from_table(domain, data, rows)
                elif rows is not None:
                    # NOTE(review): this passes `rows` where from_table
                    # expects a source table; unreachable as called below
                    # (domain is always non-None) but looks wrong — verify.
                    return data.from_table(data.domain, rows)
                elif domain is not None:
                    return data.from_table(domain, data)
                else:
                    return data

            domain = table.domain
            if len(colsel) < len(domain) + len(domain.metas):
                # only a subset of the columns is selected
                allvars = domain.variables + domain.metas
                # Pair each selected column with its domain role
                # (attribute / class var / meta).
                columns = [(c, model.headerData(c, Qt.Horizontal,
                                                TableModel.DomainRole))
                           for c in colsel]
                assert all(role is not None for _, role in columns)

                def select_vars(role):
                    """select variables for role (TableModel.DomainRole)"""
                    return [allvars[c] for c, r in columns if r == role]

                attrs = select_vars(TableModel.Attribute)
                class_vars = select_vars(TableModel.ClassVar)
                metas = select_vars(TableModel.Meta)
                domain = Orange.data.Domain(attrs, class_vars, metas)

            # Avoid a copy if all/none rows are selected.
            if not rowsel:
                selected_data = None
                other_data = select(table, None, domain)
            elif len(rowsel) == len(table):
                selected_data = select(table, None, domain)
                other_data = None
            else:
                selected_data = select(table, rowsel, domain)
                # Complement of the selected rows via a boolean mask.
                selmask = numpy.ones((len(table),), dtype=bool)
                selmask[rowsel] = False
                other_data = select(table, numpy.flatnonzero(selmask), domain)
        self.send("Selected Data", selected_data)
        self.send("Other Data", other_data)
def copy(self):
"""
Copy current table selection to the clipboard.
"""
view = self.tabs.currentWidget()
if view is not None:
mime = table_selection_to_mime_data(view)
QtGui.QApplication.clipboard().setMimeData(
mime, QtGui.QClipboard.Clipboard
)
def send_report(self):
view = self.tabs.currentWidget()
if not view or not view.model():
return
model = self._get_model(view)
self.report_data_brief(model.source)
self.report_table(view)
# Table Summary

# Basic statistics for X/Y/metas arrays
#: Statistics for a dense array part: NaN count, non-NaN count, column stats.
DenseArray = namedtuple(
    "DenseArray", ["nans", "non_nans", "stats"])
#: Same fields for a sparse array part.
SparseArray = namedtuple(
    "SparseArray", ["nans", "non_nans", "stats"])
#: Same fields for a sparse boolean ("tag") array part.
SparseBoolArray = namedtuple(
    "SparseBoolArray", ["nans", "non_nans", "stats"])
#: Sentinel: statistics are not available for this part.
NotAvailable = namedtuple("NotAvailable", [])

#: Orange.data.Table summary
Summary = namedtuple(
    "Summary",
    ["len", "domain", "X", "Y", "M"])

#: Orange.data.sql.table.SqlTable summary
ApproxSummary = namedtuple(
    "ApproxSummary",
    ["approx_len", "len", "domain", "X", "Y", "M"])
def table_summary(table):
    """Compute a Summary (or ApproxSummary for SQL tables) of `table`."""
    if isinstance(table, SqlTable):
        # Exact row counting over SQL may be slow: return the cheap
        # approximate length now and resolve the exact one in a future.
        approx_len = table.approx_len()
        len_future = concurrent.futures.Future()

        def _len():
            len_future.set_result(len(table))
        threading.Thread(target=_len).start()  # KILL ME !!!
        return ApproxSummary(approx_len, len_future, table.domain,
                             NotAvailable(), NotAvailable(), NotAvailable())
    else:
        domain = table.domain
        n_instances = len(table)
        # dist = basic_stats.DomainBasicStats(table, include_metas=True)
        # Per-column basic statistics, cached per table instance.
        bstats = datacaching.getCached(
            table, basic_stats.DomainBasicStats, (table, True)
        )
        dist = bstats.stats
        # Split the flat per-column stats into X / Y / metas groups.
        X_dist, Y_dist, M_dist = numpy.split(
            dist, numpy.cumsum([len(domain.attributes),
                                len(domain.class_vars)]))

        def parts(array, density, col_dist):
            """Wrap column stats into the part type matching `density`."""
            array = numpy.atleast_2d(array)
            nans = sum([dist.nans for dist in col_dist])
            non_nans = sum([dist.non_nans for dist in col_dist])
            if density == Storage.DENSE:
                return DenseArray(nans, non_nans, col_dist)
            elif density == Storage.SPARSE:
                return SparseArray(nans, non_nans, col_dist)
            elif density == Storage.SPARSE_BOOL:
                return SparseBoolArray(nans, non_nans, col_dist)
            elif density == Storage.MISSING:
                return NotAvailable()
            else:
                assert False
        X_part = parts(table.X, table.X_density(), X_dist)
        Y_part = parts(table.Y, table.Y_density(), Y_dist)
        M_part = parts(table.metas, table.metas_density(), M_dist)
        return Summary(n_instances, domain, X_part, Y_part, M_part)
def format_summary(summary):
    """Format a table summary (from :func:`table_summary`) into a list of
    human-readable strings, one per info-box line."""

    def sp(n):
        # Pluralization helper: ("No", "s"), ("1", ""), ("N", "s").
        if n == 0:
            return "No", "s"
        if n == 1:
            return str(n), ''
        return str(n), 's'

    def format_part(part):
        # Missing-value / density annotation for one array part.
        if isinstance(part, NotAvailable):
            return ""
        if part.nans + part.non_nans == 0:
            return ""
        if isinstance(part, DenseArray):
            total = part.nans + part.non_nans
            if part.nans > 0:
                miss = "%.1f%%" % (100 * part.nans / total)
            else:
                miss = "no"
            return " (%s missing values)" % miss
        if isinstance(part, (SparseArray, SparseBoolArray)):
            tag = "sparse" if isinstance(part, SparseArray) else "tags"
            total = part.nans + part.non_nans
            return " ({}, density {:.2f}%)".format(
                tag, 100 * part.non_nans / total)
        # MISSING, N/A
        return ""

    text = []
    if isinstance(summary, ApproxSummary):
        if summary.len.done():
            text.append("{} instances".format(summary.len.result()))
        else:
            text.append("~{} instances".format(summary.approx_len))
    elif isinstance(summary, Summary):
        text.append("{} instances".format(summary.len))
        if sum(p.nans for p in [summary.X, summary.Y, summary.M]) == 0:
            text[-1] += " (no missing values)"

    text.append(("%s feature%s" % sp(len(summary.domain.attributes)))
                + format_part(summary.X))

    if not summary.domain.class_vars:
        text.append("No target variable.")
    else:
        if len(summary.domain.class_vars) > 1:
            c_text = "%s outcome%s" % sp(len(summary.domain.class_vars))
        elif summary.domain.has_continuous_class:
            c_text = "Continuous target variable"
        else:
            c_text = "Discrete class with %s value%s" % sp(
                len(summary.domain.class_var.values))
        text.append(c_text + format_part(summary.Y))

    text.append(("%s meta attribute%s" % sp(len(summary.domain.metas)))
                + format_part(summary.M))
    return text
def is_sortable(table):
    """Return True if `table` supports view sorting (SQL tables do not)."""
    if isinstance(table, SqlTable):
        return False
    return isinstance(table, Orange.data.Table)
def test_main():
    """Manual smoke test: show the widget with three example datasets."""
    app = QtGui.QApplication(sys.argv)
    ow = OWDataTable()
    datasets = [Table(name) for name in ("iris", "brown-selected", "housing")]
    ow.show()
    ow.raise_()
    for data in datasets:
        ow.set_dataset(data, data.name)
    rval = app.exec()
    # ow.saveSettings()
    return rval
def test_model():
    """Manual smoke test: show a bare TableModel in a sortable view."""
    app = QtGui.QApplication([])
    view = QtGui.QTableView(sortingEnabled=True)
    model = TableModel(Orange.data.Table("lenses"))
    view.setModel(model)
    view.show()
    view.raise_()
    return app.exec()
# Allow running this module as a script for a quick manual test.
if __name__ == "__main__":
    sys.exit(test_main())
|
|
"""SCons.Tool.javac
Tool-specific initialization for javac.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/javac.py 4369 2009/09/19 15:58:29 scons"
import os
import os.path
import string
import SCons.Action
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
import SCons.Util
def classname(path):
    """Turn a string (path name) into a Java class name.

    Path separators become dots, e.g. ``com/example/Foo`` -> ``com.example.Foo``.
    """
    # str.replace works on both Python 2 and 3; the deprecated
    # string-module function (string.replace) was removed in Python 3.
    return os.path.normpath(path).replace(os.sep, '.')
def emit_java_classes(target, source, env):
    """Create and return lists of source java files
    and their corresponding target class files.

    Directory sources are scanned for $JAVASUFFIX files; each source file
    is parsed to discover its package directory and the class names it
    defines (one .class target per class).
    """
    java_suffix = env.get('JAVASUFFIX', '.java')
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')

    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]

    s = source[0].rentry().disambiguate()
    if isinstance(s, SCons.Node.FS.File):
        sourcedir = s.dir.rdir()
    elif isinstance(s, SCons.Node.FS.Dir):
        sourcedir = s.rdir()
    else:
        raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)

    slist = []
    js = _my_normcase(java_suffix)
    find_java = lambda n, js=js, ljs=len(js): _my_normcase(n[-ljs:]) == js
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            result = SCons.Util.OrderedDict()
            def visit(arg, dirname, names, fj=find_java, dirnode=entry.rdir()):
                # The on-disk entries come back in arbitrary order.  Sort
                # them so our target and source lists are determinate.
                java_files = sorted(filter(fj, names))
                mydir = dirnode.Dir(dirname)
                java_paths = map(lambda f, d=mydir: d.File(f), java_files)
                for jp in java_paths:
                    arg[jp] = True
            # Walk the in-memory Node tree only.  (The original code also
            # called os.path.walk() here, traversing the same directory a
            # second time; os.path.walk is gone in Python 3 anyway.)
            entry.walk(visit, result)

            slist.extend(result.keys())
        else:
            raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)

    version = env.get('JAVAVERSION', '1.4')
    full_tlist = []
    for f in slist:
        tlist = []
        source_file_based = True
        pkg_dir = None
        if not f.is_derived():
            # Parse the .java source for its package and class names.
            pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
            if classes:
                source_file_based = False
                if pkg_dir:
                    d = target[0].Dir(pkg_dir)
                    p = pkg_dir + os.sep
                else:
                    d = target[0]
                    p = ''
                for c in classes:
                    t = d.File(c + class_suffix)
                    t.attributes.java_classdir = classdir
                    t.attributes.java_sourcedir = sourcedir
                    t.attributes.java_classname = classname(p + c)
                    tlist.append(t)
        if source_file_based:
            # Derived (or unparseable) source: assume one .class file
            # named after the source file itself.
            base = f.name[:-len(java_suffix)]
            if pkg_dir:
                t = target[0].Dir(pkg_dir).File(base + class_suffix)
            else:
                t = target[0].File(base + class_suffix)
            t.attributes.java_classdir = classdir
            t.attributes.java_sourcedir = f.dir
            t.attributes.java_classname = classname(base)
            tlist.append(t)

        for t in tlist:
            t.set_specific_source([f])

        full_tlist.extend(tlist)

    return full_tlist, slist
# Action that invokes javac via the $JAVACCOM construction variable.
JavaAction = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')

# Builder mapping .java sources to .class targets; emit_java_classes
# computes the actual class-file targets by parsing the sources.
JavaBuilder = SCons.Builder.Builder(action = JavaAction,
                    emitter = emit_java_classes,
                    target_factory = SCons.Node.FS.Entry,
                    source_factory = SCons.Node.FS.Entry)
class pathopt:
    """
    Callable object for generating javac-style path options from
    a construction variable (e.g. -classpath, -sourcepath).
    """
    def __init__(self, opt, var, default=None):
        """
        opt     -- the command-line option string (e.g. '-classpath')
        var     -- construction variable holding the path value(s)
        default -- optional construction variable appended to the path
        """
        self.opt = opt
        self.var = var
        self.default = default

    def __call__(self, target, source, env, for_signature):
        path = env[self.var]
        # Normalize to a list: the variable may hold a single string, a
        # list, or an empty value.  Without the elif branch an empty
        # string with a default set would crash on '' + [default].
        if path and not SCons.Util.is_List(path):
            path = [path]
        elif not path:
            path = []
        if self.default:
            path = path + [ env[self.default] ]
        if path:
            # os.pathsep.join replaces the Python-2-only string.join().
            return [self.opt, os.pathsep.join(path)]
        else:
            return []
def Java(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the separate JavaClass{File,Dir}
    Builders.

    Pairs each source with a target directory and dispatches to the
    file- or directory-oriented builder depending on what the source is.
    """
    if not SCons.Util.is_List(target):
        target = [target]
    if not SCons.Util.is_List(source):
        source = [source]

    # Pad the target list with repetitions of the last element in the
    # list so we have a target for every source element.
    target = target + ([target[-1]] * (len(source) - len(target)))

    java_suffix = env.subst('$JAVASUFFIX')
    result = []

    for t, s in zip(target, source):
        if isinstance(s, SCons.Node.FS.Base):
            # Already a Node: dispatch on its node type.
            if isinstance(s, SCons.Node.FS.File):
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        else:
            # A plain string: check the filesystem, then fall back to
            # the suffix heuristic for not-yet-existing paths.
            if os.path.isfile(s):
                b = env.JavaClassFile
            elif os.path.isdir(s):
                b = env.JavaClassDir
            elif s[-len(java_suffix):] == java_suffix:
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        result.extend(b(*(t, s) + args, **kw))

    return result
def generate(env):
    """Add Builders and construction variables for javac to an Environment."""
    java_file = SCons.Tool.CreateJavaFileBuilder(env)
    java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
    java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
    # Attach the class-file emitter both as the default and for the
    # configured Java source suffix.
    java_class.add_emitter(None, emit_java_classes)
    java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
    java_class_dir.emitter = emit_java_classes

    env.AddMethod(Java)

    env['JAVAC']                    = 'javac'
    env['JAVACFLAGS']               = SCons.Util.CLVar('')
    env['JAVABOOTCLASSPATH']        = []
    env['JAVACLASSPATH']            = []
    env['JAVASOURCEPATH']           = []
    # pathopt instances expand the variables below into
    # "-opt path1<sep>path2" argument pairs at command-build time.
    env['_javapathopt']             = pathopt
    env['_JAVABOOTCLASSPATH']       = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
    env['_JAVACLASSPATH']           = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
    env['_JAVASOURCEPATH']          = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
    env['_JAVASOURCEPATHDEFAULT']   = '${TARGET.attributes.java_sourcedir}'
    env['_JAVACCOM']                = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
    env['JAVACCOM']                 = "${TEMPFILE('$_JAVACCOM')}"
    env['JAVACLASSSUFFIX']          = '.class'
    env['JAVASUFFIX']               = '.java'
def exists(env):
    """The javac tool is always reported as available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The generalized Brillouin conditions are a series of stationarity conditions
for a Lie algebraic variational principle. This module implements solving for
stationarity based on a subset of the these conditions.
"""
import copy
import numpy as np
from openfermion import (
MolecularData,
make_reduced_hamiltonian,
InteractionOperator,
)
from openfermion.chem.molecular_data import spinorb_from_spatial
from fqe.hamiltonians.restricted_hamiltonian import RestrictedHamiltonian
from fqe.algorithm.brillouin_calculator import (
get_fermion_op, get_acse_residual_fqe, get_acse_residual_fqe_parallel,
get_tpdm_grad_fqe, get_tpdm_grad_fqe_parallel, two_rdo_commutator_antisymm,
two_rdo_commutator_symm)
# joblib is optional; it gates the parallel residual/gradient code paths.
try:
    from joblib import Parallel
    PARALLELIZABLE = True
except ImportError:
    PARALLELIZABLE = False
class BrillouinCondition:
    """This object provide an interface to solving for stationarity with
    respect to the 2-particle Brillouin condition.

    Initialized from a MolecularData instance; `bc_solve` (wavefunction
    propagation) or `bc_solve_rdms` (RDM-based) then evolve an initial
    wavefunction until the energy change per iteration falls below a
    fixed threshold (0.5e-4) or `iter_max` iterations are reached.
    """

    def __init__(
        self,
        molecule=MolecularData,
        iter_max=30,
        run_parallel=False,
        verbose=True,
    ):
        # Restricted (spatial-orbital) Hamiltonian used for FQE evolution;
        # the two-electron tensor is reordered and scaled by -0.5.
        oei, tei = molecule.get_integrals()
        elec_hamil = RestrictedHamiltonian((oei, np.einsum("ijlk", -0.5 * tei)))
        # NOTE(review): this second get_integrals() call is redundant —
        # oei/tei are unchanged from the call above.
        oei, tei = molecule.get_integrals()
        # Spin-orbital integrals with antisymmetrized two-electron part.
        soei, stei = spinorb_from_spatial(oei, tei)
        astei = np.einsum('ijkl', stei) - np.einsum('ijlk', stei)
        molecular_hamiltonian = InteractionOperator(0, soei, 0.25 * astei)

        # moleham = molecule.get_molecular_hamiltonian()
        # Fold the one-body part into a two-body "reduced" Hamiltonian so
        # energies/gradients can be taken against 2-RDM quantities alone.
        reduced_ham = make_reduced_hamiltonian(molecular_hamiltonian,
                                               molecule.n_electrons)
        self.molecule = molecule
        self.reduced_ham = reduced_ham
        self.elec_hamil = elec_hamil
        self.iter_max = iter_max
        self.sdim = elec_hamil.dim()
        # change to use multiplicity to derive this for open shell
        self.nalpha = molecule.n_electrons // 2
        self.nbeta = molecule.n_electrons // 2
        self.sz = self.nalpha - self.nbeta
        if PARALLELIZABLE and run_parallel:
            self.parallel = True
        else:
            self.parallel = False
        self.verbose = verbose

        # store results
        self.acse_energy = []

    def bc_solve(self, initial_wf):
        """Propagate BC differential equation until convergence.

        Args:
            initial_wf: Initial wavefunction to evolve.
        """
        fqe_wf = copy.deepcopy(initial_wf)
        sdim = self.sdim
        iter_max = self.iter_max
        iteration = 0
        # Finite-difference step used below to approximate D''.
        h = 1.0e-4
        self.acse_energy = [fqe_wf.expectationValue(self.elec_hamil).real]
        while iteration < iter_max:
            # ACSE residual and 2-RDM gradient, serial or joblib-parallel.
            if self.parallel:
                acse_residual = get_acse_residual_fqe_parallel(
                    fqe_wf, self.elec_hamil, sdim)
                acse_res_op = get_fermion_op(acse_residual)
                tpdm_grad = get_tpdm_grad_fqe_parallel(fqe_wf, acse_residual,
                                                       sdim)
            else:
                acse_residual = get_acse_residual_fqe(fqe_wf, self.elec_hamil,
                                                      sdim)
                acse_res_op = get_fermion_op(acse_residual)
                tpdm_grad = get_tpdm_grad_fqe(fqe_wf, acse_residual, sdim)

            # epsilon_opt = - Tr[K, D'(lambda)] / Tr[K, D''(lambda)]
            # K is reduced Hamiltonian
            # get approximate D'' by short propagation
            # TODO: do this with cumulant reconstruction instead of wf prop.
            fqe_wfh = fqe_wf.time_evolve(h, 1j * acse_res_op)
            acse_residualh = get_acse_residual_fqe(fqe_wfh, self.elec_hamil,
                                                   sdim)
            tpdm_gradh = get_tpdm_grad_fqe(fqe_wfh, acse_residualh, sdim)
            tpdm_gradgrad = (1 / h) * (tpdm_gradh - tpdm_grad)
            epsilon = -np.einsum("ijkl,ijkl", self.reduced_ham.two_body_tensor,
                                 tpdm_grad)
            epsilon /= np.einsum("ijkl,ijkl", self.reduced_ham.two_body_tensor,
                                 tpdm_gradgrad)
            epsilon = epsilon.real

            # Evolve by the optimal step and record the new energy.
            fqe_wf = fqe_wf.time_evolve(epsilon, 1j * acse_res_op)
            current_energy = fqe_wf.expectationValue(self.elec_hamil).real
            self.acse_energy.append(current_energy.real)
            print_string = "Iter {: 5f}\tcurrent energy {: 5.10f}\t".format(
                iteration, current_energy)
            print_string += "|dE| {: 5.10f}\tStep size {: 5.10f}".format(
                np.abs(self.acse_energy[-2] - self.acse_energy[-1]), epsilon)
            if self.verbose:
                print(print_string)
            # Converged once the energy change drops below 0.5e-4.
            if (iteration >= 1 and
                    np.abs(self.acse_energy[-2] - self.acse_energy[-1]) <
                    0.5e-4):
                break
            iteration += 1

    def bc_solve_rdms(self, initial_wf):
        """Propagate BC differential equation until convergence.

        State is evolved and then 3-RDM is measured. This information is
        used to construct a new state

        Args:
            initial_wf: Initial wavefunction to evolve.
        """
        fqe_wf = copy.deepcopy(initial_wf)
        iter_max = self.iter_max
        iteration = 0
        sector = (self.nalpha + self.nbeta, self.sz)
        # Finite-difference step used below to approximate D''.
        h = 1.0e-4
        self.acse_energy = [fqe_wf.expectationValue(self.elec_hamil).real]
        while iteration < iter_max:
            # extract FqeData object each iteration in case fqe_wf is copied
            fqe_data = fqe_wf.sector(sector)

            # get RDMs from FqeData
            d3 = fqe_data.get_three_pdm()
            _, tpdm = fqe_data.get_openfermion_rdms()

            # get ACSE Residual and 2-RDM gradient
            acse_residual = two_rdo_commutator_symm(
                self.reduced_ham.two_body_tensor, tpdm, d3)
            tpdm_grad = two_rdo_commutator_antisymm(acse_residual, tpdm, d3)
            acse_res_op = get_fermion_op(acse_residual)

            # epsilon_opt = - Tr[K, D'(lambda)] / Tr[K, D''(lambda)]
            # K is reduced Hamiltonian
            # get approximate D'' by short propagation
            # TODO: do this with cumulant reconstruction instead of wf prop.
            fqe_wfh = fqe_wf.time_evolve(h, 1j * acse_res_op)
            fqe_datah = fqe_wfh.sector(sector)
            d3h = fqe_datah.get_three_pdm()
            _, tpdmh = fqe_datah.get_openfermion_rdms()
            acse_residualh = two_rdo_commutator_symm(
                self.reduced_ham.two_body_tensor, tpdmh, d3h)
            tpdm_gradh = two_rdo_commutator_antisymm(acse_residualh, tpdmh, d3h)
            tpdm_gradgrad = (1 / h) * (tpdm_gradh - tpdm_grad)
            epsilon = -np.einsum("ijkl,ijkl", self.reduced_ham.two_body_tensor,
                                 tpdm_grad)
            epsilon /= np.einsum("ijkl,ijkl", self.reduced_ham.two_body_tensor,
                                 tpdm_gradgrad)
            epsilon = epsilon.real

            # Evolve by the optimal step and record the new energy.
            fqe_wf = fqe_wf.time_evolve(epsilon, 1j * acse_res_op)
            current_energy = fqe_wf.expectationValue(self.elec_hamil).real
            self.acse_energy.append(current_energy.real)
            print_string = "Iter {: 5f}\tcurrent energy {: 5.10f}\t".format(
                iteration, current_energy)
            print_string += "|dE| {: 5.10f}\tStep size {: 5.10f}".format(
                np.abs(self.acse_energy[-2] - self.acse_energy[-1]), epsilon)
            if self.verbose:
                print(print_string)
            # Converged once the energy change drops below 0.5e-4.
            if (iteration >= 1 and
                    np.abs(self.acse_energy[-2] - self.acse_energy[-1]) <
                    0.5e-4):
                break
            iteration += 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.