gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
"""Tests for the access control mechanisms."""
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import hunts
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.rdfvalues import aff4_rdfvalues
from grr.lib.rdfvalues import client as rdf_client
class AccessControlTest(test_lib.GRRBaseTest):
  """Tests the access control mechanisms."""

  # Disable the permissive mock ACL manager that the test base class would
  # otherwise install, so these tests exercise the real checks.
  install_mock_acl = False

  def setUp(self):
    super(AccessControlTest, self).setUp()
    # We want to test the FullAccessControlManager
    data_store.DB.security_manager = access_control.FullAccessControlManager()

  def RevokeClientApproval(self, client_id, token, remove_from_cache=True):
    """Revokes a previously granted client approval.

    Args:
      client_id: The client whose approval is revoked.
      token: ACLToken whose username/reason identify the approval object.
      remove_from_cache: If True, also expire the security manager's cached
        decision so the revocation takes effect immediately.
    """
    approval_urn = aff4.ROOT_URN.Add("ACL").Add(client_id).Add(
        token.username).Add(utils.EncodeReasonString(token.reason))

    # Manipulating approval objects directly requires supervisor rights.
    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True

    approval_request = aff4.FACTORY.Open(approval_urn, mode="rw",
                                         token=super_token)
    # Dropping the APPROVER attribute invalidates the approval.
    approval_request.DeleteAttribute(approval_request.Schema.APPROVER)
    approval_request.Close()

    if remove_from_cache:
      data_store.DB.security_manager.acl_cache.ExpireObject(approval_urn)

  def CreateHuntApproval(self, hunt_urn, token, admin=False):
    """Creates a two-approver approval object for the given hunt.

    Args:
      hunt_urn: URN of the hunt to approve.
      token: ACLToken whose username/reason identify the approval object.
      admin: If True, make "Approver1" an admin user as well.
    """
    approval_urn = aff4.ROOT_URN.Add("ACL").Add(hunt_urn.Path()).Add(
        token.username).Add(utils.EncodeReasonString(token.reason))

    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True

    approval_request = aff4.FACTORY.Create(approval_urn, "HuntApproval",
                                           mode="rw", token=super_token)
    approval_request.AddAttribute(approval_request.Schema.APPROVER("Approver1"))
    approval_request.AddAttribute(approval_request.Schema.APPROVER("Approver2"))
    approval_request.Close()

    if admin:
      self.CreateAdminUser("Approver1")

  def CreateSampleHunt(self):
    """Creates a SampleHunt, writes it to the data store and returns its id."""
    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True

    with hunts.GRRHunt.StartHunt(
        hunt_name="SampleHunt", token=super_token) as hunt:
      return hunt.session_id

  def testSimpleAccess(self):
    """Tests that simple access requires a token."""
    client_urn = rdf_client.ClientURN("C.%016X" % 0)

    # These should raise for a lack of token
    for urn, mode in [("aff4:/ACL", "r"),
                      ("aff4:/config/drivers", "r"),
                      ("aff4:/", "rw"),
                      (client_urn, "r")]:
      self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open,
                        urn, mode=mode)

    # These should raise for trying to get write access.
    for urn, mode in [("aff4:/ACL", "rw"),
                      (client_urn, "rw")]:
      fd = aff4.FACTORY.Open(urn, mode=mode, token=self.token)
      # Force cache flush.
      fd._dirty = True
      self.assertRaises(access_control.UnauthorizedAccess, fd.Close)

    # These should raise for access without a token:
    for urn, mode in [(client_urn.Add("flows").Add("W:1234"), "r"),
                      (client_urn.Add("/fs"), "r")]:
      self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open,
                        urn, mode=mode)

      # Even if a token is provided - it is not authorized.
      self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open,
                        urn, mode=mode, token=self.token)

  def testSupervisorToken(self):
    """Tests that the supervisor token overrides the approvals."""
    urn = rdf_client.ClientURN("C.%016X" % 0).Add("/fs/os/c")

    self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn)

    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True
    aff4.FACTORY.Open(urn, mode="rw", token=super_token)

  def testExpiredTokens(self):
    """Tests that expired tokens are rejected."""
    urn = rdf_client.ClientURN("C.%016X" % 0).Add("/fs/os/c")
    self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn)

    with test_lib.FakeTime(100):
      # Token expires in 5 seconds.
      super_token = access_control.ACLToken(username="test", expiry=105)
      super_token.supervisor = True

      # This should work since token is a super token.
      aff4.FACTORY.Open(urn, mode="rw", token=super_token)

    # Change the time to 200
    with test_lib.FakeTime(200):
      # Should be expired now.
      self.assertRaises(access_control.ExpiryError, aff4.FACTORY.Open, urn,
                        token=super_token, mode="rw")

  def testApprovalExpiry(self):
    """Tests that approvals expire after the correct time."""
    client_id = "C.%016X" % 0
    urn = rdf_client.ClientURN(client_id).Add("/fs/os/c")
    token = access_control.ACLToken(username="test", reason="For testing")
    self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn,
                      None, "rw", token)

    with test_lib.FakeTime(100.0, increment=1e-3):
      self.GrantClientApproval(client_id, token)

      # This should work now.
      aff4.FACTORY.Open(urn, mode="rw", token=token)

    token_expiry = config_lib.CONFIG["ACL.token_expiry"]

    # This is close to expiry but should still work.
    with test_lib.FakeTime(100.0 + token_expiry - 100.0):
      aff4.FACTORY.Open(urn, mode="rw", token=token)

    # Past expiry, should fail.
    with test_lib.FakeTime(100.0 + token_expiry + 100.0):
      self.assertRaises(access_control.UnauthorizedAccess,
                        aff4.FACTORY.Open, urn, None, "rw", token)

  def testClientApproval(self):
    """Tests that we can create an approval object to access clients."""
    client_id = "C.%016X" % 0
    urn = rdf_client.ClientURN(client_id).Add("/fs")
    token = access_control.ACLToken(username="test", reason="For testing")

    self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn,
                      None, "rw", token=token)

    self.GrantClientApproval(client_id, token)
    fd = aff4.FACTORY.Open(urn, None, "rw", token=token)
    fd.Close()

    self.RevokeClientApproval(client_id, token)
    self.assertRaises(access_control.UnauthorizedAccess,
                      aff4.FACTORY.Open, urn, None, "rw", token=token)

  def testHuntApproval(self):
    """Tests that we can create an approval object to run hunts."""
    token = access_control.ACLToken(username="test", reason="For testing")
    hunt_urn = self.CreateSampleHunt()
    # Without any approval the flow must be rejected.
    self.assertRaisesRegexp(
        access_control.UnauthorizedAccess,
        "No approval found for",
        flow.GRRFlow.StartFlow,
        flow_name="StartHuntFlow", token=token, hunt_urn=hunt_urn)

    # An approval without an admin approver is still insufficient.
    self.CreateHuntApproval(hunt_urn, token, admin=False)
    self.assertRaisesRegexp(
        access_control.UnauthorizedAccess,
        r"At least 1 approver\(s\) should have 'admin' label.",
        flow.GRRFlow.StartFlow,
        flow_name="StartHuntFlow", token=token, hunt_urn=hunt_urn)

    self.CreateHuntApproval(hunt_urn, token, admin=True)
    flow.GRRFlow.StartFlow(flow_name="StartHuntFlow", token=token,
                           hunt_urn=hunt_urn)

  def testUserAccess(self):
    """Tests access to user objects."""
    token = access_control.ACLToken(username="test", reason="For testing")
    urn = aff4.ROOT_URN.Add("users")
    # We cannot open any user account.
    self.assertRaises(access_control.UnauthorizedAccess,
                      aff4.FACTORY.Open, urn.Add("some_user"), None, "rw",
                      False, token)

    # But we can open our own.
    aff4.FACTORY.Open(urn.Add("test"), mode="rw", token=token)

    # And we can also access our labels.
    label_urn = urn.Add("test").Add("labels")
    labels = aff4.FACTORY.Open(label_urn, mode="rw", token=token)

    # But we cannot write to them.
    l = labels.Schema.LABELS()
    l.AddLabel(aff4_rdfvalues.AFF4ObjectLabel(name="admin", owner="GRR"))
    labels.Set(labels.Schema.LABELS, l)
    self.assertRaises(access_control.UnauthorizedAccess, labels.Close)

  def testForemanAccess(self):
    """Test admin users can access the foreman."""
    token = access_control.ACLToken(username="test", reason="For testing")
    self.assertRaises(access_control.UnauthorizedAccess,
                      aff4.FACTORY.Open, "aff4:/foreman", token=token)

    # We need a supervisor to manipulate a user's ACL token:
    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True

    # Make the user an admin user now, this time with the supervisor token.
    with aff4.FACTORY.Create("aff4:/users/test", "GRRUser",
                             token=super_token) as fd:
      fd.SetLabels("admin", owner="GRR")

    # Now we are allowed.
    aff4.FACTORY.Open("aff4:/foreman", token=token)

  def testCrashesAccess(self):
    """Tests that the crashes collection is only writable by supervisors."""
    # We need a supervisor to manipulate a user's ACL token:
    super_token = access_control.ACLToken(username="test")
    super_token.supervisor = True

    path = rdfvalue.RDFURN("aff4:/crashes")

    # Creating the collection with a plain token must fail on Close().
    crashes = aff4.FACTORY.Create(path, "RDFValueCollection", token=self.token)
    self.assertRaises(access_control.UnauthorizedAccess, crashes.Close)

    # This shouldn't raise as we're using supervisor token.
    crashes = aff4.FACTORY.Create(path, "RDFValueCollection",
                                  token=super_token)
    crashes.Close()

    # Writes with a plain token must still be rejected...
    crashes = aff4.FACTORY.Open(path, aff4_type="RDFValueCollection",
                                mode="rw", token=self.token)
    crashes.Set(crashes.Schema.DESCRIPTION("Some description"))
    self.assertRaises(access_control.UnauthorizedAccess, crashes.Close)

    # ...but read-only access is fine.
    crashes = aff4.FACTORY.Open(path, aff4_type="RDFValueCollection",
                                mode="r", token=self.token)
    crashes.Close()

  def testFlowAccess(self):
    """Tests access to flows."""
    token = access_control.ACLToken(username="test", reason="For testing")
    client_id = "C." + "a" * 16

    self.assertRaises(access_control.UnauthorizedAccess, flow.GRRFlow.StartFlow,
                      client_id=client_id, flow_name="SendingFlow",
                      message_count=1, token=token)

    self.GrantClientApproval(client_id, token)
    sid = flow.GRRFlow.StartFlow(
        client_id=client_id, flow_name="SendingFlow", message_count=1,
        token=token)

    # Check we can open the flow object.
    flow_obj = aff4.FACTORY.Open(sid, mode="r", token=token)

    # Check that we can not write to it.
    flow_obj.mode = "rw"

    state = flow_obj.Get(flow_obj.Schema.FLOW_STATE)
    flow_obj.Set(state)

    # This is not allowed - Users can not write to flows.
    self.assertRaises(access_control.UnauthorizedAccess,
                      flow_obj.Close)

    self.RevokeClientApproval(client_id, token)

    self.assertRaises(access_control.UnauthorizedAccess,
                      aff4.FACTORY.Open, sid, mode="r", token=token)

    self.GrantClientApproval(client_id, token)

    aff4.FACTORY.Open(sid, mode="r", token=token)

  def testCaches(self):
    """Makes sure that results are cached in the security manager."""
    token = access_control.ACLToken(username="test", reason="For testing")
    client_id = "C." + "b" * 16

    self.GrantClientApproval(client_id, token)

    sid = flow.GRRFlow.StartFlow(
        client_id=client_id, flow_name="SendingFlow", message_count=1,
        token=token)

    # Fill all the caches.
    aff4.FACTORY.Open(sid, mode="r", token=token)

    # Flush the AFF4 caches.
    aff4.FACTORY.Flush()

    # Remove the approval from the data store, but it should still exist in the
    # security manager cache.
    self.RevokeClientApproval(client_id, token, remove_from_cache=False)

    # If this doesn't raise now, all answers were cached.
    aff4.FACTORY.Open(sid, mode="r", token=token)

    # Flush the AFF4 caches.
    aff4.FACTORY.Flush()

    # Remove the approval from the data store, and from the security manager.
    self.RevokeClientApproval(client_id, token, remove_from_cache=True)

    # This must raise now.
    self.assertRaises(access_control.UnauthorizedAccess,
                      aff4.FACTORY.Open, sid, mode="r", token=token)

  def testBreakGlass(self):
    """Test the breakglass mechanism."""
    client_id = rdf_client.ClientURN("C.%016X" % 0)
    urn = client_id.Add("/fs/os/c")

    self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn,
                      token=self.token)

    # We expect to receive an email about this
    email = {}

    def SendEmail(to, from_user, subject, message, **_):
      # Capture the outgoing notification for later assertions.
      email["to"] = to
      email["from_user"] = from_user
      email["subject"] = subject
      email["message"] = message

    with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
      flow.GRRFlow.StartFlow(
          client_id=client_id, flow_name="BreakGlassGrantClientApprovalFlow",
          token=self.token, reason=self.token.reason)

      # Reset the emergency state of the token.
      self.token.is_emergency = False

      # This access is using the emergency_access granted, so we expect the
      # token to be tagged as such.
      aff4.FACTORY.Open(urn, token=self.token)

    self.assertEqual(email["to"],
                     config_lib.CONFIG["Monitoring.emergency_access_email"])
    self.assert_(self.token.username in email["message"])
    self.assertEqual(email["from_user"], self.token.username)

    # Make sure the token is tagged as an emergency token:
    self.assertEqual(self.token.is_emergency, True)

  def testValidateToken(self):
    """Tests token validation: a username is mandatory."""
    token = access_control.ACLToken(username="test", reason="For testing")

    access_manager = access_control.BasicAccessControlManager()
    access_manager.ValidateToken(token, "aff4:/C.0000000000000001")

    # A missing token is rejected.
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.ValidateToken(None, "aff4:/C.0000000000000001")

    # A token without a username is rejected.
    token = access_control.ACLToken(reason="For testing")
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.ValidateToken(token, "aff4:/C.0000000000000001")

  def testValidateRequestedAccess(self):
    """Tests that only known access modes are accepted."""
    access_manager = access_control.BasicAccessControlManager()
    access_manager.ValidateRequestedAccess("r", "aff4:/C.0000000000000001")

    # Empty access mode is rejected.
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.ValidateRequestedAccess("", "aff4:/C.0000000000000001")

    # Unknown access mode is rejected.
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.ValidateRequestedAccess("q", "aff4:/C.0000000000000001")

  def testCheckACL(self):
    """Tests the basic ACL check edge cases."""
    access_manager = access_control.FullAccessControlManager()

    # Supervisor can do anything
    token = access_control.ACLToken(username="unknown", supervisor=True)
    self.assertTrue(access_manager.CheckACL(token, "aff4:/C.0000000000000001"))

    # No target should raise
    token = access_control.ACLToken(username="unknown")
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.CheckACL(token, "")

    # Unless it is a system user
    token = access_control.ACLToken(username="GRRSystem", reason="bcause")
    self.assertTrue(access_manager.CheckACL(token, None))

    # No reason should raise
    token = access_control.ACLToken(username="unknown")
    with self.assertRaises(access_control.UnauthorizedAccess):
      access_manager.CheckACL(token, "aff4:/C.0000000000000001")
def main(argv):
  """Entry point: runs the full access-control test suite.

  Args:
    argv: Command line arguments forwarded by flags.StartMain.
  """
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  # Parse flags and hand control to main() via the GRR flags framework.
  flags.StartMain(main)
| |
# util/langhelpers.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from compat import update_wrapper, set_types, threading, callable, inspect_getfullargspec, py3k_warning
from sqlalchemy import exc
def _unique_symbols(used, *bases):
    """Yield one fresh symbol per base name, avoiding everything in `used`.

    For each base, the base itself is tried first, then base0..base999.
    Raises NameError when no candidate is free.
    """
    taken = set(used)
    for base in bases:
        candidates = itertools.chain(
            (base,),
            itertools.imap(lambda i: base + str(i), xrange(1000)))
        for candidate in candidates:
            if candidate not in taken:
                taken.add(candidate)
                yield candidate
                break
        else:
            raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
    """A signature-matching decorator factory."""
    def decorate(fn):
        if not inspect.isfunction(fn):
            raise Exception("not a decoratable function")
        spec = inspect_getfullargspec(fn)
        # Pick symbol names for the target and wrapped function that cannot
        # collide with any of fn's argument names.
        names = tuple(spec[0]) + spec[1:3] + (fn.func_name,)
        targ_name, fn_name = _unique_symbols(names, 'target', 'fn')

        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))

        # Build a lambda with fn's exact signature that forwards to
        # target(fn, <original args>), so introspection on the wrapper
        # matches the wrapped function.
        code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % (
            metadata)
        decorated = eval(code, {targ_name: target, fn_name: fn})
        # Copy the defaults so inspect sees them on the wrapper too.
        decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults
        return update_wrapper(decorated, fn)
    return update_wrapper(decorate, target)
def get_cls_kwargs(cls):
    """Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments. If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed to
    pass along unrecognized keywords to its base classes, and the collection
    process is repeated recursively on each of the bases.

    Uses a subset of inspect.getargspec() to cut down on method overhead.
    No anonymous tuple arguments please !
    """
    # Find the first class in the MRO that actually defines __init__; if
    # none does, there are no constructor keywords to report.
    for c in cls.__mro__:
        if '__init__' in c.__dict__:
            stack = set([c])
            break
    else:
        return []

    args = set()
    while stack:
        class_ = stack.pop()
        ctr = class_.__dict__.get('__init__', False)
        # Skip non-introspectable constructors (e.g. C-level slot wrappers)
        # and continue the search in the bases instead.
        if (not ctr or
            not isinstance(ctr, types.FunctionType) or
            not isinstance(ctr.func_code, types.CodeType)):
            stack.update(class_.__bases__)
            continue

        # this is shorthand for
        # names, _, has_kw, _ = inspect.getargspec(ctr)
        names, has_kw = inspect_func_args(ctr)
        args.update(names)
        # A **kwargs catch-all implies unknown keywords are forwarded to the
        # bases, so their constructor arguments count as well.
        if has_kw:
            stack.update(class_.__bases__)
    args.discard('self')
    return args
try:
    from inspect import CO_VARKEYWORDS

    def inspect_func_args(fn):
        """Return (argument names, has **kwargs) for `fn`.

        Fast path: reads the code object directly instead of paying the
        cost of inspect.getargspec().
        """
        co = fn.func_code
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        has_kw = bool(co.co_flags & CO_VARKEYWORDS)
        return args, has_kw
except ImportError:
    def inspect_func_args(fn):
        # Fallback for interpreters that don't expose CO_VARKEYWORDS.
        names, _, has_kw, _ = inspect.getargspec(fn)
        return names, bool(has_kw)
def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.

    Uses getargspec so is safe to call for methods, functions,
    etc.
    """
    spec = inspect.getargspec(func)
    return spec[0]
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    A enhanced variant of inspect.formatargspec to support code generation.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax. Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}
    """
    if callable(fn):
        spec = inspect_getfullargspec(fn)
    else:
        # we accept an existing argspec...
        spec = fn
    args = inspect.formatargspec(*spec)
    # self_arg: the receiver name, or a subscript of *varargs if the
    # function has no named positional arguments at all.
    if spec[0]:
        self_arg = spec[0][0]
    elif spec[1]:
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None
    # Py3K
    #apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2], None, spec[4])
    #num_defaults = 0
    #if spec[3]:
    #    num_defaults += len(spec[3])
    #if spec[4]:
    #    num_defaults += len(spec[4])
    #name_args = spec[0] + spec[4]
    # Py2K
    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
    num_defaults = 0
    if spec[3]:
        num_defaults += len(spec[3])
    name_args = spec[0]
    # end Py2K

    if num_defaults:
        defaulted_vals = name_args[0 - num_defaults:]
    else:
        defaulted_vals = ()

    # Render defaulted arguments as name=name so generated forwarding code
    # passes them through as keywords.
    apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg,
                    apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # Ungrouped output strips the surrounding parentheses.
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods

    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)
    """
    try:
        return format_argspec_plus(method, grouped=grouped)
    except TypeError:
        # Not introspectable: synthesize a conservative signature.
        if method is object.__init__:
            rendered = 'self'
        else:
            rendered = 'self, *args, **kwargs'
        if grouped:
            rendered = '(%s)' % rendered
        return dict(self_arg='self', args=rendered,
                    apply_pos=rendered, apply_kw=rendered)
def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods

    Wraps inspect.getargspec with error handling for typical __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)
    """
    try:
        return inspect.getargspec(method)
    except TypeError:
        # Not introspectable: fall back to a synthesized spec.
        if method is not object.__init__:
            return (['self'], 'args', 'kwargs', None)
        return (['self'], None, None, None)
def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not required."""
    # Only unbound methods (im_self is None) need unwrapping; everything
    # else passes straight through.
    is_method = isinstance(func_or_cls, types.MethodType)
    if is_method and not func_or_cls.im_self:
        return func_or_cls.im_func
    return func_or_cls
def generic_repr(obj):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.
    """
    def genargs():
        # Yield one repr fragment per constructor argument, reading the
        # value back from the same-named instance attribute.
        try:
            (args, vargs, vkw, defaults) = inspect.getargspec(obj.__init__)
        except TypeError:
            # __init__ is not introspectable (e.g. implemented in C).
            return

        default_len = defaults and len(defaults) or 0

        if not default_len:
            # No defaults: render everything positionally, including any
            # *varargs captured as a same-named attribute.
            for arg in args[1:]:
                yield repr(getattr(obj, arg, None))
            if vargs is not None and hasattr(obj, vargs):
                yield ', '.join(repr(val) for val in getattr(obj, vargs))
        else:
            # Positional args first, then defaulted args as name=value,
            # shown only when they differ from their default.
            for arg in args[1:-default_len]:
                yield repr(getattr(obj, arg, None))
            for (arg, defval) in zip(args[-default_len:], defaults):
                try:
                    val = getattr(obj, arg, None)
                    if val != defval:
                        yield '%s=%r' % (arg, val)
                except:
                    # Best effort: a failing comparison or repr simply
                    # omits the argument from the output.
                    pass
    return "%s(%s)" % (obj.__class__.__name__, ", ".join(genargs()))
class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.
    """

    def __init__(self, meth):
        # Store the bound target and the method name instead of the
        # instancemethod object itself, so the pair can be serialized.
        self.target = meth.im_self
        self.name = meth.__name__

    def __call__(self, *arg, **kw):
        bound = getattr(self.target, self.name)
        return bound(*arg, **kw)
def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.

    Traverses diamond hierarchies.

    Fibs slightly: subclasses of builtin types are not returned. Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.

    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.
    """
    # Py2K
    if isinstance(cls, types.ClassType):
        return list()
    # end Py2K
    hier = set([cls])
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        # Py2K
        # Old-style classes are skipped entirely.
        if isinstance(c, types.ClassType):
            continue
        # Walk upward through new-style bases not yet seen...
        for b in (_ for _ in c.__bases__
                  if _ not in hier and not isinstance(_, types.ClassType)):
        # end Py2K
        # Py3K
        #for b in (_ for _ in c.__bases__
        #          if _ not in hier):
            process.append(b)
            hier.add(b)
        # Py3K
        #if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
        #    continue
        # Py2K
        # ...and downward through subclasses, except under builtins (see
        # docstring "fibs slightly" note).
        if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'):
            continue
        # end Py2K
        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)
def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().

    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.
    """
    for key in dir(cls):
        # Report the value from the most-derived class that defines it.
        for klass in cls.__mro__:
            try:
                value = klass.__dict__[key]
            except KeyError:
                continue
            yield (key, value)
            break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
                                 name='self.proxy', from_instance=None):
    """Automates delegation of __specials__ for a proxying type."""
    if only:
        dunders = only
    else:
        if skip is None:
            # Dunders that must never be delegated to the proxied object.
            skip = ('__slots__', '__del__', '__getattribute__',
                    '__metaclass__', '__getstate__', '__setstate__')
        # Candidates: every dunder of from_cls not already on into_cls.
        dunders = [m for m in dir(from_cls)
                   if (m.startswith('__') and m.endswith('__') and
                       not hasattr(into_cls, m) and m not in skip)]
    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, '__call__'):
                continue
            fn = getattr(fn, 'im_func', fn)
        except AttributeError:
            continue
        try:
            spec = inspect.getargspec(fn)
            fn_args = inspect.formatargspec(spec[0])
            # d_args drops 'self': the call site already binds the target.
            d_args = inspect.formatargspec(spec[0][1:])
        except TypeError:
            # Not introspectable: fall back to a generic signature.
            fn_args = '(self, *args, **kw)'
            d_args = '(*args, **kw)'

        # Generate a forwarding method with fn's own signature that
        # delegates to `name` (e.g. self.proxy.<method>(...)).
        py = ("def %(method)s%(fn_args)s: "
              "return %(name)s.%(method)s%(d_args)s" % locals())

        env = from_instance is not None and {name: from_instance} or {}
        exec py in env
        try:
            env[method].func_defaults = fn.func_defaults
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    # Py3K
    #return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2)
    # Py2K
    # Compare the underlying functions so bound/unbound forms match.
    impl1 = getattr(meth1, 'im_func', meth1)
    impl2 = getattr(meth2, 'im_func', meth2)
    return impl1 is impl2
    # end Py2K
def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient. Methods present on ``obj`` that
    are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned. In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type. All public methods of cls are considered the
      interface. An ``obj`` instance of cls will always pass, ignoring
      ``required``..
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient. As a convenience, required may be a type, in which case
      all public methods of the type are required.
    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    # An actual instance of the interface class always passes.
    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    # complies: ">=" means every required name must be present; ">" (used
    # when nothing specific is required) means at least one interface
    # member beyond the empty set.
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not type(obj) is dict:
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    # Fall back to treating obj as a mapping of name -> callable, attached
    # to a synthesized class as staticmethods.
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))
class memoized_property(object):
    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        # Compute once, then shadow this descriptor with the plain value on
        # the instance so subsequent reads bypass __get__ entirely.
        value = self.fget(obj)
        obj.__dict__[self.__name__] = value
        return value
class memoized_instancemethod(object):
    """Decorate a method memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self

        def oneshot(*args, **kw):
            # First call: compute the value, then install a stub on the
            # instance that returns it unconditionally from now on.
            result = self.fget(obj, *args, **kw)

            def memo(*a, **kw):
                return result

            memo.__name__ = self.__name__
            memo.__doc__ = self.__doc__
            obj.__dict__[self.__name__] = memo
            return result

        oneshot.__name__ = self.__name__
        oneshot.__doc__ = self.__doc__
        return oneshot
def reset_memoized(instance, name):
    """Discard the memoized value `name` from `instance`, if present."""
    stash = instance.__dict__
    if name in stash:
        del stash[name]
class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        # Names of all memoized attributes registered with this group.
        self.attributes = list(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for name in self.attributes:
            stash.pop(name, None)

    def __call__(self, fn):
        # Register and wrap as a memoized property.
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        # Register and wrap as a memoized instance method.
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
class importlater(object):
    """Deferred import object.

    e.g.::

      somesubmod = importlater("mypackage.somemodule", "somesubmod")

    is equivalent to::

      from mypackage.somemodule import somesubmod

    except evaluted upon attribute access to "somesubmod".

    importlater() currently requires that resolve_all() be
    called, typically at the bottom of a package's __init__.py.
    This is so that __import__ still called only at
    module import time, and not potentially within
    a non-main thread later on.
    """
    # All importlater instances that have not yet had __import__ run.
    _unresolved = set()

    def __init__(self, path, addtl=None):
        # path: dotted module path; addtl: optional attribute/submodule name.
        self._il_path = path
        self._il_addtl = addtl
        importlater._unresolved.add(self)

    @classmethod
    def resolve_all(cls):
        # Perform the actual __import__ for every pending instance.
        for m in list(importlater._unresolved):
            m._resolve()

    @property
    def _full_path(self):
        if self._il_addtl:
            return self._il_path + "." + self._il_addtl
        else:
            return self._il_path

    @memoized_property
    def module(self):
        # The target module/attribute; only available after resolve_all().
        if self in importlater._unresolved:
            raise ImportError(
                    "importlater.resolve_all() hasn't been called")

        m = self._initial_import
        if self._il_addtl:
            m = getattr(m, self._il_addtl)
        else:
            # __import__ returns the top-level package; walk down to the
            # leaf module named by the dotted path.
            for token in self._il_path.split(".")[1:]:
                m = getattr(m, token)
        return m

    def _resolve(self):
        importlater._unresolved.discard(self)
        if self._il_addtl:
            self._initial_import = __import__(
                    self._il_path, globals(), locals(),
                    [self._il_addtl])
        else:
            self._initial_import = __import__(self._il_path)

    def __getattr__(self, key):
        # 'module' reaching __getattr__ means the memoized property itself
        # failed; surface that as an ImportError rather than recursing.
        if key == 'module':
            raise ImportError("Could not resolve module %s"
                    % self._full_path)
        try:
            attr = getattr(self.module, key)
        except AttributeError:
            raise AttributeError(
                "Module %s has no attribute '%s'" %
                (self._full_path, key)
            )
        # Cache on the instance so later lookups skip __getattr__.
        self.__dict__[key] = attr
        return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce a value to bool, accepting common true/false strings."""
    if isinstance(obj, (str, unicode)):
        text = obj.strip().lower()
        if text in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if text in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError("String is not true/false: %r" % text)
    return bool(obj)
def bool_or_str(*text):
    """Return a callable that evaluates a string as a boolean, or passes
    through one of a set of "alternate" string values unchanged.
    """
    def bool_or_value(obj):
        # Whitelisted literals are returned verbatim; anything else is
        # coerced through asbool().
        if obj not in text:
            return asbool(obj)
        return obj
    return bool_or_value
def asint(value):
    """Coerce to integer, passing None through unchanged."""
    if value is not None:
        return int(value)
    return None
def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary. If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.
    """
    if key not in kw:
        return
    value = kw[key]
    # Leave None and already-correct types alone.
    if value is None or type(value) is type_:
        return
    if flexi_bool and type_ is bool:
        kw[key] = asbool(value)
    else:
        kw[key] = type_(value)
def constructor_copy(obj, cls, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.
    """
    source = obj.__dict__
    # Attributes found on obj override any explicitly-passed keywords.
    for name in get_cls_kwargs(cls):
        if name in source:
            kw[name] = source[name]
    return cls(**kw)
def counter():
    """Return a threadsafe counter function."""
    lock = threading.Lock()
    # Start at 1; long literal avoids int overflow on very long runs (Py2).
    counter = itertools.count(1L)

    # avoid the 2to3 "next" transformation...
    def _next():
        # Serialize access so concurrent callers each get a unique value.
        lock.acquire()
        try:
            return counter.next()
        finally:
            lock.release()

    return _next
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, '__emulates__'):
        # canonicalize set vs sets.Set to a standard: the builtin set
        emulated = specimen.__emulates__
        if emulated is not None and issubclass(emulated, set_types):
            return set
        return emulated

    # Classes are tested with issubclass, instances with isinstance.
    isa = issubclass if isinstance(specimen, type) else isinstance
    if isa(specimen, list):
        return list
    if isa(specimen, set_types):
        return set
    if isa(specimen, dict):
        return dict

    # Fall back to duck-typing on the characteristic mutator method.
    if hasattr(specimen, 'append'):
        return list
    if hasattr(specimen, 'add'):
        return set
    if hasattr(specimen, 'set'):
        return dict
    return default
def assert_arg_type(arg, argtype, name):
    """Return *arg* if it is an instance of *argtype*; otherwise raise
    an ``exc.ArgumentError`` naming the offending argument.
    """
    if isinstance(arg, argtype):
        return arg
    if isinstance(argtype, tuple):
        expected = ' or '.join("'%s'" % a for a in argtype)
        raise exc.ArgumentError(
            "Argument '%s' is expected to be one of type %s, got '%s'" %
            (name, expected, type(arg)))
    raise exc.ArgumentError(
        "Argument '%s' is expected to be of type '%s', got '%s'" %
        (name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""
    # NOTE(review): the "# Py3K" / "# Py2K" markers below appear to be
    # directives for a 2to3-style source preprocessing step -- confirm
    # before removing or rewording them.
    # Py3K
    #if hasattr(dictlike, 'items'):
    #    return dictlike.items()
    # Py2K
    if hasattr(dictlike, 'iteritems'):
        return dictlike.iteritems()
    elif hasattr(dictlike, 'items'):
        return iter(dictlike.items())
    # end Py2K
    # No items()/iteritems(): fall back to iterating keys and fetching
    # each value through __getitem__ (preferred) or get().
    getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
    if getter is None:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)

    if hasattr(dictlike, 'iterkeys'):
        # Lazy generator: pairs are produced one at a time.
        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)
        return iterator()
    elif hasattr(dictlike, 'keys'):
        return iter((key, getter(key)) for key in dictlike.keys())
    else:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)
class classproperty(property):
    """A decorator that behaves like @property except that operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.

    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(self, obj, cls):
        # Always call the getter with the owning class, regardless of
        # whether access was through the class or an instance.
        return self.fget(cls)
class _symbol(object):
    # Internal value type produced by :class:`symbol`; instances are
    # interned by ``symbol.__new__`` so identity comparison works.
    def __init__(self, name, doc=None):
        """Construct a new named symbol."""
        assert isinstance(name, str)
        self.name = name
        if doc:
            self.__doc__ = doc
    def __reduce__(self):
        # Pickle through the interning ``symbol()`` constructor so that
        # unpickling yields the canonical singleton for this name.
        return symbol, (self.name,)
    def __repr__(self):
        # NOTE: the unbalanced quote is intentional -- it matches the
        # documented repr in :class:`symbol` ("<symbol 'foo>").
        return "<symbol '%s>" % self.name
# Masquerade as ``symbol`` in reprs/tracebacks.
_symbol.__name__ = 'symbol'
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    <symbol 'foo>

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.

    """
    symbols = {}
    _lock = threading.Lock()

    def __new__(cls, name, doc=None):
        # Bug fix: the original acquired ``cls._lock`` but released
        # ``symbol._lock`` -- two different locks if a subclass defines
        # its own ``_lock``.  ``with`` guarantees the same lock object
        # is acquired and released.
        with cls._lock:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc)
            return sym
# Module-level sequence for set_creation_order().
_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).

    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order += 1
def warn_exception(func, *args, **kwargs):
    """Execute *func*, converting any exception into a warning.

    Returns the function's result on success, ``None`` on failure.
    Catches ``Exception`` rather than the original bare ``except:`` so
    that ``SystemExit`` and ``KeyboardInterrupt`` still propagate.
    """
    try:
        return func(*args, **kwargs)
    except Exception:
        warn("%s('%s') ignored" % sys.exc_info()[0:2])
def warn(msg, stacklevel=3):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.

    .. note::

       This function is swapped out when the test suite
       runs, with a compatible version that uses
       warnings.warn_explicit, so that the warnings registry can
       be controlled.

    The original tested ``isinstance(msg, basestring)``, which is a
    NameError on Python 3; ``(str, u''.__class__)`` covers ``str`` and
    ``unicode`` on Python 2 and plain ``str`` on Python 3.
    """
    if isinstance(msg, (str, u''.__class__)):
        warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
    else:
        warnings.warn(msg, stacklevel=stacklevel)
# Patterns identifying SQLAlchemy-internal and unittest framework
# frames, used as the default prefix/suffix filters below.
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')


def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
      a list of traceback lines as returned by ``traceback.format_stack()``
    :param exclude_prefix:
      a regular expression object matching lines to skip at beginning of ``tb``
    :param exclude_suffix:
      a regular expression object matching lines to skip at end of ``tb``
    """
    lo, hi = 0, len(tb) - 1
    while lo <= hi and exclude_prefix.search(tb[lo]):
        lo += 1
    while lo <= hi and exclude_suffix.search(tb[hi]):
        hi -= 1
    return tb[lo:hi + 1]

NoneType = type(None)
| |
# -*- coding: utf-8 -*-
"""
Functions to generate spike trains from analog signals,
or to generate random spike trains.
Some functions are based on the NeuroTools stgen module, which was mostly
written by Eilif Muller, or from the NeuroTools signals.analogs module.
:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import numpy as np
from quantities import ms, mV, Hz, Quantity, dimensionless
from neo import SpikeTrain
import random
from elephant.spike_train_surrogates import dither_spike_train
def threshold_detection(signal, threshold=0.0 * mV, sign='above'):
    """
    Returns the times when the analog signal crosses a threshold.
    Usually used for extracting spike times from a membrane potential.
    Adapted from version in NeuroTools.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' is an analog signal.
    threshold : A quantity, e.g. in mV
        'threshold' contains a value that must be reached
        for an event to be detected.
    sign : 'above' or 'below'
        'sign' determines whether to count thresholding crossings
        that cross above or below the threshold.

    Returns
    -------
    result_st : neo SpikeTrain object
        'result_st' contains the spike times of each of the events (spikes)
        extracted from the signal.

    Raises
    ------
    ValueError
        If `sign` is neither 'above' nor 'below'.
    """
    assert threshold is not None, "A threshold must be provided"

    # Bug fix: the original used ``sign is 'above'`` (identity test on a
    # string literal) and ``sign in 'below'`` (a substring test that also
    # accepted e.g. 'b' or 'el'), and left ``cutout`` undefined for any
    # other value.  Compare with '==' and reject anything else.
    if sign == 'above':
        cutout = np.where(signal > threshold)[0]
    elif sign == 'below':
        cutout = np.where(signal < threshold)[0]
    else:
        raise ValueError("sign (=%r) must be 'above' or 'below'" % (sign,))

    if len(cutout) <= 0:
        events = np.zeros(0)
    else:
        # Keep only the first sample of each contiguous run of
        # threshold-crossing samples (the crossing events).
        take = np.where(np.diff(cutout) > 1)[0] + 1
        take = np.append(0, take)

        time = signal.times
        events = time[cutout][take]

    events_base = events.base
    if events_base is None:
        # This occurs in some Python 3 builds due to some
        # bug in quantities.
        events_base = np.array([event.base for event in events])  # Workaround

    result_st = SpikeTrain(events_base, units=signal.times.units,
                           t_start=signal.t_start, t_stop=signal.t_stop)
    return result_st
def _homogeneous_process(interval_generator, args, mean_rate, t_start, t_stop,
                         as_array):
    """
    Returns a spike train whose spikes are a realization of a random process
    generated by the function `interval_generator` with the given rate,
    starting at time `t_start` and stopping `time t_stop`.
    """
    def rescale(x):
        # Convert raw inter-spike intervals to the units of t_stop.
        return (x / mean_rate.units).rescale(t_stop.units)

    n = int(((t_stop - t_start) * mean_rate).simplified)
    # Draw a few more ISIs than the expected count so the cumulative sum
    # usually reaches t_stop in one shot.  ``size`` must be an int:
    # np.ceil returns a float, which modern numpy rejects.
    number = int(np.ceil(n + 3 * np.sqrt(n)))
    if number < 100:
        number = int(min(5 + np.ceil(2 * n), 100))
    assert number > 4  # if positive, number cannot be less than 5
    isi = rescale(interval_generator(*args, size=number))
    spikes = np.cumsum(isi)
    spikes += t_start
    i = spikes.searchsorted(t_stop)
    if i == len(spikes):
        # ISI buffer overrun: keep drawing single intervals until the
        # train passes t_stop.
        extra_spikes = []
        t_last = spikes[-1] + rescale(interval_generator(*args, size=1))[0]
        while t_last < t_stop:
            extra_spikes.append(t_last)
            t_last = t_last + rescale(interval_generator(*args, size=1))[0]
        # np.concatenate does not conserve units
        spikes = Quantity(
            np.concatenate(
                (spikes, extra_spikes)).magnitude, units=spikes.units)
    else:
        spikes = spikes[:i]
    if as_array:
        spikes = spikes.magnitude
    else:
        spikes = SpikeTrain(
            spikes, t_start=t_start, t_stop=t_stop, units=spikes.units)
    return spikes
def homogeneous_poisson_process(rate, t_start=0.0 * ms, t_stop=1000.0 * ms,
                                as_array=False):
    """
    Returns a spike train whose spikes are a realization of a Poisson process
    with the given rate, starting at time `t_start` and stopping time `t_stop`.

    All numerical values should be given as Quantities, e.g. 100*Hz.

    Parameters
    ----------
    rate : Quantity scalar with dimension 1/time
        The rate of the discharge.
    t_start : Quantity scalar with dimension time
        The beginning of the spike train.
    t_stop : Quantity scalar with dimension time
        The end of the spike train.
    as_array : bool
        If True, a NumPy array of sorted spikes is returned,
        rather than a SpikeTrain object.

    Examples
    --------
    >>> from quantities import Hz, ms
    >>> spikes = homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
    >>> spikes = homogeneous_poisson_process(
            20*Hz, 5000*ms, 10000*ms, as_array=True)
    """
    # Exponential ISIs with mean 1/rate yield a Poisson process.
    mean_interval = 1 / rate
    return _homogeneous_process(
        np.random.exponential, (mean_interval,), rate, t_start, t_stop,
        as_array)
def homogeneous_gamma_process(a, b, t_start=0.0 * ms, t_stop=1000.0 * ms,
                              as_array=False):
    """
    Returns a spike train whose spikes are a realization of a gamma process
    with the given parameters, starting at time `t_start` and stopping time
    `t_stop` (average rate will be b/a).

    All numerical values should be given as Quantities, e.g. 100*Hz.

    Parameters
    ----------
    a : int or float
        The shape parameter of the gamma distribution.
    b : Quantity scalar with dimension 1/time
        The rate parameter of the gamma distribution.
    t_start : Quantity scalar with dimension time
        The beginning of the spike train.
    t_stop : Quantity scalar with dimension time
        The end of the spike train.
    as_array : bool
        If True, a NumPy array of sorted spikes is returned,
        rather than a SpikeTrain object.

    Examples
    --------
    >>> from quantities import Hz, ms
    >>> spikes = homogeneous_gamma_process(2.0, 50*Hz, 0*ms, 1000*ms)
    >>> spikes = homogeneous_gamma_process(
            5.0, 20*Hz, 5000*ms, 10000*ms, as_array=True)
    """
    # Gamma(shape=a, scale=1/b) ISIs give an average spike rate of b/a.
    shape, scale = a, 1 / b
    average_rate = b / a
    return _homogeneous_process(
        np.random.gamma, (shape, scale), average_rate, t_start, t_stop,
        as_array)
def _n_poisson(rate, t_stop, t_start=0.0 * ms, n=1):
    """
    Generates one or more independent Poisson spike trains.

    Parameters
    ----------
    rate : Quantity or Quantity array
        Expected firing rate (frequency) of each output SpikeTrain.
        Can be one of:
        *  a single Quantity value: expected firing rate of each output
           SpikeTrain
        *  a Quantity array: rate[i] is the expected firing rate of the i-th
           output SpikeTrain
    t_stop : Quantity
        Single common stop time of each output SpikeTrain. Must be > t_start.
    t_start : Quantity (optional)
        Single common start time of each output SpikeTrain. Must be < t_stop.
        Default: 0 s.
    n: int (optional)
        If rate is a single Quantity value, n specifies the number of
        SpikeTrains to be generated. If rate is an array, n is ignored and the
        number of SpikeTrains is equal to len(rate).
        Default: 1

    Returns
    -------
    list of neo.SpikeTrain
        Each SpikeTrain contains one of the independent Poisson spike trains,
        either n SpikeTrains of the same rate, or len(rate) SpikeTrains with
        varying rates according to the rate parameter. The time unit of the
        SpikeTrains is given by t_stop.

    Raises
    ------
    ValueError
        If rate has no frequency unit, t_start >= t_stop, n is not a
        positive int, or any rate is negative.
    """
    # Check that the provided input is Hertz or raise an error
    try:
        for r in rate.reshape(-1, 1):
            r.rescale('Hz')
    except AttributeError:
        raise ValueError('rate argument must have rate unit (1/time)')

    # Check t_start < t_stop
    if not t_start < t_stop:
        raise ValueError(
            't_start (=%s) must be < t_stop (=%s)' % (t_start, t_stop))

    # Validate n with isinstance, consistent with the identical check in
    # single_interaction_process (the original used ``type(n) == int``).
    if not (isinstance(n, int) and n > 0):
        raise ValueError('n (=%s) must be a positive integer' % str(n))
    rate_dl = rate.simplified.magnitude.flatten()

    # Check rate input parameter
    if len(rate_dl) == 1:
        if rate_dl < 0:
            raise ValueError('rate (=%s) must be non-negative.' % rate)
        rates = np.array([rate_dl] * n)
    else:
        rates = rate_dl.flatten()
        if any(rates < 0):
            raise ValueError('rate must have non-negative elements.')
    return [homogeneous_poisson_process(r * Hz, t_start, t_stop)
            for r in rates]
def single_interaction_process(
        rate, rate_c, t_stop, n=2, jitter=0 * ms, coincidences='deterministic',
        t_start=0 * ms, min_delay=0 * ms, return_coinc=False):
    """
    Generates a multidimensional Poisson SIP (single interaction process)
    plus independent Poisson processes.

    A Poisson SIP consists of Poisson time series which are independent
    except for simultaneous events in all of them. This routine generates
    a SIP plus additional parallel independent Poisson processes.
    See [1].

    Parameters
    ----------
    t_stop: quantities.Quantity
        Total time of the simulated processes. The events are drawn between
        0 and `t_stop`.
    rate: quantities.Quantity
        Overall mean rate of the time series to be generated (coincidence
        rate `rate_c` is subtracted to determine the background rate). Can be:
        * a float, representing the overall mean rate of each process. If
          so, it must be higher than `rate_c`.
        * an iterable of floats (one float per process), each float
          representing the overall mean rate of a process. If so, all the
          entries must be larger than `rate_c`.
    rate_c: quantities.Quantity
        Coincidence rate (rate of coincidences for the n-dimensional SIP).
        The SIP spike trains will have coincident events with rate `rate_c`
        plus independent 'background' events with rate `rate-rate_c`.
    n: int, optional
        If `rate` is a single Quantity value, `n` specifies the number of
        SpikeTrains to be generated. If rate is an array, `n` is ignored and
        the number of SpikeTrains is equal to `len(rate)`.
        Default: 2
    jitter: quantities.Quantity, optional
        Jitter for the coincident events. If `jitter == 0`, the events of all
        n correlated processes are exactly coincident. Otherwise, they are
        jittered around a common time randomly, up to +/- `jitter`.
    coincidences: string, optional
        Whether the total number of injected coincidences must be determin-
        istic (i.e. rate_c is the actual rate with which coincidences are
        generated) or stochastic (i.e. rate_c is the mean rate of coincid-
        ences):
        * 'deterministic': deterministic rate
        * 'stochastic': stochastic rate
        Default: 'deterministic'
    t_start: quantities.Quantity, optional
        Starting time of the series. If specified, it must be lower than
        t_stop
        Default: 0 * ms
    min_delay: quantities.Quantity, optional
        Minimum delay between consecutive coincidence times.
        Default: 0 * ms
    return_coinc: bool, optional
        Whether to return the coincidence times for the SIP process
        Default: False

    Returns
    --------
    output: list
        Realization of a SIP consisting of n Poisson processes characterized
        by synchronous events (with the given jitter)
        If `return_coinc` is `True`, the coincidence times are returned as a
        second output argument. They also have an associated time unit (same
        as `t_stop`).

    References
    ----------
    [1] Kuhn, Aertsen, Rotter (2003) Neural Comput 15(1):67-101

    Examples
    --------
    >>> import quantities as qt
    >>> sip, coinc = single_interaction_process(
    ...     n=10, t_stop=1 * qt.s, rate=20 * qt.Hz,
    ...     rate_c=4 * qt.Hz, return_coinc=True)
    """
    # Check if n is a positive integer
    if not (isinstance(n, int) and n > 0):
        raise ValueError('n (=%s) must be a positive integer' % str(n))
    # Assign time unit to jitter, or check that its existing unit is a time
    # unit
    jitter = abs(jitter)
    # Define the array of rates from input argument rate. Check that its length
    # matches with n
    if rate.ndim == 0:
        if rate < 0 * Hz:
            raise ValueError(
                'rate (=%s) must be non-negative.' % str(rate))
        rates_b = np.array(
            [rate.magnitude for _ in range(n)]) * rate.units
    else:
        rates_b = np.array(rate).flatten() * rate.units
        if not all(rates_b >= 0. * Hz):
            raise ValueError('*rate* must have non-negative elements')
    # Check: rate>=rate_c
    if np.any(rates_b < rate_c):
        raise ValueError('all elements of *rate* must be >= *rate_c*')
    # Check min_delay < 1./rate_c
    if not (rate_c == 0 * Hz or min_delay < 1. / rate_c):
        raise ValueError(
            "'*min_delay* (%s) must be lower than 1/*rate_c* (%s)." %
            (str(min_delay), str((1. / rate_c).rescale(min_delay.units))))
    # Generate the n Poisson processes there are the basis for the SIP
    # (coincidences still lacking)
    embedded_poisson_trains = _n_poisson(
        rate=rates_b - rate_c, t_stop=t_stop, t_start=t_start)
    # Convert the trains from neo SpikeTrain objects to simpler Quantity
    # objects
    embedded_poisson_trains = [
        emb.view(Quantity) for emb in embedded_poisson_trains]
    # Generate the array of times for coincident events in SIP, not closer than
    # min_delay. The array is generated as a quantity from the Quantity class
    # in the quantities module
    if coincidences == 'deterministic':
        # Fixed number of coincidences; redraw until min_delay is honored.
        Nr_coinc = int(((t_stop - t_start) * rate_c).rescale(dimensionless))
        while True:
            coinc_times = t_start + \
                np.sort(np.random.random(Nr_coinc)) * (t_stop - t_start)
            if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
                break
    elif coincidences == 'stochastic':
        # Poisson-distributed number of coincidences; redraw until
        # min_delay is honored.
        while True:
            coinc_times = homogeneous_poisson_process(
                rate=rate_c, t_stop=t_stop, t_start=t_start)
            if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
                break
        # Convert coinc_times from a neo SpikeTrain object to a Quantity object
        # pq.Quantity(coinc_times.base)*coinc_times.units
        coinc_times = coinc_times.view(Quantity)
    # Set the coincidence times to T-jitter if larger. This ensures that
    # the last jittered spike time is <T
    for i in range(len(coinc_times)):
        if coinc_times[i] > t_stop - jitter:
            coinc_times[i] = t_stop - jitter
    # Replicate coinc_times n times, and jitter each event in each array by
    # +/- jitter (within (t_start, t_stop))
    embedded_coinc = coinc_times + \
        np.random.random(
            (len(rates_b), len(coinc_times))) * 2 * jitter - jitter
    embedded_coinc = embedded_coinc + \
        (t_start - embedded_coinc) * (embedded_coinc < t_start) - \
        (t_stop - embedded_coinc) * (embedded_coinc > t_stop)
    # Inject coincident events into the n SIP processes generated above, and
    # merge with the n independent processes
    sip_process = [
        np.sort(np.concatenate((
            embedded_poisson_trains[m].rescale(t_stop.units),
            embedded_coinc[m].rescale(t_stop.units))) * t_stop.units)
        for m in range(len(rates_b))]
    # Convert back sip_process and coinc_times from Quantity objects to
    # neo.SpikeTrain objects
    sip_process = [
        SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
        for t in sip_process]
    coinc_times = [
        SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
        for t in embedded_coinc]
    # Return the processes in the specified output_format
    if not return_coinc:
        output = sip_process
    else:
        output = sip_process, coinc_times
    return output
def _pool_two_spiketrains(a, b, extremes='inner'):
    """
    Pool the spikes of two spike trains a and b into a unique spike train.

    Parameters
    ----------
    a, b : neo.SpikeTrains
        Spike trains to be pooled
    extremes: str, optional
        Only spikes of a and b in the specified extremes are considered.
        * 'inner': pool all spikes from max(a.t_start, b.t_start) to
           min(a.t_stop, b.t_stop)
        * 'outer': pool all spikes from min(a.t_start, b.t_start) to
           max(a.t_stop, b.t_stop)
        Default: 'inner'

    Output
    ------
    neo.SpikeTrain containing all spikes in a and b falling in the
    specified extremes
    """
    unit = a.units
    # Concatenate the magnitudes with b rescaled to a's unit, then
    # re-attach the unit.
    pooled_times = (list(a.view(Quantity).magnitude) +
                    list(b.rescale(unit).view(Quantity).magnitude)) * unit

    if extremes == 'outer':
        t_start = min(a.t_start, b.t_start)
        t_stop = max(a.t_stop, b.t_stop)
    elif extremes == 'inner':
        t_start = max(a.t_start, b.t_start)
        t_stop = min(a.t_stop, b.t_stop)
        # Keep only spikes strictly inside the common window.
        pooled_times = pooled_times[pooled_times > t_start]
        pooled_times = pooled_times[pooled_times < t_stop]
    else:
        raise ValueError(
            'extremes (%s) can only be "inner" or "outer"' % extremes)

    return SpikeTrain(
        times=sorted(pooled_times.magnitude), units=unit, t_start=t_start,
        t_stop=t_stop)
def _pool_spiketrains(trains, extremes='inner'):
    """
    Pool spikes from any number of spike trains into a unique spike train.

    Parameters
    ----------
    trains: list
        list of spike trains to merge
    extremes: str, optional
        Only spikes in the specified extremes are considered (see
        `_pool_two_spiketrains`): 'inner' restricts to the common time
        window, 'outer' spans the union of all windows.
        Default: 'inner'

    Output
    ------
    neo.SpikeTrain containing all spikes in trains falling in the
    specified extremes
    """
    # Fold the list pairwise through _pool_two_spiketrains.
    pooled = trains[0]
    for train in trains[1:]:
        pooled = _pool_two_spiketrains(pooled, train, extremes=extremes)
    t_start, t_stop = pooled.t_start, pooled.t_stop
    spike_times = np.squeeze(sorted(pooled))
    return SpikeTrain(
        spike_times, t_stop=t_stop, t_start=t_start, units=trains[0].units)
def _sample_int_from_pdf(a, n):
"""
Draw n independent samples from the set {0,1,...,L}, where L=len(a)-1,
according to the probability distribution a.
a[j] is the probability to sample j, for each j from 0 to L.
Parameters
-----
a: numpy.array
Probability vector (i..e array of sum 1) that at each entry j carries
the probability to sample j (j=0,1,...,len(a)-1).
n: int
Number of samples generated with the function
Output
-------
array of n samples taking values between 0 and n=len(a)-1.
"""
A = np.cumsum(a) # cumulative distribution of a
u = np.random.uniform(0, 1, size=n)
U = np.array([u for i in a]).T # copy u (as column vector) len(a) times
return (A < U).sum(axis=1)
def _mother_proc_cpp_stat(A, t_stop, rate, t_start=0 * ms):
    """
    Generate the hidden ("mother") Poisson process for a Compound Poisson
    Process (CPP).

    Parameters
    ----------
    A : numpy.array
        Amplitude distribution. A[j] represents the probability of a
        synchronous event of size j.
        The sum over all entries of A must be equal to one.
    t_stop : quantities.Quantity
        The stopping time of the mother process
    rate : quantities.Quantity
        Homogeneous rate of the n spike trains that will be generated by the
        CPP function
    t_start : quantities.Quantity, optional
        The starting time of the mother process
        Default: 0 ms

    Output
    ------
    Poisson spike train representing the mother process generating the CPP
    """
    max_amplitude = len(A) - 1
    expected_amplitude = np.dot(A, range(max_amplitude + 1))
    # Each mother spike is copied to `expected_amplitude` trains on
    # average, so scale the mother rate to give each output train `rate`.
    mother_rate = (max_amplitude * rate) / float(expected_amplitude)
    return homogeneous_poisson_process(
        rate=mother_rate, t_stop=t_stop, t_start=t_start)
def _cpp_hom_stat(A, t_stop, rate, t_start=0 * ms):
    """
    Generate a Compound Poisson Process (CPP) with amplitude distribution
    A and a single homogeneous firing rate for every output spike train.
    (The original docstring said "heterogeneous firing rates", evidently
    copied from _cpp_het_stat.)

    Parameters
    ----------
    A : numpy.ndarray
        Amplitude distribution. A[j] represents the probability of a
        synchronous event of size j.
        The sum over all entries of A must be equal to one.
    t_stop : quantities.Quantity
        The end time of the output spike trains
    rate : quantities.Quantity
        Average rate of each spike train generated
    t_start : quantities.Quantity, optional
        The start time of the output spike trains
        Default: 0 ms

    Output
    ------
    List of n neo.SpikeTrains, having average firing rate r and correlated
    such to form a CPP with amplitude distribution A
    """
    # Generate mother process and associated spike labels
    mother = _mother_proc_cpp_stat(
        A=A, t_stop=t_stop, rate=rate, t_start=t_start)
    labels = _sample_int_from_pdf(A, len(mother))

    N = len(A) - 1  # Number of trains in output

    try:  # Faster but more memory-consuming approach
        M = len(mother)  # number of spikes in the mother process
        spike_matrix = np.zeros((N, M), dtype=bool)
        # for each spike, take its label l
        for spike_id, l in enumerate(labels):
            # choose l random trains
            train_ids = random.sample(range(N), l)
            # and set the spike matrix for that train
            for train_id in train_ids:
                spike_matrix[train_id, spike_id] = True  # and spike to True
        times = [[] for i in range(N)]
        for train_id, row in enumerate(spike_matrix):
            times[train_id] = mother[row].view(Quantity)

    except MemoryError:  # Slower (~2x) but less memory-consuming approach
        # (A leftover debug print() was removed from this branch.)
        times = [[] for i in range(N)]
        for t, l in zip(mother, labels):
            train_ids = random.sample(range(N), l)
            for train_id in train_ids:
                times[train_id].append(t)

    trains = [SpikeTrain(
        times=t, t_start=t_start, t_stop=t_stop) for t in times]

    return trains
def _cpp_het_stat(A, t_stop, rate, t_start=0. * ms):
    """
    Generate a Compound Poisson Process (CPP) with amplitude distribution
    A and heterogeneous firing rates r=r[0], r[1], ..., r[-1].

    Parameters
    ----------
    A : array
        CPP's amplitude distribution. A[j] represents the probability of
        a synchronous event of size j among the generated spike trains.
        The sum over all entries of A must be equal to one.
    t_stop : Quantity (time)
        The end time of the output spike trains
    rate : Quantity (1/time)
        Average rate of each spike train generated
    t_start : quantities.Quantity, optional
        The start time of the output spike trains
        Default: 0 ms

    Output
    ------
    List of neo.SpikeTrains with different firing rates, forming
    a CPP with amplitude distribution A
    """
    # The heterogeneous CPP is built as the superposition of a correlated
    # CPP with homogeneous rate r_min and independent Poisson processes
    # carrying the per-train rate excess above r_min.
    n_trains = len(rate)
    exp_amplitude = np.dot(A, range(n_trains + 1))
    rate_total = np.sum(rate)
    rate_min = np.min(rate)
    rate_uncorr = rate_total - n_trains * rate_min      # uncorrelated part
    rate_corr = rate_total / float(exp_amplitude) - rate_uncorr
    rate_mother = rate_uncorr + rate_corr               # hidden mother rate

    # Check the analytical constraint for the amplitude distribution
    if A[1] < (rate_uncorr / rate_mother).rescale(dimensionless).magnitude:
        raise ValueError('A[1] too small / A[i], i>1 too high')

    # Amplitude distribution of the correlated CPP, then generate it
    amp_corr = [(rate_mother * p) / float(rate_corr) for p in A]
    amp_corr[1] = amp_corr[1] - rate_uncorr / float(rate_corr)
    corr_trains = _cpp_hom_stat(amp_corr, t_stop, rate_min, t_start)

    # Independent heterogeneous Poisson processes for the rate excess
    indep_trains = [
        homogeneous_poisson_process(r - rate_min, t_start, t_stop)
        for r in rate]

    # Pool the correlated CPP with the matching independent process
    return [
        _pool_two_spiketrains(corr_trains[i], indep_trains[i])
        for i in range(n_trains)]
def compound_poisson_process(rate, A, t_stop, shift=None, t_start=0 * ms):
    """
    Generate a Compound Poisson Process (CPP; see [1]) with a given amplitude
    distribution A and stationary marginal rates r.

    The CPP process is a model for parallel, correlated processes with Poisson
    spiking statistics at pre-defined firing rates. It is composed of len(A)-1
    spike trains with a correlation structure determined by the amplitude
    distribution A: A[j] is the probability that a spike occurs synchronously
    in any j spike trains.

    The CPP is generated by creating a hidden mother Poisson process, and then
    copying spikes of the mother process to j of the output spike trains with
    probability A[j].

    Note that this function decorrelates the firing rate of each SpikeTrain
    from the probability for that SpikeTrain to participate in a synchronous
    event (which is uniform across SpikeTrains).

    Parameters
    ----------
    rate : quantities.Quantity
        Average rate of each spike train generated. Can be:
        - a single value, all spike trains will have same rate rate
        - an array of values (of length len(A)-1), each indicating the
          firing rate of one process in output
    A : array
        CPP's amplitude distribution. A[j] represents the probability of
        a synchronous event of size j among the generated spike trains.
        The sum over all entries of A must be equal to one.
    t_stop : quantities.Quantity
        The end time of the output spike trains.
    shift : None or quantities.Quantity, optional
        If None, the injected synchrony is exact. If shift is a Quantity, all
        the spike trains are shifted independently by a random amount in
        the interval [-shift, +shift].
        Default: None
    t_start : quantities.Quantity, optional
        The t_start time of the output spike trains.
        Default: 0 s

    Returns
    -------
    List of neo.SpikeTrains
        SpikeTrains with specified firing rates forming the CPP with amplitude
        distribution A.

    References
    ----------
    [1] Staude, Rotter, Gruen (2010) J Comput Neurosci 29:327-350.
    """
    # A must be a probability distribution: sums to one, non-negative.
    if abs(sum(A) - 1) > np.finfo('float').eps:
        raise ValueError(
            'A must be a probability vector, sum(A)= %f !=1' % (sum(A)))
    if any(a < 0 for a in A):
        raise ValueError(
            'A must be a probability vector, all the elements of must be >0')
    # Reject an empty rate array.
    if rate.ndim == 1 and len(rate.magnitude) == 0:
        raise ValueError('Rate is an empty Quantity array')
    # Degenerate cases (all-silent trains) short-circuit to empty trains.
    if A[0] == 1 or np.sum(np.abs(rate.magnitude)) == 0:
        return [
            SpikeTrain([] * t_stop.units, t_stop=t_stop,
                       t_start=t_start) for i in range(len(A) - 1)]
    # Dispatch on homogeneous (scalar rate) vs heterogeneous (array rate).
    if rate.ndim == 0:
        cpp = _cpp_hom_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
    else:
        cpp = _cpp_het_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
    if shift is None:
        return cpp
    # Dither each output spiketrain independently by up to +/- shift.
    return [
        dither_spike_train(cp, shift=shift, edges=True)[0] for cp in cpp]

# Alias for the compound poisson process
cpp = compound_poisson_process
| |
from nutils.testing import *
import nutils.types
import inspect, pickle, itertools, ctypes, stringly, tempfile, io, os
import numpy
class apply_annotations(TestCase):
  '''Behavioral tests for ``nutils.types.apply_annotations``: annotated
  parameters are coerced via their annotation, unannotated ones pass
  through untouched.'''

  def test_without_annotations(self):
    @nutils.types.apply_annotations
    def f(a, b):
      return a, b
    self.assertEqual(f(1, 2), (1, 2))

  def test_pos_or_kw(self):
    @nutils.types.apply_annotations
    def f(a:int, b, c:str):
      return a, b, c
    # Only the annotated parameters `a` and `c` are converted.
    self.assertEqual(f(1, 2, 3), (1, 2, '3'))

  def test_with_signature(self):
    def f(a):
      return a
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])
    f = nutils.types.apply_annotations(f)
    self.assertEqual(f(1), '1')

  def test_posonly(self):
    def f(a):
      return a
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])
    f = nutils.types.apply_annotations(f)
    self.assertEqual(f(1), '1')

  def test_kwonly(self):
    @nutils.types.apply_annotations
    def f(a:str, *, b:int, c:bool):
      return a, b, c
    self.assertEqual(f(1, b='2', c=3), ('1', 2, True))

  def test_varpos(self):
    @nutils.types.apply_annotations
    def f(a:str, *args):
      return a, args
    self.assertEqual(f(1, 2, 3), ('1', (2, 3)))

  def test_varpos_annotated(self):
    # A *args annotation converts the whole argument tuple at once.
    map_str = lambda args: map(str, args)
    @nutils.types.apply_annotations
    def f(a:str, *args:map_str):
      return a, args
    self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))

  def test_varkw(self):
    @nutils.types.apply_annotations
    def f(a:str, **kwargs):
      return a, kwargs
    self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))

  def test_varkw_annotated(self):
    # A **kwargs annotation converts the whole keyword dict at once.
    map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}
    @nutils.types.apply_annotations
    def f(a:str, **kwargs:map_str):
      return a, kwargs
    self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))

  def test_posonly_varkw(self):
    def f(a, b, **c):
      return a, b, c
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),
                                         inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),
                                         inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])
    f = nutils.types.apply_annotations(f)
    # A None value for `b`, whether defaulted or explicit, is preserved.
    for result in (f(1, c=2, d=3), f(1, None, c=2, d=3), f(1, b=None, c=2, d=3)):
      self.assertEqual(result, ('1', None, dict(c=2, d=3)))
    self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))

  def test_default_none(self):
    @nutils.types.apply_annotations
    def f(a:str=None):
      return a
    self.assertEqual(f(), None)
    self.assertEqual(f(None), None)
    self.assertEqual(f(1), '1')
class nutils_hash(TestCase):
  """Regression tests for ``nutils.types.nutils_hash``.

  Each test pins the digest of a supported value (or type) to a fixed hex
  string, so any change to the hashing scheme is detected immediately.
  """
  class custom:
    # An object opts in to hashing by exposing a 20-byte __nutils_hash__.
    @property
    def __nutils_hash__(self):
      return b'01234567890123456789'
    def f(self):
      pass
  def test_ellipsis(self):
    self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')
  def test_None(self):
    self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')
  def test_bool(self):
    self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')
    self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')
  def test_int(self):
    self.assertEqual(nutils.types.nutils_hash(1).hex(), '00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')
    self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')
  def test_float(self):
    self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')
    self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')
  def test_complex(self):
    self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')
    self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')
  def test_inequality_numbers(self):
    # Numerically equal values of different types must hash differently.
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())
  def test_str(self):
    self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')
    self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')
  def test_bytes(self):
    self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')
    self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')
  def test_tuple(self):
    self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')
    self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')
    # Tuples are order-sensitive.
    self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())
  def test_frozenset(self):
    self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')
    self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')
  def test_type_bool(self):
    self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')
  def test_type_int(self):
    self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')
  def test_type_float(self):
    self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')
  def test_type_complex(self):
    self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')
  def test_type_str(self):
    self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')
  def test_type_bytes(self):
    self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')
  def test_type_tuple(self):
    self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')
  def test_type_frozenset(self):
    self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')
  def test_type_bufferedreader(self):
    # A read-only file is hashed by content without disturbing the file
    # position; a writable file is rejected.
    try:
      fid, path = tempfile.mkstemp()
      os.write(fid, b'test')
      os.close(fid)
      with open(path, 'rb') as f:
        f.seek(2)
        self.assertEqual(nutils.types.nutils_hash(f).hex(), '4edef1af3aa845b9e8bbde2d8265be5f30be4c2a')
        self.assertEqual(f.tell(), 2)
      with open(path, 'rb+') as f, self.assertRaises(TypeError):
        nutils.types.nutils_hash(f).hex()
    finally:
      os.unlink(path)
  def test_type_boundmethod(self):
    self.assertEqual(nutils.types.nutils_hash(self.custom().f).hex(), 'ebf7084bb2504922235ab035a9197b9cb4cf47af')
  def test_custom(self):
    # __nutils_hash__ is taken verbatim as the digest.
    self.assertEqual(nutils.types.nutils_hash(self.custom()).hex(), b'01234567890123456789'.hex())
  def test_unhashable(self):
    with self.assertRaises(TypeError):
      nutils.types.nutils_hash([])
class CacheMeta(TestCase):
  """Tests for the ``nutils.types.CacheMeta`` metaclass.

  ``CacheMeta`` memoizes the properties and methods named in ``__cache__``.
  Each scenario is exercised with and without ``__slots__``, and uses a
  ``nonlocal`` call counter to observe whether the cached body re-executes.
  """
  def test_property(self):
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          @property
          def x(self):
            nonlocal ncalls
            ncalls += 1
            return 1
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        # First access computes, second access hits the cache.
        self.assertEqual(t.x, 1)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x, 1)
        self.assertEqual(ncalls, 1)
  def test_set_property(self):
    # Cached properties stay read-only.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1
    t = T()
    with self.assertRaises(AttributeError):
      t.x = 1
  def test_del_property(self):
    # Cached properties cannot be deleted either.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1
    t = T()
    with self.assertRaises(AttributeError):
      del t.x
  def test_method_without_args(self):
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self):
            nonlocal ncalls
            ncalls += 1
            return 1
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(), 1)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(), 1)
        self.assertEqual(ncalls, 1)
  def test_method_with_args(self):
    # Only the most recent argument combination appears to be cached:
    # repeating an older combination recomputes.
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self, a, b):
            nonlocal ncalls
            ncalls += 1
            return a + b
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 1)
        # Same values by keyword count as a cache hit.
        self.assertEqual(t.x(a=1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(2, 2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=2, b=2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 3)
  def test_method_with_args_and_preprocessors(self):
    # apply_annotations normalizes arguments before the cache key is built,
    # so '1' and 1 land on the same cache entry.
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          @nutils.types.apply_annotations
          def x(self, a:int, b:int):
            nonlocal ncalls
            ncalls += 1
            return a + b
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(a='1', b='2'), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x('2', '2'), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=2, b=2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x('1', 2), 3)
        self.assertEqual(ncalls, 3)
  def test_method_with_kwargs(self):
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self, a, **kwargs):
            nonlocal ncalls
            ncalls += 1
            return a + sum(kwargs.values())
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(a=1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(1, b=2, c=3), 6)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=1, b=2, c=3), 6)
        self.assertEqual(ncalls, 2)
  def test_subclass_redefined_property(self):
    # A subclass may re-cache a redefined property; super() still reaches
    # the base implementation, and access order must not matter.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1
    class U(T):
      __cache__ = 'x',
      @property
      def x(self):
        return super().x + 1
      @property
      def y(self):
        return super().x
    u1 = U()
    self.assertEqual(u1.x, 2)
    self.assertEqual(u1.y, 1)
    u2 = U()
    self.assertEqual(u2.y, 1)
    self.assertEqual(u2.x, 2)
  def test_missing_attribute(self):
    with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):
      class T(metaclass=nutils.types.CacheMeta):
        __cache__ = 'x',
  def test_invalid_attribute(self):
    with self.assertRaisesRegex(TypeError, "Don't know how to cache attribute x: None"):
      class T(metaclass=nutils.types.CacheMeta):
        __cache__ = 'x',
        x = None
  def test_name_mangling(self):
    # Private names in __cache__ must survive Python's name mangling
    # ('__x' -> '_T__x') when accessed from inside the class body.
    for withslots in False, True:
      with self.subTest(withslots=withslots):
        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = '__x',
          @property
          def __x(self):
            nonlocal ncalls
            ncalls += 1
            return 1
          @property
          def y(self):
            return self.__x
        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.y, 1)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.y, 1)
        self.assertEqual(ncalls, 1)
class strictint(TestCase):
  """``nutils.types.strictint`` accepts exact integers (including numpy
  integer scalars, normalized to builtin ``int``) and rejects everything
  else with a ``ValueError``."""
  def test_int(self):
    result = nutils.types.strictint(1)
    self.assertEqual(result, 1)
    self.assertIs(type(result), int)
  def test_numpy_int(self):
    result = nutils.types.strictint(numpy.int64(1))
    self.assertEqual(result, 1)
    self.assertIs(type(result), int)
  def test_float(self):
    with self.assertRaises(ValueError):
      nutils.types.strictint(1.)
  def test_numpy_float(self):
    with self.assertRaises(ValueError):
      nutils.types.strictint(numpy.float64(1.))
  def test_complex(self):
    with self.assertRaises(ValueError):
      nutils.types.strictint(1+0j)
  def test_str(self):
    with self.assertRaises(ValueError):
      nutils.types.strictint('1')
class strictfloat(TestCase):
  """``nutils.types.strictfloat`` accepts ints and floats (including numpy
  scalars, normalized to builtin ``float``) and rejects lossy inputs such
  as complex numbers and strings with a ``ValueError``."""
  def test_int(self):
    value = nutils.types.strictfloat(1)
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)
  def test_numpy_int(self):
    value = nutils.types.strictfloat(numpy.int64(1))
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)
  def test_float(self):
    value = nutils.types.strictfloat(1.)
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)
  def test_numpy_float(self):
    value = nutils.types.strictfloat(numpy.float64(1.))
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)
  def test_complex(self):
    # BUG FIX: this previously called strictint, leaving strictfloat's
    # complex-rejection untested.
    with self.assertRaises(ValueError):
      nutils.types.strictfloat(1+0j)
  def test_str(self):
    with self.assertRaises(ValueError):
      nutils.types.strictfloat('1.')
class strictstr(TestCase):
  """``nutils.types.strictstr`` passes strings through unchanged and
  rejects any non-string with a ``ValueError``."""
  def test_str(self):
    result = nutils.types.strictstr('spam')
    self.assertEqual(result, 'spam')
    self.assertIs(type(result), str)
  def test_int(self):
    with self.assertRaises(ValueError):
      nutils.types.strictstr(1)
class strict(TestCase):
  """``nutils.types.strict[T]`` builds a validator that passes instances of
  ``T`` through and raises ``ValueError`` otherwise; ``strict`` itself is
  not callable."""
  def test_valid(self):
    validator = nutils.types.strict[int]
    self.assertEqual(validator(1), 1)
  def test_invalid(self):
    validator = nutils.types.strict[int]
    with self.assertRaises(ValueError):
      validator('1')
  def test_call(self):
    # Calling the unsubscripted factory is a TypeError.
    with self.assertRaises(TypeError):
      nutils.types.strict()
class tupletype(TestCase):
  """``nutils.types.tuple[item]`` converts an iterable to a tuple, applying
  the item constructor to every element."""
  def test_valid1(self):
    result = nutils.types.tuple[nutils.types.strictint]([])
    self.assertEqual(result, ())
    self.assertIs(type(result), tuple)
  def test_valid2(self):
    result = nutils.types.tuple[nutils.types.strictint]([1,2,3])
    self.assertEqual(result, (1,2,3))
    self.assertIs(type(result), tuple)
  def test_invalid(self):
    # Any element rejected by the item constructor fails the whole tuple.
    with self.assertRaises(ValueError):
      nutils.types.tuple[nutils.types.strictint]([1, 'spam','eggs'])
  def test_without_item_constructor(self):
    # Without a subscript the behaviour matches builtin tuple().
    items = 1,2,3
    self.assertEqual(nutils.types.tuple(items), tuple(items))
  def test_name(self):
    expected = 'tuple[nutils.types.strictint]'
    self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, expected)
class frozendict(TestCase):
  """Tests for :class:`nutils.types.frozendict`, an immutable, hashable
  mapping that supports item-wise conversion via ``frozendict[K, V]`` and
  deduplicates its backing storage on equality comparison."""
  def test_constructor(self):
    src = {'spam': 1, 'eggs': 2.3}
    for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:
      with self.subTest(name):
        frozen = nutils.types.frozendict(value)
        self.assertIsInstance(frozen, nutils.types.frozendict)
        self.assertEqual(dict(frozen), src)
  def test_constructor_invalid(self):
    with self.assertRaises(ValueError):
      nutils.types.frozendict(['spam', 'eggs', 1])
  def test_clsgetitem(self):
    # frozendict[K, V] converts both keys and values on construction.
    T = nutils.types.frozendict[str, float]
    src = {1: 2, 'spam': '2.3'}
    for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:
      with self.subTest(name):
        frozen = T(value)
        self.assertIsInstance(frozen, nutils.types.frozendict)
        self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})
  def test_clsgetitem_invalid_types(self):
    # Exactly two type arguments (key, value) are required.
    with self.assertRaises(RuntimeError):
      nutils.types.frozendict[str, float, bool]
  def test_clsgetitem_invalid_value(self):
    T = nutils.types.frozendict[str, float]
    with self.assertRaises(ValueError):
      T(1)
  def test_setitem(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(TypeError):
      frozen['eggs'] = 3
  def test_delitem(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(TypeError):
      del frozen['eggs']
  def test_getitem_existing(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertEqual(frozen['spam'], 1)
  def test_getitem_nonexisting(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(KeyError):
      frozen['foo']
  def test_contains(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertIn('spam', frozen)
    self.assertNotIn('foo', frozen)
  def test_iter(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = nutils.types.frozendict(src)
    self.assertEqual(frozenset(frozen), frozenset(src))
  def test_len(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = nutils.types.frozendict(src)
    self.assertEqual(len(frozen), len(src))
  def test_hash(self):
    src = {'spam': 1, 'eggs': 2.3}
    self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))
  def test_copy(self):
    # copy() yields a plain mutable dict.
    src = {'spam': 1, 'eggs': 2.3}
    copy = nutils.types.frozendict(src).copy()
    self.assertIsInstance(copy, dict)
    self.assertEqual(copy, src)
  def test_pickle(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))
    self.assertIsInstance(frozen, nutils.types.frozendict)
    self.assertEqual(dict(frozen), src)
  def test_eq_same_id(self):
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    self.assertEqual(a, a)
  def test_eq_other_id(self):
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    b = nutils.types.frozendict(src)
    self.assertEqual(a, b)
  def test_eq_deduplicated(self):
    # Comparison deduplicates the internal storage; equality must still hold
    # afterwards.
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    b = nutils.types.frozendict(src)
    a == b # this replaces `a.__base` with `b.__base`
    self.assertEqual(a, b)
  def test_ineq_frozendict(self):
    src = {'spam': 1, 'eggs': 2.3}
    self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))
  def test_ineq_dict(self):
    # A frozendict never equals a plain dict, even with the same items.
    src = {'spam': 1, 'eggs': 2.3}
    self.assertNotEqual(nutils.types.frozendict(src), src)
  def test_nutils_hash(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')
class frozenmultiset(TestCase):
  """Tests for :class:`nutils.types.frozenmultiset`, an immutable multiset
  that preserves insertion order but compares, hashes and combines
  order-insensitively."""
  def test_constructor(self):
    # FIX: the loop over 'spam', 'bacon', 'sausage' never used its loop
    # variable and repeated the identical assertion; a single assertion
    # covers the same multiplicity check.
    src = 'spam', 'bacon', 'sausage', 'spam'
    for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:
      with self.subTest(name=name):
        frozen = nutils.types.frozenmultiset(value)
        self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})
  def test_clsgetitem(self):
    # frozenmultiset[item] converts every element on construction.
    src = False, 1, numpy.int64(2)
    frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)
    self.assertEqual(set(frozen), {0, 1, 2})
  def test_preserve_order(self):
    for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:
      with self.subTest(src=src):
        self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)
  def test_and(self):
    # Intersection takes the minimum multiplicity of each element and is
    # commutative.
    for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],
                      [['spam'], ['eggs'], []],
                      [['spam','spam']]*3]:
      with self.subTest(l=l, r=r, lar=lar):
        self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))
      with self.subTest(l=r, r=l, lar=lar):
        self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))
  def test_sub(self):
    # Difference subtracts multiplicities, clamping at zero.
    for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],
                           [['spam'], ['eggs'], ['spam'], ['eggs']],
                           [['spam'], ['spam'], [], []]]:
      with self.subTest(l=l, r=r, lmr=lmr):
        self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))
      with self.subTest(l=r, r=l, lmr=rml):
        self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))
  def test_pickle(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))
    self.assertIsInstance(frozen, nutils.types.frozenmultiset)
    self.assertEqual(frozen, nutils.types.frozenmultiset(src))
  def test_hash(self):
    # The hash must be independent of element order.
    src = 'spam', 'bacon', 'sausage', 'spam'
    ref = nutils.types.frozenmultiset(src)
    for perm in itertools.permutations(src):
      with self.subTest(perm=perm):
        self.assertEqual(hash(nutils.types.frozenmultiset(src)), hash(ref))
  def test_nutils_hash(self):
    # The nutils hash must likewise be order-independent.
    for perm in itertools.permutations(('spam', 'bacon', 'sausage', 'spam')):
      with self.subTest(perm=perm):
        frozen = nutils.types.frozenmultiset(perm)
        self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')
  def test_eq(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    ref = nutils.types.frozenmultiset(src)
    for perm in itertools.permutations(src):
      with self.subTest(perm=perm):
        self.assertEqual(nutils.types.frozenmultiset(src), ref)
  def test_contains(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = nutils.types.frozenmultiset(src)
    for item in 'spam', 'bacon', 'eggs':
      with self.subTest(item=item):
        if item in src:
          self.assertIn(item, frozen)
        else:
          self.assertNotIn(item, frozen)
  def test_len(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = nutils.types.frozenmultiset(src)
    self.assertEqual(len(frozen), len(src))
  def test_nonzero(self):
    self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))
    self.assertFalse(nutils.types.frozenmultiset([]))
  def test_add(self):
    # Addition concatenates multiplicities.
    l = nutils.types.frozenmultiset(['spam', 'bacon'])
    r = nutils.types.frozenmultiset(['sausage', 'spam'])
    lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])
    self.assertEqual(l+r, lpr)
  def test_isdisjoint(self):
    for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],
                           [['spam'], ['eggs'], True],
                           [['spam'], ['spam'], False]]:
      with self.subTest(l=l, r=r, disjoint=disjoint):
        self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)
class frozenarray(TestCase):
  """Tests for :class:`nutils.types.frozenarray`, an immutable, hashable,
  totally ordered wrapper around a numpy array with strict-dtype
  construction rules (upcasts allowed, silent downcasts forbidden for the
  strict dtypes)."""
  def _test_constructor(self, src, frozen_dtype, src_types=(list,numpy.array,nutils.types.frozenarray)):
    # Helper: construction must succeed for every (copy, source-type) combo
    # and round-trip the original values.
    src = list(src)
    for copy in True, False:
      for src_type in src_types:
        with self.subTest(copy=copy, src_type=src_type):
          frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
          self.assertIsInstance(frozen, nutils.types.frozenarray)
          self.assertEqual(frozen.tolist(), src)
  def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):
    # Helper: construction must fail identically for every combo.
    src = list(src)
    for copy in True, False:
      for src_type in list, numpy.array, nutils.types.frozenarray:
        with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):
          nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
  def test_constructor_bool(self):
    self._test_constructor((False, True), bool)
  def test_constructor_bool_emptyarray(self):
    self._test_constructor((), bool, src_types=[list])
  def test_constructor_int(self):
    self._test_constructor((0,1), int)
  def test_constructor_int_upcast(self):
    self._test_constructor((False,True), int)
  def test_constructor_int_downcast(self):
    # Plain int (unlike strictint) permits float -> int downcast.
    self._test_constructor((0.,1.), int)
  def test_constructor_int_emptyarray(self):
    self._test_constructor((), int, src_types=[list])
  def test_constructor_float(self):
    self._test_constructor((0.,1.), float)
  def test_constructor_float_upcast(self):
    self._test_constructor((0,1), float)
  def test_constructor_float_downcast(self):
    # complex -> float: TypeError for list input, ComplexWarning for arrays.
    src = [0.+0j,1.+0j]
    for copy in True, False:
      with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):
        nutils.types.frozenarray(src, copy=copy, dtype=float)
      for src_type in numpy.array, nutils.types.frozenarray:
        with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):
          nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)
  def test_constructor_complex(self):
    self._test_constructor((0+0j,1+1j), complex)
  def test_constructor_strictint(self):
    self._test_constructor((0,1), nutils.types.strictint)
  def test_constructor_strictint_upcast(self):
    self._test_constructor((False,True), nutils.types.strictint)
  def test_constructor_strictint_downcast(self):
    self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')
  def test_constructor_strictint_emptyarray(self):
    self._test_constructor((), nutils.types.strictint, src_types=[list])
  def test_constructor_strictfloat(self):
    self._test_constructor((0.,1.), nutils.types.strictfloat)
  def test_constructor_strictfloat_upcast(self):
    self._test_constructor((0,1), nutils.types.strictfloat)
  def test_constructor_strictfloat_downcast(self):
    self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')
  def test_constructor_invalid_dtype(self):
    self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')
  def test_clsgetitem(self):
    src = [0.,1.]
    frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)
    self.assertIsInstance(frozen, nutils.types.frozenarray)
    self.assertEqual(frozen.tolist(), src)
  def test_clsgetitem_invalid(self):
    src = [0.,1.]
    with self.assertRaises(ValueError):
      nutils.types.frozenarray[nutils.types.strictint](src)
  def test_nutils_hash(self):
    # Note: b.T has the same memory layout as a, hence the same hash.
    a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))
    b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))
    self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())
    self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())
    self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')
    self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')
  def test_pickle(self):
    src = [[1,2],[3,4]]
    value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))
    self.assertIsInstance(value, nutils.types.frozenarray)
    self.assertEqual(value, nutils.types.frozenarray(src))
  def test_eq_same_instance(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertEqual(a, a)
  def test_eq_not_frozenarray(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertNotEqual(a, [[1,2],[3,4]])
  def test_eq_same_base(self):
    base = numpy.array([[1,2],[3,4]], int)
    a = nutils.types.frozenarray(base, copy=False)
    b = nutils.types.frozenarray(base, copy=False)
    self.assertEqual(a, b)
  def test_eq_different_array(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,3],[2,4]], int)
    self.assertNotEqual(a, b)
  def test_eq_different_dtype(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,2],[3,4]], float)
    self.assertNotEqual(a, b)
  def test_eq_different_base(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertEqual(a, b)
  def test_ineq_equal(self):
    # Ordering comparisons are total, unlike numpy's elementwise ones.
    l = nutils.types.frozenarray([1,2], int)
    r = nutils.types.frozenarray([1,2], int)
    self.assertFalse(l < r)
    self.assertTrue(l <= r)
    self.assertFalse(l > r)
    self.assertTrue(l >= r)
  def test_ineq_smaller(self):
    l = nutils.types.frozenarray([1,2], int)
    r = nutils.types.frozenarray([2,1], int)
    self.assertTrue(l < r)
    self.assertTrue(l <= r)
    self.assertFalse(l > r)
    self.assertFalse(l >= r)
  def test_ineq_larger(self):
    l = nutils.types.frozenarray([2,1], int)
    r = nutils.types.frozenarray([1,2], int)
    self.assertFalse(l < r)
    self.assertFalse(l <= r)
    self.assertTrue(l > r)
    self.assertTrue(l >= r)
  def test_ineq_incomparable(self):
    # Ordering against a non-frozenarray is rejected outright.
    array = nutils.types.frozenarray([1,2], int)
    for op in operator.lt, operator.le, operator.gt, operator.ge:
      with self.subTest(op=op), self.assertRaises(TypeError):
        op(array, 1)
  def test_full(self):
    self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))
  def test_as_numpy_array(self):
    a = numpy.array(nutils.types.frozenarray([1,2]))
    self.assertIsInstance(a, numpy.ndarray)
class c_array(TestCase):
  """Tests for ``nutils.types.c_array[dtype]``, which converts array-likes
  into ctypes-compatible views of contiguous data of the given dtype."""
  def test_idempotence(self):
    # Converting an already-converted value is a no-op.
    a = numpy.array([1,2,3], dtype=numpy.int64)
    P = nutils.types.c_array[numpy.int64]
    a_ct = P(a)
    self.assertEqual(P(a_ct), a_ct)
  def test_list(self):
    a = [1,2,3]
    a_ct = nutils.types.c_array[numpy.int64](a)
    self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
  def test_array(self):
    a = numpy.array([1,2,3], dtype=numpy.int64)
    a_ct = nutils.types.c_array[numpy.int64](a)
    self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
  def test_array_invalid_dtype(self):
    a = numpy.array([1,2,3], dtype=numpy.int32)
    with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\.$'):
      a_ct = nutils.types.c_array[numpy.int64](a)
  def test_array_noncontinguous(self):
    # NOTE(review): method name carries a typo ('noncontinguous'). The input
    # is also int32 while the target dtype is int64 — presumably the
    # contiguity check fires before the dtype check; confirm against
    # nutils.types.c_array before relying on that ordering.
    a = numpy.array([[1,2],[3,4]], dtype=numpy.int32).T
    with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\.$'):
      a_ct = nutils.types.c_array[numpy.int64](a)
  def test_wo_getitem(self):
    # The factory must be subscripted with a dtype before it can be called.
    with self.assertRaises(TypeError):
      nutils.types.c_array()
# Module-level subclass: must be importable by name so pickling round-trips.
class T_Immutable(nutils.types.Immutable):
  def __init__(self, x, y, *, z):
    pass
# Module-level subclass: must be importable by name so pickling round-trips.
class T_Singleton(nutils.types.Singleton):
  def __init__(self, x, y, *, z):
    pass
@parametrize
class ImmutableFamily(TestCase):
  """Shared tests for ``nutils.types.Immutable`` and ``nutils.types.Singleton``,
  parametrized over ``cls``; Singleton additionally deduplicates instances."""
  def test_pickle(self):
    # Uses the module-level T_* classes because pickle requires importable names.
    T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]
    a = T(1, 2, z=3)
    b = pickle.loads(pickle.dumps(a))
    self.assertEqual(a, b)
  def test_eq(self):
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(T(1, 2), T(1, 2))
    self.assertNotEqual(T(1, 2), T(2, 1))
    # Same constructor arguments but different classes are not equal.
    self.assertNotEqual(T(1, 2), U(1, 2))
  def test_canonical_args(self):
    # Keyword, positional and defaulted spellings of the same arguments
    # canonicalize to the same instance identity.
    class T(self.cls):
      def __init__(self, x, y, z=3):
        pass
    self.assertEqual(T(x=1, y=2), T(1, 2, 3))
  def test_keyword_args(self):
    class T(self.cls):
      def __init__(self, x, y, **kwargs):
        pass
    a = T(x=1, y=2, z=3)
    b = T(1, 2, z=3)
    self.assertEqual(a, b)
  def test_preprocessors(self):
    # apply_annotations normalizes arguments before equality is decided.
    class T(self.cls):
      @nutils.types.apply_annotations
      def __init__(self, x: int):
        pass
    self.assertEqual(T(1), T('1'))
    self.assertEqual(T(1), T(x='1'))
  def test_nutils_hash(self):
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class T1(self.cls, version=1):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())
    # Since the hash does not include base classes, the hashes of Immutable and Singleton are the same.
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')
    self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')
  @parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)
  def test_deduplication(self):
    # Singleton only: equal construction yields the identical object.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    a = T(1, 2)
    b = T(1, 2)
    c = T(2, 1)
    d = U(1, 2)
    self.assertIs(a, b)
    self.assertEqual(a, b)
    self.assertIsNot(a, c)
    self.assertNotEqual(a, c)
    self.assertIsNot(a, d)
    self.assertNotEqual(a, d)
  def test_edit(self):
    # edit() maps every constructor argument through the given function.
    class T(self.cls):
      def __init__(self, x, *, y):
        self.x = x
        self.y = y
    a = T(1, y=2).edit(lambda v: v+1)
    self.assertEqual(a.x, 2)
    self.assertEqual(a.y, 3)
# Instantiate the parametrized suite once per immutable flavour.
ImmutableFamily(cls=nutils.types.Immutable)
ImmutableFamily(cls=nutils.types.Singleton)
class Unit(TestCase):
  """Tests for ``nutils.types.unit``: a dimensional-quantity parser built
  from base units (m, s, g with scale factors) and derived units defined in
  terms of them."""
  def setUp(self):
    self.U = nutils.types.unit(m=1, s=1, g=1e-3,
      Pa='N/m2', N='kg*m/s2', lb='453.59237g', h='3600s', **{'in': '.0254m'})
  def check(self, *args, **powers):
    # Assert value, parsed unit powers, and a loads/dumps round trip.
    s, v = args
    u = self.U(s)
    U = type(u)
    self.assertEqual(u, v)
    self.assertEqual(self.U._parse(s)[1], powers)
    self.assertEqual(stringly.dumps(U, u), s)
    self.assertEqual(stringly.loads(U, s), u)
  def test_length(self):
    self.check('1m', 1, m=1)
    self.check('10in', .254, m=1)
    self.check('10000000000000000m', 1e16, m=1) # str(1e16) has no decimal point
  def test_mass(self):
    self.check('1kg', 1, g=1)
    self.check('1lb', .45359237, g=1)
  def test_time(self):
    self.check('1s', 1, s=1)
    self.check('0.5h', 1800, s=1)
  def test_velocity(self):
    self.check('1m/s', 1, m=1, s=-1)
    self.check('1km/h', 1/3.6, m=1, s=-1)
  def test_force(self):
    self.check('1N', 1, g=1, m=1, s=-2)
  def test_pressure(self):
    self.check('1Pa', 1, g=1, m=-1, s=-2)
  def test_bind(self):
    # U['m'] is bound to a dimension: only length-valued strings parse.
    T = self.U['m']
    self.assertEqual(T.__name__, 'unit:m')
    stringly.loads(T, '2in')
    with self.assertRaises(ValueError):
      stringly.loads(T, '2kg')
  def test_invalid(self):
    with self.assertRaises(ValueError):
      self.U('2foo')
  def test_loads_dumps(self):
    U = self.U['Pa*mm2']
    for s in '123456789Pa*mm2', '12.34Pa*mm2', '0Pa*mm2', '0.000012345Pa*mm2':
      v = stringly.loads(U, s)
      self.assertEqual(s, stringly.dumps(U, v))
    with self.assertRaises(ValueError):
      stringly.dumps(U, 'foo')
  def test_create(self):
    U = nutils.types.unit.create('mytype', m=1)
    self.assertEqual(list(U._units), ['m'])
# vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=indent:foldnestmax=2
| |
#
# Copyright (c) 2013, Digium, Inc.
#
"""Code for handling the base Swagger API model.
"""
import json
import os
import urllib
import urlparse
from swaggerpy.http_client import SynchronousHttpClient
from swaggerpy.processors import SwaggerProcessor, SwaggerError
# Swagger specification versions this library accepts in 'swaggerVersion'.
SWAGGER_VERSIONS = ["1.1", "1.2"]
# Primitive type names permitted by the Swagger 1.x specifications.
SWAGGER_PRIMITIVES = [
    'void',
    'string',
    'boolean',
    'number',
    'int',
    'long',
    'double',
    'float',
    'Date',
]
# noinspection PyDocstring
class ValidationProcessor(SwaggerProcessor):
    """A processor that validates the Swagger model.

    Each ``process_*`` hook checks the required fields for one node of the
    model and raises :class:`SwaggerError` (with positional context) on any
    violation.
    """

    def process_resource_listing(self, resources, context):
        required_fields = ['basePath', 'apis', 'swaggerVersion']
        validate_required_fields(resources, required_fields, context)
        if not resources['swaggerVersion'] in SWAGGER_VERSIONS:
            # FIX: was attribute access (resources.swaggerVersion), which
            # fails on the dict accessed everywhere else in this method.
            raise SwaggerError(
                "Unsupported Swagger version %s" % resources['swaggerVersion'],
                context)

    def process_resource_listing_api(self, resources, listing_api, context):
        validate_required_fields(listing_api, ['path', 'description'], context)
        if not listing_api['path'].startswith("/"):
            raise SwaggerError("Path must start with /", context)

    def process_api_declaration(self, resources, resource, context):
        required_fields = [
            'swaggerVersion', 'basePath', 'resourcePath', 'apis',
            'models'
        ]
        validate_required_fields(resource, required_fields, context)
        # Check model name and id consistency
        for (model_name, model) in resource['models'].items():
            if model_name != model['id']:
                raise SwaggerError("Model id doesn't match name", context)

    def process_resource_api(self, resources, resource, api, context):
        validate_required_fields(api, ['path', 'operations'], context)

    def process_operation(self, resources, resource, api, operation, context):
        validate_required_fields(operation, ['httpMethod', 'nickname'], context)

    def process_parameter(self, resources, resource, api, operation, parameter,
                          context):
        validate_required_fields(parameter, ['name', 'paramType'], context)
        if parameter['paramType'] == 'path':
            # special handling for path parameters: always required strings
            parameter['required'] = True
            parameter['dataType'] = 'string'
        else:
            # dataType is required for non-path parameters
            validate_required_fields(parameter, ['dataType'], context)
        if 'allowedValues' in parameter:
            raise SwaggerError(
                "Field 'allowedValues' invalid; use 'allowableValues'",
                context)

    def process_error_response(self, resources, resource, api, operation,
                               error_response, context):
        validate_required_fields(error_response, ['code', 'reason'], context)

    def process_model(self, resources, resource, model, context):
        validate_required_fields(model, ['id', 'properties'], context)
        # Move property field name into the object
        for (prop_name, prop) in model['properties'].items():
            prop['name'] = prop_name

    def process_property(self, resources, resource, model, prop,
                         context):
        validate_required_fields(prop, ['type'], context)
def json_load_url(http_client, url):
    """Download and parse JSON from a URL.

    :param http_client: HTTP client interface.
    :type  http_client: http_client.HttpClient
    :param url: URL for JSON to parse
    :return: Parsed JSON dict
    """
    if urlparse.urlparse(url).scheme == 'file':
        # requests can't handle file: URLs, so read those through urllib
        handle = urllib.urlopen(url)
        try:
            return json.load(handle)
        finally:
            handle.close()

    response = http_client.request('GET', url)
    response.raise_for_status()
    return response.json()
class Loader(object):
    """Abstraction for loading Swagger API's.

    :param http_client: HTTP client interface.
    :type  http_client: http_client.HttpClient
    :param processors: List of processors to apply to the API.
    :type  processors: list of SwaggerProcessor
    """

    def __init__(self, http_client, processors=None):
        self.http_client = http_client
        extra_processors = [] if processors is None else processors
        # always go through the validation processor first
        # noinspection PyTypeChecker
        self.processors = [ValidationProcessor()] + extra_processors

    def load_resource_listing(self, resources_url, base_url=None):
        """Load a resource listing, loading referenced API declarations.

        The following fields are added to the resource listing object model.
        * ['url'] = URL resource listing was loaded from
        * The ['apis'] array is modified according to load_api_declaration()

        The Loader's processors are applied to the fully loaded resource
        listing.

        :param resources_url: File name for resources.json
        :param base_url: Optional URL to be the base URL for finding API
                         declarations. If not specified, 'basePath' from the
                         resource listing is used.
        """
        listing = json_load_url(self.http_client, resources_url)
        # Record where the listing came from; processors may use it later.
        listing['url'] = resources_url
        effective_base = base_url or listing.get('basePath')

        # Pull in every referenced API declaration before processing.
        for api in listing.get('apis'):
            self.load_api_declaration(effective_base, api)

        # Raw object model is complete; now run the processor chain.
        self.process_resource_listing(listing)
        return listing

    def load_api_declaration(self, base_url, api_dict):
        """Load an API declaration file.

        api_dict is modified with the results of the load:
        * ['url'] = URL api declaration was loaded from
        * ['api_declaration'] = Parsed results of the load

        :param base_url: Base URL to load from
        :param api_dict: api object from resource listing.
        """
        path = api_dict.get('path').replace('{format}', 'json')
        declaration_url = urlparse.urljoin(base_url + '/', path.strip('/'))
        api_dict['url'] = declaration_url
        api_dict['api_declaration'] = json_load_url(
            self.http_client, declaration_url)

    def process_resource_listing(self, resources):
        """Apply processors to a resource listing.

        :param resources: Resource listing to process.
        """
        for current_processor in self.processors:
            current_processor.apply(resources)
def validate_required_fields(json, required_fields, context):
    """Checks a JSON object for a set of required fields.

    If any required field is missing, a SwaggerError is raised.

    :param json: JSON object to check.
    :param required_fields: List of required fields.
    :param context: Current context in the API.
    """
    missing = [field for field in required_fields if field not in json]
    if missing:
        raise SwaggerError(
            "Missing fields: %s" % ', '.join(missing), context)
def load_file(resource_listing_file, http_client=None, processors=None):
    """Loads a resource listing file, applying the given processors.

    :param http_client: HTTP client interface.
    :param resource_listing_file: File name for a resource listing.
    :param processors: List of SwaggerProcessors to apply to the resulting
                       resource.
    :return: Processed object model from
    :raise: IOError: On error reading api-docs.
    """
    file_path = os.path.abspath(resource_listing_file)
    listing_url = urlparse.urljoin('file:', urllib.pathname2url(file_path))
    # When loading from files, everything is relative to the resource listing
    containing_dir = os.path.dirname(file_path)
    base = urlparse.urljoin('file:', urllib.pathname2url(containing_dir))
    return load_url(listing_url, http_client=http_client,
                    processors=processors, base_url=base)
def load_url(resource_listing_url, http_client=None, processors=None,
             base_url=None):
    """Loads a resource listing, applying the given processors.

    :param resource_listing_url: URL for a resource listing.
    :param http_client: HTTP client interface.
    :param processors: List of SwaggerProcessors to apply to the resulting
                       resource.
    :param base_url: Optional URL to be the base URL for finding API
                     declarations. If not specified, 'basePath' from the
                     resource listing is used.
    :return: Processed object model from
    :raise: IOError, URLError: On error reading api-docs.
    """
    client = SynchronousHttpClient() if http_client is None else http_client
    loader = Loader(http_client=client, processors=processors)
    return loader.load_resource_listing(resource_listing_url,
                                        base_url=base_url)
def load_json(resource_listing, http_client=None, processors=None):
    """Process a resource listing that has already been parsed.

    :param resource_listing: Parsed resource listing.
    :type resource_listing: dict
    :param http_client: HTTP client interface; a SynchronousHttpClient is
                        created when omitted.
    :param processors: List of SwaggerProcessors to apply.
    :return: Processed resource listing.
    """
    client = SynchronousHttpClient() if http_client is None else http_client
    Loader(http_client=client,
           processors=processors).process_resource_listing(resource_listing)
    return resource_listing
| |
"""The feature extraction module contains classes for feature extraction."""
import numpy as np
import SimpleITK as sitk
import mialab.filtering.filter as fltr
class AtlasCoordinates(fltr.IFilter):
    """Represents an atlas coordinates feature extractor."""

    def __init__(self):
        """Initializes a new instance of the AtlasCoordinates class."""
        super().__init__()

    def execute(self, image: sitk.Image, params: fltr.IFilterParams = None) -> sitk.Image:
        """Executes a atlas coordinates feature extractor on an image.

        Args:
            image (sitk.Image): The image.
            params (fltr.IFilterParams): The parameters (unused).

        Returns:
            sitk.Image: The atlas coordinates image
            (a vector image with 3 components, which represent the physical x, y, z coordinates in mm).

        Raises:
            ValueError: If image is not 3-D.
        """

        if image.GetDimension() != 3:
            raise ValueError('image needs to be 3-D')

        x, y, z = image.GetSize()

        # create matrix with homogenous indices in axis 3
        # coords[i, j, k] = (i, j, k, 1): homogeneous index coordinates so a
        # single 4x4 affine can apply rotation (direction) and translation
        # (origin) in one multiplication.
        coords = np.zeros((x, y, z, 4))
        coords[..., 0] = np.arange(x)[:, np.newaxis, np.newaxis]
        coords[..., 1] = np.arange(y)[np.newaxis, :, np.newaxis]
        coords[..., 2] = np.arange(z)[np.newaxis, np.newaxis, :]
        coords[..., 3] = 1

        # reshape such that each voxel is one row
        lin_coords = np.reshape(coords, [coords.shape[0] * coords.shape[1] * coords.shape[2], 4])

        # generate transformation matrix: GetDirection() gives 9 values and
        # GetOrigin() 3; packing the 12 values column-major (order='F') into
        # [3, 4] puts the direction matrix in the first 3 columns and the
        # origin in the last, then a (0, 0, 0, 1) row completes the affine.
        # NOTE(review): GetSpacing() is not folded in, so the mapping assumes
        # unit voxel spacing — confirm inputs are resampled to 1 mm.
        tmpmat = image.GetDirection() + image.GetOrigin()
        tfm = np.reshape(tmpmat, [3, 4], order='F')
        tfm = np.vstack((tfm, [0, 0, 0, 1]))

        # apply the affine to every voxel and drop the homogeneous row,
        # then reshape back to volume layout (z, y, x as numpy expects,
        # Fortran order to undo the x-fastest flattening above).
        atlas_coords = (tfm @ np.transpose(lin_coords))[0:3, :]
        atlas_coords = np.reshape(np.transpose(atlas_coords), [z, y, x, 3], 'F')

        img_out = sitk.GetImageFromArray(atlas_coords)
        img_out.CopyInformation(image)

        return img_out

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        # .format() with no placeholders returns the literal unchanged;
        # kept for symmetry with the other filters' __str__ implementations.
        return 'AtlasCoordinates:\n' \
            .format(self=self)
def first_order_texture_features_function(values):
    """Calculates first-order texture features.

    Args:
        values (np.array): The values to calculate the first-order texture features from.

    Returns:
        np.array: A vector containing the first-order texture features:

            - mean
            - variance
            - sigma
            - skewness
            - kurtosis
            - entropy
            - energy
            - snr
            - min
            - max
            - range
            - percentile10th
            - percentile25th
            - percentile50th
            - percentile75th
            - percentile90th
    """
    values = np.asarray(values, dtype=float)
    mean = np.mean(values)
    std = np.std(values)
    # guard against division by zero on constant-valued neighborhoods
    snr = mean / std if std != 0 else 0
    min_value = np.min(values)
    max_value = np.max(values)

    # Standardized central moments: 3rd is skewness, 4th is (Pearson)
    # kurtosis. Both are defined as 0 for constant input (std == 0).
    # These replace the former 0.0/1.0 placeholders (see old TODO).
    if std != 0:
        standardized = (values - mean) / std
        skewness = np.mean(standardized ** 3)
        kurtosis = np.mean(standardized ** 4)
    else:
        skewness = 0.0
        kurtosis = 0.0

    # Entropy (bits) and energy of the discrete intensity distribution,
    # with probabilities taken as the relative frequency of each distinct
    # value in the neighborhood.
    _, counts = np.unique(values, return_counts=True)
    probabilities = counts / float(values.size)
    entropy = -np.sum(probabilities * np.log2(probabilities))
    energy = np.sum(probabilities ** 2)

    return np.array([mean,
                     np.var(values),
                     std,
                     skewness,
                     kurtosis,
                     entropy,
                     energy,
                     snr,
                     min_value,
                     max_value,
                     max_value - min_value,
                     np.percentile(values, 10),
                     np.percentile(values, 25),
                     np.percentile(values, 50),
                     np.percentile(values, 75),
                     np.percentile(values, 90)
                     ])
class NeighborhoodFeatureExtractor(fltr.IFilter):
    """Represents a feature extractor filter, which works on a neighborhood."""

    def __init__(self, kernel=(3,3,3), function_=first_order_texture_features_function):
        """Initializes a new instance of the NeighborhoodFeatureExtractor class.

        Args:
            kernel (tuple): The neighborhood window size per axis.
            function_ (callable): Maps a neighborhood array to a scalar or a
                1-D np.ndarray of features.
        """
        super().__init__()
        # NOTE(review): neighborhood_radius is never read anywhere in this
        # class — only `kernel` drives the window size; confirm it can go.
        self.neighborhood_radius = 3
        self.kernel = kernel
        self.function = function_

    def execute(self, image: sitk.Image, params: fltr.IFilterParams=None) -> sitk.Image:
        """Executes a neighborhood feature extractor on an image.

        Args:
            image (sitk.Image): The image.
            params (fltr.IFilterParams): The parameters (unused).

        Returns:
            sitk.Image: The normalized image.

        Raises:
            ValueError: If image is not 3-D.
        """

        if image.GetDimension() != 3:
            raise ValueError('image needs to be 3-D')

        # test the function and get the output dimension for later reshaping
        # (a probe call decides between a scalar image and a vector image).
        function_output = self.function(np.array([1, 2, 3]))
        if np.isscalar(function_output):
            img_out = sitk.Image(image.GetSize(), sitk.sitkFloat32)
        elif not isinstance(function_output, np.ndarray):
            raise ValueError('function must return a scalar or a 1-D np.ndarray')
        elif function_output.ndim > 1:
            raise ValueError('function must return a scalar or a 1-D np.ndarray')
        elif function_output.shape[0] <= 1:
            raise ValueError('function must return a scalar or a 1-D np.ndarray with at least two elements')
        else:
            img_out = sitk.Image(image.GetSize(), sitk.sitkVectorFloat32, function_output.shape[0])

        img_out_arr = sitk.GetArrayFromImage(img_out)
        img_arr = sitk.GetArrayFromImage(image)
        # numpy arrays from SimpleITK are indexed (z, y, x)
        z, y, x = img_arr.shape
        # NOTE(review): kernel[2] feeds the z axis and kernel[0] the x axis,
        # i.e. kernel is read as (x, y, z); confirm callers pass it that way —
        # with the default cubic (3,3,3) kernel the ordering is irrelevant.
        z_offset = self.kernel[2]
        y_offset = self.kernel[1]
        x_offset = self.kernel[0]
        # pad only at the trailing edge of each axis: the window is anchored
        # at the voxel and extends toward increasing indices (not centered).
        pad = ((0, z_offset), (0, y_offset), (0, x_offset))
        img_arr_padded = np.pad(img_arr, pad, 'symmetric')

        # per-voxel evaluation of self.function over the anchored window
        for xx in range(x):
            for yy in range(y):
                for zz in range(z):
                    val = self.function(img_arr_padded[zz:zz + z_offset, yy:yy + y_offset, xx:xx + x_offset])
                    img_out_arr[zz, yy, xx] = val

        img_out = sitk.GetImageFromArray(img_out_arr)
        img_out.CopyInformation(image)

        return img_out

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        # .format() with no placeholders returns the literal unchanged;
        # kept for symmetry with the other filters' __str__ implementations.
        return 'NeighborhoodFeatureExtractor:\n' \
            .format(self=self)
class RandomizedTrainingMaskGenerator:
    """Represents a training mask generator.

    A training mask is an image with intensity values 0 and 1, where 1 represents masked.
    Such a mask can be used to sample voxels for training.
    """

    @staticmethod
    def get_mask(ground_truth: sitk.Image,
                 ground_truth_labels: list,
                 label_percentages: list,
                 background_mask: sitk.Image=None) -> sitk.Image:
        """Gets a training mask.

        Args:
            ground_truth (sitk.Image): The ground truth image.
            ground_truth_labels (list of int): The ground truth labels,
                where 0=background, 1=label1, 2=label2, ..., e.g. [0, 1]
            label_percentages (list of float): The percentage of voxels of a corresponding label to extract as mask,
                e.g. [0.2, 0.2].
            background_mask (sitk.Image): A mask, where intensity 0 indicates voxels to exclude independent of the label.

        Returns:
            sitk.Image: The training mask.
        """
        gt_arr = sitk.GetArrayFromImage(ground_truth)
        mask_arr = np.zeros(gt_arr.shape, dtype=np.uint8)

        if background_mask is not None:
            # Voxels where the background mask is 0 must never be sampled;
            # set them to NaN so no label comparison can ever match them.
            excluded = np.logical_not(sitk.GetArrayFromImage(background_mask))
            gt_arr = gt_arr.astype(float)  # float, so that NaN is representable
            gt_arr[excluded] = np.nan

        for label_idx, label in enumerate(ground_truth_labels):
            # all voxel coordinates carrying this label, in random order
            candidates = np.transpose(np.where(gt_arr == label))
            np.random.shuffle(candidates)
            n_selected = int(candidates.shape[0] * label_percentages[label_idx])
            for coordinate in candidates[:n_selected]:
                mask_arr[coordinate[0], coordinate[1], coordinate[2]] = 1  # masked voxel

        mask = sitk.GetImageFromArray(mask_arr)
        mask.SetOrigin(ground_truth.GetOrigin())
        mask.SetDirection(ground_truth.GetDirection())
        mask.SetSpacing(ground_truth.GetSpacing())

        return mask
| |
#!/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for objectfilter.objectfilter."""
import logging
import unittest
from objectfilter import objectfilter
# Shared fixture values referenced by DummyFile and the operator truth
# tables in ObjectFilterTest below.
attr1 = "Backup"
attr2 = "Archive"
hash1 = "123abc"
hash2 = "456def"
filename = "yay.exe"
class DummyObject(object):
  """Minimal fixture exposing one attribute chosen at construction time."""

  def __init__(self, key, value):
    setattr(self, key, value)
class HashObject(object):
  """Fixture wrapping a hash value that compares directly against strings."""

  def __init__(self, hash_value=None):
    self.value = hash_value

  @property
  def md5(self):
    return self.value

  def __eq__(self, other):
    return self.value == other

  def __lt__(self, other):
    return self.value < other
class Dll(object):
  """Fixture DLL: imports exposed as a generator, exports as a plain list."""

  def __init__(self, name, imported_functions=None, exported_functions=None):
    self.name = name
    self._imported_functions = imported_functions or []
    self.num_imported_functions = len(self._imported_functions)
    self.exported_functions = exported_functions or []
    self.num_exported_functions = len(self.exported_functions)

  @property
  def imported_functions(self):
    # deliberately a generator, so tests exercise iterator expansion
    for function_name in self._imported_functions:
      yield function_name
class DummyFile(object):
  """Fixture resembling a scanned file: plain attributes, properties,
  repeated values, generators, and a callable that must never be called."""

  non_callable_leaf = "yoda"

  def __init__(self):
    self.non_callable = HashObject(hash1)
    self.non_callable_repeated = [
        DummyObject("desmond", ["brotha", "brotha"]),
        DummyObject("desmond", ["brotha", "sista"]),
    ]
    self.imported_dll1 = Dll("a.dll", ["FindWindow", "CreateFileA"])
    self.imported_dll2 = Dll("b.dll", ["RegQueryValueEx"])

  @property
  def name(self):
    return filename

  @property
  def attributes(self):
    return [attr1, attr2]

  @property
  def hash(self):
    return [HashObject(hash1), HashObject(hash2)]

  @property
  def size(self):
    return 10

  @property
  def deferred_values(self):
    # generator, so expansion of lazily produced values is covered
    for value in ["a", "b"]:
      yield value

  @property
  def novalues(self):
    return []

  @property
  def imported_dlls(self):
    return [self.imported_dll1, self.imported_dll2]

  def Callable(self):
    raise RuntimeError("This can not be called.")

  @property
  def float(self):
    return 123.9823
class ObjectFilterTest(unittest.TestCase):
  """Tests objectfilter operators, value expansion, parsing and compilation.

  Everything runs against the DummyFile fixture defined above.
  """

  def setUp(self):
    """Builds the fixture and shorthands for the filter implementation."""
    self.file = DummyFile()
    self.filter_imp = objectfilter.LowercaseAttributeFilterImplementation
    self.value_expander = self.filter_imp.FILTERS["ValueExpander"]

  # Truth table for the binary operators. Maps each operator class to a list
  # of (expected_result, [attribute_path, right_operand]) pairs that
  # testBinaryOperators evaluates against DummyFile (size=10, float=123.9823,
  # name="yay.exe", attributes=["Backup", "Archive"]).
  operator_tests = {
      objectfilter.Less: [
          (True, ["size", 1000]),
          (True, ["size", 11]),
          (False, ["size", 10]),
          (False, ["size", 0]),
          (False, ["float", 1.0]),
          (True, ["float", 123.9824]),
          ],
      objectfilter.LessEqual: [
          (True, ["size", 1000]),
          (True, ["size", 11]),
          (True, ["size", 10]),
          (False, ["size", 9]),
          (False, ["float", 1.0]),
          (True, ["float", 123.9823]),
          ],
      objectfilter.Greater: [
          (True, ["size", 1]),
          (True, ["size", 9.23]),
          (False, ["size", 10]),
          (False, ["size", 1000]),
          (True, ["float", 122]),
          (True, ["float", 1.0]),
          ],
      objectfilter.GreaterEqual: [
          (False, ["size", 1000]),
          (False, ["size", 11]),
          (True, ["size", 10]),
          (True, ["size", 0]),
          # Floats work fine too
          (True, ["float", 122]),
          (True, ["float", 123.9823]),
          # Comparisons works with strings, although it might be a bit silly
          (True, ["name", "aoot.ini"]),
          ],
      objectfilter.Contains: [
          # Contains works with strings
          (True, ["name", "yay.exe"]),
          (True, ["name", "yay"]),
          (False, ["name", "meh"]),
          # Works with generators
          (True, ["imported_dlls.imported_functions", "FindWindow"]),
          # But not with numbers
          (False, ["size", 12]),
          ],
      objectfilter.NotContains: [
          (False, ["name", "yay.exe"]),
          (False, ["name", "yay"]),
          (True, ["name", "meh"]),
          ],
      objectfilter.Equals: [
          (True, ["name", "yay.exe"]),
          (False, ["name", "foobar"]),
          (True, ["float", 123.9823]),
          ],
      objectfilter.NotEquals: [
          (False, ["name", "yay.exe"]),
          (True, ["name", "foobar"]),
          (True, ["float", 25]),
          ],
      objectfilter.InSet: [
          (True, ["name", ["yay.exe", "autoexec.bat"]]),
          (True, ["name", "yay.exe"]),
          (False, ["name", "NOPE"]),
          # All values of attributes are within these
          (True, ["attributes", ["Archive", "Backup", "Nonexisting"]]),
          # Not all values of attributes are within these
          (False, ["attributes", ["Executable", "Sparse"]]),
          ],
      objectfilter.NotInSet: [
          (False, ["name", ["yay.exe", "autoexec.bat"]]),
          (False, ["name", "yay.exe"]),
          (True, ["name", "NOPE"]),
          ],
      objectfilter.Regexp: [
          (True, ["name", "^yay.exe$"]),
          (True, ["name", "yay.exe"]),
          (False, ["name", "^$"]),
          (True, ["attributes", "Archive"]),
          # One can regexp numbers if he's inclined to
          (True, ["size", 0]),
          # But regexp doesn't work with lists or generators for the moment
          (False, ["imported_dlls.imported_functions", "FindWindow"])
          ],
      }

  def testBinaryOperators(self):
    """Every (operator, operands) entry of the truth table must match."""
    for operator, test_data in self.operator_tests.items():
      for test_unit in test_data:
        logging.debug("Testing %s with %s and %s",
                      operator, test_unit[0], test_unit[1])
        kwargs = {"arguments": test_unit[1],
                  "value_expander": self.value_expander}
        self.assertEqual(test_unit[0], operator(**kwargs).Matches(self.file))

  def testExpand(self):
    """ValueExpander must resolve dotted paths over values and iterables."""
    # Case insensitivity
    values_lowercase = self.value_expander().Expand(self.file, "size")
    values_uppercase = self.value_expander().Expand(self.file, "Size")
    self.assertListEqual(list(values_lowercase), list(values_uppercase))

    # Existing, non-repeated, leaf is a value
    values = self.value_expander().Expand(self.file, "size")
    self.assertListEqual(list(values), [10])

    # Existing, non-repeated, leaf is iterable
    values = self.value_expander().Expand(self.file, "attributes")
    self.assertListEqual(list(values), [[attr1, attr2]])

    # Existing, repeated, leaf is value
    values = self.value_expander().Expand(self.file, "hash.md5")
    self.assertListEqual(list(values), [hash1, hash2])

    # Existing, repeated, leaf is iterable
    values = self.value_expander().Expand(self.file,
                                          "non_callable_repeated.desmond")
    self.assertListEqual(list(values), [["brotha", "brotha"],
                                        ["brotha", "sista"]])

    # Now with an iterator
    values = self.value_expander().Expand(self.file, "deferred_values")
    self.assertListEqual([list(value) for value in values], [["a", "b"]])

    # Iterator > generator
    values = self.value_expander().Expand(self.file,
                                          "imported_dlls.imported_functions")
    expected = [
        ["FindWindow", "CreateFileA"],
        ["RegQueryValueEx"]]
    self.assertListEqual([list(value) for value in values], expected)

    # Non-existing first path
    values = self.value_expander().Expand(self.file, "nonexistant")
    self.assertListEqual(list(values), [])

    # Non-existing in the middle
    values = self.value_expander().Expand(self.file, "hash.mink.boo")
    self.assertListEqual(list(values), [])

    # Non-existing as a leaf
    values = self.value_expander().Expand(self.file, "hash.mink")
    self.assertListEqual(list(values), [])

    # Non-callable leaf
    values = self.value_expander().Expand(self.file, "non_callable_leaf")
    self.assertListEqual(list(values), [DummyFile.non_callable_leaf])

    # callable
    values = self.value_expander().Expand(self.file, "Callable")
    self.assertListEqual(list(values), [])

    # leaf under a callable. Will return nothing
    values = self.value_expander().Expand(self.file, "Callable.a")
    self.assertListEqual(list(values), [])

  def testGenericBinaryOperator(self):
    """GenericBinaryOperator must call Operation once per expanded value."""
    class TestBinaryOperator(objectfilter.GenericBinaryOperator):
      # records each left-hand value the operator was invoked with
      values = list()

      def Operation(self, x, _):
        return self.values.append(x)

    # Test a common binary operator
    tbo = TestBinaryOperator(arguments=["whatever", 0],
                             value_expander=self.value_expander)
    self.assertEqual(tbo.right_operand, 0)
    self.assertEqual(tbo.args[0], "whatever")
    tbo.Matches(DummyObject("whatever", "id"))
    tbo.Matches(DummyObject("whatever", "id2"))
    tbo.Matches(DummyObject("whatever", "bg"))
    tbo.Matches(DummyObject("whatever", "bg2"))
    self.assertListEqual(tbo.values, ["id", "id2", "bg", "bg2"])

  def testContext(self):
    """Context must scope sub-filters to the same repeated element."""
    # Context takes exactly one path plus one sub-filter
    self.assertRaises(objectfilter.InvalidNumberOfOperands,
                      objectfilter.Context,
                      arguments=["context"],
                      value_expander=self.value_expander)
    self.assertRaises(objectfilter.InvalidNumberOfOperands,
                      objectfilter.Context,
                      arguments=
                      ["context",
                       objectfilter.Equals(arguments=["path", "value"],
                                           value_expander=self.value_expander),
                       objectfilter.Equals(arguments=["another_path", "value"],
                                           value_expander=self.value_expander)
                      ],
                      value_expander=self.value_expander)

    # "One imported_dll imports 2 functions AND one imported_dll imports
    # function RegQueryValueEx"
    arguments = [
        objectfilter.Equals(["imported_dlls.num_imported_functions", 1],
                            value_expander=self.value_expander),
        objectfilter.Contains(["imported_dlls.imported_functions",
                               "RegQueryValueEx"],
                              value_expander=self.value_expander)]
    condition = objectfilter.AndFilter(arguments=arguments)
    # Without context, it matches because both filters match separately
    self.assertEqual(True, condition.Matches(self.file))

    arguments = [
        objectfilter.Equals(["num_imported_functions", 2],
                            value_expander=self.value_expander),
        objectfilter.Contains(["imported_functions", "RegQueryValueEx"],
                              value_expander=self.value_expander)]
    condition = objectfilter.AndFilter(arguments=arguments)
    # "The same DLL imports 2 functions AND one of these is RegQueryValueEx"
    context = objectfilter.Context(arguments=["imported_dlls", condition],
                                   value_expander=self.value_expander)
    # With context, it doesn't match because both don't match in the same dll
    self.assertEqual(False, context.Matches(self.file))

    # "One imported_dll imports only 1 function AND one imported_dll imports
    # function RegQueryValueEx"
    condition = objectfilter.AndFilter(arguments=[
        objectfilter.Equals(arguments=["num_imported_functions", 1],
                            value_expander=self.value_expander),
        objectfilter.Contains(["imported_functions", "RegQueryValueEx"],
                              value_expander=self.value_expander)])
    # "The same DLL imports 1 function AND it"s RegQueryValueEx"
    context = objectfilter.Context(["imported_dlls", condition],
                                   value_expander=self.value_expander)
    self.assertEqual(True, context.Matches(self.file))

    # Now test the context with a straight query
    query = """
@imported_dlls
(
  imported_functions contains "RegQueryValueEx"
  AND num_imported_functions == 1
)
"""
    self.assertObjectMatches(self.file, query)

  def testRegexpRaises(self):
    """An uncompilable regexp must be rejected at construction time."""
    self.assertRaises(ValueError, objectfilter.Regexp,
                      arguments=["name", "I [dont compile"],
                      value_expander=self.value_expander)

  def testEscaping(self):
    """Escape sequences in quoted literals must decode (or fail) correctly."""
    parser = objectfilter.Parser(r"a is '\n'").Parse()
    self.assertEqual(parser.args[0], "\n")
    # Invalid escape sequence
    parser = objectfilter.Parser(r"a is '\z'")
    self.assertRaises(objectfilter.ParseError, parser.Parse)
    # Can escape the backslash
    parser = objectfilter.Parser(r"a is '\\'").Parse()
    self.assertEqual(parser.args[0], "\\")

    ## HEX ESCAPING
    # This fails as it's not really a hex escaped string
    parser = objectfilter.Parser(r"a is '\xJZ'")
    self.assertRaises(objectfilter.ParseError, parser.Parse)
    # Instead, this is what one should write
    parser = objectfilter.Parser(r"a is '\\xJZ'").Parse()
    self.assertEqual(parser.args[0], r"\xJZ")
    # Standard hex-escape
    parser = objectfilter.Parser(r"a is '\x41\x41\x41'").Parse()
    self.assertEqual(parser.args[0], "AAA")
    # Hex-escape + a character
    parser = objectfilter.Parser(r"a is '\x414'").Parse()
    self.assertEqual(parser.args[0], r"A4")
    # How to include r'\x41'
    parser = objectfilter.Parser(r"a is '\\x41'").Parse()
    self.assertEqual(parser.args[0], r"\x41")

  def ParseQuery(self, query):
    """Helper: parses a query string into an AST."""
    return objectfilter.Parser(query).Parse()

  def assertQueryParses(self, query):
    """Helper: asserts (by not raising) that a query is grammatical."""
    self.ParseQuery(query)

  def assertParseRaises(self, query, exception=objectfilter.ParseError):
    """Helper: asserts that parsing a query raises the given exception."""
    parser = objectfilter.Parser(query)
    self.assertRaises(exception, parser.Parse)

  def testParse(self):
    """Exercises the grammar: literals, parentheses, contexts and lists."""
    # We need to complete a basic expression
    self.assertParseRaises(" ")
    self.assertParseRaises("attribute")
    self.assertParseRaises("attribute is")

    # We have to go from an expression to the ANDOR state
    self.assertParseRaises("attribute is 3 really")
    self.assertParseRaises("attribute is 3 AND")
    self.assertParseRaises("attribute is 3 AND bla")
    self.assertParseRaises("attribute is 3 AND bla contains")

    # Two complete expressions parse fine
    query = "attribute is 3 AND name contains 'atthew'"
    self.assertQueryParses(query)

    # Arguments are either int, float or quoted string
    self.assertQueryParses("attribute == 1")
    self.assertQueryParses("attribute == 0x10")
    self.assertParseRaises("attribute == 1a")
    self.assertQueryParses("attribute == 1.2")
    self.assertParseRaises("attribute == 1.2a3")
    # Scientific notation is not accepted...
    self.assertParseRaises("attribute == 1e3")

    # Test both quoted strings
    self.assertQueryParses("attribute == 'bla'")
    self.assertQueryParses("attribute == \"bla\"")
    # Unquoted strings fail
    self.assertParseRaises("something == red")

    # Can't start with AND
    self.assertParseRaises("and something is 'Blue'")

    # Need to match parentheses
    self.assertParseRaises("(a is 3")
    self.assertParseRaises("((a is 3")
    self.assertParseRaises("((a is 3)")
    self.assertParseRaises("a is 3)")
    self.assertParseRaises("a is 3))")
    self.assertParseRaises("(a is 3))")
    # Need to put parentheses in the right place
    self.assertParseRaises("()a is 3")
    self.assertParseRaises("(a) is 3")
    self.assertParseRaises("(a is) 3")
    self.assertParseRaises("a (is) 3")
    self.assertParseRaises("a is() 3")
    self.assertParseRaises("a is (3)")
    self.assertParseRaises("a is 3()")
    self.assertParseRaises("a (is 3 AND) b is 4 ")
    # In the right places, parentheses are accepted
    self.assertQueryParses("(a is 3)")
    self.assertQueryParses("(a is 3 AND b is 4)")

    # Context Operator alone is not accepted
    self.assertParseRaises("@attributes")
    # Accepted only with braces (not necessary but forced by the grammar
    # to be explicit)
    objectfilter.Parser("@attributes( name is 'adrien')").Parse()
    # Not without them
    self.assertParseRaises("@attributes name is 'adrien'")
    # Or in the wrong place
    self.assertParseRaises("@attributes (name is) 'adrien'")
    # Can nest context operators
    query = "@imported_dlls( @imported_function( name is 'OpenFileA'))"
    self.assertQueryParses(query)
    # Can nest context operators and mix braces without it messing up
    query = "@imported_dlls( @imported_function( name is 'OpenFileA'))"
    self.assertQueryParses(query)
    query = """
@imported_dlls
(
  @imported_function
  (
    name is 'OpenFileA'
  )
)
"""
    self.assertQueryParses(query)
    # Mix context and binary operators
    query = """
@imported_dlls
(
  @imported_function
  (
    name is 'OpenFileA'
  ) AND num_functions == 2
)
"""
    self.assertQueryParses(query)
    # Also on the right
    query = """
@imported_dlls
(
  num_functions == 2 AND
  @imported_function
  (
    name is 'OpenFileA'
  )
)
"""
    # NOTE(review): the query above is never asserted — the variable is
    # immediately overwritten below; confirm whether an assertQueryParses
    # call was intended here.
    query = "b is 3 AND c is 4 AND d is 5"
    self.assertQueryParses(query)
    query = "@a(b is 3) AND @b(c is 4)"
    self.assertQueryParses(query)
    query = "@a(b is 3) AND @b(c is 4) AND @d(e is 5)"
    self.assertQueryParses(query)
    query = "@a(@b(c is 3)) AND @b(d is 4)"
    self.assertQueryParses(query)
    query = """
@imported_dlls( @imported_function ( name is 'OpenFileA' ) )
AND
@imported_dlls(
  name regexp '(?i)advapi32.dll'
  AND @imported_function ( name is 'RegQueryValueEx' )
)
AND @exported_symbols(name is 'inject')
"""
    self.assertQueryParses(query)

    self.assertQueryParses("a is ['blue', 'dot']")
    self.assertQueryParses("a is ['blue', 1]")
    self.assertQueryParses("a is [1]")
    # This is an empty list
    self.assertQueryParses("a is []")
    # While weird, the current parser allows this. Same as an empty list
    self.assertQueryParses("a is [,,]")
    # Unifinished expressions shouldn't parse
    self.assertParseRaises("a is [")
    self.assertParseRaises("a is [,,")
    self.assertParseRaises("a is [,']")
    # Malformed expressions shouldn't parse
    self.assertParseRaises("a is [[]")
    self.assertParseRaises("a is []]")
    # We do not support nested lists at the moment
    self.assertParseRaises("a is ['cannot', ['nest', 'lists']]")

  def assertObjectMatches(self, obj, query, match_is=True):
    """Helper: compiles a query and asserts its match result against obj."""
    parser = self.ParseQuery(query)
    filter_ = parser.Compile(self.filter_imp)
    self.assertEqual(filter_.Matches(obj), match_is)

  def testCompile(self):
    """End-to-end: parse + compile + match for scalars, contexts, lists."""
    obj = DummyObject("something", "Blue")
    query = "something == 'Blue'"
    self.assertObjectMatches(obj, query)
    query = "something == 'Red'"
    self.assertObjectMatches(obj, query, match_is=False)
    query = "something == \"Red\""
    self.assertObjectMatches(obj, query, match_is=False)
    obj = DummyObject("size", 4)
    parser = objectfilter.Parser("size < 3").Parse()
    filter_ = parser.Compile(self.filter_imp)
    self.assertEqual(filter_.Matches(obj), False)
    parser = objectfilter.Parser("size == 4").Parse()
    filter_ = parser.Compile(self.filter_imp)
    self.assertEqual(filter_.Matches(obj), True)
    query = "something is 'Blue' and size notcontains 3"
    self.assertObjectMatches(obj, query, match_is=False)

    query = """
@imported_dlls
(
  name is 'a.dll'
  AND imported_functions contains 'CreateFileA'
)
AND name is "yay.exe"
AND size is 10
"""
    self.assertObjectMatches(self.file, query)

    query = """
@imported_dlls
(
  name is 'a.dll'
  AND imported_functions contains 'CreateFileB'
)
AND name is "yay.exe"
AND size is 10
"""
    self.assertObjectMatches(self.file, query, match_is=False)

    obj = DummyObject("list", [1,2])
    self.assertObjectMatches(obj, "list is [1,2]")
    self.assertObjectMatches(obj, "list is [5,6]", match_is=False)
    self.assertObjectMatches(obj, "list isnot [1,3]")
    self.assertObjectMatches(obj, "list inset [1,2,3]")
    obj = DummyObject("list", [])
    self.assertObjectMatches(obj, "list is []")
    self.assertObjectMatches(obj, "list inset []")
    # An empty set [] is a subset of any set. Hence this is False.
    self.assertObjectMatches(obj, "list notinset [2]", match_is=False)
    obj = DummyObject("single_element", 1)
    self.assertObjectMatches(obj, "single_element inset [1,2,3]")
    # 1 != [1]
    self.assertObjectMatches(obj, "single_element isnot [1]")
    obj = DummyObject("os", "windows")
    self.assertObjectMatches(obj, 'os inset ["windows", "mac"]')
    # "a" != ["a"]
    self.assertObjectMatches(obj, 'os isnot ["windows"]')
| |
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import socket
import time
import unittest
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.osutil.bigip as osutil
import azurelinuxagent.common.osutil.default as default
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.osutil.bigip import BigIpOSUtil
from tests.tools import AgentTestCase, patch
from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids
class TestBigIpOSUtil_wait_until_mcpd_is_initialized(AgentTestCase):
    """Tests for BigIpOSUtil._wait_until_mcpd_is_initialized."""

    @patch.object(shellutil, "run", return_value=0)
    @patch.object(logger, "info", return_value=None)
    def test_success(self, *args):
        # shellutil.run reporting 0 means mcpd is up on the first probe.
        result = osutil.BigIpOSUtil._wait_until_mcpd_is_initialized(
            osutil.BigIpOSUtil()
        )
        self.assertEqual(result, True)

        # There are two logger calls in the mcpd wait function. The second
        # occurs after mcpd is found to be "up"
        # (args[0] is the logger.info mock: the bottom-most @patch decorator
        # is passed to the test first).
        self.assertEqual(args[0].call_count, 2)

    @patch.object(shellutil, "run", return_value=1)
    @patch.object(logger, "info", return_value=None)
    @patch.object(time, "sleep", return_value=None)
    def test_failure(self, *args):  # pylint: disable=unused-argument
        # shellutil.run always failing (1) exhausts the retries; time.sleep
        # is patched out so the retry loop runs instantly.
        self.assertRaises(
            OSUtilError,
            osutil.BigIpOSUtil._wait_until_mcpd_is_initialized,
            osutil.BigIpOSUtil()
        )
class TestBigIpOSUtil_save_sys_config(AgentTestCase):
    """Tests for BigIpOSUtil._save_sys_config."""

    @patch.object(shellutil, "run", return_value=0)
    @patch.object(logger, "error", return_value=None)
    def test_success(self, *args):
        # Success: the zero exit status is returned and no error is logged.
        self.assertEqual(
            osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()), 0)
        self.assertEqual(args[0].call_count, 0)

    @patch.object(shellutil, "run", return_value=1)
    @patch.object(logger, "error", return_value=None)
    def test_failure(self, *args):
        # Failure: the non-zero exit status is returned and logged once.
        self.assertEqual(
            osutil.BigIpOSUtil._save_sys_config(osutil.BigIpOSUtil()), 1)
        self.assertEqual(args[0].call_count, 1)
class TestBigIpOSUtil_useradd(AgentTestCase):
    """Tests for BigIpOSUtil.useradd."""

    @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None)
    @patch.object(shellutil, "run_command")
    def test_success(self, *args):
        # args are bottom-up: args[0] is the run_command mock.
        args[0].return_value = (0, None)
        outcome = osutil.BigIpOSUtil.useradd(
            osutil.BigIpOSUtil(), 'foo', expiration=None
        )
        self.assertEqual(outcome, 0)

    @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None)
    def test_user_already_exists(self, *args):
        # Override the decorator default: pretend a user entry already exists.
        args[0].return_value = 'admin'
        outcome = osutil.BigIpOSUtil.useradd(
            osutil.BigIpOSUtil(), 'admin', expiration=None
        )
        self.assertEqual(outcome, None)

    @patch.object(shellutil, "run", return_value=1)
    def test_failure(self, *args):  # pylint: disable=unused-argument
        with self.assertRaises(OSUtilError):
            osutil.BigIpOSUtil.useradd(
                osutil.BigIpOSUtil(), 'foo', expiration=None
            )
class TestBigIpOSUtil_chpasswd(AgentTestCase):
    """Tests for BigIpOSUtil.chpasswd."""

    @patch.object(shellutil, "run_command")
    @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=True)
    @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False)
    @patch.object(osutil.BigIpOSUtil, '_save_sys_config', return_value=None)
    def test_success(self, *args):
        result = osutil.BigIpOSUtil.chpasswd(
            osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10
        )
        self.assertEqual(result, 0)
        # Decorators apply bottom-up: args[0] is the _save_sys_config mock,
        # args[3] is the run_command mock; each must be invoked exactly once.
        # (The original asserted args[0].call_count twice -- a copy/paste
        # duplicate that left run_command unchecked.)
        self.assertEqual(args[0].call_count, 1)
        self.assertEqual(args[3].call_count, 1)

    @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=True)
    def test_is_sys_user(self, *args):  # pylint: disable=unused-argument
        # Changing a system user's password is refused outright.
        self.assertRaises(
            OSUtilError,
            osutil.BigIpOSUtil.chpasswd,
            osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10
        )

    @patch.object(shellutil, "run_get_output", return_value=(1, None))
    @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False)
    def test_failed_to_set_user_password(self, *args):  # pylint: disable=unused-argument
        self.assertRaises(
            OSUtilError,
            osutil.BigIpOSUtil.chpasswd,
            osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10
        )

    @patch.object(shellutil, "run_get_output", return_value=(0, None))
    @patch.object(osutil.BigIpOSUtil, 'is_sys_user', return_value=False)
    @patch.object(osutil.BigIpOSUtil, 'get_userentry', return_value=None)
    def test_failed_to_get_user_entry(self, *args):  # pylint: disable=unused-argument
        self.assertRaises(
            OSUtilError,
            osutil.BigIpOSUtil.chpasswd,
            osutil.BigIpOSUtil(), 'admin', 'password', crypt_id=6, salt_len=10
        )
class TestBigIpOSUtil_get_dvd_device(AgentTestCase):
    """Tests for BigIpOSUtil.get_dvd_device."""

    @patch.object(os, "listdir", return_value=['tty1','cdrom0'])
    def test_success(self, *args):  # pylint: disable=unused-argument
        # The cdrom-like entry is resolved under the requested directory.
        device = osutil.BigIpOSUtil.get_dvd_device(
            osutil.BigIpOSUtil(), '/dev'
        )
        self.assertEqual(device, '/dev/cdrom0')

    @patch.object(os, "listdir", return_value=['foo', 'bar'])
    def test_failure(self, *args):  # pylint: disable=unused-argument
        # No cdrom-like entry present: the lookup raises.
        with self.assertRaises(OSUtilError):
            osutil.BigIpOSUtil.get_dvd_device(osutil.BigIpOSUtil(), '/dev')
class TestBigIpOSUtil_restart_ssh_service(AgentTestCase):
    """Tests for BigIpOSUtil.restart_ssh_service."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):  # pylint: disable=unused-argument
        # With shellutil.run mocked to succeed, the method returns 0.
        self.assertEqual(
            osutil.BigIpOSUtil.restart_ssh_service(osutil.BigIpOSUtil()), 0)
class TestBigIpOSUtil_stop_agent_service(AgentTestCase):
    """Tests for BigIpOSUtil.stop_agent_service."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):  # pylint: disable=unused-argument
        # With shellutil.run mocked to succeed, the method returns 0.
        self.assertEqual(
            osutil.BigIpOSUtil.stop_agent_service(osutil.BigIpOSUtil()), 0)
class TestBigIpOSUtil_start_agent_service(AgentTestCase):
    """Tests for BigIpOSUtil.start_agent_service."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):  # pylint: disable=unused-argument
        # With shellutil.run mocked to succeed, the method returns 0.
        self.assertEqual(
            osutil.BigIpOSUtil.start_agent_service(osutil.BigIpOSUtil()), 0)
class TestBigIpOSUtil_register_agent_service(AgentTestCase):
    """Tests for BigIpOSUtil.register_agent_service."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):  # pylint: disable=unused-argument
        # With shellutil.run mocked to succeed, the method returns 0.
        self.assertEqual(
            osutil.BigIpOSUtil.register_agent_service(osutil.BigIpOSUtil()), 0)
class TestBigIpOSUtil_unregister_agent_service(AgentTestCase):
    """Tests for BigIpOSUtil.unregister_agent_service."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):  # pylint: disable=unused-argument
        # With shellutil.run mocked to succeed, the method returns 0.
        self.assertEqual(
            osutil.BigIpOSUtil.unregister_agent_service(
                osutil.BigIpOSUtil()), 0)
class TestBigIpOSUtil_set_hostname(AgentTestCase):
    """Tests for BigIpOSUtil.set_hostname."""

    @patch.object(os.path, "exists", return_value=False)
    def test_success(self, *args):
        # The method returns None and never checks the filesystem
        # (os.path.exists is not called).
        result = osutil.BigIpOSUtil.set_hostname(  # pylint: disable=assignment-from-none
            osutil.BigIpOSUtil(), None
        )
        self.assertEqual(args[0].call_count, 0)
        self.assertEqual(result, None)
class TestBigIpOSUtil_set_dhcp_hostname(AgentTestCase):
    """Tests for BigIpOSUtil.set_dhcp_hostname."""

    @patch.object(os.path, "exists", return_value=False)
    def test_success(self, *args):
        # The method returns None and never checks the filesystem
        # (os.path.exists is not called).
        result = osutil.BigIpOSUtil.set_dhcp_hostname(  # pylint: disable=assignment-from-none
            osutil.BigIpOSUtil(), None
        )
        self.assertEqual(args[0].call_count, 0)
        self.assertEqual(result, None)
class TestBigIpOSUtil_get_first_if(AgentTestCase):
    """Tests for BigIpOSUtil.get_first_if."""

    @patch.object(osutil.BigIpOSUtil,
                  '_format_single_interface_name', return_value=b'eth0')
    def test_success(self, *args):  # pylint: disable=unused-argument
        ifname, ipaddr = osutil.BigIpOSUtil().get_first_if()
        self.assertTrue(ifname.startswith('eth'))
        self.assertTrue(ipaddr is not None)
        try:
            socket.inet_aton(ipaddr)
        except socket.error:
            self.fail("not a valid ip address")

    @patch.object(osutil.BigIpOSUtil,
                  '_format_single_interface_name', return_value=b'loenp0s3')
    def test_not_primary_interface(self, *args):  # pylint: disable=unused-argument
        # This method was originally also named test_success, which shadowed
        # the method above so the eth0 case never ran; renamed so that both
        # tests are discovered and executed.
        ifname, ipaddr = osutil.BigIpOSUtil().get_first_if()
        self.assertFalse(ifname.startswith('eth'))
        self.assertTrue(ipaddr is not None)
        try:
            socket.inet_aton(ipaddr)
        except socket.error:
            self.fail("not a valid ip address")
class TestBigIpOSUtil_mount_dvd(AgentTestCase):
    """Tests for BigIpOSUtil.mount_dvd."""

    @patch.object(shellutil, "run", return_value=0)
    @patch.object(time, "sleep", return_value=None)
    @patch.object(osutil.BigIpOSUtil,
                  '_wait_until_mcpd_is_initialized', return_value=None)
    @patch.object(default.DefaultOSUtil, 'mount_dvd', return_value=None)
    def test_success(self, *args):
        osutil.BigIpOSUtil.mount_dvd(
            osutil.BigIpOSUtil(), max_retry=6, chk_err=True
        )
        # args are bottom-up: args[0] is DefaultOSUtil.mount_dvd and
        # args[1] is _wait_until_mcpd_is_initialized; each runs exactly once.
        self.assertEqual(args[0].call_count, 1)
        self.assertEqual(args[1].call_count, 1)
class TestBigIpOSUtil_route_add(AgentTestCase):
    """Tests for BigIpOSUtil.route_add."""

    @patch.object(shellutil, "run", return_value=0)
    def test_success(self, *args):
        big_ip = osutil.BigIpOSUtil()
        osutil.BigIpOSUtil.route_add(
            big_ip, '10.10.10.0', '255.255.255.0', '10.10.10.1'
        )
        # Exactly one shell invocation performs the route addition.
        self.assertEqual(args[0].call_count, 1)
class TestBigIpOSUtil_device_for_ide_port(AgentTestCase):
    """Tests for BigIpOSUtil.device_for_ide_port."""

    @patch.object(time, "sleep", return_value=None)
    @patch.object(os.path, "exists", return_value=False)
    @patch.object(default.DefaultOSUtil,
                  'device_for_ide_port', return_value=None)
    def test_success_waiting(self, *args):
        # Device node never appears: the method polls 99 times (99 existence
        # checks, 99 sleeps) before delegating to the default implementation
        # once.  args are bottom-up: args[0]=default impl, args[1]=exists,
        # args[2]=sleep.
        osutil.BigIpOSUtil.device_for_ide_port(
            osutil.BigIpOSUtil(), '5'
        )
        self.assertEqual(args[0].call_count, 1)
        self.assertEqual(args[1].call_count, 99)
        self.assertEqual(args[2].call_count, 99)

    @patch.object(time, "sleep", return_value=None)
    @patch.object(os.path, "exists", return_value=True)
    @patch.object(default.DefaultOSUtil,
                  'device_for_ide_port', return_value=None)
    def test_success_immediate(self, *args):
        # Device node exists immediately: one existence check, no sleeps,
        # one delegation.
        osutil.BigIpOSUtil.device_for_ide_port(
            osutil.BigIpOSUtil(), '5'
        )
        self.assertEqual(args[0].call_count, 1)
        self.assertEqual(args[1].call_count, 1)
        self.assertEqual(args[2].call_count, 0)
class TestBigIpOSUtil(AgentTestCase):
    """BigIpOSUtil behaviors shared with the default implementation."""

    def setUp(self):
        AgentTestCase.setUp(self)

    def tearDown(self):
        AgentTestCase.tearDown(self)

    def test_get_dhcp_pid_should_return_a_list_of_pids(self):
        # Reuse the common assertion helper from the default osutil tests.
        osutil_get_dhcp_pid_should_return_a_list_of_pids(self, BigIpOSUtil())
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
'''
Entry point module to start the interactive console.
'''
from _pydev_imps._pydev_thread import start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
from code import compile_command
from code import InteractiveInterpreter
import os
import sys
from _pydev_imps import _pydev_threading as threading
import traceback
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydevd_bundle import pydevd_vars
from _pydev_bundle.pydev_imports import Exec, _queue
# Py2/Py3 compatibility: expose the builtins module under the Py2 name.
try:
    import __builtin__
except:
    import builtins as __builtin__  # @UnresolvedImport

# Ancient-interpreter compatibility shim.
try:
    False
    True
except NameError:  # version < 2.3 -- didn't have the True/False builtins
    import __builtin__
    setattr(__builtin__, 'True', 1)  #Python 3.0 does not accept __builtin__.True = 1 in its syntax
    setattr(__builtin__, 'False', 0)
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from _pydev_bundle.pydev_console_utils import CodeFragment
# Interpreter version flags; IS_PY24 selects XML-RPC server options later.
IS_PYTHON_3K = False
IS_PY24 = False

try:
    if sys.version_info[0] == 3:
        IS_PYTHON_3K = True
    elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
        IS_PY24 = True
except:
    #That's OK, not all versions of python have sys.version_info
    pass
class Command:
    """One unit of console input, executed against an interpreter."""

    def __init__(self, interpreter, code_fragment):
        """
        :type code_fragment: CodeFragment
        :type interpreter: InteractiveConsole
        """
        self.interpreter = interpreter
        self.code_fragment = code_fragment
        # Set by run(): whether the interpreter needs more input.
        self.more = None

    def symbol_for_fragment(code_fragment):
        # 'single' compiles one interactive statement; 'exec' handles
        # multi-line blocks.  (Jython doesn't support 'exec' here.)
        return 'single' if code_fragment.is_single_line else 'exec'
    symbol_for_fragment = staticmethod(symbol_for_fragment)

    def run(self):
        """Feed the stored fragment to the interpreter, recording 'more'."""
        fragment = self.code_fragment
        self.more = self.interpreter.runsource(
            fragment.text, '<input>', self.symbol_for_fragment(fragment))
# Py3 compatibility: re-expose execfile (removed in Python 3) as a builtin
# so console users can keep calling it.
try:
    try:
        execfile  #Not in Py3k
    except NameError:
        from _pydev_bundle.pydev_imports import execfile
        __builtin__.execfile = execfile
except:
    pass

# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function

# Expose runfile as a builtin too (builtins on Py3, __builtin__ on Py2).
try:
    import builtins  # @UnresolvedImport
    builtins.runfile = runfile
except:
    import __builtin__
    __builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
    '''
    The methods in this class should be registered in the xml-rpc server.
    '''

    def __init__(self, host, client_port, mainThread, show_banner=True):
        BaseInterpreterInterface.__init__(self, mainThread)
        self.client_port = client_port
        self.host = host
        # One shared dict backs the console's globals.
        self.namespace = {}
        self.interpreter = InteractiveConsole(self.namespace)
        self._input_error_printed = False

    def do_add_exec(self, codeFragment):
        # Wrap the fragment in a Command so 'more' (whether additional
        # input is required) can be captured and returned.
        cmd = Command(self.interpreter, codeFragment)
        cmd.run()
        return cmd.more

    def get_namespace(self):
        """Return the dict used as the console's global namespace."""
        return self.namespace

    def getCompletions(self, text, act_tok):
        # Best effort: a completion failure must never break the console.
        try:
            from _pydev_bundle._pydev_completer import Completer
            return Completer(self.namespace, None).complete(act_tok)
        except:
            import traceback
            traceback.print_exc()
            return []

    def close(self):
        sys.exit(0)

    def get_greeting_msg(self):
        return 'PyDev console: starting.\n'
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def process_exec_queue(interpreter):
    """Main-thread loop: drain *interpreter*'s exec queue, cooperating with
    any GUI event loop through pydev's inputhook mechanism.

    Never returns normally; exits the process on unexpected errors.
    """
    from pydev_ipython.inputhook import get_inputhook, set_return_control_callback

    def return_control():
        ''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
            out if they should cede control and return '''
        if _ProcessExecQueueHelper._debug_hook:
            # Some of the input hooks check return control without doing
            # a single operation, so we don't return True on every
            # call when the debug hook is in place to allow the GUI to run
            # XXX: Eventually the inputhook code will have diverged enough
            # from the IPython source that it will be worthwhile rewriting
            # it rather than pretending to maintain the old API
            _ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
            if _ProcessExecQueueHelper._return_control_osc:
                return True

        if not interpreter.exec_queue.empty():
            return True
        return False

    set_return_control_callback(return_control)

    # Defer matplotlib/pylab/pyplot activation until they are imported.
    from _pydev_bundle.pydev_import_hook import import_hook_manager
    from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
    import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui))
    # enable_gui_function in activate_matplotlib should be called in main thread. That's why we call
    # interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread.
    import_hook_manager.add_module_name("pylab", activate_pylab)
    import_hook_manager.add_module_name("pyplot", activate_pyplot)

    while 1:
        # Running the request may have changed the inputhook in use
        inputhook = get_inputhook()

        if _ProcessExecQueueHelper._debug_hook:
            _ProcessExecQueueHelper._debug_hook()

        if inputhook:
            try:
                # Note: it'll block here until return_control returns True.
                inputhook()
            except:
                import traceback;traceback.print_exc()

        try:
            try:
                code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.)  # 20 calls/second
            except _queue.Empty:
                continue

            if callable(code_fragment):
                # It can be a callable (i.e.: something that must run in the main
                # thread can be put in the queue for later execution).
                code_fragment()
            else:
                # NOTE(review): 'more' is unused; add_exec is run for its
                # side effects on the interpreter.
                more = interpreter.add_exec(code_fragment)
        except KeyboardInterrupt:
            interpreter.buffer = None
            continue
        except SystemExit:
            raise
        except:
            type, value, tb = sys.exc_info()
            traceback.print_exception(type, value, tb, file=sys.__stderr__)
            exit()
# Decide whether to serve an IPython-backed console.  The IPYTHONENABLE
# environment variable overrides (must equal the string "True"); by
# default IPython is attempted.
if 'IPYTHONENABLE' in os.environ:
    IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
    IPYTHON = True

try:
    try:
        # Importing the IPython console may clobber sys.exitfunc (Py2);
        # save it so it can be restored after the import.
        exitfunc = sys.exitfunc
    except AttributeError:
        exitfunc = None
    if IPYTHON:
        # Replaces the plain InterpreterInterface defined above.
        from _pydev_bundle.pydev_ipython_console import InterpreterInterface
        if exitfunc is not None:
            sys.exitfunc = exitfunc
        else:
            try:
                delattr(sys, 'exitfunc')
            except:
                pass
except:
    # IPython unavailable: fall back to the plain console interface.
    IPYTHON = False
    pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
    '''
    We have to override the exit because calling sys.exit will only actually exit the main thread,
    and as we're in a Xml-rpc server, that won't work.
    '''
    try:
        # Jython: only java.lang.System.exit reliably ends the JVM.
        import java.lang.System
        java.lang.System.exit(1)
    except ImportError:
        # CPython: os._exit skips cleanup but ends all threads for sure.
        exit_code = args[0] if len(args) == 1 else 0
        os._exit(exit_code)
def handshake():
    """Liveness probe registered on the XML-RPC server for the IDE."""
    return "PyCharm"
#=======================================================================================================================
# start_console_server
#=======================================================================================================================
def start_console_server(host, port, interpreter):
    """Create, configure and serve the console's XML-RPC server.

    Blocks in serve_forever(); when *port* is 0 an ephemeral port is chosen
    and printed to stdout (along with the client port) so the IDE can read
    it.  Returns the server only if serving stops without a fatal error.
    """
    if port == 0:
        host = ''

    #I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
    from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer  #@Reimport

    try:
        if IS_PY24:
            # Python 2.4's SimpleXMLRPCServer has no allow_none parameter.
            server = XMLRPCServer((host, port), logRequests=False)
        else:
            server = XMLRPCServer((host, port), logRequests=False, allow_none=True)
    except:
        sys.stderr.write('Error starting server with host: %s, port: %s, client_port: %s\n' % (host, port, interpreter.client_port))
        raise

    # Tell UMD the proper default namespace
    _set_globals_function(interpreter.get_namespace)

    # RPC entry points the IDE may call.
    server.register_function(interpreter.execLine)
    server.register_function(interpreter.execMultipleLines)
    server.register_function(interpreter.getCompletions)
    server.register_function(interpreter.getFrame)
    server.register_function(interpreter.getVariable)
    server.register_function(interpreter.changeVariable)
    server.register_function(interpreter.getDescription)
    server.register_function(interpreter.close)
    server.register_function(interpreter.interrupt)
    server.register_function(handshake)
    server.register_function(interpreter.connectToDebugger)
    server.register_function(interpreter.hello)
    server.register_function(interpreter.getArray)
    server.register_function(interpreter.evaluate)

    # Functions for GUI main loop integration
    server.register_function(interpreter.enableGui)

    if port == 0:
        (h, port) = server.socket.getsockname()
        # The IDE reads these two lines from stdout to learn the ports.
        print(port)
        print(interpreter.client_port)

    sys.stderr.write(interpreter.get_greeting_msg())
    sys.stderr.flush()

    while True:
        try:
            server.serve_forever()
        except:
            # Ugly code to be py2/3 compatible
            # https://sw-brainwy.rhcloud.com/tracker/PyDev/534:
            # Unhandled "interrupted system call" error in the pydevconsol.py
            e = sys.exc_info()[1]
            retry = False
            try:
                retry = e.args[0] == 4  #errno.EINTR
            except:
                pass
            if not retry:
                raise
            # Otherwise, keep on going
    return server
def start_server(host, port, client_port):
    """Run the XML-RPC server on a background thread and the exec queue on
    the current (main) thread.  Blocks forever."""
    #replace exit (see comments on method)
    #note that this does not work in jython!!! (sys method can't be replaced).
    sys.exit = do_exit

    interpreter = InterpreterInterface(host, client_port, threading.currentThread())

    start_new_thread(start_console_server,(host, port, interpreter))

    # GUI/exec work must happen on the main thread.
    process_exec_queue(interpreter)
def get_interpreter():
    """Return the process-wide InterpreterInterface, creating it lazily.

    The singleton is stashed on the builtins module so every caller
    (console, debugger) shares one interpreter and one namespace.
    """
    try:
        return getattr(__builtin__, 'interpreter')
    except AttributeError:
        instance = InterpreterInterface(None, None, threading.currentThread())
        setattr(__builtin__, 'interpreter', instance)
        return instance
def get_completions(text, token, globals, locals):
    """Return completions for *token* against the given frame namespaces."""
    interface = get_interpreter()
    # Merge the frame's globals/locals into the console namespace first.
    interface.interpreter.update(globals, locals)
    return interface.getCompletions(text, token)
#===============================================================================
# Debugger integration
#===============================================================================
def exec_code(code, globals, locals):
    """Execute *code* in the console; return True if more input is needed."""
    interface = get_interpreter()
    # Merge the frame's globals/locals into the console namespace first.
    interface.interpreter.update(globals, locals)
    if interface.need_more(code):
        return True
    interface.add_exec(code)
    return False
class ConsoleWriter(InteractiveInterpreter):
    """InteractiveInterpreter that writes errors to sys.stderr while hiding
    the pydev-internal top frame from tracebacks."""
    # Number of upcoming write() calls to swallow.
    skip = 0

    def __init__(self, locals=None):
        InteractiveInterpreter.__init__(self, locals)

    def write(self, data):
        #if (data.find("global_vars") == -1 and data.find("pydevd") == -1):
        if self.skip > 0:
            self.skip -= 1
        else:
            if data == "Traceback (most recent call last):\n":
                # Swallow the next write (the pydev-internal frame line).
                self.skip = 1
            sys.stderr.write(data)

    def showsyntaxerror(self, filename=None):
        """Display the syntax error that just occurred."""
        #Override for avoid using sys.excepthook PY-12600
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except ValueError:
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        list = traceback.format_exception_only(type, value)
        sys.stderr.write(''.join(list))

    def showtraceback(self):
        """Display the exception that just occurred."""
        #Override for avoid using sys.excepthook PY-12600
        try:
            type, value, tb = sys.exc_info()
            sys.last_type = type
            sys.last_value = value
            sys.last_traceback = tb
            tblist = traceback.extract_tb(tb)
            # Drop the first entry (the pydev frame that ran the user code).
            del tblist[:1]
            lines = traceback.format_list(tblist)
            if lines:
                lines.insert(0, "Traceback (most recent call last):\n")
            lines.extend(traceback.format_exception_only(type, value))
        finally:
            # Break reference cycles with the traceback object.
            tblist = tb = None
        sys.stderr.write(''.join(lines))
def console_exec(thread_id, frame_id, expression):
    """returns 'False' in case expression is partially correct

    Evaluates *expression* in the context of the frame identified by
    (*thread_id*, *frame_id*).  Errors are printed to stderr rather than
    raised; returns True when more input is needed.
    """
    frame = pydevd_vars.find_frame(thread_id, frame_id)

    # The IDE transports newlines as the @LINE@ marker.
    expression = str(expression.replace('@LINE@', '\n'))

    #Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
    #(Names not resolved in generator expression in method)
    #See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
    updated_globals = {}
    updated_globals.update(frame.f_globals)
    updated_globals.update(frame.f_locals)  #locals later because it has precedence over the actual globals

    if IPYTHON:
        return exec_code(CodeFragment(expression), updated_globals, frame.f_locals)

    interpreter = ConsoleWriter()
    try:
        code = compile_command(expression)
    except (OverflowError, SyntaxError, ValueError):
        # Case 1: malformed input -- report and give up.
        interpreter.showsyntaxerror()
        return False
    if code is None:
        # Case 2: incomplete input -- ask the IDE for more.
        return True

    #Case 3: complete command -- execute it.
    try:
        Exec(code, updated_globals, frame.f_locals)
    except SystemExit:
        raise
    except:
        interpreter.showtraceback()
    return False
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
    #Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
    #so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
    #representations of its classes).
    #See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
    #'Variables' and 'Expressions' views stopped working when debugging interactive console
    import pydevconsole
    sys.stdin = pydevconsole.BaseStdIn()
    # argv[1] = server port, argv[2] = client port; 0/0 means pick local
    # ports automatically.
    port, client_port = sys.argv[1:3]
    from _pydev_bundle import pydev_localhost

    if int(port) == 0 and int(client_port) == 0:
        (h, p) = pydev_localhost.get_socket_name()
        client_port = p

    pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port))
| |
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gyp
import gyp.common
import gyp.SCons as SCons
import os.path
import pprint
import re
# TODO: remove when we delete the last WriteList() call in this module
WriteList = SCons.WriteList

# Path/name strings GYP substitutes into targets, expressed as SCons
# construction variables so they resolve at SCons build time.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '',
    'STATIC_LIB_PREFIX': '${LIBPREFIX}',
    'SHARED_LIB_PREFIX': '${SHLIBPREFIX}',
    'STATIC_LIB_SUFFIX': '${LIBSUFFIX}',
    'SHARED_LIB_SUFFIX': '${SHLIBSUFFIX}',
    'INTERMEDIATE_DIR': '${INTERMEDIATE_DIR}',
    'SHARED_INTERMEDIATE_DIR': '${SHARED_INTERMEDIATE_DIR}',
    'OS': 'linux',
    'PRODUCT_DIR': '$TOP_BUILDDIR',
    'SHARED_LIB_DIR': '$LIB_DIR',
    'LIB_DIR': '$LIB_DIR',
    'RULE_INPUT_ROOT': '${SOURCE.filebase}',
    'RULE_INPUT_DIRNAME': '${SOURCE.dir}',
    'RULE_INPUT_EXT': '${SOURCE.suffix}',
    'RULE_INPUT_NAME': '${SOURCE.file}',
    'RULE_INPUT_PATH': '${SOURCE.abspath}',
    'CONFIGURATION_NAME': '${CONFIG_NAME}',
}

# Tell GYP how to process the input for us.
generator_handles_variants = True
generator_wants_absolute_build_file_paths = True
def FixPath(path, prefix):
    """Prepend *prefix* to *path* unless the path is absolute or a SCons
    construction variable reference (starts with '$').

    Args:
        path: a file system path; may be '' (returned unchanged).
        prefix: directory prefix to prepend to relative paths.

    Returns:
        The adjusted path.
    """
    # Guard against empty paths: the original indexed path[0] directly,
    # which raises IndexError on ''.
    if path and not os.path.isabs(path) and not path.startswith('$'):
        path = prefix + path
    return path
header = """\
# This file is generated; do not edit.
"""
_alias_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Alias(
['_%(target_name)s_action'],
%(inputs)s,
_action
)
env.AlwaysBuild(_outputs)
"""
_run_as_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
"""
_run_as_template_suffix = """
_run_as_target = env.Alias('run_%(target_name)s', target_files, _action)
env.Requires(_run_as_target, [
Alias('%(target_name)s'),
])
env.AlwaysBuild(_run_as_target)
"""
_command_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Command(
%(outputs)s,
%(inputs)s,
_action
)
"""
# This is copied from the default SCons action, updated to handle symlinks.
_copy_action_template = """
import shutil
import SCons.Action
def _copy_files_or_dirs_or_symlinks(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, dest)
return 0
elif os.path.isfile(src):
return shutil.copy2(src, dest)
else:
return shutil.copytree(src, dest, 1)
def _copy_files_or_dirs_or_symlinks_str(dest, src):
return 'Copying %s to %s ...' % (src, dest)
GYPCopy = SCons.Action.ActionFactory(_copy_files_or_dirs_or_symlinks,
_copy_files_or_dirs_or_symlinks_str,
convert=str)
"""
_rule_template = """
%(name)s_additional_inputs = %(inputs)s
%(name)s_outputs = %(outputs)s
def %(name)s_emitter(target, source, env):
return (%(name)s_outputs, source + %(name)s_additional_inputs)
if GetOption('verbose'):
%(name)s_action = Action([%(action)s])
else:
%(name)s_action = Action([%(action)s], %(message)s)
env['BUILDERS']['%(name)s'] = Builder(action=%(name)s_action,
emitter=%(name)s_emitter)
_outputs = []
_processed_input_files = []
for infile in input_files:
if (type(infile) == type('')
and not os.path.isabs(infile)
and not infile[0] == '$'):
infile = %(src_dir)r + infile
if str(infile).endswith('.%(extension)s'):
_generated = env.%(name)s(infile)
env.Precious(_generated)
_outputs.append(_generated)
%(process_outputs_as_sources_line)s
else:
_processed_input_files.append(infile)
prerequisites.extend(_outputs)
input_files = _processed_input_files
"""
_spawn_hack = """
import re
import SCons.Platform.posix
needs_shell = re.compile('["\\'><!^&]')
def gyp_spawn(sh, escape, cmd, args, env):
def strip_scons_quotes(arg):
if arg[0] == '"' and arg[-1] == '"':
return arg[1:-1]
return arg
stripped_args = [strip_scons_quotes(a) for a in args]
if needs_shell.search(' '.join(stripped_args)):
return SCons.Platform.posix.exec_spawnvpe([sh, '-c', ' '.join(args)], env)
else:
return SCons.Platform.posix.exec_spawnvpe(stripped_args, env)
"""
def EscapeShellArgument(s):
    """Quotes an argument so that it will be interpreted literally by a POSIX
    shell. Taken from
    http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
    """
    # Each embedded single quote becomes '\'' (close, escaped quote, reopen);
    # the whole result is wrapped in single quotes.
    return "'%s'" % s.replace("'", "'\\''")
def InvertNaiveSConsQuoting(s):
    """SCons tries to "help" with quoting by naively putting double-quotes around
    command-line arguments containing space or tab, which is broken for all
    but trivial cases, so we undo it. (See quote_spaces() in Subst.py)"""
    contains_whitespace = ' ' in s or '\t' in s
    if contains_whitespace:
        # SCons will put double-quotes around this, so add our own quotes
        # to close its quotes at the beginning and end.
        s = '"%s"' % s
    return s
def EscapeSConsVariableExpansion(s):
    """SCons has its own variable expansion syntax using $. We must escape it for
    strings to be interpreted literally. For some reason this requires four
    dollar signs, not two, even without the shell involved."""
    # Two rounds of SCons substitution each halve the dollar count.
    return s.replace('$', '$$$$')
def EscapeCppDefine(s):
    """Escapes a CPP define so that it will reach the compiler unaltered."""
    # Shell-quote, then undo SCons' naive re-quoting, then escape SCons'
    # $-expansion; order matters.
    for transform in (EscapeShellArgument,
                      InvertNaiveSConsQuoting,
                      EscapeSConsVariableExpansion):
        s = transform(s)
    return s
def GenerateConfig(fp, config, indent='', src_dir=''):
    """
    Generates SCons dictionary items for a gyp configuration.

    This provides the main translation between the (lower-case) gyp settings
    keywords and the (upper-case) SCons construction variables.

    Args:
        fp: open file object to write the SConscript fragment to.
        config: gyp configuration dict to translate.
        indent: leading indentation for each emitted line.
        src_dir: source directory prefix applied to relative include dirs.
    """
    var_mapping = {
        'ASFLAGS' : 'asflags',
        'CCFLAGS' : 'cflags',
        'CFLAGS' : 'cflags_c',
        'CXXFLAGS' : 'cflags_cc',
        'CPPDEFINES' : 'defines',
        'CPPPATH' : 'include_dirs',
        # Add the ldflags value to $LINKFLAGS, but not $SHLINKFLAGS.
        # SCons defines $SHLINKFLAGS to incorporate $LINKFLAGS, so
        # listing both here would case 'ldflags' to get appended to
        # both, and then have it show up twice on the command line.
        'LINKFLAGS' : 'ldflags',
    }
    postamble='\n%s],\n' % indent
    for scons_var in sorted(var_mapping.keys()):
        gyp_var = var_mapping[scons_var]
        value = config.get(gyp_var)
        if value:
            if gyp_var in ('defines',):
                value = [EscapeCppDefine(v) for v in value]
            if gyp_var in ('include_dirs',):
                if src_dir and not src_dir.endswith('/'):
                    src_dir += '/'
                result = []
                for v in value:
                    v = FixPath(v, src_dir)
                    # Force SCons to evaluate the CPPPATH directories at
                    # SConscript-read time, so delayed evaluation of $SRC_DIR
                    # doesn't point it to the --generator-output= directory.
                    result.append('env.Dir(%r)' % v)
                value = result
            else:
                # NOTE(review): under Python 3 map() returns an iterator;
                # this module predates Py3 -- confirm WriteList accepts a
                # non-list iterable before running it on Py3.
                value = map(repr, value)
            WriteList(fp,
                      value,
                      prefix=indent,
                      preamble='%s%s = [\n ' % (indent, scons_var),
                      postamble=postamble)
def GenerateSConscript(output_filename, spec, build_file, build_file_data):
  """
  Generates a SConscript file for a specific target.

  This generates a SConscript file suitable for building any or all of
  the target's configurations.

  A SConscript file may be called multiple times to generate targets for
  multiple configurations.  Consequently, it needs to be ready to build
  the target for any requested configuration, and therefore contains
  information about the settings for all configurations (generated into
  the SConscript file at gyp configuration time) as well as logic for
  selecting (at SCons build time) the specific configuration being built.

  The general outline of a generated SConscript file is:

    --  Header

    --  Import 'env'.  This contains a $CONFIG_NAME construction
        variable that specifies what configuration to build
        (e.g. Debug, Release).

    --  Configurations.  This is a dictionary with settings for
        the different configurations (Debug, Release) under which this
        target can be built.  The values in the dictionary are themselves
        dictionaries specifying what construction variables should added
        to the local copy of the imported construction environment
        (Append), should be removed (FilterOut), and should outright
        replace the imported values (Replace).

    --  Clone the imported construction environment and update
        with the proper configuration settings.

    --  Initialize the lists of the targets' input files and prerequisites.

    --  Target-specific actions and rules.  These come after the
        input file and prerequisite initializations because the
        outputs of the actions and rules may affect the input file
        list (process_outputs_as_sources) and get added to the list of
        prerequisites (so that they're guaranteed to be executed before
        building the target).

    --  Call the Builder for the target itself.

    --  Arrange for any copies to be made into installation directories.

    --  Set up the {name} Alias (phony Node) for the target as the
        primary handle for building all of the target's pieces.

    --  Use env.Require() to make sure the prerequisites (explicitly
        specified, but also including the actions and rules) are built
        before the target itself.

    --  Return the {name} Alias to the calling SConstruct file
        so it can be added to the list of default targets.
  """
  scons_target = SCons.Target(spec)

  # Resolve where the generated SConscript will live; makedirs() below
  # creates it on demand.
  gyp_dir = os.path.dirname(output_filename)
  if not gyp_dir:
    gyp_dir = '.'
  gyp_dir = os.path.abspath(gyp_dir)

  # Compute the paths used inside the generated file.  src_subdir is the
  # $SRC_DIR-relative directory of the originating .gyp file; the trailing
  # '_' variant already carries the '/' separator for FixPath() calls.
  output_dir = os.path.dirname(output_filename)
  src_dir = build_file_data['_DEPTH']
  src_dir_rel = gyp.common.RelativePath(src_dir, output_dir)
  subdir = gyp.common.RelativePath(os.path.dirname(build_file), src_dir)
  src_subdir = '$SRC_DIR/' + subdir
  src_subdir_ = src_subdir + '/'

  component_name = os.path.splitext(os.path.basename(build_file))[0]
  target_name = spec['target_name']

  if not os.path.exists(gyp_dir):
    os.makedirs(gyp_dir)
  fp = open(output_filename, 'w')
  fp.write(header)

  fp.write('\nimport os\n')
  fp.write('\nImport("env")\n')

  # Clone the imported environment, tagging it with this target's identity.
  fp.write('\n')
  fp.write('env = env.Clone(COMPONENT_NAME=%s,\n' % repr(component_name))
  fp.write('                TARGET_NAME=%s)\n' % repr(target_name))

  # Emit the long-command-line spawn workaround once if any configuration
  # asks for it.
  for config in spec['configurations'].itervalues():
    if config.get('scons_line_length'):
      fp.write(_spawn_hack)
      break

  # Write the per-configuration settings dictionary (Append / FilterOut /
  # Replace / ImportExternal / PropagateExternal sections per config).
  indent = ' ' * 12
  fp.write('\n')
  fp.write('configurations = {\n')
  for config_name, config in spec['configurations'].iteritems():
    fp.write('    \'%s\' : {\n' % config_name)

    fp.write('        \'Append\' : dict(\n')
    GenerateConfig(fp, config, indent, src_subdir)
    libraries = spec.get('libraries')
    if libraries:
      WriteList(fp,
                map(repr, libraries),
                prefix=indent,
                preamble='%sLIBS = [\n    ' % indent,
                postamble='\n%s],\n' % indent)
    fp.write('        ),\n')

    fp.write('        \'FilterOut\' : dict(\n' )
    for key, var in config.get('scons_remove', {}).iteritems():
      fp.write('             %s = %s,\n' % (key, repr(var)))
    fp.write('        ),\n')

    fp.write('        \'Replace\' : dict(\n' )
    scons_settings = config.get('scons_variable_settings', {})
    for key in sorted(scons_settings.keys()):
      val = pprint.pformat(scons_settings[key])
      fp.write('             %s = %s,\n' % (key, val))
    # C++ links must use the C++ driver, not the default $CC-based $LINK.
    if 'c++' in spec.get('link_languages', []):
      fp.write('             %s = %s,\n' % ('LINK', repr('$CXX')))
    if config.get('scons_line_length'):
      fp.write('             SPAWN = gyp_spawn,\n')
    fp.write('        ),\n')

    fp.write('        \'ImportExternal\' : [\n' )
    for var in config.get('scons_import_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')

    fp.write('        \'PropagateExternal\' : [\n' )
    for var in config.get('scons_propagate_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')

    fp.write('    },\n')
  fp.write('}\n')

  # Apply the selected configuration's settings at SConscript-read time.
  fp.write('\n'
           'config = configurations[env[\'CONFIG_NAME\']]\n'
           'env.Append(**config[\'Append\'])\n'
           'env.FilterOut(**config[\'FilterOut\'])\n'
           'env.Replace(**config[\'Replace\'])\n')

  fp.write('\n'
           '# Scons forces -fPIC for SHCCFLAGS on some platforms.\n'
           '# Disable that so we can control it from cflags in gyp.\n'
           '# Note that Scons itself is inconsistent with its -fPIC\n'
           '# setting. SHCCFLAGS forces -fPIC, and SHCFLAGS does not.\n'
           '# This will make SHCCFLAGS consistent with SHCFLAGS.\n'
           'env[\'SHCCFLAGS\'] = [\'$CCFLAGS\']\n')

  # ImportExternal copies command-line/environment values into env;
  # PropagateExternal additionally forwards os.environ values into the
  # execution environment (env['ENV']).
  fp.write('\n'
           'for _var in config[\'ImportExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[_var] = os.environ[_var]\n'
           'for _var in config[\'PropagateExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[\'ENV\'][_var] = os.environ[_var]\n')

  fp.write('\n'
           "env['ENV']['LD_LIBRARY_PATH'] = env.subst('$LIB_DIR')\n")

  # Commented out in the original; retained for reference.
  #fp.write("\nif env.has_key('CPPPATH'):\n")
  #fp.write("  env['CPPPATH'] = map(env.Dir, env['CPPPATH'])\n")

  # Variants: optional setting groups enabled via SETTING=1 on the
  # command line.
  variants = spec.get('variants', {})
  for setting in sorted(variants.keys()):
    if_fmt = 'if ARGUMENTS.get(%s) not in (None, \'0\'):\n'
    fp.write('\n')
    fp.write(if_fmt % repr(setting.upper()))
    fp.write('  env.AppendUnique(\n')
    GenerateConfig(fp, variants[setting], indent, src_subdir)
    fp.write('  )\n')

  scons_target.write_input_files(fp)

  fp.write('\n')
  fp.write('target_files = []\n')
  prerequisites = spec.get('scons_prerequisites', [])
  fp.write('prerequisites = %s\n' % pprint.pformat(prerequisites))

  # Emit each gyp 'action' as an env.Command (when it has outputs) or an
  # Alias (when it does not).
  actions = spec.get('actions', [])
  for action in actions:
    a = ['cd', src_subdir, '&&'] + action['action']
    message = action.get('message')
    if message:
      message = repr(message)
    inputs = [FixPath(f, src_subdir_) for f in action.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in action.get('outputs', [])]
    if outputs:
      template = _command_template
    else:
      template = _alias_template
    fp.write(template % {
                 'inputs' : pprint.pformat(inputs),
                 'outputs' : pprint.pformat(outputs),
                 'action' : pprint.pformat(a),
                 'message' : message,
                 'target_name': target_name,
             })
    # Feed the action's outputs back into the build's input/prereq lists.
    if int(action.get('process_outputs_as_sources', 0)):
      fp.write('input_files.extend(_outputs)\n')
      fp.write('prerequisites.extend(_outputs)\n')
      fp.write('target_files.extend(_outputs)\n')

  # Emit each gyp 'rule' via the rule template; poas_line chooses whether
  # generated files replace or accompany the matched input file.
  rules = spec.get('rules', [])
  for rule in rules:
    name = rule['rule_name']
    a = ['cd', src_subdir, '&&'] + rule['action']
    message = rule.get('message')
    if message:
      message = repr(message)
    if int(rule.get('process_outputs_as_sources', 0)):
      poas_line = '_processed_input_files.extend(_generated)'
    else:
      poas_line = '_processed_input_files.append(infile)'
    inputs = [FixPath(f, src_subdir_) for f in rule.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in rule.get('outputs', [])]
    fp.write(_rule_template % {
                 'inputs' : pprint.pformat(inputs),
                 'outputs' : pprint.pformat(outputs),
                 'action' : pprint.pformat(a),
                 'extension' : rule['extension'],
                 'name' : name,
                 'message' : message,
                 'process_outputs_as_sources_line' : poas_line,
                 'src_dir' : src_subdir_,
             })

  scons_target.write_target(fp, src_subdir)

  # Emit GYPCopy commands for the 'copies' section; both keys are required.
  copies = spec.get('copies', [])
  if copies:
    fp.write(_copy_action_template)
  for copy in copies:
    destdir = None
    files = None
    try:
      destdir = copy['destination']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e,
        "Required 'destination' key missing for 'copies' in %s." % build_file)
      raise
    try:
      files = copy['files']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e, "Required 'files' key missing for 'copies' in %s." % build_file)
      raise
    if not files:
      # TODO:  should probably add a (suppressible) warning;
      # a null file list may be unintentional.
      continue
    if not destdir:
      raise Exception(
        "Required 'destination' key is empty for 'copies' in %s." % build_file)

    fmt = ('\n'
           '_outputs = env.Command(%s,\n'
           '                       %s,\n'
           '                       GYPCopy(\'$TARGET\', \'$SOURCE\'))\n')
    for f in copy['files']:
      # Remove trailing separators so basename() acts like Unix basename and
      # always returns the last element, whether a file or dir. Without this,
      # only the contents, not the directory itself, are copied (and nothing
      # might be copied if dest already exists, since scons thinks nothing needs
      # to be done).
      dest = os.path.join(destdir, os.path.basename(f.rstrip(os.sep)))
      f = FixPath(f, src_subdir_)
      dest = FixPath(dest, src_subdir_)

      fp.write(fmt % (repr(dest), repr(f)))
      fp.write('target_files.extend(_outputs)\n')

  # 'run_as' support: wrap the action with its environment settings and a
  # cd into the (gyp-dir-relative) working directory.
  run_as = spec.get('run_as')
  if run_as:
    action = run_as.get('action', [])
    working_directory = run_as.get('working_directory')
    if not working_directory:
      working_directory = gyp_dir
    else:
      if not os.path.isabs(working_directory):
        working_directory = os.path.normpath(os.path.join(gyp_dir,
                                                          working_directory))
    if run_as.get('environment'):
      for (key, val) in run_as.get('environment').iteritems():
        action = ['%s="%s"' % (key, val)] + action
    action = ['cd', '"%s"' % working_directory, '&&'] + action
    fp.write(_run_as_template % {
      'action' : pprint.pformat(action),
      'message' : run_as.get('message', ''),
    })

  # The phony Alias is the primary handle for building this target.
  fmt = "\ngyp_target = env.Alias('%s', target_files)\n"
  fp.write(fmt % target_name)

  dependencies = spec.get('scons_dependencies', [])
  if dependencies:
    WriteList(fp, dependencies, preamble='dependencies = [\n    ',
                                postamble='\n]\n')
    fp.write('env.Requires(target_files, dependencies)\n')
    fp.write('env.Requires(gyp_target, dependencies)\n')
    fp.write('for prerequisite in prerequisites:\n')
    fp.write('  env.Requires(prerequisite, dependencies)\n')
  fp.write('env.Requires(gyp_target, prerequisites)\n')

  if run_as:
    fp.write(_run_as_template_suffix % {
      'target_name': target_name,
    })

  fp.write('Return("gyp_target")\n')

  fp.close()
#############################################################################
# TEMPLATE BEGIN
_wrapper_template = """\
__doc__ = '''
Wrapper configuration for building this entire "solution,"
including all the specific targets in various *.scons files.
'''
import os
import sys
import SCons.Environment
import SCons.Util
def GetProcessorCount():
'''
Detects the number of CPUs on the system. Adapted form:
http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
'''
# Linux, Unix and Mac OS X:
if hasattr(os, 'sysconf'):
if os.sysconf_names.has_key('SC_NPROCESSORS_ONLN'):
# Linux and Unix or Mac OS X with python >= 2.5:
return os.sysconf('SC_NPROCESSORS_ONLN')
else: # Mac OS X with Python < 2.5:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key('NUMBER_OF_PROCESSORS'):
return max(int(os.environ.get('NUMBER_OF_PROCESSORS', '1')), 1)
return 1 # Default
# Support PROGRESS= to show progress in different ways.
p = ARGUMENTS.get('PROGRESS')
if p == 'spinner':
Progress(['/\\r', '|\\r', '\\\\\\r', '-\\r'],
interval=5,
file=open('/dev/tty', 'w'))
elif p == 'name':
Progress('$TARGET\\r', overwrite=True, file=open('/dev/tty', 'w'))
# Set the default -j value based on the number of processors.
SetOption('num_jobs', GetProcessorCount() + 1)
# Have SCons use its cached dependency information.
SetOption('implicit_cache', 1)
# Only re-calculate MD5 checksums if a timestamp has changed.
Decider('MD5-timestamp')
# Since we set the -j value by default, suppress SCons warnings about being
# unable to support parallel build on versions of Python with no threading.
default_warnings = ['no-no-parallel-support']
SetOption('warn', default_warnings + GetOption('warn'))
AddOption('--mode', nargs=1, dest='conf_list', default=[],
action='append', help='Configuration to build.')
AddOption('--verbose', dest='verbose', default=False,
action='store_true', help='Verbose command-line output.')
#
sconscript_file_map = %(sconscript_files)s
class LoadTarget:
'''
Class for deciding if a given target sconscript is to be included
based on a list of included target names, optionally prefixed with '-'
to exclude a target name.
'''
def __init__(self, load):
'''
Initialize a class with a list of names for possible loading.
Arguments:
load: list of elements in the LOAD= specification
'''
self.included = set([c for c in load if not c.startswith('-')])
self.excluded = set([c[1:] for c in load if c.startswith('-')])
if not self.included:
self.included = set(['all'])
def __call__(self, target):
'''
Returns True if the specified target's sconscript file should be
loaded, based on the initialized included and excluded lists.
'''
return (target in self.included or
('all' in self.included and not target in self.excluded))
if 'LOAD' in ARGUMENTS:
load = ARGUMENTS['LOAD'].split(',')
else:
load = []
load_target = LoadTarget(load)
sconscript_files = []
for target, sconscript in sconscript_file_map.iteritems():
if load_target(target):
sconscript_files.append(sconscript)
target_alias_list= []
conf_list = GetOption('conf_list')
if conf_list:
# In case the same --mode= value was specified multiple times.
conf_list = list(set(conf_list))
else:
conf_list = [%(default_configuration)r]
sconsbuild_dir = Dir(%(sconsbuild_dir)s)
def FilterOut(self, **kw):
kw = SCons.Environment.copy_non_reserved_keywords(kw)
for key, val in kw.items():
envval = self.get(key, None)
if envval is None:
# No existing variable in the environment, so nothing to delete.
continue
for vremove in val:
# Use while not if, so we can handle duplicates.
while vremove in envval:
envval.remove(vremove)
self[key] = envval
# TODO(sgk): SCons.Environment.Append() has much more logic to deal
# with various types of values. We should handle all those cases in here
# too. (If variable is a dict, etc.)
non_compilable_suffixes = {
'LINUX' : set([
'.bdic',
'.css',
'.dat',
'.fragment',
'.gperf',
'.h',
'.hh',
'.hpp',
'.html',
'.hxx',
'.idl',
'.in',
'.in0',
'.in1',
'.js',
'.mk',
'.rc',
'.sigs',
'',
]),
'WINDOWS' : set([
'.h',
'.hh',
'.hpp',
'.dat',
'.idl',
'.in',
'.in0',
'.in1',
]),
}
def compilable(env, file):
base, ext = os.path.splitext(str(file))
if ext in non_compilable_suffixes[env['TARGET_PLATFORM']]:
return False
return True
def compilable_files(env, sources):
return [x for x in sources if compilable(env, x)]
def GypProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def GypTestProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(*result)
return result
def GypLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Library(target, source, *args, **kw)
return result
def GypLoadableModule(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.LoadableModule(target, source, *args, **kw)
return result
def GypStaticLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.StaticLibrary(target, source, *args, **kw)
return result
def GypSharedLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.SharedLibrary(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def add_gyp_methods(env):
env.AddMethod(GypProgram)
env.AddMethod(GypTestProgram)
env.AddMethod(GypLibrary)
env.AddMethod(GypLoadableModule)
env.AddMethod(GypStaticLibrary)
env.AddMethod(GypSharedLibrary)
env.AddMethod(FilterOut)
env.AddMethod(compilable)
base_env = Environment(
tools = %(scons_tools)s,
INTERMEDIATE_DIR='$OBJ_DIR/${COMPONENT_NAME}/_${TARGET_NAME}_intermediate',
LIB_DIR='$TOP_BUILDDIR/lib',
OBJ_DIR='$TOP_BUILDDIR/obj',
SCONSBUILD_DIR=sconsbuild_dir.abspath,
SHARED_INTERMEDIATE_DIR='$OBJ_DIR/_global_intermediate',
SRC_DIR=Dir(%(src_dir)r),
TARGET_PLATFORM='LINUX',
TOP_BUILDDIR='$SCONSBUILD_DIR/$CONFIG_NAME',
LIBPATH=['$LIB_DIR'],
)
if not GetOption('verbose'):
base_env.SetDefault(
ARCOMSTR='Creating library $TARGET',
ASCOMSTR='Assembling $TARGET',
CCCOMSTR='Compiling $TARGET',
CONCATSOURCECOMSTR='ConcatSource $TARGET',
CXXCOMSTR='Compiling $TARGET',
LDMODULECOMSTR='Building loadable module $TARGET',
LINKCOMSTR='Linking $TARGET',
MANIFESTCOMSTR='Updating manifest for $TARGET',
MIDLCOMSTR='Compiling IDL $TARGET',
PCHCOMSTR='Precompiling $TARGET',
RANLIBCOMSTR='Indexing $TARGET',
RCCOMSTR='Compiling resource $TARGET',
SHCCCOMSTR='Compiling $TARGET',
SHCXXCOMSTR='Compiling $TARGET',
SHLINKCOMSTR='Linking $TARGET',
SHMANIFESTCOMSTR='Updating manifest for $TARGET',
)
add_gyp_methods(base_env)
for conf in conf_list:
env = base_env.Clone(CONFIG_NAME=conf)
SConsignFile(env.File('$TOP_BUILDDIR/.sconsign').abspath)
for sconscript in sconscript_files:
target_alias = env.SConscript(sconscript, exports=['env'])
if target_alias:
target_alias_list.extend(target_alias)
Default(Alias('all', target_alias_list))
help_fmt = '''
Usage: hammer [SCONS_OPTIONS] [VARIABLES] [TARGET] ...
Local command-line build options:
--mode=CONFIG Configuration to build:
--mode=Debug [default]
--mode=Release
--verbose Print actual executed command lines.
Supported command-line build variables:
LOAD=[module,...] Comma-separated list of components to load in the
dependency graph ('-' prefix excludes)
PROGRESS=type Display a progress indicator:
name: print each evaluated target name
spinner: print a spinner every 5 targets
The following TARGET names can also be used as LOAD= module names:
%%s
'''
if GetOption('help'):
def columnar_text(items, width=78, indent=2, sep=2):
result = []
colwidth = max(map(len, items)) + sep
cols = (width - indent) / colwidth
if cols < 1:
cols = 1
rows = (len(items) + cols - 1) / cols
indent = '%%*s' %% (indent, '')
sep = indent
for row in xrange(0, rows):
result.append(sep)
for i in xrange(row, len(items), rows):
result.append('%%-*s' %% (colwidth, items[i]))
sep = '\\n' + indent
result.append('\\n')
return ''.join(result)
load_list = set(sconscript_file_map.keys())
target_aliases = set(map(str, target_alias_list))
common = load_list and target_aliases
load_only = load_list - common
target_only = target_aliases - common
help_text = [help_fmt %% columnar_text(sorted(list(common)))]
if target_only:
fmt = "The following are additional TARGET names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(target_only))))
if load_only:
fmt = "The following are additional LOAD= module names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(load_only))))
Help(''.join(help_text))
"""
# TEMPLATE END
#############################################################################
def GenerateSConscriptWrapper(build_file, build_file_data, name,
                              output_filename, sconscript_files,
                              default_configuration):
  """
  Generates the "wrapper" SConscript file (analogous to the Visual Studio
  solution) that calls all the individual target SConscript files.

  Arguments:
    build_file: path of the .gyp file this wrapper corresponds to.
    build_file_data: parsed .gyp contents; '_DEPTH' and 'scons_settings'
        are consulted.
    name: solution name, substituted into the wrapper template.
    output_filename: path of the wrapper .scons file to write; a sibling
        SConstruct is also generated next to it.
    sconscript_files: dict mapping target name -> target .scons file path.
    default_configuration: configuration built when no --mode= is given.
  """
  output_dir = os.path.dirname(output_filename)
  src_dir = build_file_data['_DEPTH']
  src_dir_rel = gyp.common.RelativePath(src_dir, output_dir)
  if not src_dir_rel:
    src_dir_rel = '.'
  scons_settings = build_file_data.get('scons_settings', {})
  sconsbuild_dir = scons_settings.get('sconsbuild_dir', '#')
  scons_tools = scons_settings.get('tools', ['default'])

  # Render the target -> sconscript map as a dict() literal for the template.
  sconscript_file_lines = ['dict(']
  for target in sorted(sconscript_files.keys()):
    sconscript = sconscript_files[target]
    sconscript_file_lines.append('    %s = %r,' % (target, sconscript))
  sconscript_file_lines.append(')')

  fp = open(output_filename, 'w')
  fp.write(header)
  fp.write(_wrapper_template % {
      'default_configuration' : default_configuration,
      'name' : name,
      'scons_tools' : repr(scons_tools),
      'sconsbuild_dir' : repr(sconsbuild_dir),
      'sconscript_files' : '\n'.join(sconscript_file_lines),
      'src_dir' : src_dir_rel,
  })
  fp.close()

  # Generate the SConstruct file that invokes the wrapper SConscript.
  # (Local renamed from 'dir' to avoid shadowing the dir() builtin.)
  output_dirname, fname = os.path.split(output_filename)
  SConstruct = os.path.join(output_dirname, 'SConstruct')
  fp = open(SConstruct, 'w')
  fp.write(header)
  fp.write('SConscript(%s)\n' % repr(fname))
  fp.close()
def TargetFilename(target, build_file=None, output_suffix=''):
  """Returns the .scons file name for the specified target.
  """
  # When no build_file is given, 'target' is a fully-qualified target
  # string from which both pieces are extracted.
  if build_file is None:
    build_file, target = gyp.common.ParseQualifiedTarget(target)[:2]
  basename = target + output_suffix + '.scons'
  return os.path.join(os.path.dirname(build_file), basename)
def GenerateOutput(target_list, target_dicts, data, params):
  """
  Generates all the output files for the specified targets.

  Writes one per-target SConscript (via GenerateSConscript) and, for each
  .gyp build file, one wrapper SConscript (via GenerateSConscriptWrapper).

  Arguments:
    target_list: list of fully-qualified target strings.
    target_dicts: dict mapping qualified target -> target spec dict.
    data: dict mapping build file path -> parsed .gyp contents.
    params: generator parameters; 'options' and 'cwd' are consulted.
  """
  options = params['options']

  # With --generator-output, redirect files out of the source tree.
  if options.generator_output:
    def output_path(filename):
      return filename.replace(params['cwd'], options.generator_output)
  else:
    def output_path(filename):
      return filename

  default_configuration = None

  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in scons build (target %s)' %
          qualified_target)
    scons_target = SCons.Target(spec)
    if scons_target.is_ignored:
      continue

    # TODO:  assumes the default_configuration of the first target
    # non-Default target is the correct default for all targets.
    # Need a better model for handle variation between targets.
    if (not default_configuration and
        spec['default_configuration'] != 'Default'):
      default_configuration = spec['default_configuration']

    build_file, target = gyp.common.ParseQualifiedTarget(qualified_target)[:2]
    output_file = TargetFilename(target, build_file, options.suffix)
    if options.generator_output:
      output_file = output_path(output_file)

    # Fixed: spec.has_key() is deprecated (and removed in Python 3);
    # use the 'in' operator instead.
    if 'libraries' not in spec:
      spec['libraries'] = []

    # Add dependent static library targets to the 'libraries' value.
    deps = spec.get('dependencies', [])
    spec['scons_dependencies'] = []
    for d in deps:
      td = target_dicts[d]
      target_name = td['target_name']
      spec['scons_dependencies'].append("Alias('%s')" % target_name)
      if td['type'] in ('static_library', 'shared_library'):
        libname = td.get('product_name', target_name)
        spec['libraries'].append('lib' + libname)
      if td['type'] == 'loadable_module':
        prereqs = spec.get('scons_prerequisites', [])
        # TODO:  parameterize with <(SHARED_LIBRARY_*) variables?
        td_target = SCons.Target(td)
        td_target.target_prefix = '${SHLIBPREFIX}'
        td_target.target_suffix = '${SHLIBSUFFIX}'

    GenerateSConscript(output_file, spec, build_file, data[build_file])

  if not default_configuration:
    default_configuration = 'Default'

  # One wrapper per .gyp build file, collecting its (non-ignored) targets.
  for build_file in sorted(data.keys()):
    path, ext = os.path.splitext(build_file)
    if ext != '.gyp':
      continue
    output_dir, basename = os.path.split(path)
    output_filename = path + '_main' + options.suffix + '.scons'

    all_targets = gyp.common.AllTargets(target_list, target_dicts, build_file)
    sconscript_files = {}
    for t in all_targets:
      scons_target = SCons.Target(target_dicts[t])
      if scons_target.is_ignored:
        continue
      bf, target = gyp.common.ParseQualifiedTarget(t)[:2]
      target_filename = TargetFilename(target, bf, options.suffix)
      tpath = gyp.common.RelativePath(target_filename, output_dir)
      sconscript_files[target] = tpath

    output_filename = output_path(output_filename)
    if sconscript_files:
      GenerateSConscriptWrapper(build_file, data[build_file], basename,
                                output_filename, sconscript_files,
                                default_configuration)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Add the two new free-text fields to UserProfile.
    for column in ('college', 'patent'):
        db.add_column(u'catalog_userprofile', column,
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the 'college' and 'patent' columns
    from UserProfile (data in them is lost on rollback).
    """
    # Drop in the same order the forward migration added them.
    for column in ('college', 'patent'):
        db.delete_column(u'catalog_userprofile', column)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog']
| |
import functools
import logging
import mimetypes
import os
import datetime
import re
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import MiddlewareNotUsed
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.views import LogoutView
from django.utils.deprecation import MiddlewareMixin
from sentry_sdk import add_breadcrumb
from corehq.apps.domain.models import Domain
from corehq.apps.domain.utils import legacy_domain_re
from corehq.const import OPENROSA_DEFAULT_VERSION
from corehq.util.timer import DURATION_REPORTING_THRESHOLD
from dimagi.utils.logging import notify_exception
from dimagi.utils.modules import to_function
from dimagi.utils.parsing import json_format_datetime, string_to_utc_datetime
try:
import psutil
except ImportError:
psutil = None
# WSGI environ (request.META) keys for the headers relevant to OpenRosa
# clients; OpenRosaMiddleware copies these onto request.openrosa_headers.
# this isn't OR specific, but we like it to be included
OPENROSA_ACCEPT_LANGUAGE = "HTTP_ACCEPT_LANGUAGE"
OPENROSA_VERSION_HEADER = "HTTP_X_OPENROSA_VERSION"
OPENROSA_DATE_HEADER = "HTTP_DATE"
# The set of headers process_request() looks for on incoming requests.
OPENROSA_HEADERS = [OPENROSA_VERSION_HEADER, OPENROSA_DATE_HEADER, OPENROSA_ACCEPT_LANGUAGE]
class OpenRosaMiddleware(MiddlewareMixin):
    """
    Middleware to support OpenRosa request/response standards compliance
    https://bitbucket.org/javarosa/javarosa/wiki/OpenRosaRequest
    """

    def process_request(self, request):
        # Expose any OpenRosa-relevant headers present on the request as a
        # first-class property, for convenient access by downstream views.
        request.openrosa_headers = {
            name: request.META[name]
            for name in OPENROSA_HEADERS
            if name in request.META
        }

    def process_response(self, request, response):
        # Every response advertises the OpenRosa version we speak.
        response[OPENROSA_VERSION_HEADER] = OPENROSA_DEFAULT_VERSION
        return response
# Logger shared by the profiling middlewares below (memory/timing/long-request).
profile_logger = logging.getLogger('profile_middleware')
class MemoryUsageMiddleware(object):
    """Log the change in this process's RSS over the lifetime of a request.

    Stolen and modified from http://stackoverflow.com/a/12254394/8207
    This is a pretty poor, blunt tool and is not recommended to be treated as definitive truth.
    """
    # Lazily-determined flag: None until first checked, then True/False.
    _psutil_installed = None

    def _check_psutil(self):
        """Return whether psutil is importable, warning once on first failure."""
        if self._psutil_installed is None:
            if psutil is None:
                profile_logger.warning('Install dev-requirements (psutil) in order to use MemoryUsageMiddleware')
                self._psutil_installed = False
            else:
                self._psutil_installed = True
        return self._psutil_installed

    def process_request(self, request):
        if self._check_psutil():
            # memory_info() replaced get_memory_info(), which was removed
            # in psutil 3.0 and would raise AttributeError here.
            request._profile_memory = psutil.Process(os.getpid()).memory_info()

    def process_response(self, request, response):
        if self._check_psutil() and hasattr(request, '_profile_memory'):
            mem = psutil.Process(os.getpid()).memory_info()
            # RSS delta in KB; may be negative if memory was released.
            diff = (mem.rss - request._profile_memory.rss) // 1024
            profile_logger.info('{} memory usage {} KB'.format(request.path, diff))
        return response
class TimingMiddleware(object):
    """Log the wall-clock duration of every request via profile_logger."""

    def process_request(self, request):
        # Stamp the request so process_response can compute elapsed time.
        request._profile_starttime = datetime.datetime.utcnow()

    def process_response(self, request, response):
        if not hasattr(request, '_profile_starttime'):
            return response
        elapsed = datetime.datetime.utcnow() - request._profile_starttime
        profile_logger.info('{} time {}'.format(request.path, elapsed), extra={'duration': elapsed})
        return response
class LogLongRequestMiddleware(MiddlewareMixin):
    """Report requests that violate the timing threshold configured for the view.

    Use `corehq.util.timer.set_request_duration_reporting_threshold` to override the
    default threshold for specific views.
    """

    DEFAULT_THRESHOLD = timedelta(minutes=10).total_seconds()  # 10 minutes

    def process_request(self, request):
        request._profile_starttime = datetime.datetime.utcnow()

    def process_view(self, request, view_fn, view_args, view_kwargs):
        # Copy the view's configured threshold (or the default) onto the
        # request so process_response can read it later.
        view_func = get_view_func(view_fn, view_kwargs)
        setattr(
            request,
            DURATION_REPORTING_THRESHOLD,
            getattr(view_func, DURATION_REPORTING_THRESHOLD, self.DEFAULT_THRESHOLD),
        )

    def process_response(self, request, response):
        self._add_timing_breadcrumbs(response)
        self._report_if_above_threshold(request, response)
        return response

    def _add_timing_breadcrumbs(self, response):
        # Attach sub-timer durations to Sentry as "timing" breadcrumbs.
        request_timer = getattr(response, 'request_timer', None)
        if not request_timer:
            return
        for sub in request_timer.to_list(exclude_root=True):
            add_breadcrumb(
                category="timing",
                message=f"{sub.name}: {sub.duration:0.3f}",
                level="info",
            )

    def _report_if_above_threshold(self, request, response):
        # Notify when the measured duration exceeds the per-view threshold.
        if not hasattr(request, '_profile_starttime'):
            return
        duration = datetime.datetime.utcnow() - request._profile_starttime
        threshold = getattr(request, DURATION_REPORTING_THRESHOLD, self.DEFAULT_THRESHOLD)
        if duration.total_seconds() > threshold:
            notify_exception(request, "Request timing above threshold", details={
                'threshold': threshold,
                'duration': duration.total_seconds(),
                'status_code': response.status_code
            })
class TimeoutMiddleware(MiddlewareMixin):
    """Manage "secure sessions": shortened session expiry for domains that
    require it.

    A session becomes secure when any domain relevant to the user requires
    secure sessions; the effective timeout (in minutes) is the minimum of
    the relevant domains' configured timeouts (see ``_get_timeout``).
    """
    @classmethod
    def update_secure_session(cls, session, is_secure, user, domain=None):
        # Record the secure flag and (re)compute the session expiry window.
        session['secure_session'] = is_secure
        timeout = cls._get_timeout(session, is_secure, user, domain)
        session['secure_session_timeout'] = timeout
        # timeout is in minutes; set_expiry expects seconds
        session.set_expiry(timeout * 60)
        session['session_expiry'] = json_format_datetime(session.get_expiry_date())
    @classmethod
    def _get_timeout(cls, session, is_secure, user, domain=None):
        # Non-secure sessions use the global inactivity timeout.
        if not is_secure:
            return settings.INACTIVITY_TIMEOUT
        # Secure sessions: minimum of all relevant domains' timeouts,
        # falling back to the global secure timeout if none configure one.
        domains = cls._get_relevant_domains(user, domain)
        timeouts = list(map(Domain.secure_timeout, domains))
        timeouts = list(filter(None, timeouts))
        # Include timeout in current session, important for users who are not domain members
        # (e.g., superusers) who visited a secure domain and are now looking at a non-secure domain
        if 'secure_session_timeout' in session:
            timeouts.append(session['secure_session_timeout'])
        return min(timeouts) if timeouts else settings.SECURE_TIMEOUT
    @classmethod
    def _get_relevant_domains(cls, couch_user, domain=None):
        # Returns the current domain plus all the user's domains and any
        # enterprise-controlled subdomains of those.
        domains = set()
        # Include current domain, which user may not be a member of
        if domain:
            domains.add(domain)
        if not couch_user:
            return domains
        domains = domains | set(couch_user.get_domains())
        from corehq.apps.enterprise.models import EnterprisePermissions
        subdomains = set()
        # NOTE: the loop variable shadows the `domain` parameter, which has
        # already been folded into `domains` at this point.
        for domain in domains:
            subdomains = subdomains | set(EnterprisePermissions.get_domains(domain))
        return domains | subdomains
    @staticmethod
    def _session_expired(timeout, activity):
        # `activity` is a serialized UTC datetime (or None for "never").
        if activity is None:
            return False
        time = datetime.datetime.utcnow()
        time_since_activity = time - string_to_utc_datetime(activity)
        return time_since_activity > datetime.timedelta(minutes=timeout)
    def process_view(self, request, view_func, view_args, view_kwargs):
        # Anonymous requests have no session policy to enforce.
        if not request.user.is_authenticated:
            return
        secure_session = request.session.get('secure_session')
        domain = getattr(request, "domain", None)
        domain_obj = Domain.get_by_name(domain) if domain else None
        # figure out if we want to switch to secure_sessions
        change_to_secure_session = (
            not secure_session
            and (
                (domain_obj and domain_obj.secure_sessions)
                or any(filter(Domain.is_secure_session_required,
                              self._get_relevant_domains(request.couch_user, domain)))
            )
        )
        secure_session = secure_session or change_to_secure_session
        timeout = self._get_timeout(request.session, secure_session, request.couch_user, domain)
        if change_to_secure_session:
            # force re-authentication if the user has been logged in longer than the secure timeout
            if self._session_expired(timeout, request.user.last_login):
                LogoutView.as_view(template_name=settings.BASE_TEMPLATE)(request)
                # this must be after logout so it is attached to the new session
                self.update_secure_session(request.session, True, request.couch_user, domain)
                return HttpResponseRedirect(reverse('login') + '?next=' + request.path)
            self.update_secure_session(request.session, True, request.couch_user, domain)
        # NOTE(review): when change_to_secure_session is True and sessions are
        # not bypassed, this repeats the update above with the same effective
        # arguments (secure_session is True here) -- presumably harmless, but
        # verify the double update is intentional.
        if not getattr(request, '_bypass_sessions', False):
            self.update_secure_session(request.session, secure_session, request.couch_user, domain)
def always_allow_browser_caching(fn):
    """Decorator: mark a view's responses as safe for browser caching.

    NoCacheMiddleware checks this marker and skips its no-cache headers.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        marked_response = fn(*args, **kwargs)
        marked_response._always_allow_browser_caching = True
        return marked_response
    return wrapper
class NoCacheMiddleware(MiddlewareMixin):
    """Disable client-side caching unless the response was explicitly
    opted in via the `always_allow_browser_caching` decorator.
    """

    def process_response(self, request, response):
        if not self._explicitly_marked_safe(response):
            response['Cache-Control'] = "private, no-cache, no-store, must-revalidate, proxy-revalidate"
            response['Expires'] = "Thu, 01 Dec 1994 16:00:00 GMT"
            response['Pragma'] = "no-cache"
        else:
            max_age = getattr(response, '_cache_max_age', "31536000")
            response['Cache-Control'] = "max-age={}".format(max_age)
            # Strip headers that would defeat or complicate caching.
            del response['Vary']
            del response['Set-Cookie']
            content_type, _ = mimetypes.guess_type(request.path)
            # guess_type returns (None, None) for unknown extensions;
            # assigning None would produce an invalid Content-Type header,
            # so only override when a type was actually guessed.
            if content_type:
                response['Content-Type'] = content_type
            del response['Content-Language']
            response['Content-Length'] = len(response.content)
            # Remove the header OpenRosaMiddleware adds to every response.
            del response[OPENROSA_VERSION_HEADER]
        return response

    @staticmethod
    def _explicitly_marked_safe(response):
        return getattr(response, '_always_allow_browser_caching', False)
class SentryContextMiddleware(MiddlewareMixin):
    """Add details to Sentry context.

    Should be placed after 'corehq.apps.users.middleware.UsersMiddleware'
    """

    def __init__(self, get_response=None):
        super(SentryContextMiddleware, self).__init__(get_response)
        # Disable this middleware entirely when sentry_sdk is unavailable
        # or Sentry has not been configured for this deployment.
        try:
            from sentry_sdk import configure_scope  # noqa: F401
        except ImportError:
            raise MiddlewareNotUsed
        if not getattr(settings, 'SENTRY_CONFIGURED', None):
            raise MiddlewareNotUsed

    def process_view(self, request, view_func, view_args, view_kwargs):
        from sentry_sdk import configure_scope
        couch_user = getattr(request, 'couch_user', None)
        domain = getattr(request, 'domain', None)
        with configure_scope() as scope:
            if couch_user:
                scope.set_extra('couch_user_id', couch_user.get_id)
                scope.set_tag('user.username', couch_user.username)
            if domain:
                scope.set_tag('domain', domain)
class SelectiveSessionMiddleware(SessionMiddleware):
    """SessionMiddleware that makes session persistence a no-op for
    configured "noise" URLs such as polling endpoints, so they don't
    create or refresh sessions.
    """

    def __init__(self, get_response=None):
        super().__init__(get_response)
        regexes = [
            '/favicon.ico$',
            '/ping_login/$',
            '/downloads/temp/ajax/',  # soil polling
            '/downloads/temp/heartbeat/',  # soil status
            # app manager new changes polling
            # BUG FIX: a missing trailing comma here used to implicitly
            # concatenate this pattern with the next one, producing a single
            # broken regex so neither URL was actually bypassed.
            '/a/{domain}/apps/view/[A-Za-z0-9-]+/current_version/$',
            # background request for notification (bell menu in top nav)
            '/hq/notifications/service/$',
        ]
        if settings.BYPASS_SESSIONS_FOR_MOBILE:
            regexes.extend(getattr(settings, 'SESSION_BYPASS_URLS', []))
        self.bypass_re = [
            re.compile(regex.format(domain=legacy_domain_re)) for regex in regexes
        ]

    def _bypass_sessions(self, request):
        return any(rx.match(request.path_info) for rx in self.bypass_re)

    def process_request(self, request):
        super().process_request(request)
        if self._bypass_sessions(request):
            # Neutralize session saving so no cookie/session row is written.
            request.session.save = lambda *x: None
            request._bypass_sessions = True
def get_view_func(view_fn, view_kwargs):
    """Given a view_fn from the `process_view` middleware function return the actual
    function or class that represents the view.

    :returns: the view function or class or None if not able to determine the view class
    """
    if getattr(view_fn, 'is_hq_report', False):  # HQ report
        dispatcher = view_fn.view_class
        domain = view_kwargs.get("domain", None)
        slug = view_kwargs.get("report_slug", None)
        try:
            class_name = dispatcher.get_report_class_name(domain, slug)
            return to_function(class_name) if class_name else None
        except Exception:
            # custom report dispatchers may do things differently; note this
            # must not be a bare `except`, which would also swallow
            # KeyboardInterrupt/SystemExit
            return
    if hasattr(view_fn, "view_class"):  # Django view
        return view_fn.view_class
    return view_fn
| |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: test_euvatid
# Purpose: Test driver for module euvatid
#
# Author: Michael Amrhein (michael@adrhinum.de)
#
# Copyright: (c) 2017 Michael Amrhein
# License: This program is part of a larger application. For license
# details please read the file LICENSE.TXT provided together
# with the application.
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
import unittest
from identifiers.euvatid import EUVATId
# Sample VAT ids that EUVATId must accept, grouped by country code.
# The comment preceding each group shows the format/check-digit pattern(s)
# the ids of that country are validated against.
_VALID_IDS = [
    # 'AT': '^U(?P<base>\d{7})(?P<check>\d)$'
    'ATU13585627',
    'ATU10223006',
    # 'BE': '^(?P<base>0[1-9]\d{6}|1\d{7})(?P<check>\d{2})$'
    'BE0776091951',
    'BE0136695962',
    'BE0111113203',
    'BE1151113846',
    # 'BG': '^(?P<base>\d{8})(?P<check>\d)$'
    # '^(?P<base>\d{2}([024][1-9]|[135][012])(0[1-9]|[12]\d|3[01])\d{3})
    # (?P<check>\d)$'
    # '^(?P<base>\d{9})(?P<check>\d)$'
    'BG7523169263',
    'BG7501020018',
    'BG0542011038',
    'BG123456786',
    # 'CY': '^(?P<base>[013-59]\d{7})(?P<check>[A-Z])$'
    'CY12345678F',
    'CY33333333O',
    'CY93333333C',
    # 'CZ': '^(?P<base>\d{2}([05]\d|[16][0-2])(0[1-9]|[12]\d|3[01])\d{4})
    # (?P<check>)$'
    # '^(?P<base>([0-4]\d|5[0-3])([05]\d|[16][0-2])(0[1-9]|[12]\d|3[01])
    # \d{3})(?P<check>)$'
    # '^(?P<base>6\d{7})(?P<check>\d)$'
    # '^(?P<base>[0-8]\d{6})(?P<check>\d)$'
    'CZ5502080001',
    'CZ0052291536',
    'CZ6852294449',
    'CZ8160080610',
    'CZ110101111',
    'CZ531124000',
    'CZ006031038',
    'CZ633333334',
    'CZ12345679',
    # 'DE': '^(?P<base>\d{8})(?P<check>\d)$'
    'DE111111125',
    'DE136695976',
    # 'DK': '^(?P<base>[1-9]\d{7})(?P<check>)$'
    'DK13585628',
    'DK88146328',
    # 'EE': '^(?P<base>\d{8})(?P<check>\d)$'
    'EE123456780',
    'EE444444442',
    # 'ES': '^(?P<base>[A-H,JVU]\d{7})(?P<check>\d)$'
    # '^(?P<base>[NP-SW]\d{7})(?P<check>[A-J])$'
    # '^(?P<base>\d{8})(?P<check>[A-Z])$'
    # '^(?P<base>[KLMXYZ]\d{7})(?P<check>[A-Z])$'
    'ESA12345674',
    'ESP1234567D',
    'ES12345678Z',
    'ESK1234567L',
    # 'FI': '^(?P<base>\d{7})(?P<check>\d)$'
    'FI12345671',
    'FI09853608',
    # 'FR': '^(?P<check>\d{2})(?P<base>[1-9]{9})$'
    # '^(?P<check>)(?P<base>([A-HJ-NP-Z]\d|\d[A-HJ-NP-Z])[1-9]{9})'
    'FR32123456789',
    'FR2H123456789',
    # 'GB': '^(?P<base>((00|[1-9]\d)\d{7}))(?P<check>)
    # (\d\d[1-9]|\d[1-9]\d|[1-9]\d\d|$)$'
    # '^GD[0-4]\d{2}'
    # '^HA[5-9]\d{2}'
    'GB434031494',
    'GB434031439',
    'GB123456782',
    'GB123456727',
    'GB123456727872',
    'GB001234547',
    'GB001234547238',
    'GBGD123',
    'GBHA629',
    # 'GR': '^(?P<base>\d{7,8})(?P<check>\d)$'
    'GR12345670',
    'GR123456783',
    # 'HR': '^(?P<base>\d{10})(?P<check>\d)$'
    'HR12345678903',
    'HR11111111119',
    'HR00000777773',
    # 'HU': '^(?P<base>[1-9]\d{6})(?P<check>\d)$'
    'HU21376414',
    'HU10597190',
    'HU12345676',
    # 'IE': '^(?P<add>\d)[A-Z+*](?P<base>\d{5})(?P<check>[A-W])'
    # '^(?P<base>\d{7})(?P<check>[A-W])$'
    # '^(?P<base>\d{7})(?P<check>[A-W])(?P<add>[A-I])'
    'IE8Z49289F',
    'IE3628739L',
    'IE3628739UA',
    'IE7A12345J',
    'IE1234567T',
    # 'IT': '^(?P<base>\d{7}(0\d[1-9]|0[1-9]\d|100|12[01]|888|999))
    # (?P<check>\d)$'
    'IT00000010215',
    'IT12345670017',
    'IT12345678887',
    # 'LT': '^(?P<base>\d{10}1)(?P<check>\d)$'
    # '^(?P<base>\d{7}1)(?P<check>\d)$'
    'LT213179412',
    'LT123456715',
    'LT290061371314',
    'LT123456789011',
    # 'LU': '^(?P<base>\d{6})(?P<check>\d{2})$'
    'LU77777706',
    'LU10000356',
    'LU12345613',
    # 'LV': '^(?P<base>[4-9]\d{9})(?P<check>\d)$'
    # '^(?P<base>(0[1-9]|[12]\d|3[01])(0[1-9]|1[0-2])\d{2}[012]\d{4})
    # (?P<check>)$'
    'LV41234567891',
    'LV15066312345',
    'LV29020412345',
    # 'MT': '^(?P<base>[1-9]\d{5})(?P<check>\d{2})$'
    'MT12345634',
    'MT10000125',
    # 'NL': '^(?P<base>\d{8})(?P<check>\d)B(\d[1-9]|[1-9]\d)$'
    'NL123456782B70',
    'NL010000446B01',
    'NL000000012B34',
    # 'PL': '^(?P<base>\d{9})(?P<check>\d)$'
    'PL0123456789',
    'PL5260001246',
    # 'PT': '^(?P<base>[1-9]\d{7})(?P<check>\d)$'
    'PT123456789',
    'PT502757191',
    # 'RO': '^(?P<base>[1-9]\d{8})(?P<check>\d)$'
    # '^(?P<base>[1-9]\d{2}(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])
    # (0[1-9]|[1-3]\d|4[0-7]|5[12])\d{3})(?P<check>\d)$'
    'RO1234567897',
    'RO1630615123457',
    # 'SE': '^(?P<base>\d{9})(?P<check>\d)(0[1-9]|[1-8]\d|9[1-4])'
    'SE123456789701',
    'SE556188840494',
    # 'SI': '^(?P<base>[1-9]\d{6})(?P<check>\d)$'
    'SI12345679',
    'SI15012557',
    # 'SK': '^(?P<base>[1-9]\d[2-47-9]\d{7})(?P<check>)$'
    'SK1234567895',
    'SK4030000007',
]
_INVALID_IDS = [
# 'AT': '^U(?P<base>\d{7})(?P<check>\d)$'
# invalid format
'ATU1234ABC',
'AT012345678',
# invalid check digit
'ATU13585626',
'ATU10223007',
'ATU12345678',
# 'BE': '^(?P<base>[01][1-9]\d{6})(?P<check>\d{2})$'
# invalid format
'BE3123456789',
'BE012345678X',
'BE0012345678',
# invalid check digit
'BE0776091950',
'BE0136695972',
# 'BG': '^(?P<base>\d{8})(?P<check>\d)$'
# '^(?P<base>\d{2}([024][1-9]|[135][012])(0[1-9]|[12]\d|3[01])\d{3})
# (?P<check>\d)$'
# '^(?P<base>\d{9})(?P<check>\d)$'
# invalid format
'BGK123456789',
'BG12345678',
'BG7542011030',
'BG7502290002',
'BG12345678901',
# invalid check digit
'BG7523169266',
'BG7501020017',
'BG0542011030',
'BG123456789',
# 'CY': '^(?P<base>[013-59]\d{7})(?P<check>[A-Z])$'
# invalid format
'CY0X123456B',
'CY23456789N',
'CY67890123K',
'CY123456789',
# invalid check digit
'CY12345678A',
'CY33333333J',
'CY93333333B',
# 'CZ': '^(?P<base>\d{2}([05]\d|[16][0-2])(0[1-9]|[12]\d|3[01])\d{4})
# (?P<check>)$'
# '^(?P<base>([0-4]\d|5[0-3])([05]\d|[16][0-2])(0[1-9]|[12]\d|3[01])
# \d{3})(?P<check>)$'
# '^(?P<base>6\d{7})(?P<check>\d)$'
# '^(?P<base>[0-8]\d{6})(?P<check>\d)$'
# invalid format
'CZ1234567890',
'CZ701120001',
'CZ12345678X',
'CZ6123456',
'CZ9999999',
# invalid check
'CZ5502080000',
'CZ7952290291',
'CZ005229153',
'CZ633333333',
'CZ12345678',
# 'DE': '^(?P<base>\d{8})(?P<check>\d)$'
# invalid format
'DE1234567890',
'DE12345G678',
# invalid check digit
'DE111111120',
'DE136695973',
# 'DK': '^(?P<base>[1-9]\d{7})(?P<check>)$'
# invalid format
'DK1234567',
'DKX1234567',
'DK01234567',
# invalid check
'DK13585627',
'DK88146324',
# 'EE': '^(?P<base>\d{8})(?P<check>\d)$'
# invalid format
'EE0123456789',
'EEO12345678'
# invalid check digit
'EE123456789',
'EE444444444',
# 'ES': '^(?P<base>[A-H,JVU]\d{7})(?P<check>\d)$'
# '^(?P<base>[NP-SW]\d{7})(?P<check>[A-J])$'
# '^(?P<base>\d{8})(?P<check>[A-Z])$'
# '^(?P<base>[KLMXYZ]\d{7})(?P<check>[A-Z])$'
# invalid format
'ESXA1234567',
'ES01234567B8',
'ES012345678',
# invalid check digit
'ESA12345678',
'ESP1234567B',
'ES12345678Y',
'ESK1234567E',
# 'FI': '^(?P<base>\d{7})(?P<check>\d)$'
# invalid format
'FI1234567',
'FI123456789',
'FI1234R678',
# invalid check digit
'FI12345678',
'FI09853600',
# 'FR': '^(?P<check>\d{2})(?P<base>[1-9]{9})$'
# '^(?P<check>)(?P<base>([A-HJ-NP-Z]\d|\d[A-HJ-NP-Z])[1-9]{9})'
# invalid format
'FR123456789AB',
'FR12345678901',
'FR0I123456789',
'FRO4123456789',
'FRXX123456789',
# invalid check digit
'FR22123456789',
'FR0H123456789',
'FR2J123456789',
# 'GB': '^(?P<base>((00|[1-9]\d)\d{7}))(?P<check>)
# (\d\d[1-9]|\d[1-9]\d|[1-9]\d\d|$)$'
# '^GD[0-4]\d{2}'
# '^HA[5-9]\d{2}'
# invalid format
'GB1234567890',
'GB012345678',
'GB123456727000',
'GBGD1234',
'GBGD777',
'GBHA12',
'GBHA123',
'GBAB123',
# invalid check digits
'GB434031499',
'GB434031430',
'GB123456781',
'GB123456728',
'GB123456728872',
'GB001234589',
'GB001234546',
'GB001234548238',
# 'GR': '^(?P<base>\d{7,8})(?P<check>\d)$'
# invalid format
'GR12345678G',
'GR1234567',
# invalid check digits
'GR12345678',
'GR123456789',
# 'HR': '^(?P<base>\d{10})(?P<check>\d)$'
# invalid format
'HR1234567890',
'HR123456789012',
'HR1234567890X',
# invalid check digits
'HR12345678901',
'HR11111111111',
'HR00000777777',
# 'HU': '^(?P<base>[1-9]\d{6})(?P<check>\d)$'
# invalid format
'HU1234567',
'HU123456789',
'HU1234567Z',
'HU01234567'
# invalid check digits
'HU21376411',
'HU10597199',
'HU12345678',
# 'IE': '^(?P<add>\d)[A-Z+*](?P<base>\d{5})(?P<check>[A-W])'
# '^(?P<base>\d{7})(?P<check>[A-W])$'
# '^(?P<base>\d{7})(?P<check>[A-W])(?P<add>[A-I])'
# invalid format
'IE12345678',
'IE1234567XA',
'IE1234567WM',
'IE123456',
'IEA123456C',
# invalid check character
'IE8Z49289V',
'IE3628739M',
'IE3628739VA',
'IE7A12345I',
'IE1234567U',
# 'IT': '^(?P<base>\d{7}(0\d[1-9]|0[1-9]\d|100|12[01]|888|999))
# (?P<check>\d)$'
# invalid format
'IT12345678I07',
'IT12345678804',
'IT12345670009',
'IT12345671239',
# invalid check digit
'IT00000010210',
'IT12345670016',
'IT12345678888',
# 'LT': '^(?P<base>\d{10}1)(?P<check>\d)$'
# '^(?P<base>\d{7}1)(?P<check>\d)$'
# invalid format
'LT12345678',
'LT1234567890',
'LT123456729',
'LT123456775',
'LT290061371394',
# invalid check digit
'LT213179411',
'LT123456712',
'LT290061371318',
'LT123456789012',
# 'LU': '^(?P<base>\d{6})(?P<check>\d{2})$'
# invalid format
'LU777777065',
'LU1111111111111111111111111',
# invalid check digit
'LU77777707',
'LU10000366',
'LU12345678',
# 'LV': '^(?P<base>[4-9]\d{9})(?P<check>\d)$'
# '^(?P<base>(0[1-9]|[12]\d|3[01])(0[1-9]|1[0-2])\d{2}[012]\d{4})
# (?P<check>)$'
# invalid format
'LV15166312345',
'LV20020072345',
'LVA1234567890',
# invalid check
'LV41234567890',
'LV29020012345',
'LV31040023456',
# 'MT': '^(?P<base>[1-9]\d{5})(?P<check>\d{2})$'
# invalid format
'MT123456-8',
'MT01234567',
# invalid check digits
'MT12345633',
'MT10000123',
# 'NL': '^(?P<base>\d{8})(?P<check>\d)B(\d[1-9]|[1-9]\d)$'
# invalid format
'NL123456782B00',
'NL123456782B-0',
'NL123456782123',
# invalid check digit
'NL123456789B70',
'NL010000444B01',
'NL000000010B34',
'NL200000100B10',
# 'PL': '^(?P<base>\d{9})(?P<check>\d)$'
# invalid format
'PL123456789',
'PLA123456789',
# invalid check digit
'PL1234567890',
'PL0123456780',
'PL5260001244',
'PL0200000000',
# 'PT': '^(?P<base>[1-9]\d{7})(?P<check>\d)$'
# invalid format
'PT12345678',
'PT012345679',
'PT1234567890',
# invalid check digit
'PT123456788',
'PT502757190',
# 'RO': '^(?P<base>[1-9]\d{8})(?P<check>\d)$'
# '^(?P<base>[1-9]\d{2}(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])
# (0[1-9]|[1-3]\d|4[0-7]|5[12])\d{3})(?P<check>\d)$'
# invalid format
'RO0123456789',
'RO0123456789012',
'RO5121018001230',
'RO8121018641231',
'RO1630229123459',
# invalid check digit
'RO1234567890',
'RO1630615123456',
# 'SE': '^(?P<base>\d{9})(?P<check>\d)(0[1-9]|[1-8]\d|9[1-4])'
# invalid format
'SE1234567897',
'SE123456789700',
'SE123456789797',
# invalid check digit
'SE123456789001',
'SE556188840194',
# 'SI': '^(?P<base>[1-9]\d{6})(?P<check>\d)$'
# invalid format
'SI1234567',
'SI01234567',
'SI123456789',
# invalid check digit
'SI12345678',
'SI15012555',
'SI76543110',
# 'SK': '^(?P<base>[1-9]\d[2-47-9]\d{7})(?P<check>)$'
'SK123456789',
'SK1000000001',
'SK0123456784',
'SK12345678901',
# invalid check digit
'SK1234567890',
'SK4030000000',
# unknown country code
'XX1234567',
]
class EUVATIdTest(unittest.TestCase):
    """Unit tests for identifiers.euvatid.EUVATId."""

    def test_constructor(self):
        # wrong type of argument
        with self.assertRaises(TypeError):
            EUVATId(14)
        # white space stripped and letters converted to upper case
        raw = ' pt123456789 \n'
        self.assertEqual(EUVATId(raw)._id, raw.strip().upper())
        # ensure slot-only instance
        with self.assertRaises(AttributeError):
            getattr(EUVATId(_VALID_IDS[0]), '__dict__')

    def test_valid_ids(self):
        for candidate in _VALID_IDS:
            self.assertEqual(EUVATId(candidate)._id, candidate)

    def test_invalid_ids(self):
        for candidate in _INVALID_IDS:
            # print(candidate)
            with self.assertRaises(ValueError):
                EUVATId(candidate)

    def test_str(self):
        sample = _VALID_IDS[0]
        self.assertEqual(str(EUVATId(sample)), sample)


if __name__ == '__main__':
    unittest.main()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import multiprocessing
import argparse
from .. import __version__, util
def add_global_arguments(parser, suppress_defaults=True):
    """Attach the options shared by every subcommand to *parser*.

    Suppressing defaults is needed in order to allow global arguments
    both before and after the subcommand; only the top-level parser
    should pass ``suppress_defaults=False``.
    """
    suppressor = {'default': argparse.SUPPRESS} if suppress_defaults else {}

    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Increase verbosity",
        **suppressor)

    parser.add_argument(
        "--config",
        help="Benchmark configuration file",
        default=(argparse.SUPPRESS if suppress_defaults else 'asv.conf.json'))

    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__,
        help="Print program version",
        **suppressor)
def add_compare(parser, only_changed_default=False, sort_default='name'):
    """Register the options shared by the result-comparison commands."""
    specs = [
        (('--factor', "-f"),
         dict(type=float, default=1.1,
              help="""The factor above or below which a result is considered
              problematic. For example, with a factor of 1.1 (the default
              value), if a benchmark gets 10%% slower or faster, it will
              be displayed in the results list.""")),
        (('--no-stats',),
         dict(action="store_false", dest="use_stats", default=True,
              help="""Do not use result statistics in comparisons, only `factor`
              and the median result.""")),
        (('--split', '-s'),
         dict(action='store_true',
              help="""Split the output into a table of benchmarks that have
              improved, stayed the same, and gotten worse.""")),
        (('--only-changed',),
         dict(action='store_true', default=only_changed_default,
              help="""Whether to show only changed results.""")),
        (('--no-only-changed',),
         dict(dest='only_changed', action='store_false')),
        (('--sort',),
         dict(action='store', type=str, choices=('name', 'ratio'),
              default=sort_default, help="""Sort order""")),
    ]
    for flags, options in specs:
        parser.add_argument(*flags, **options)
def add_show_stderr(parser):
    """Register the flag enabling benchmark stderr passthrough."""
    stderr_kwargs = dict(
        action="store_true",
        help="""Display the stderr output from the benchmarks.""")
    parser.add_argument("--show-stderr", "-e", **stderr_kwargs)
class DictionaryArgAction(argparse.Action):
    """
    Argparse action collecting repeated key=value assignments into a dict.

    converters maps a key to a callable (or to a (dest_key, callable)
    pair to rename the key on storage); choices restricts the accepted
    keys; dict_dest fixes the key so the raw value is taken as-is.
    """

    def __init__(self, option_strings, dest, converters=None, choices=None,
                 dict_dest=None, **kwargs):
        self.converters = {} if converters is None else converters
        self._allowed_keys = choices
        self.dict_dest = dict_dest
        super(DictionaryArgAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Determine the key: either fixed (dict_dest) or parsed from "k=v".
        if self.dict_dest is not None:
            key, value = self.dict_dest, values
        else:
            try:
                key, value = values.split("=", 1)
            except ValueError:
                raise argparse.ArgumentError(
                    self, "{!r} is not a key=value assignment".format(values))

        if self._allowed_keys is not None and key not in self._allowed_keys:
            raise argparse.ArgumentError(self,
                                         "{!r} cannot be set".format(key))

        dest_key = key
        conv = self.converters.get(key, None)
        if isinstance(conv, tuple):
            # (storage key, converter) pair.
            dest_key, conv = conv
        if conv is not None:
            try:
                value = conv(value)
            except ValueError as exc:
                raise argparse.ArgumentError(self,
                                             "{!r}: {}".format(key, exc))

        # Accumulate into the (lazily created) destination dict.
        mapping = getattr(namespace, self.dest, None)
        if mapping is None:
            mapping = {}
        mapping[dest_key] = value
        setattr(namespace, self.dest, mapping)
def add_bench(parser):
    """Register benchmark selection and attribute-override options."""
    parser.add_argument(
        "--bench", "-b", type=str, action="append",
        help="""Regular expression(s) for benchmark to run. When not
        provided, all benchmarks are run.""")

    def parse_repeat(value):
        # Either a plain count, or a "(min, max, max_time)" triple.
        try:
            return int(value)
        except ValueError:
            pass
        lo, hi, budget = value.lstrip('(').rstrip(')').split(',')
        return (int(lo), int(hi), float(budget))

    def parse_affinity(value):
        # Accept "0", "0,1,2" and ranges such as "0-3" (mixtures allowed).
        parts = value.split(",") if "," in value else [value]
        cpus = []
        for part in parts:
            if "-" in part:
                lo, hi = part.split("-", 1)
                cpus.extend(range(int(lo), int(hi) + 1))
            else:
                cpus.append(int(part))
        num_cpu = multiprocessing.cpu_count()
        for cpu in cpus:
            if not (0 <= cpu < num_cpu):
                raise ValueError("CPU {!r} not in range 0-{!r}".format(cpu, num_cpu - 1))
        return cpus

    converters = {
        'timeout': float,
        'version': str,
        'warmup_time': float,
        'repeat': parse_repeat,
        'number': int,
        'rounds': int,
        'processes': ('rounds', int),  # backward compatibility
        'sample_time': float,
        'cpu_affinity': parse_affinity
    }

    parser.add_argument(
        "--attribute", "-a", action=DictionaryArgAction,
        choices=tuple(converters.keys()), converters=converters,
        help="""Override a benchmark attribute, e.g. `-a repeat=10`.""")
    parser.add_argument(
        "--cpu-affinity", action=DictionaryArgAction, dest="attribute",
        dict_dest="cpu_affinity",
        choices=tuple(converters.keys()), converters=converters,
        help=("Set CPU affinity for running the benchmark, in format: "
              "0 or 0,1,2 or 0-3. Default: not set"))
def add_machine(parser):
    """Register the machine-name selection option."""
    machine_help = """Use the given name to retrieve machine information.
    If not provided, the hostname is used. If no entry with that
    name is found, and there is only one entry in
    ~/.asv-machine.json, that one entry will be used."""
    parser.add_argument("--machine", "-m", type=str, default=None,
                        help=machine_help)
class PythonArgAction(argparse.Action):
    """
    Backward compatibility --python XYZ argument,
    will be interpreted as --environment :XYZ
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(PythonArgAction, self).__init__(option_strings, dest, nargs=1, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        items = list(getattr(namespace, "env_spec", []))
        # Because nargs=1, argparse delivers a one-element *list*; the old
        # `values == "same"` comparison could never be true, so
        # `--python same` wrongly produced ":same" instead of the
        # documented "existing:same".
        if list(values) == ["same"]:
            # "same" historically means the interpreter running asv.
            items.append("existing:same")
        else:
            items.extend(":" + value for value in values)
        setattr(namespace, "env_spec", items)
def add_environment(parser, default_same=False):
    """Register the -E/--environment option and the legacy --python alias."""
    help_text = """Specify the environment and Python versions for running the
    benchmarks. String of the format 'environment_type:python_version',
    for example 'conda:2.7'. If the Python version is not specified,
    all those listed in the configuration file are run. The special
    environment type 'existing:/path/to/python' runs the benchmarks
    using the given Python interpreter; if the path is omitted,
    the Python running asv is used. For 'existing', the benchmarked
    project must be already installed, including all dependencies.
    """
    if default_same:
        help_text += "The default value is 'existing:same'"
    else:
        help_text += """By default, uses the values specified in the
        configuration file."""
    parser.add_argument(
        "-E", "--environment",
        dest="env_spec",
        action="append",
        default=[],
        help=help_text)
    # --python is kept for backward compatibility; it only fills in the
    # part after ':' in the environment spec.
    parser.add_argument(
        "--python", action=PythonArgAction, metavar="PYTHON",
        help="Same as --environment=:PYTHON")
def add_launch_method(parser):
    """Register the benchmark launch-method selector."""
    launch_kwargs = dict(
        action="store",
        choices=("auto", "spawn", "forkserver"),
        default="auto",
        help="How to launch benchmarks. Choices: auto, spawn, forkserver")
    parser.add_argument("--launch-method", dest="launch_method", **launch_kwargs)
def add_parallel(parser):
    """Register -j/--parallel, the build parallelism level."""
    parallel_kwargs = dict(
        nargs='?', type=int, const=-1, default=1,
        help="""Build (but don't benchmark) in parallel. The value is
        the number of CPUs to use, or if no number provided, use the
        number of cores on this machine.""")
    parser.add_argument("--parallel", "-j", **parallel_kwargs)
def add_record_samples(parser, record_default=False):
    """Register the sample-recording flags (a mutually exclusive pair)."""
    group = parser.add_mutually_exclusive_group()
    # Whichever direction matches the default is hidden from --help.
    group.add_argument(
        "--record-samples", action="store_true", dest="record_samples",
        default=record_default,
        help=(argparse.SUPPRESS if record_default else
              """Store raw measurement samples, not only statistics"""))
    group.add_argument(
        "--no-record-samples", action="store_false", dest="record_samples",
        default=record_default,
        help=(argparse.SUPPRESS if not record_default else
              """Do not store raw measurement samples, but only statistics"""))
    parser.add_argument(
        "--append-samples", action="store_true",
        help="""Combine new measurement samples with previous results,
        instead of discarding old results. Implies --record-samples.
        The previous run must also have been run with --record/append-samples.""")
def positive_int(string):
    """
    Parse a positive integer argument
    """
    try:
        number = int(string)
    except ValueError:
        number = 0
    if number > 0:
        return number
    raise argparse.ArgumentTypeError("%r is not a positive integer" % (string,))
def positive_int_or_inf(string):
    """
    Parse a positive integer argument
    """
    if string == 'all':
        return float("inf")
    try:
        number = int(string)
    except ValueError:
        number = 0
    if number > 0:
        return number
    raise argparse.ArgumentTypeError("%r is not a positive integer or 'all'" % (string,))
def time_period(string, base_period='d'):
    """
    Parse a time period argument with unit suffix
    """
    try:
        result = util.parse_human_time(string, base_period)
    except ValueError as err:
        # Re-raise as an argparse type error so the CLI prints it nicely.
        raise argparse.ArgumentTypeError(str(err))
    return result
| |
"""
Input, output and printing.
"""
from argparse import ArgumentParser, HelpFormatter
from contextlib import contextmanager
from os import fstat
from os.path import dirname, exists, join
from textwrap import indent
from numpy import (array, asscalar, get_printoptions, load, savez_compressed,
set_printoptions)
from scipy import stats
from interface import (get_max_time, get_num_of_actions, get_num_of_features,
load_level as bb_load_level)
@contextmanager
def printoptions(*args, **kwargs):
    """
    A context manager for changing NumPy print settings locally.

    The previous settings are restored even when the managed block
    raises an exception.
    """
    old_printoptions = get_printoptions()
    set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        # Restore unconditionally so an exception inside the managed
        # block does not leak the temporary settings.
        set_printoptions(**old_printoptions)
def common_printoptions():
    """
    Configures array printing for console and file output.

    Coefficient arrays can be big, yet we want them printed in full.
    When stdout is redirected to a file it is nicer not to wrap lines
    (easier scrolling and copying); on a console, which usually wraps
    anyway, NumPy's own wrapping is preferable.
    """
    # stdin and stdout referring to the same file object is used as a
    # heuristic for "attached to a terminal".
    interactive = fstat(0) == fstat(1)
    set_printoptions(linewidth=75 if interactive else 1e6, threshold=1e6)
class NoMetavarsHelpFormatter(HelpFormatter):
    """
    Help formatter that lists only the option strings, skipping the
    uppercase metavar normally repeated after each of them.
    """

    def _format_action_invocation(self, action):
        # Positionals (no option strings) and zero-argument flags keep
        # the stock rendering.
        if not action.option_strings or action.nargs == 0:
            return super()._format_action_invocation(action)
        return ", ".join(action.option_strings)
class NoMetavarsArgumentParser(ArgumentParser):
    """
    Provides a default formatter_class.
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('formatter_class', NoMetavarsHelpFormatter)
        # __init__ must return None; do not propagate the super() result.
        super().__init__(*args, **kwargs)
def parse_args(description, arguments):
    """
    Parse command-line arguments according to the given specification.

    description is included in the --help output; arguments is a tuple
    of (flags, options) pairs forwarded to add_argument() as positional
    and keyword arguments respectively.
    """
    parser = NoMetavarsArgumentParser(description=description)
    for flags, options in arguments:
        parser.add_argument(*flags, **options)
    parsed = parser.parse_args()
    # The checks below only apply when the relevant options were declared;
    # AttributeError means the option is absent from this command.
    try:
        if parsed.iterations > 1 and parsed.output is not None:
            raise ValueError("Cannot use --output with --iterations")
    except AttributeError:
        pass
    try:
        if parsed.precision is None:
            parsed.precision = 0 if parsed.verbosity == 0 else 3
    except AttributeError:
        pass
    return parsed
def first_free(prefix):
    """
    Find the first index i for which <prefix>_<i>.npz does not exist.
    """
    index = 0
    while True:
        if not exists('{}_{}.npz'.format(prefix, index)):
            return index
        index += 1
def instantiate_dist(name, *opts):
    """
    Create a frozen scipy.stats distribution from command-line values.

    The first two optional values are loc (default 0) and scale
    (default 1); any remaining ones are passed as shape parameters.
    """
    try:
        dist = getattr(stats, name)
    except AttributeError:
        raise ValueError("No such distribution {}".format(name))
    remaining = list(opts)
    loc = float(remaining.pop(0)) if remaining else 0
    scale = float(remaining.pop(0)) if remaining else 1
    return dist(*map(float, remaining), loc=loc, scale=scale)
def load_level(level, verbosity):
    """
    Loads the named level and returns its name and description.
    """
    filename = level
    if not filename.endswith('_level.data'):
        filename = '{}_level.data'.format(filename)
    path = join(dirname(__file__), 'levels', filename)
    verbose = verbosity > 3
    if verbose:
        print("Loading level from {}".format(path))
    bb_load_level(path, verbose=verbose)
    return {'key': level,
            'steps': get_max_time(),
            'actions': get_num_of_actions(),
            'features': get_num_of_features()}
def resolve_path(agent, key, folder):
    """
    Find a file identified by the given key in the agent's context.

    A purely numeric key is prefixed with the agent name; 'last' selects
    the file with the highest existing numeric key. Returns the resolved
    key and the full .npz path.
    """
    if key == 'last':
        # Highest existing index for this agent in the folder.
        key = first_free(join(folder, agent)) - 1
    if isinstance(key, int) or key.isdigit():
        key = '{}_{}'.format(agent, key)
    base = dirname(__file__)
    if key.startswith(folder):
        path = join(base, key)
    else:
        path = join(base, folder, key)
    if not path.endswith('.npz'):
        path = '{}.npz'.format(path)
    return key, path
def save_params(bot, key, params, history, verbosity):
    """
    Saves bot parameters and its history.

    Parameter files are zipped .npy files (saved as .npz), one for each
    params key and an additional reserved one for history.
    """
    # Copy first so the caller's dict is not polluted with the reserved
    # '__history' key (the original mutated the argument in place).
    payload = dict(params)
    payload['__history'] = history
    path = resolve_path(bot, key, 'params')[1]
    if verbosity > 3:
        print("Saving params to: {}".format(path))
    savez_compressed(path, **payload)
def load_params(bot, key, verbosity):
    """
    Loads a set of bot parameters identified by the given key.

    Returns a (key, params, history) triple: the resolved key (see
    resolve_path), the parameter dict loaded from the .npz file, and the
    training history list.
    """
    key, path = resolve_path(bot, key, 'params')
    if verbosity > 3:
        print("Loading params from: {}".format(path))
    # NpzFile is lazy; copy into a plain dict while the archive is open.
    with load(path) as data:
        params = dict(data)
    # Some backwards compatibility bits.
    # Older files stored history under 'history' instead of '__history';
    # note the inner pop() runs unconditionally, so a legacy 'history'
    # entry is removed from params either way.
    history = list(params.pop('__history', params.pop('history', [])))
    if 'coeffs' in params:
        # Legacy combined coefficient matrix: split it into the newer
        # per-parameter entries depending on the bot flavour in the key.
        if key.startswith('linear'):
            coeffs = params.pop('coeffs')
            params['constant'] = coeffs[:, -1]
            params['state0l'] = coeffs[:, :-1]
        elif key.startswith('states_1'):
            coeffs = params.pop('coeffs')
            params['constant'] = coeffs[0, :, -1] + coeffs[1, :, -1]
            params['state0l'] = coeffs[0, :, :-1]
            params['state1l'] = coeffs[1, :, :-1]
    if '_phases' in params:
        params['_phases'] = array(params['_phases'])
    # 'constant' was later renamed to 'free'; apply the rename to all keys.
    params = {k.replace('constant', 'free'): p for k, p in params.items()}
    return key, params, history
def save_data(collector, key, data, meta, verbosity):
    """
    Saves collected data to data/<collector>_<key>.npz.
    """
    # Copy first so the caller's dict is not polluted with the reserved
    # '__meta' key (the original mutated the argument in place).
    payload = dict(data)
    payload['__meta'] = meta
    path = resolve_path(collector, key, 'data')[1]
    if verbosity > 3:
        print("Saving data to: {}".format(path))
    savez_compressed(path, **payload)
def load_data(collector, key, verbosity):
    """
    Loads collected data from data/<collector>_<key>.npz.
    """
    path = resolve_path(collector, key, 'data')[1]
    if verbosity > 3:
        print("Loading data from: {}".format(path))
    with load(path) as data:
        records = dict(data)
    # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
    # direct equivalent for extracting the stored scalar/object.
    meta = records.pop('__meta').item()
    return records, meta
def date_desc(date):
    """
    Date as a string.
    """
    return date.strftime("%Y-%m-%d %H:%M") + " (UTC)"
def dists_desc(dists):
    """
    Readable description of free parameter distributions.
    """
    def describe(key):
        # Frozen scipy dist: name plus shape args, loc and scale.
        frozen = dists[key]
        parts = list(frozen.args)
        kwds = dict(frozen.kwds)
        parts.append('l={}'.format(kwds.pop('loc')))
        parts.append('s={}'.format(kwds.pop('scale')))
        parts.extend('{}={}'.format(k, v) for (k, v) in kwds.items())
        return "{}({})".format(frozen.dist.name, ', '.join(map(str, parts)))

    def order(key):
        # Known categories first (by fixed rank), then 'new '/'vary '
        # entries grouped after their base parameter name.
        try:
            return str(['real', 'unit', 'variations', 'acceptance'].index(key))
        except ValueError:
            if key.startswith('new '):
                return key[4:] + 'new'
            if key.startswith('vary '):
                return key[5:] + 'vary'
            return key

    dists = dict(dists)
    for key, dist in dists.pop('new').items():
        dists['new ' + key] = dist
    for key, dist in dists.pop('vary').items():
        dists['vary ' + key] = dist
    return "\n".join("{} {}".format(key, describe(key))
                     for key in sorted(dists, key=order))
def emphases_desc(emphases):
    """
    A list of (index, weight) pairs for weights not equal to one.
    """
    notable = ((i + 1, w) for i, w in enumerate(emphases) if w != 1)
    return ", ".join("{} {}".format(pos, weight) for pos, weight in notable)
def phases_desc(phases, precision):
    """
    Turns the phase splits into a more intelligible form.
    """
    # Two digits fewer than the score precision, since values are in %.
    digits = max(0, precision - 2)
    tail = " - ".join("{:.{}f}%".format(p * 100, digits) for p in phases)
    return "{:.{}f}% - {}".format(0, digits, tail)
def seeds_desc(seeds, verbosity):
    """
    Readable, combined description of seeding options, condensed or expanded.
    """
    # seeds is a (named_seeds, random_seeds, random_seeds_pool) triple:
    # stored seed names, a count of random seeds, and the pool size the
    # random seeds are drawn from.
    named_seeds, random_seeds, random_seeds_pool = seeds
    desc = ""
    if named_seeds:
        if verbosity > 0:
            desc += "stored "
        desc += ", ".join(named_seeds)
    if random_seeds:
        if named_seeds:
            desc += ", "
            # Verbose form joins the two groups with "and".
            if verbosity > 0:
                desc += "and "
        if verbosity > 0:
            desc += "random "
        desc += str(random_seeds)
    if random_seeds_pool:
        # Terse form shortens "out of" to "of".
        desc += " out of" if verbosity > 0 else " of"
        desc += " " + str(random_seeds_pool)
    return desc
def param_map_desc(param_map):
    """
    Stringifies parameter mappings, one "target: sources" line each.
    """
    lines = ("{}: {}".format(target, " ".join(sources))
             for target, sources in param_map.items())
    return "\n".join(lines)
def param_scale_desc(param_scale):
    """
    Human-readable version of prescaling directives.
    """
    return ", ".join("{} {}".format(name, factor)
                     for name, factor in param_scale.items())
def level_desc(level):
    """
    Short, readable level and its parameters string.
    """
    return "{} ({}, {}, {})".format(
        level['key'], level['steps'], level['actions'], level['features'])
def time_desc(duration, precision):
    """
    Humanized duration, e.g. "1 h, 2 m, 3.0 s".
    """
    minutes, seconds = divmod(duration, 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    if hours > 0:
        parts.append("{} h".format(int(hours)))
    # Minutes are shown whenever hours are, even when zero.
    if minutes > 0 or hours > 0:
        parts.append("{} m".format(int(minutes)))
    parts.append("{:.{}f} s".format(seconds, precision))
    return ", ".join(parts)
def scores_desc(scores, verbosity, precision):
    """
    Formats a list of scores (from different levels) for display;
    terse mode prints values only, verbose mode labels them.
    """
    if verbosity == 0:
        return " ".join("{:.{}f}".format(score, precision)
                        for score in scores.values())
    return ", ".join("{} {:.{}f}".format(level, score, precision)
                     for level, score in scores.items())
def training_desc(info, verbosity, precision):
    """
    Textifies a single training history record. Returns a list of lines.
    """
    # Base lines in fixed order; optional entries are inserted at fixed
    # positions below, so the insert indices depend on this layout.
    desc = [
        "Date: {}".format(date_desc(info['date'])),
        "Bot: {bot}".format(**info) +
        ", Trainer: {trainer} {config}".format(**info),
        "Dists: {}".format(indent(dists_desc(info['dists']), " " * 7).strip()),
        "Seeds: {}".format(seeds_desc(info['seeds'], verbosity)),
        "Level: {}".format(level_desc(info['level'])) +
        ", Runs: {}".format(info['runs']) +
        ", Time: {}".format(time_desc(info['time'], precision)),
        "Output: {output}, PRNGs: {prngs_seed}".format(**info) +
        ", Scores: {}".format(scores_desc(info['scores'], verbosity,
                                          precision))
    ]
    # Optional lines after "Dists:" (later inserts at the same index end
    # up before earlier ones).
    if info['phases'] is not None:
        desc.insert(3, "Phases: {}".format(
            phases_desc(info['phases'], precision)))
    if info['emphases']:
        desc.insert(3, "Emphases: {}".format(emphases_desc(info['emphases'])))
    # Optional parameter-handling lines after "Seeds:".
    if info['param_scale']:
        desc.insert(5, "Scaled params: {}".format(
            param_scale_desc(info['param_scale'])))
    if info['param_freeze']:
        desc.insert(5, "Frozen params: {}".format(
            ", ".join(info['param_freeze'])))
    if info['param_map']:
        desc.insert(5, "Params map: {}".format(
            indent(param_map_desc(info['param_map']), " " * 12).strip()))
    return desc
def params_desc(params, precision):
    """
    Human-readable description of bot parameters, one key per entry,
    with array values indented to line up under their key. History
    entries are skipped.
    """
    with printoptions(precision=precision):
        lines = []
        for key, value in sorted(params.items()):
            if key in ('history', '__history'):
                continue
            rendered = indent(repr(value), " " * (len(key) + 4)).strip()
            lines.append("'{}': {}\n".format(key, rendered))
    return "".join(lines)
def results_desc(results, verbosity, precision):
    """
    Nicely formatted processor results; verbose mode labels and indents
    each value, terse mode prints the bare values.
    """
    with printoptions(precision=precision, suppress=True):
        chunks = []
        for key, value in results:
            text = str(value)
            if verbosity > 0:
                chunks.append("\n{}:\n\n{}\n".format(key, indent(text, " ")))
            else:
                chunks.append("{}\n".format(text))
    return "".join(chunks)
| |
import os
import time
import pytest
import logging
from collections import OrderedDict
from distutils import dir_util
from distutils.version import LooseVersion
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.query import SimpleStatement
from cassandra.util import sortedset
from dtest import Tester, create_ks
from tools.assertions import (assert_all, assert_almost_equal, assert_none,
assert_row_count, assert_unavailable)
since = pytest.mark.since
logger = logging.getLogger(__name__)
from scrub_test import TestHelper
@since('2.0')
class TestTTL(Tester):
    """ Test Time To Live Feature """

    @pytest.fixture(scope='function', autouse=True)
    def fixture_ttl_test_setup(self, fixture_dtest_setup):
        # Single-node cluster with a fresh keyspace for every test.
        self.cluster = fixture_dtest_setup.cluster
        self.fixture_dtest_setup = fixture_dtest_setup
        self.cluster.populate(1).start()
        [node1] = self.cluster.nodelist()
        self.session1 = self.patient_cql_connection(node1)
        create_ks(self.session1, 'ks', 1)

    def prepare(self, default_time_to_live=None):
        """ (Re)create ttl_table, optionally with a table-level default TTL """
        self.session1.execute("DROP TABLE IF EXISTS ttl_table;")
        query = """
            CREATE TABLE ttl_table (
                key int primary key,
                col1 int,
                col2 int,
                col3 int,
            )
        """
        if default_time_to_live:
            query += " WITH default_time_to_live = {};".format(default_time_to_live)
        self.session1.execute(query)

    def smart_sleep(self, start_time, time_to_wait):
        """ Function that sleep smartly based on the start_time.
        Useful when tests are slower than expected.

        start_time: The start time of the timed operations
        time_to_wait: The time to wait in seconds from the start_time
        """
        now = time.time()
        real_time_to_wait = time_to_wait - (now - start_time)
        if real_time_to_wait > 0:
            time.sleep(real_time_to_wait)

    def test_default_ttl(self):
        """ Test default_time_to_live specified on a table """
        self.prepare(default_time_to_live=1)
        start = time.time()
        self.session1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (1, 1))
        self.session1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (2, 2))
        self.session1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (3, 3))
        self.smart_sleep(start, 3)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_insert_ttl_has_priority_on_defaut_ttl(self):
        """ Test that a ttl specified during an insert has priority on the default table ttl """
        self.prepare(default_time_to_live=1)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 5;
        """ % (1, 1))
        self.smart_sleep(start, 2)
        assert_row_count(self.session1, 'ttl_table', 1)  # should still exist
        self.smart_sleep(start, 7)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_insert_ttl_works_without_default_ttl(self):
        """ Test that a ttl specified during an insert works even if a table has no default ttl """
        self.prepare()

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 1;
        """ % (1, 1))
        self.smart_sleep(start, 3)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_default_ttl_can_be_removed(self):
        """ Test that default_time_to_live can be removed """
        self.prepare(default_time_to_live=1)

        start = time.time()
        self.session1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 0;")
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (%d, %d);
        """ % (1, 1))
        self.smart_sleep(start, 1.5)
        assert_row_count(self.session1, 'ttl_table', 1)

    def test_removing_default_ttl_does_not_affect_existing_rows(self):
        """ Test that removing a default_time_to_live doesn't affect the existings rows """
        self.prepare(default_time_to_live=1)

        self.session1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 10;")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (%d, %d);
        """ % (1, 1))
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 15;
        """ % (2, 1))
        self.session1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 0;")
        self.session1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d);" % (3, 1))
        self.smart_sleep(start, 5)
        assert_row_count(self.session1, 'ttl_table', 3)
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 2)
        self.smart_sleep(start, 20)
        assert_row_count(self.session1, 'ttl_table', 1)

    def test_update_single_column_ttl(self):
        """ Test that specifying a TTL on a single column works """
        self.prepare()

        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        start = time.time()
        self.session1.execute("UPDATE ttl_table USING TTL 3 set col1=42 where key=%s;" % (1,))
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
        self.smart_sleep(start, 5)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])

    def test_update_multiple_columns_ttl(self):
        """ Test that specifying a TTL on multiple columns works """
        self.prepare()

        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        start = time.time()
        self.session1.execute("""
            UPDATE ttl_table USING TTL 2 set col1=42, col2=42, col3=42 where key=%s;
        """ % (1,))
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 42, 42]])
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, None, None]])

    def test_update_column_ttl_with_default_ttl(self):
        """
        Test that specifying a column ttl works when a default ttl is set.
        This test specify a lower ttl for the column than the default ttl.
        """
        self.prepare(default_time_to_live=8)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table USING TTL 3 set col1=42 where key=%s;" % (1,))
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
        self.smart_sleep(start, 5)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])
        self.smart_sleep(start, 10)
        assert_row_count(self.session1, 'ttl_table', 0)

    # BUGFIX: renamed from update_column_ttl_with_default_ttl_test2 (legacy
    # nose-style suffix); without the test_ prefix pytest never collected it.
    def test_update_column_ttl_with_default_ttl2(self):
        """
        Test that specifying a column ttl works when a default ttl is set.
        This test specify a higher column ttl than the default ttl.
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table USING TTL 6 set col1=42 where key=%s;" % (1,))
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
        self.smart_sleep(start, 8)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_remove_column_ttl(self):
        """
        Test that removing a column ttl works.
        """
        self.prepare()

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d) USING TTL 2;
        """ % (1, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table set col1=42 where key=%s;" % (1,))
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])

    @since('3.6')
    def test_set_ttl_to_zero_to_default_ttl(self):
        """
        Test that we can remove the default ttl by setting the ttl explicitly to zero.
        CASSANDRA-11207
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(1, 1, 1, 1))
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(2, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key={};".format(1))
        self.session1.execute("UPDATE ttl_table using ttl 3 set col1=42 where key={};".format(2))
        self.smart_sleep(start, 5)

        # The first row should be deleted, using ttl 0 should fallback to default_time_to_live
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])

    @since('2.1', max_version='3.5')
    def test_remove_column_ttl_with_default_ttl(self):
        """
        Test that we cannot remove a column ttl when a default ttl is set.
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (2, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key=%s;" % (1,))
        self.session1.execute("UPDATE ttl_table using ttl 8 set col1=42 where key=%s;" % (2,))
        self.smart_sleep(start, 5)
        # The first row should be deleted, using ttl 0 should fallback to default_time_to_live
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[2, 42, None, None]])
        self.smart_sleep(start, 10)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_collection_list_ttl(self):
        """
        Test that ttl has a granularity of elements using a list collection.
        """
        self.prepare(default_time_to_live=10)

        self.session1.execute("ALTER TABLE ttl_table ADD mylist list<int>;")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, mylist) VALUES (%d, %d, %s);
        """ % (1, 1, [1, 2, 3, 4, 5]))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 5 SET mylist[0] = 42, mylist[4] = 42 WHERE key=1;
        """)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [42, 2, 3, 4, 42]]])
        self.smart_sleep(start, 7)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [2, 3, 4]]])
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_collection_set_ttl(self):
        """
        Test that ttl has a granularity of elements using a set collection.
        """
        self.prepare(default_time_to_live=10)

        self.session1.execute("ALTER TABLE ttl_table ADD myset set<int>;")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, myset) VALUES (%d, %d, %s);
        """ % (1, 1, '{1,2,3,4,5}'))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 3 SET myset = myset + {42} WHERE key=1;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, sortedset([1, 2, 3, 4, 5, 42])]]
        )
        self.smart_sleep(start, 5)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, sortedset([1, 2, 3, 4, 5])]]
        )
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_collection_map_ttl(self):
        """
        Test that ttl has a granularity of elements using a map collection.
        """
        self.prepare(default_time_to_live=6)

        self.session1.execute("ALTER TABLE ttl_table ADD mymap map<int, int>;")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, mymap) VALUES (%d, %d, %s);
        """ % (1, 1, '{1:1,2:2,3:3,4:4,5:5}'))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 2 SET mymap[1] = 42, mymap[5] = 42 WHERE key=1;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, OrderedDict([(1, 42), (2, 2), (3, 3), (4, 4), (5, 42)])]]
        )
        self.smart_sleep(start, 4)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, OrderedDict([(2, 2), (3, 3), (4, 4)])]]
        )
        self.smart_sleep(start, 8)
        assert_row_count(self.session1, 'ttl_table', 0)

    def test_delete_with_ttl_expired(self):
        """
        Updating a row with a ttl does not prevent deletion, test for CASSANDRA-6363
        """
        self.session1.execute("DROP TABLE IF EXISTS session")
        self.session1.execute("CREATE TABLE session (id text, usr text, valid int, PRIMARY KEY (id))")

        self.session1.execute("insert into session (id, usr) values ('abc', 'abc')")
        self.session1.execute("update session using ttl 1 set valid = 1 where id = 'abc'")
        self.smart_sleep(time.time(), 2)

        self.session1.execute("delete from session where id = 'abc' if usr ='abc'")
        assert_row_count(self.session1, 'session', 0)

    @since('2.1')
    def test_expiration_overflow_policy_cap(self):
        self._base_expiration_overflow_policy_test(default_ttl=False, policy='CAP')

    @since('2.1')
    def test_expiration_overflow_policy_cap_default_ttl(self):
        # BUGFIX: the *_default_ttl variant must exercise the table-level
        # default TTL; it previously duplicated the default_ttl=False case.
        self._base_expiration_overflow_policy_test(default_ttl=True, policy='CAP')

    @since('3.0')
    def test_expiration_overflow_policy_capnowarn(self):
        self._base_expiration_overflow_policy_test(default_ttl=False, policy='CAP_NOWARN')

    @since('3.0')
    def test_expiration_overflow_policy_capnowarn_default_ttl(self):
        # BUGFIX: was default_ttl=False, duplicating the test above.
        self._base_expiration_overflow_policy_test(default_ttl=True, policy='CAP_NOWARN')

    @since('2.1')
    def test_expiration_overflow_policy_reject(self):
        self._base_expiration_overflow_policy_test(default_ttl=False, policy='REJECT')

    @since('2.1')
    def test_expiration_overflow_policy_reject_default_ttl(self):
        # BUGFIX: was default_ttl=False, duplicating the test above.
        self._base_expiration_overflow_policy_test(default_ttl=True, policy='REJECT')

    def _base_expiration_overflow_policy_test(self, default_ttl, policy):
        """
        Checks that expiration date overflow policy is correctly applied
        @jira_ticket CASSANDRA-14092
        """
        MAX_TTL = 20 * 365 * 24 * 60 * 60  # 20 years in seconds
        default_time_to_live = MAX_TTL if default_ttl else None
        self.prepare(default_time_to_live=default_time_to_live)

        # Restart node with expiration_date_overflow_policy
        self.cluster.stop()
        self.cluster.start(jvm_args=['-Dcassandra.expiration_date_overflow_policy={}'.format(policy)], wait_for_binary_proto=True)
        self.session1 = self.patient_cql_connection(self.cluster.nodelist()[0])
        self.session1.execute("USE ks;")

        # Try to insert data, should only fail if policy is REJECT
        query = 'INSERT INTO ttl_table (key, col1) VALUES (%d, %d)' % (1, 1)
        if not default_time_to_live:
            # BUGFIX: a separating space was missing before USING.
            query = query + " USING TTL %d" % (MAX_TTL)
        try:
            result = self.session1.execute_async(query + ";")
            result.result()
            if policy == 'REJECT':
                self.fail("should throw InvalidRequest")
            if self.cluster.version() >= '3.0':  # client warn only on 3.0+
                if policy == 'CAP':
                    # BUGFIX: logging uses %-style placeholders, not {}.
                    logger.debug("Warning is %s", result.warnings[0])
                    assert 'exceeds maximum supported expiration' in result.warnings[0], 'Warning not found'
                else:
                    assert not result.warnings, "There should be no warnings"
        except InvalidRequest:
            if policy != 'REJECT':
                self.fail("should not throw InvalidRequest")

        self.cluster.flush()
        # Data should be present unless policy is reject
        assert_row_count(self.session1, 'ttl_table', 0 if policy == 'REJECT' else 1)

        # Check that warning is always logged, unless policy is REJECT
        if policy != 'REJECT':
            node1 = self.cluster.nodelist()[0]
            prefix = 'default ' if default_ttl else ''
            warning = node1.grep_log("Request on table {}.{} with {}ttl of {} seconds exceeds maximum supported expiration"
                                     .format('ks', 'ttl_table', prefix, MAX_TTL))
            assert warning, 'Log message should be print for CAP and CAP_NOWARN policy'
class TestDistributedTTL(Tester):
    """ Test Time To Live Feature in a distributed environment """

    @pytest.fixture(scope='function', autouse=True)
    def fixture_set_cluster_settings(self, fixture_dtest_setup):
        # Two-node cluster; keyspace 'ks' is created at replication factor 2.
        fixture_dtest_setup.cluster.populate(2).start()
        [self.node1, self.node2] = fixture_dtest_setup.cluster.nodelist()
        self.session1 = fixture_dtest_setup.patient_cql_connection(self.node1)
        create_ks(self.session1, 'ks', 2)

    def prepare(self, default_time_to_live=None):
        """(Re)create ttl_table, optionally with a table-level default TTL."""
        self.session1.execute("DROP TABLE IF EXISTS ttl_table;")
        query = """
            CREATE TABLE ttl_table (
                key int primary key,
                col1 int,
                col2 int,
                col3 int,
            )
        """
        if default_time_to_live:
            query += " WITH default_time_to_live = {};".format(default_time_to_live)
        self.session1.execute(query)

    def test_ttl_is_replicated(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """
        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL
        )
        session1.execute(query)
        assert_all(
            session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None]],
            cl=ConsistencyLevel.ALL
        )
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        assert abs(ttl_session1[0][0] - ttl_session2[0][0]) <= 1

        # Table default TTL is 5s; after 7s the row must be gone on all nodes.
        time.sleep(7)
        assert_none(session1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)

    def test_ttl_is_respected_on_delayed_replication(self):
        """ Test that ttl is respected on delayed replication """
        self.prepare()
        # Write while node2 is down so replication is delayed.
        self.node2.stop()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        # Let the 5-second TTL expire before node2 ever sees the data.
        time.sleep(7)
        self.node1.stop()
        self.node2.start(wait_for_binary_proto=True)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_row_count(session2, 'ttl_table', 0)  # should be 0 since node1 is down, no replica yet
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.node1.cleanup()

        # Only the long-TTL row (key=2) may have been replicated to node2.
        assert_all(session2, "SELECT count(*) FROM ttl_table", [[1]], cl=ConsistencyLevel.ALL)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]],
            cl=ConsistencyLevel.ALL
        )

        # Check that the TTL on both server are the same
        ttl_1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]
        ttl_2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]
        logger.debug("ttl_1 is {}:".format(ttl_1))
        logger.debug("ttl_2 is {}:".format(ttl_2))
        assert abs(ttl_1 - ttl_2) <= 1

    def test_ttl_is_respected_on_repair(self):
        """ Test that ttl is respected on repair """
        self.prepare()
        # Drop to RF=1 so node1 holds the only replica.
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 1};
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        # Let the 5-second TTL expire.
        time.sleep(7)
        self.node1.stop()
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        # RF=1 and the only replica (node1) is down -> unavailable.
        assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        # Raise RF back to 2 and repair so node2 receives the surviving row.
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 2};
        """)
        self.node1.repair(['ks'])
        ttl_start = time.time()
        ttl_session1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')
        self.node1.stop()

        # After repair, node2 must own the non-expired row only.
        assert_row_count(session2, 'ttl_table', 1)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]]
        )

        # Check that the TTL on both server are the same
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session1 = ttl_session1[0][0] - (time.time() - ttl_start)
        assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
class TestRecoverNegativeExpirationDate(TestHelper):
    """Recovery of rows whose TTL overflowed into a negative expiration date."""

    @since('2.1')
    def test_recover_negative_expiration_date_sstables_with_scrub(self):
        """
        @jira_ticket CASSANDRA-14092

        Check that row with negative overflowed ttl is recovered by offline scrub
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node] = cluster.nodelist()
        session = self.patient_cql_connection(node)
        create_ks(session, 'ks', 1)
        session.execute("DROP TABLE IF EXISTS ttl_table;")
        query = """
            CREATE TABLE ttl_table (
                key int primary key,
                col1 int,
                col2 int,
                col3 int,
            )
        """
        session.execute(query)

        # Pick the pre-built corrupt sstable fixture matching this version line.
        version = '2.1' if self.cluster.version() < LooseVersion('3.0') else \
            ('3.0' if self.cluster.version() < LooseVersion('3.11') else '3.11')
        corrupt_sstable_dir = os.path.join('sstables', 'ttl_test', version)
        table_dir = self.get_table_paths('ttl_table')[0]
        # Bug fix: logger.debug uses %-style lazy formatting; the original
        # passed "{}" placeholders which are never interpolated.
        logger.debug("Copying sstables from %s into %s", corrupt_sstable_dir, table_dir)
        dir_util.copy_tree(corrupt_sstable_dir, table_dir)

        logger.debug("Load corrupted sstable")
        node.nodetool('refresh ks ttl_table')
        node.watch_log_for('Loading new SSTables', timeout=10)

        # The row is invisible while its expiration date is negative.
        logger.debug("Check that there are no rows present")
        assert_row_count(session, 'ttl_table', 0)

        logger.debug("Shutting down node")
        self.cluster.stop()

        logger.debug("Will run offline scrub on sstable")
        scrubbed_sstables = self.launch_standalone_scrub('ks', 'ttl_table',
                                                         reinsert_overflowed_ttl=True,
                                                         no_validate=True)
        # Bug fix: same %-formatting correction as above.
        logger.debug("Executed offline scrub on %s", str(scrubbed_sstables))

        logger.debug("Starting node again")
        self.cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node)
        session.execute("USE ks;")

        logger.debug("Check that row was recovered")
        assert_all(session, "SELECT * FROM ttl_table;", [[1, 1, None, None]])
| |
from abc import ABCMeta, abstractmethod
from __main__ import settings, botdata, httpgetter
from .helpers import *
from gtts import gTTS
import urllib.request
import disnake
import re
import os
import random
import html
import requests
import functools
from concurrent.futures import ThreadPoolExecutor
import logging
logger = logging.getLogger("mangologger")
def tts_save(filename, text, lang):
    """Render `text` to speech with gTTS and save the audio to `filename`.

    Retries up to 10 times on the known gTTS "token seed" failure and
    converts the library's other failure modes into UserError.
    """
    # gTTS regional variants (e.g. "en-au") are collapsed to plain "en".
    if "-" in lang:
        lang = "en"
    for remaining in range(9, -1, -1):
        try:
            speech = gTTS(text=text, lang=lang, lang_check=False)
            speech.save(filename)
        except ValueError as e:
            if remaining > 0 and e.args and e.args[0] == "Unable to find token seed! Did https://translate.google.com change?":
                logger.error(f"Got bad seed exception. Looping {remaining} more times")
                continue  # loop, as recommended here: https://github.com/pndurette/gTTS/issues/176#issuecomment-723393140
            else:
                raise
        except AttributeError:
            raise UserError("Whoops. Looks like gtts is broken right now.")
        except (RecursionError, requests.exceptions.HTTPError):
            raise UserError("There was a problem converting that via gtts")
        except AssertionError as e:
            if e.args and e.args[0] == "No text to send to TTS API":
                raise UserError("I can't convert that to TTS. Looks like there's not much there.")
            else:
                raise
        return  # success
class ClipNotFound(UserError):
    """Raised when no clip of the given type exists under the given name."""
    def __init__(self, cliptype, clipname):
        super().__init__(f"There ain't a {cliptype} clip with the name '{clipname}'")
class MissingClipType(UserError):
    """Raised when a clip id does not carry a recognizable 'type:' prefix."""
    def __init__(self, clipid):
        super().__init__(f"Yer clipid '{clipid}' is missin a proper cliptype")
# For this class and all its subclasses, we need to have
# init be asynchronous. because of this, we are simply making
# it a method that should always be called after initializing
# the object. therefore, to initialize a clip, do something
# like this:
# await Clip().init(<stuffhere>)
# instead of:
# Clip(<stuffhere>)
class Clip(object):
    """Base class for playable audio clips.

    Instances must be initialized asynchronously via `await Clip().init(...)`
    instead of through __init__ (see the comment block above this class).
    """
    async def init(self, clipname, audiopath, text="", volume=0.6):
        # clipname: identifier unique within this clip type
        # audiopath: local filesystem path to the audio file
        # text: transcript/caption of the clip, if any
        # volume: playback volume multiplier
        self.name = clipname
        self.audiopath = audiopath
        self.text = text
        self.volume = volume
        return self

    @classmethod
    @abstractmethod
    def type(cls):
        # NOTE(review): @abstractmethod has no enforcement effect here because
        # Clip does not use ABCMeta; subclasses are still expected to override.
        pass

    @classmethod
    def types_dict(cls):
        # Maps each subclass's type string to the subclass itself.
        return { cliptype.type(): cliptype for cliptype in cls.__subclasses__() }

    @property
    def clipid(self):
        # Fully-qualified id, e.g. "local:hello".
        return "{}:{}".format(self.type(), self.name)

    @property
    def audiolength(self):
        # Duration in seconds (2 decimals), probed via ffprobe.
        return round(float(run_command(["ffprobe", "-i", self.audiopath, "-show_entries", "format=duration", "-v", "quiet", "-of", "csv=p=0"])), 2)

    async def get_info_embed(self):
        """Build a discord embed describing this clip."""
        embed = disnake.Embed()
        embed.description = self.text if self.text is not None else ""
        self.add_info_embed_parts(embed)
        return embed

    def add_info_embed_parts(self, embed):
        """Adds some things to a clips `info` embed"""
        embed.set_author(name=self.clipid)
        embed.add_field(name="Length", value=f"{self.audiolength} seconds")
class LocalClip(Clip):
    """A clip backed by an audio file shipped in the bot's clips directory."""
    async def init(self, clipname, bot, ctx):
        audio_cog = bot.get_cog("Audio")
        all_info = audio_cog.local_clipinfo
        if clipname not in all_info:
            raise ClipNotFound(self.type, clipname)
        entry = all_info[clipname]
        self.author = entry.get("author")
        self.source = entry.get("source")
        self.tags = entry.get("tags")
        if self.tags:
            self.tags = self.tags.split("|")
        caption = entry.get("text", "")
        clipfile = settings.resource("clips/" + entry.get("path"))
        return await Clip.init(self, clipname, clipfile, text=caption)

    @classmethod
    def type(cls):
        return "local"

    async def get_info_embed(self):
        """Build an embed with the clip's quote, author, source and tags."""
        description = ""
        if self.text != "":
            description += f"\"{self.text}\""
        if self.author:
            description += f" - {self.author}" if self.text != "" else f"By {self.author}"
        embed = disnake.Embed()
        embed.description = description
        if self.source:
            embed.add_field(name="Source", value=self.source)
        if self.tags:
            embed.add_field(name="Tags", value=", ".join(self.tags))
        self.add_info_embed_parts(embed)
        return embed
class TtsClip(Clip):
    """A clip synthesized on the fly from text via Google TTS."""
    async def init(self, text, bot, ctx):
        guildinfo = botdata.guildinfo(ctx)
        ttslang = guildinfo.ttslang if guildinfo else "en-au"
        cache_uri = f"clip_tts_{ttslang}:{text}"
        audiofile = httpgetter.cache.get_filename(cache_uri)
        if not audiofile:
            audiofile = await httpgetter.cache.new(cache_uri, "wav")
            try:
                task = functools.partial(tts_save, audiofile, text, ttslang)
                await bot.loop.run_in_executor(ThreadPoolExecutor(), task)
            except:
                # Don't leave a half-written entry behind in the cache.
                await httpgetter.cache.remove(cache_uri)
                raise
        return await Clip.init(self, text, audiofile, text)

    @classmethod
    def type(cls):
        return "tts"
class UrlClip(Clip):
    """A clip fetched from a direct link to an audio file."""
    async def init(self, url, bot, ctx):
        pattern = f'^https?://.*\.({audio_extensions})$'
        if not re.match(pattern, url):
            raise UserError("That's not a valid audio url")
        local_file = await httpgetter.get(url, "filename", cache=True)
        return await Clip.init(self, url, local_file)

    @classmethod
    def type(cls):
        return "url"
# Mapping of voice actor name -> info URL, used when building DotaClip embeds.
voice_actor_links = read_json(settings.resource("json/voice_actor_links.json"))
class DotaClip(Clip):
    """A clip for a Dota 2 voice response, resolved via the Dotabase cog."""
    async def init(self, responsename, bot, ctx):
        dotabase = bot.get_cog("Dotabase")
        self.response = dotabase.get_response(responsename)
        if self.response == None:
            raise ClipNotFound(self.type(), responsename)
        # Thumbnail image of the speaking voice, when dotabase provides one.
        self.voice_thumbnail = None
        if self.response.voice.image:
            self.voice_thumbnail = dotabase.vpkurl + self.response.voice.image
        # Download (and cache) the response's mp3 locally before playback.
        filename = await httpgetter.get(dotabase.vpkurl + self.response.mp3, "filename", cache=True)
        return await Clip.init(self, responsename, filename, text=self.response.text, volume=0.4)

    @classmethod
    def type(cls):
        return "dota"

    async def get_info_embed(self):
        """Build an embed with the response's text, criteria and voice actor."""
        embed = disnake.Embed()
        embed.description = f"\"{self.response.text}\" - {self.response.voice.name}"
        if self.response.criteria != "":
            embed.add_field(name="Criteria", value=self.response.pretty_criteria.replace('|', '\n'))
        if self.response.voice.voice_actor:
            actor_name = self.response.voice.voice_actor
            # Link the actor's name when we have a URL on file for them.
            if actor_name in voice_actor_links:
                actor_name = f"[{actor_name}]({voice_actor_links[actor_name]})"
            embed.add_field(name="Voice Actor", value=actor_name)
        if self.voice_thumbnail:
            embed.set_thumbnail(url=self.voice_thumbnail)
        self.add_info_embed_parts(embed)
        return embed
class DotaChatWheel(Clip):
    """A clip for a Dota 2 chat wheel sound."""
    async def init(self, chatwheel_id, bot, ctx):
        dotabase = bot.get_cog("Dotabase")
        self.message = dotabase.get_chatwheel_sound(chatwheel_id)
        if self.message is None:
            raise ClipNotFound(self.type(), chatwheel_id)
        sound_file = await httpgetter.get(dotabase.vpkurl + self.message.sound, "filename", cache=True)
        return await Clip.init(self, chatwheel_id, sound_file, text=self.message.message, volume=0.4)

    @classmethod
    def type(cls):
        return "dotachatwheel"

    async def get_info_embed(self):
        """Build an embed describing this chat wheel message."""
        embed = disnake.Embed()
        embed.description = self.message.message
        if self.message.label != "":
            embed.add_field(name="Label", value=self.message.label)
        if self.message.category:
            embed.add_field(name="Category", value=self.message.category)
        embed.add_field(name="All-Chat", value="Yes" if self.message.all_chat else "No")
        self.add_info_embed_parts(embed)
        return embed
# Mapping of gtts language code -> human-readable language name.
gtts_langs = read_json(settings.resource("json/gtts_languages.json"))
class GttsLang():
    """Wraps a validated gTTS language code.

    Accepts either the code ("en") or the display name ("English"),
    case-insensitively; raises ValueError for anything not in gtts_langs.
    """
    def __init__(self, language):
        wanted = language.lower()
        self.lang = None
        for code in gtts_langs:
            if code.lower() == wanted or gtts_langs[code].lower() == wanted:
                self.lang = code
        if self.lang is None:
            raise ValueError(f"'{language}' is not a valid gtts lang")

    @property
    def pretty(self):
        # Human-readable name for this language code.
        return gtts_langs[self.lang]

    def __repr__(self):
        return self.lang

    @classmethod
    def get(cls, language):
        """Like the constructor, but returns None instead of raising."""
        try:
            return GttsLang(language)
        except ValueError:
            return None
| |
#Meeprommer commandline interface
#By Zack Nelson
#Project Home:
#https://github.com/mkeller0815/MEEPROMMER
#http://www.ichbinzustaendig.de/dev/meeprommer-en
#Adapted to work with EEPROMDate programmer
#By Svetlana Tovarisch
import serial, sys, argparse, time
# Parse command line arguments.
parser = argparse.ArgumentParser(
    description='Meepromer Command Line Interface',
    epilog='Read source for further information')

# Exactly one task may be selected per invocation.
task = parser.add_mutually_exclusive_group()
task.add_argument('-w', '--write', dest="cmd", action="store_const",
                  const="write", help='Write to EEPROM')
task.add_argument('-W', '--write_paged', dest="cmd", action="store_const",
                  const="write_paged", help='Fast paged write to EEPROM')
task.add_argument('-r', '--read', dest="cmd", action="store_const",
                  const="read", help='Read from EEPROM as ascii')
task.add_argument('-d', '--dump', dest="cmd", action="store_const",
                  const="dump", help='Dump EEPROM to binary file')
task.add_argument('-v', '--verify', dest="cmd", action="store_const",
                  const="verify", help='Compare EEPROM with file')
task.add_argument('-V', '--version', dest="cmd", action="store_const",
                  const="version", help='Check burner version')
task.add_argument('-u', '--unlock', dest="cmd", action="store_const",
                  const="unlock", help='Unlock EEPROM')
task.add_argument('-l', '--list', dest="cmd", action="store_const",
                  const="list", help='List serial ports')
task.add_argument('-D', '--debug', dest="cmd", action="store_const",
                  const="debug", help='run debug code')

parser.add_argument('-a', '--address', action='store', default='0',
                    help='Starting eeprom address (as hex), default 0')
parser.add_argument('-o', '--offset', action='store', default='0',
                    help='Input file offset (as hex), default 0')
# Bug fix: `long` does not exist in Python 3 (the script already relies on
# py3-only features such as FileNotFoundError); use int. Help texts are also
# corrected to match the actual defaults.
parser.add_argument('-b', '--bytes', action='store', default='512',
                    type=int, help='Number of kBytes to r/w, default 512')
parser.add_argument('-p', '--page_size', action='store', default='256',
                    type=int, help='Number of bytes per EEPROM page e.g.:'
                    'CAT28C*=32, AT28C*=64, X28C*=64, default 256')
parser.add_argument('-f', '--file', action='store',
                    help='Name of data file')
parser.add_argument('-c', '--com', action='store',
                    default='COM3', help='Com port address')
parser.add_argument('-s', '--speed', action='store',
                    type=int, default='460800', help='Com port baud, default 460800')
def list_ports():
    """Print every available serial port as 'device description'."""
    # Imported lazily so the other commands work even if serial.tools changes.
    from serial.tools import list_ports
    for x in list_ports.comports():
        print(x[0], x[1])
def dump_file():
    """Dump args.bytes kB of EEPROM starting at args.address into args.file.

    Uses the burner's 'B' (binary read) command; expects a trailing NUL ack.
    """
    ser.flushInput()
    # Bug fix: bytes("...").encode('ascii') is a TypeError on Python 3;
    # encode the str directly. Build the command once instead of twice.
    cmd = ("B " + format(args.address, '08x') + " " +
           format(args.bytes * 1024, '08x') + " 10\n").encode('ascii')
    ser.write(cmd)
    print(cmd)
    eeprom = ser.read(args.bytes * 1024)
    if ser.read(1) != b'\0':
        print("Error: no Ack")
        # sys.exit(1)
    try:
        # Context manager guarantees the file is closed even on write errors.
        with open(args.file, 'wb+') as fo:
            fo.write(eeprom)
    except OSError:
        print("Error: File cannot be opened, verify it is not in use")
        sys.exit(1)
def verify():
    """Compare args.bytes kB of EEPROM (from args.address) against args.file
    (from args.offset). Exits 0 on match, 1 on mismatch or file error."""
    print("Verifying...")
    ser.flushInput()
    # Bug fix: bytes("...").encode('ascii') raises TypeError on Python 3.
    ser.write(("B " + format(args.address, '08x') + " " +
               format(args.bytes * 1024, '08x') + " 10\n").encode('ascii'))
    try:
        with open(args.file, 'rb') as fi:
            fi.seek(args.offset)
            file = fi.read(args.bytes * 1024)
    except FileNotFoundError:
        print("Error: ", args.file, " not found, please select a valid file")
        sys.exit(1)
    except TypeError:
        # args.file is None when -f was not given.
        print("Error: No file specified")
        sys.exit(1)
    eeprom = ser.read(args.bytes * 1024)
    if ser.read(1) != b'\0':
        print("Error: no EOF received")
    if file != eeprom:
        print("Not equal")
        n = 0
        for i in range(args.bytes * 1024):
            if file[i] != eeprom[i]:
                n += 1
        print(n, "differences found")
        sys.exit(1)
    else:
        print("Ok")
        sys.exit(0)
    # NOTE: the original ended with another Ack check here, but it was
    # unreachable -- both branches above call sys.exit(); removed.
def read_eeprom():
    """Print args.bytes kB of EEPROM from args.address as an ascii hex dump
    (16 bytes per line, via the burner's 'R' command)."""
    ser.flushInput()
    # Bug fix: bytes("...").encode('ascii') raises TypeError on Python 3.
    ser.write(("R " + format(args.address, '08x') + " " +
               format(args.address + args.bytes * 1024, '08x') +
               " 10\n").encode('ascii'))
    ser.readline()  # remove blank starting line
    for _ in range(int(round(args.bytes * 1024 / 16))):
        print(ser.readline().decode('ascii').rstrip())
def write_eeprom(paged):
    """Write args.bytes kB from args.file (starting at args.offset) to the
    EEPROM at args.address, in 256-byte blocks.

    paged -- when True use the device's fast page-write mode with
             args.page_size bytes per page, otherwise byte-wise writes.
    """
    # NOTE: the redundant local `import time` was removed (imported at top).
    with open(args.file, 'rb') as fi:
        fi.seek(args.offset)
        now = time.time()  # start our stopwatch
        for i in range(args.bytes * 4):  # write n blocks of 256 bytes
            output = fi.read(256)
            print("Writing from", format(args.address + i * 256, '08x'),
                  "to", format(args.address + i * 256 + 255, '08x'))
            # Bug fix: bytes("...").encode('ascii') raises TypeError on Python 3.
            if paged:
                ser.write(("W " + format(args.address + i * 256, '08x') +
                           " 00000100 " + format(args.page_size, '02x') + "\n").encode('ascii'))
            else:
                ser.write(("W " + format(args.address + i * 256, '08x') +
                           " 00000100 00\n").encode('ascii'))
            ser.flushInput()
            ser.write(output)
            # Poll until the device acknowledges the block with '%'.
            while ser.read(1) != b'%':
                time.sleep(0.01)
    print("Wrote", args.bytes * 1024, "bytes in", "%.2f" % (time.time() - now), "seconds")
def unlock():
    """Disable the EEPROM's software data protection ('U' command)."""
    print("Unlocking...")
    ser.flushInput()
    # Bug fix: bytes("...").encode('ascii') raises TypeError on Python 3;
    # a bytes literal sends the same ascii payload.
    ser.write(b"U 00000000 00000000 00\n")
    if ser.read(1) != b'%':
        print("Error: no ack")
        sys.exit(1)
def version():
    """Query and print the burner firmware version ('V' command)."""
    print("Burner version:")
    ser.flushInput()
    # Bug fix: bytes("...").encode('ascii') raises TypeError on Python 3.
    ser.write(b"V 00000000 00000000 00\n")
    if ser.read(1) != b'E':
        print("Error: no ack")
        sys.exit(1)
    print(ser.read(5))
args = parser.parse_args()

# Convert our hex strings to ints.
# Bug fix: `long` does not exist in Python 3; int handles arbitrary precision.
args.address = int(args.address, 16)
args.offset = int(args.offset, 16)

SERIAL_TIMEOUT = 1200  # seconds

try:
    ser = serial.Serial(args.com, args.speed, timeout=SERIAL_TIMEOUT)
    # Give the board time to reset after the port is opened.
    time.sleep(2)
except serial.serialutil.SerialException:
    print("Error: Serial port is not valid, please select a valid port")
    sys.exit(1)

# Dispatch to the selected task.
if args.cmd == 'write':
    write_eeprom(False)
elif args.cmd == 'write_paged':
    write_eeprom(True)
elif args.cmd == 'read':
    read_eeprom()
elif args.cmd == 'dump':
    dump_file()
elif args.cmd == 'verify':
    verify()
elif args.cmd == 'unlock':
    unlock()
elif args.cmd == 'list':
    list_ports()
elif args.cmd == 'version':
    version()
elif args.cmd == 'debug':
    # Dump the chip in four 32 kB quadrants, then the full 128 kB image.
    # (The old commented-out write/verify scratch code was removed.)
    args.bytes = 32
    args.file = 'chip.bin4'
    args.address = 98304
    dump_file()
    args.file = 'chip.bin3'
    args.address = 65536
    dump_file()
    args.file = 'chip.bin2'
    args.address = 32768
    dump_file()
    args.file = 'chip.bin1'
    args.address = 0
    dump_file()
    args.bytes = 128
    args.file = 'chip.bin'
    dump_file()

ser.close()
sys.exit(0)
| |
# -*- coding: utf-8 -*-
import os
import os.path
import datetime
import json
import sqlite3
# Uses python-requests
# http://docs.python-requests.org/en/latest/
import requests
import requests.exceptions
# Look at the X-Bin-Request-Count header and X-Bin-Max-Requests header
# for how many requests you've made, and how many you can make pr. hour.
# You can do any amount of requests pr. second that you want.
# All IDs used with the API are CCP IDs (Except killmail IDs, which can be
# internally set, but they are denoted with a - infront (negative numbers))
# If you get an error 403, look at the Retry-After header.
# The API will maximum deliver of 200 killmails.
# Up to 10 IDs can be fetched at the same time, by seperating them with a , (Comma)
# All modifiers can be combined in any order
# Examples of options both for ZKB class and cache classes:
# File-backed cache: one <request>.json file per request under cache_dir.
zkb_cache_options_file = {
    'debug': True,                 # print cache/network diagnostics
    'cache_time': 1200,            # seconds a cached reply stays valid
    'cache_type': 'file',
    'cache_dir': './_caches/zkb',
    'use_evekill': True            # query the eve-kill.net mirror API
}
# Sqlite-backed cache: all replies stored in a single database file.
zkb_cache_options_sqlite = {
    'debug': True,
    'cache_time': 1200,
    'cache_type': 'sqlite',
    'cache_file': './_caches/zkb/zkb_cache.db',
    'use_evekill': True
}
class ZKBCacheBase:
    """Common options handling for ZKB reply caches.

    Subclasses override get_json/save_json; the base implementations are
    no-ops, so a bare ZKBCacheBase behaves like "no cache".
    """
    def __init__(self, options: dict=None):
        # Defaults: replies valid for 10 minutes, diagnostics off.
        self._cache_time = 600  # seconds
        self._debug = False
        opts = options or {}
        if 'cache_time' in opts:
            self._cache_time = int(opts['cache_time'])
        if 'debug' in opts:
            self._debug = opts['debug']

    def get_json(self, request_str: str):
        """Return the cached reply for request_str; None/'' means a miss."""
        return None

    def save_json(self, request_str: str, reply_str: str):
        """Store reply_str under request_str; no-op in the base class."""
        return None
class ZKBCacheFile(ZKBCacheBase):
    """Reply cache storing each request's JSON in its own file.

    Files live in options['cache_dir'] as '<request>.json'. Entries older
    than cache_time seconds (by file mtime) are treated as stale, deleted,
    and reported as a miss so the caller re-fetches.
    """
    def __init__(self, options: dict=None):
        super(ZKBCacheFile, self).__init__(options)
        self._cache_dir = None
        if options:
            if 'cache_dir' in options:
                cache_dir = options['cache_dir']
                # create dir if it does not exist
                if not os.access(cache_dir, os.R_OK):
                    os.makedirs(cache_dir, exist_ok=True)
                elif not os.path.isdir(cache_dir):
                    # already exists and is not a directory
                    raise IOError('ZKBCacheFile: Already exists and is NOT a directory: ' + cache_dir)
                self._cache_dir = cache_dir

    def get_json(self, request_str: str):
        """Return the cached reply for request_str, or '' on miss/stale/error."""
        ret = ''
        if request_str is None:
            return ret
        if self._cache_dir is None:
            return ret
        cache_file = self._cache_dir + '/' + request_str + '.json'
        if not (os.path.isfile(cache_file) and os.access(cache_file, os.R_OK)):
            return ret
        # Reject entries older than cache_time seconds, judged by file mtime.
        st = os.stat(cache_file)
        dt_cache = datetime.datetime.fromtimestamp(st.st_mtime)
        delta_secs = (datetime.datetime.now() - dt_cache).total_seconds()
        if delta_secs < self._cache_time:
            if self._debug:
                print('ZKBCacheFile: Loading from cache: [{0}]'.format(cache_file))
            try:
                # Bug fix: the handle was previously left open on read errors;
                # a context manager guarantees it is closed.
                with open(cache_file, 'rt') as f:
                    ret = f.read()
            except IOError as e:
                if self._debug:
                    print('ZKBCacheFile: failed to read cache data from: [{0}]'.format(cache_file))
                    print(str(e))
        else:
            if self._debug:
                print('ZKBCacheFile: Cache file [{0}] skipped, too old: {1} secs. (limit was: {2})'.
                      format(cache_file, delta_secs, self._cache_time))
            # Stale entries are deleted; a successful request will recreate
            # the file via save_json(). (The old comment claimed the file was
            # kept, contradicting the code below; the code's behavior wins.)
            os.remove(cache_file)
        return ret

    def save_json(self, request_str: str, reply_str: str):
        """Write reply_str to the cache file for request_str (overwrites)."""
        if request_str is None:
            return
        if reply_str is None:
            return
        if self._cache_dir is None:
            return
        # auto-create cache dir if not exists
        if not os.path.isdir(self._cache_dir):
            try:
                os.makedirs(self._cache_dir)
            except OSError:
                pass
        cache_file = self._cache_dir + '/' + request_str + '.json'
        try:
            # Overwrites any previous cached reply for this request.
            with open(cache_file, 'wt') as f:
                f.write(reply_str)
        except IOError as e:
            if self._debug:
                print("ZKBCacheFile: Can't store reply to cache file:")
                print(str(e))
class ZKBCacheSqlite(ZKBCacheBase):
    """Reply cache backed by a single sqlite database file.

    Table zkb_cache holds (request, reply, unix save time). Stale rows are
    purged on read; saving replaces any previous row for the same request.
    """
    def __init__(self, options: dict=None):
        super(ZKBCacheSqlite, self).__init__(options)
        self._cache_file = None
        self._db = None
        if options:
            if 'cache_file' in options:
                self._cache_file = options['cache_file']
        if (self._cache_file is not None) and (self._cache_file != ''):
            self._db = sqlite3.connect(self._cache_file)
            cur = self._db.cursor()
            cur.execute('CREATE TABLE IF NOT EXISTS zkb_cache '
                        '(req text, resp text, save_time int)')
            self._db.commit()
            cur.close()

    def get_json(self, request_str: str):
        """Return the cached reply for request_str, or '' on miss/stale."""
        ret = ''
        if not self._cache_file:
            return ret
        if not self._db:
            return ret
        tm_now = int(datetime.datetime.now().timestamp())
        cur = self._db.cursor()
        cur.execute('SELECT resp, save_time FROM zkb_cache WHERE req = ?', (request_str,))
        row = cur.fetchone()
        if row:
            save_time = int(row[1])
            if tm_now - save_time > self._cache_time:
                # Stale: purge the old record and report a miss.
                cur.close()
                cur = self._db.cursor()
                cur.execute('DELETE FROM zkb_cache WHERE req = ? AND save_time = ?', (request_str, save_time))
                self._db.commit()
            else:
                ret = row[0]
        cur.close()
        return ret

    def save_json(self, request_str: str, reply_str: str):
        """Store reply_str for request_str, replacing any previous row."""
        if not self._cache_file:
            return
        if not self._db:
            return
        tm_now = int(datetime.datetime.now().timestamp())
        cur = self._db.cursor()
        # Bug fix: the original blindly INSERTed, so repeated saves for the
        # same request accumulated duplicate rows forever; drop old rows first.
        cur.execute('DELETE FROM zkb_cache WHERE req = ?', (request_str,))
        cur.execute('INSERT INTO zkb_cache (req, resp, save_time) VALUES (?, ?, ?)',
                    (request_str, reply_str, tm_now))
        self._db.commit()
        cur.close()
        return
class ZKB:
def __init__(self, options: dict=None):
    """Build a killboard request helper.

    Recognized options keys (all optional): debug, user_agent, use_evekill,
    cache_type ('file' or 'sqlite', plus that cache's own options) and
    kills_on_page (client-side cap on parsed kills).
    """
    self.HOURS = 3600
    self.DAYS = 24 * self.HOURS
    self._BASE_URL_ZKB = 'https://zkillboard.com/api/'
    self._BASE_URL_EVEKILL = 'https://beta.eve-kill.net/api/combined/'
    # Browser-like request headers; the user-agent identifies this client.
    self._headers = dict()
    self._headers['accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    self._headers['accept-language'] = 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3'
    self._headers['accept-encoding'] = 'gzip, deflate'
    self._headers['user-agent'] = 'Python/ZKB agent, alexey.min@gmail.com'
    self._url = ''            # request URL being built by add_* calls
    self._modifiers = ''      # flattened modifier string, doubles as cache key
    self._cache = None
    self._debug = False
    self._use_evekill = False
    self.request_count = 0    # X-Bin-Request-Count from the last reply
    self.max_requests = 0     # X-Bin-Max-Requests from the last reply
    self.kills_on_page = 0
    self.clear_url()
    # parse options
    if options:
        if 'debug' in options:
            self._debug = options['debug']
        if 'user_agent' in options:
            self._headers['user-agent'] = options['user_agent']
        if 'use_evekill' in options:
            self._use_evekill = options['use_evekill']
            # re-point the base URL at the newly selected API
            self.clear_url()
        if 'cache_type' in options:
            cache_type = options['cache_type']
            if cache_type == 'file':
                self._cache = ZKBCacheFile(options)
            elif cache_type == 'sqlite':
                self._cache = ZKBCacheSqlite(options)
            else:
                raise IndexError('ZKB: Unknown cache_type in options: ' + cache_type)
        if 'kills_on_page' in options:
            self.kills_on_page = options['kills_on_page']
def clear_url(self):
    """Reset the request URL to the configured API base and clear modifiers."""
    self._url = self._BASE_URL_EVEKILL if self._use_evekill else self._BASE_URL_ZKB
    self._modifiers = ''
def add_modifier(self, mname, mvalue=None):
    """Append a path modifier (and optional value) to the request URL.

    The same tokens are recorded in self._modifiers, which serves as the
    cache key for this request.
    """
    self._url += '{}/'.format(mname)
    self._modifiers += '{}_'.format(mname)
    if mvalue:
        self._url += '{}/'.format(str(mvalue))
        self._modifiers += '{}_'.format(str(mvalue))
# startTime and endTime is datetime timestamps, in the format YmdHi..
# Example 2012-11-25 19:00 is written as 201211251900
def add_startTime(self, st=None):
    """Restrict results to kills at/after st (default: now).

    Bug fix: the default used to be st=datetime.datetime.now(), which is
    evaluated once at import time, freezing "now" for the process lifetime.
    """
    if st is None:
        st = datetime.datetime.now()
    self.add_modifier('startTime', st.strftime('%Y%m%d%H%M'))
# startTime and endTime is datetime timestamps, in the format YmdHi..
# Example 2012-11-25 19:00 is written as 201211251900
def add_endTime(self, et=None):
    """Restrict results to kills at/before et (default: now).

    Bug fix: the default used to be et=datetime.datetime.now(), which is
    evaluated once at import time, freezing "now" for the process lifetime.
    """
    if et is None:
        et = datetime.datetime.now()
    self.add_modifier('endTime', et.strftime('%Y%m%d%H%M'))
# --- Thin wrappers over add_modifier(), one per zKillboard API modifier. ---

# pastSeconds returns only kills that have happened in the past x seconds.
# pastSeconds can maximum go up to 7 days (604800 seconds)
def add_pastSeconds(self, s):
    self.add_modifier('pastSeconds', s)

def add_year(self, y):
    self.add_modifier('year', y)

def add_month(self, m):
    self.add_modifier('month', m)

def add_week(self, w):
    self.add_modifier('week', w)

# If the /limit/ modifier is used, then /page/ is unavailable.
def add_limit(self, limit):
    self.add_modifier('limit', str(limit))

# Page reqs over 10 are only allowed for characterID, corporationID and allianceID
def add_page(self, page):
    self.add_modifier('page', page)

def add_beforeKillID(self, killID):
    self.add_modifier('beforeKillID', killID)

def add_afterKillID(self, killID):
    self.add_modifier('afterKillID', killID)

# To get combined /kills/ and /losses/, don't pass either /kills/ or /losses/
def add_kills(self):
    self.add_modifier('kills')

# To get combined /kills/ and /losses/, don't pass either /kills/ or /losses/
def add_losses(self):
    self.add_modifier('losses')

# /w-space/ and /solo/ can be combined with /kills/ and /losses/
def add_wspace(self):
    self.add_modifier('w-space')

# /w-space/ and /solo/ can be combined with /kills/ and /losses/
def add_solo(self):
    self.add_modifier('solo')

# If you do not paass /killID/ then you must pass at least two
# of the following modifiers. /w-space/, /solo/ or any of the /xID/ ones.
# (characterID, allianceID, factionID etc.)
def add_killID(self, killID):
    self.add_modifier('killID', killID)

def add_orderAsc(self):
    self.add_modifier('orderDirection', 'asc')

def add_orderDesc(self):
    self.add_modifier('orderDirection', 'desc')

# Strip item / attacker lists from returned killmails (smaller replies).
def add_noItems(self):
    self.add_modifier('no-items')

def add_noAttackers(self):
    self.add_modifier('no-attackers')

# Entity filters; all IDs are CCP IDs.
def add_character(self, charID):
    self.add_modifier('characterID', charID)

def add_corporation(self, corpID):
    self.add_modifier('corporationID', corpID)

def add_alliance(self, allyID):
    self.add_modifier('allianceID', allyID)

def add_faction(self, factionID):
    self.add_modifier('factionID', factionID)

def add_shipType(self, shipTypeID):
    self.add_modifier('shipTypeID', shipTypeID)

def add_group(self, groupID):
    self.add_modifier('groupID', groupID)

def add_solarSystem(self, solarSystemID):
    self.add_modifier('solarSystemID', solarSystemID)
# Default cache lifetime set to 1 hour (3600 seconds)
    def go(self):
        """Execute the accumulated query against the zKillboard API.

        Resolution order: try the configured cache first; on a miss (or
        cache error) issue an HTTP GET and write the raw JSON back to the
        cache. The response is then parsed and each kill dict is flattened:
        selected fields of its 'zkb' sub-dict are promoted to top-level keys
        and 'zkb' itself is removed.

        Returns a list of kill dicts; empty on any failure.
        """
        zkb_kills = []
        ret = ''
        # first, try to get from cache
        if self._cache:
            ret = self._cache.get_json(self._modifiers)
        if (ret is None) or (ret == ''):
            # either no cache exists or cache read error :( send request
            try:
                if self._debug:
                    print('ZKB: Sending request! {0}'.format(self._url))
                r = requests.get(self._url, headers=self._headers)
                if r.status_code == 200:
                    ret = r.text
                    # Track the per-hour request budget reported by zKB.
                    if 'x-bin-request-count' in r.headers:
                        self.request_count = int(r.headers['x-bin-request-count'])
                    if 'x-bin-max-requests' in r.headers:
                        self.max_requests = int(r.headers['x-bin-max-requests'])
                    if self._debug:
                        print('ZKB: We are making {0} requests of {1} allowed per hour.'.
                              format(self.request_count, self.max_requests))
                elif r.status_code == 403:
                    # If you get an error 403, look at the Retry-After header.
                    # NOTE(review): a 403 without a retry-after header would
                    # raise KeyError here, which is not caught -- confirm.
                    retry_after = r.headers['retry-after']
                    if self._debug:
                        print('ZKB: ERROR: we got 403, retry-after: {0}'.format(retry_after))
                else:
                    if self._debug:
                        print('ZKB: ERROR: HTTP response code: {0}'.format(r.status_code))
            except requests.exceptions.RequestException as e:
                # Network-level failure: fall through with ret still empty.
                if self._debug:
                    print(str(e))
            # request done, see if we have a response
            if ret != '':
                # Persist the fresh response for subsequent calls.
                if self._cache:
                    self._cache.save_json(self._modifiers, ret)
        # parse response JSON, if we have one
        if (ret is not None) and (ret != ''):
            zkb_kills = []
            try:
                zkb_kills = json.loads(ret)
            except ValueError:
                # skip JSON parse errors
                pass
            # NOTE(review): utcnow is computed but never used below -- confirm
            # whether it was meant for kill-time filtering.
            utcnow = datetime.datetime.utcnow()
            try:
                if self.kills_on_page > 0:
                    # manually limit number of kills to process
                    zkb_kills = zkb_kills[0:self.kills_on_page]
                for a_kill in zkb_kills:
                    # a_kill should be a dict object.
                    # Sometimes ZKB can return 'error' key as string, we can parse only dicts
                    if type(a_kill) != dict:
                        continue
                    # Flatten 'zkb' metadata into the kill dict, with defaults
                    # so callers can rely on the keys existing.
                    a_kill['killmail_hash'] = ''
                    a_kill['total_value'] = 0
                    a_kill['total_value_m'] = 0
                    a_kill['is_npc'] = False
                    a_kill['is_solo'] = False
                    if 'zkb' in a_kill:
                        if 'totalValue' in a_kill['zkb']:
                            a_kill['total_value'] = float(a_kill['zkb']['totalValue'])
                            # Kill value in millions of ISK, rounded.
                            a_kill['total_value_m'] = round(float(a_kill['zkb']['totalValue']) / 1000000.0)
                        if 'hash' in a_kill['zkb']:
                            a_kill['killmail_hash'] = a_kill['zkb']['hash']
                        if 'npc' in a_kill['zkb']:
                            a_kill['is_npc'] = a_kill['zkb']['npc']
                        if 'solo' in a_kill['zkb']:
                            a_kill['is_solo'] = a_kill['zkb']['solo']
                        del a_kill['zkb']
            except KeyError as k_e:
                # Defensive: unexpected payload layout.
                if self._debug:
                    print('It is possible that ZKB API has changed (again).')
                    print(str(k_e))
        return zkb_kills
# #################################
# Unimplemented / Unused:
# /api-only/
# /xml/
# https://zkillboard.com/system/31000707/
def pretty_print_kill(kill):
    """Dump every key/value pair of a killmail dict to stdout, one per line."""
    for field, content in kill.items():
        print('kill[{0}] -> {1}'.format(str(field), str(content)))
# kill[killmail_id] -> 72725284
# kill[zkb] -> {
# 'locationID': 40387568,
# 'hash': '56a83bf9445ad4ed88426b19e600e801e6ab57f4',
# 'fittedValue': 1320489.39,
# 'totalValue': 48235664.21,
# 'points': 1,
# 'npc': False,
# 'solo': True,
# 'awox': False
# }
if __name__ == '__main__':
    # Demo run: query one wormhole system and pretty-print the first kill.
    # Two interchangeable cache backends; the file cache is active.
    zkb_options_file = {
        'debug': True,
        'cache_time': 1200,
        'cache_type': 'file',
        'cache_dir': './_caches/zkb',
        'use_evekill': False
    }
    zkb_options_sqlite = {
        'debug': True,
        'cache_time': 1200,
        'cache_type': 'sqlite',
        'cache_file': './_caches/zkb/zkb_cache.db',
        'use_evekill': False
    }
    z = ZKB(zkb_options_file)
    # z = ZKB(zkb_options_sqlite)
    z.add_solarSystem('31000707')
    # z.add_limit(1) # no more limits
    zkb_kills = z.go()
    # Show only the first kill, if any were returned.
    if len(zkb_kills) > 0:
        pretty_print_kill(zkb_kills[0])
| |
from flask import (
Flask,
flash,
g,
session,
redirect,
render_template,
request,
)
from flask.ext.mongoengine import MongoEngine
import datetime
import os
import random
app = Flask(__name__)
# MongoDB database name for this survey year.
app.config["MONGODB_SETTINGS"] = {'DB': 'furrypoll_2016'}
# NOTE(review): a random SECRET_KEY per process invalidates all sessions on
# every restart and breaks multi-worker deployments -- confirm intended.
app.config["SECRET_KEY"] = os.urandom(12)
app.config["DEBUG"] = False
# Master switch: when False, before_request locks visitors to the front page.
app.config['SURVEY_ACTIVE'] = False
db = MongoEngine(app)
# Older python compat - db has to be defined first.
import models
import questions
@app.before_request
def before_request():
    """Pre-request checks

    If the survey is not active, do not allow any paths except / and
    static assets.
    """
    # Static assets are always served. (Was a substring test, which also
    # allowed any path merely containing "static" to bypass the check.)
    if request.path.startswith('/static'):
        return
    if not app.config['SURVEY_ACTIVE'] and request.path != u'/':
        flash('The survey is not currently active.')
        return redirect('/')
@app.route('/')
def front():
    """Render the front page, telling the template whether the survey is open."""
    is_active = app.config['SURVEY_ACTIVE']
    return render_template('front.html', survey_active=is_active)
@app.route('/survey/start/', methods=['GET', 'POST'])
def surveyStart():
    """Begin a survey

    This view creates a response object if none exists and provides the user
    with some additional information about the survey. Additionally, bot
    checks are made with a honeypot and a simple math question.
    """
    # If it's a POST request, we need to check for bots.
    if request.method == 'POST':
        # -100 is a sentinel that can never equal the sum of two 1..10 ints.
        result = -100
        try:
            result = int(request.form.get('result', '-100'))
        except ValueError:
            pass
        # Pass only when the math answer matches and the honeypot is empty.
        if (result == session.get('add_a', 0) + session.get('add_b', 0)) \
                and request.form.get('hp_field', '') == '':
            return redirect('/survey/overview/')
        else:
            flash('''Please ensure that you have answered the simple question
                  below to start the survey!''')
    # Create a new response object if none exists.
    if session.get('response_id') is not None:
        survey = models.Response.objects.get(id=session['response_id'])
    else:
        survey = models.Response()
        survey.metadata = models.ResponseMetadata(
            client_ip=request.remote_addr,
            client_ua=str(request.user_agent)
        )
        # Touchpoint 0 marks the start of the survey.
        start_tp = models.Touchpoint(
            touchpoint_type=0
        )
        survey.metadata.touchpoints.append(start_tp)
        survey.save()
        session['response_id'] = str(survey.id)
        # Warn (but do not block) when this IP already has a response.
        if len(models.Response.objects.filter(metadata__client_ip=request.remote_addr)) > 1:
            flash('''It appears that someone has already completed the furry
                  survey from this computer or IP address. If this is a public
                  computer, a household with multiple people sharing one IP address,
                  or you believe that you have not completed this survey, please feel
                  free to continue; otherwise, please <a href="/survey/cancel">cancel
                  the survey</a> if you have already completed it.''')
            # NOTE(review): this -6 touchpoint is appended after save() with no
            # further save -- it may never be persisted; confirm.
            survey.metadata.touchpoints.append(models.Touchpoint(touchpoint_type=-6))
    # Prepare bot checks.
    session['add_a'] = add_a = random.randint(1, 10)
    session['add_b'] = add_b = random.randint(1, 10)
    return render_template('start.html',
                           survey_id=str(survey.id),
                           add_a=add_a,
                           add_b=add_b)
@app.route('/touch/question/<int:question_id>')
def surveyQuestion(question_id):
    """Mark a question answered

    AJAX endpoint that records a touchpoint for the answered question;
    answer pacing is later used to spot spam responses, which tend to be
    submitted far too quickly.
    """
    response_id = session.get('response_id')
    if response_id is None:
        return '{"error":"No session id"}'
    survey = models.Response.objects.get(id=response_id)
    survey.metadata.touchpoints.append(
        models.Touchpoint(touchpoint_type=question_id))
    survey.save()
    return '{"error":null}'
@app.route('/survey/overview/', methods=['GET', 'POST'])
def surveyOverview():
    """The Overview section of the survey.

    GET renders the section (skipping ahead if already completed); POST
    records a page touchpoint, saves the submitted answers, and routes to
    cancel/complete/next-section as requested.
    """
    # Check if we have a response in progress.
    if session.get('response_id') is None:
        return redirect('/')
    survey = models.Response.objects.get(id=session['response_id'])
    if request.method == 'POST':
        # Add a page touchpoint (-1 == overview submitted).
        tp = models.Touchpoint(
            touchpoint_type=-1
        )
        survey.metadata.touchpoints.append(tp)
        # Cancel if requested.
        if request.form.get('cancel') is not None:
            survey.save()
            return redirect('/survey/cancel/')
        # Save answers if provided.
        survey.overview = models.Overview()
        _save_answers(request.form, 'overview', survey)
        survey.save()
        # Complete the survey if requested. Use the canonical trailing-slash
        # URL so the client is not bounced through an extra redirect
        # (the route is registered as '/survey/complete/').
        if request.form.get('complete', None) is not None:
            return redirect('/survey/complete/')
        # Otherwise, continue on to the next page.
        return redirect('/survey/psychographic/')
    # Check if we've already completed the Overview.
    if -1 in [tp.touchpoint_type for tp in survey.metadata.touchpoints]:
        flash("Looks like you've already completed the overview...")
        return redirect('/survey/psychographic/')
    return render_template('overview.html', questions=questions)
@app.route('/survey/psychographic/', methods=['GET', 'POST'])
def surveyPsychographic():
    """The Psychographic Battery section of the survey.

    Same flow as the other sections: GET renders (or skips ahead), POST
    records a touchpoint, saves answers, and routes onward.
    """
    # Check if we have a response in progress.
    if session.get('response_id') is None:
        return redirect('/')
    survey = models.Response.objects.get(id=session['response_id'])
    if request.method == 'POST':
        # Add a page touchpoint (-2 == psychographic battery submitted).
        tp = models.Touchpoint(
            touchpoint_type=-2
        )
        survey.metadata.touchpoints.append(tp)
        # Cancel if requested.
        if request.form.get('cancel', None) is not None:
            survey.save()
            return redirect('/survey/cancel/')
        # Save answers if provided.
        survey.psychographic_battery = models.PsychographicBattery()
        _save_answers(request.form, 'psychographic_battery', survey)
        survey.save()
        # Complete the survey if requested. Use the canonical trailing-slash
        # URL so the client is not bounced through an extra redirect
        # (the route is registered as '/survey/complete/').
        if request.form.get('complete', None) is not None:
            return redirect('/survey/complete/')
        # Otherwise, continue on to the next page.
        return redirect('/survey/sexuality/')
    # Check if we've already completed the battery.
    if -2 in [tp.touchpoint_type for tp in survey.metadata.touchpoints]:
        flash("Looks like you've already completed the psychographic battery...")
        return redirect('/survey/sexuality/')
    return render_template('psychographic.html')
@app.route('/survey/sexuality/', methods=['GET', 'POST'])
def surveySexuality():
    """The Sexuality section of the survey: last section before completion."""
    # Bail out to the front page when no response is in progress.
    if session.get('response_id') is None:
        return redirect('/')
    survey = models.Response.objects.get(id=session['response_id'])
    if request.method != 'POST':
        # Skip ahead when this section was already submitted.
        if -3 in [tp.touchpoint_type for tp in survey.metadata.touchpoints]:
            flash("Looks like you've already completed the sexuality section...")
            return redirect('/survey/complete/')
        return render_template('sexuality.html')
    # Record the page touchpoint (-3) for this section.
    survey.metadata.touchpoints.append(models.Touchpoint(touchpoint_type=-3))
    # Cancel if requested.
    if request.form.get('cancel', None) is not None:
        survey.save()
        return redirect('/survey/cancel/')
    # Persist submitted answers, then finish the survey.
    survey.sexuality = models.Sexuality()
    _save_answers(request.form, 'sexuality', survey)
    survey.save()
    return redirect('/survey/complete/')
@app.route('/survey/complete/', methods=['GET', 'POST'])
def surveyComplete():
    """Mark the current survey response as complete and detach the session."""
    response_id = session.get('response_id')
    if response_id is not None:
        survey = models.Response.objects.get(id=response_id)
        already_done = -4 in [tp.touchpoint_type
                              for tp in survey.metadata.touchpoints]
        if already_done:
            flash("Survey is already marked as complete!")
        else:
            # Record the completion touchpoint (-4) exactly once.
            flash("Survey complete! Thank you!")
            survey.metadata.touchpoints.append(
                models.Touchpoint(touchpoint_type=-4))
            survey.save()
    # Forget the response so a new survey can be started from this browser.
    session['response_id'] = None
    return render_template('complete.html')
@app.route('/survey/cancel/', methods=['GET', 'POST'])
def surveyCancel():
    """Mark the current survey response as canceled and detach the session."""
    response_id = session.get('response_id')
    if response_id is None:
        return redirect('/')
    survey = models.Response.objects.get(id=response_id)
    # Record the cancellation touchpoint (-5) and persist it.
    survey.metadata.touchpoints.append(models.Touchpoint(touchpoint_type=-5))
    survey.save()
    flash("Survey canceled. Thank you for your time!")
    session['response_id'] = None
    return render_template('cancel.html')
def _save_answers(form, section, survey):
    """Save question answers.

    Given a form, the section which is to be saved, and a survey response
    object, save the questions from the form in the survey response.
    Dispatch order: known closed-option questions first, then keys with a
    3-letter type prefix, then plain scalars.
    """
    for key in form.keys():
        value = form.get(key, '')
        # Skip blank answers.
        if value == '':
            continue
        # Skip values from button clicks.
        if key in ('submit', 'complete', 'cancel'):
            continue
        # Skip companion free-text fields; _psr_from_value reads them itself.
        if key.endswith('_other'):
            continue
        if key in questions.question_options:
            # Closed-option question: store PotentiallySubjectiveResponse(s).
            if key in ('race', 'occupation'):
                # Multi-select questions append one PSR per chosen value.
                for chosen in form.getlist(key):
                    survey.__getattribute__(section).__getattribute__(key).append(
                        _psr_from_value(form, key, chosen))
            else:
                survey.__getattribute__(section).__setattr__(
                    key,
                    _psr_from_value(form, key, value))
            continue
        # Keys with an indicator prefix use a dedicated save helper.
        indicated_types = {
            'gic': _save_gender_identity_coordinates,
            'npo': _save_number_per_option,
            'spo': _save_string_per_option,
            'lpo': _save_list_per_option,
            'lst': _save_list_item,
            'chr': _save_character,
            'psr': _save_raw_psr,
        }
        handler = indicated_types.get(key[:3])
        if handler is not None:
            handler(form, key, value, section, survey)
            continue
        # Otherwise save scalars: numeric strings become floats, checkbox
        # 'on' becomes True, everything else stays a string.
        try:
            value = float(value)
        except ValueError:  # was a bare except; float(str) raises only ValueError
            pass
        if value == 'on':
            value = True
        survey.__getattribute__(section).__setattr__(key, value)
def _psr_from_value(form, key, value):
    """Build a PotentiallySubjectiveResponse for *value* of question *key*.

    'other' answers pull the companion free-text field; subjectivity is
    looked up (by the original option value) in questions.question_options.
    """
    if value == 'other':
        saved = form.get('{}_other'.format(key), 'other (not specified)')
    else:
        saved = value
    options = questions.question_options[key]
    is_subjective = options[value]['subjective'] if value in options else False
    return models.PotentiallySubjectiveResponse(
        value=saved[:2000],
        subjective=is_subjective
    )
def _save_gender_identity_coordinates(form, key, value, section, survey):
    """Save gender widget coordinates; key format is ``gic_<qid>_<field>``."""
    name = key[4:7]
    # Pull all four coordinate fields for this widget from the form.
    coords = {
        suffix: form.get('gic_{}_{}'.format(name, suffix), '')
        for suffix in ('male', 'female', 'male_quantized', 'female_quantized')
    }
    gic = models.GenderIdentityCoordinates(**coords)
    question_field = {
        'exp': 'gender_expression_coords',
        'gid': 'gender_identity_coords',
        'gif': 'gender_in_furry_coords',
    }[name]
    setattr(getattr(survey, section), question_field, gic)
def _save_number_per_option(form, key, value, section, survey):
    """Append a NumberPerOption answer; key format is ``npo_<qid>_<option>``."""
    question_field = {
        'pol': 'political_views',
        'fac': 'furry_activities',
        'fao': 'furry_activities_opinion',
        'nfa': 'non_furry_activities',
        'imp': 'furry_importance',
        'bat': 'battery',
        'sim': 'sex_importance',
        'dvs': 'dom_or_sub',
    }[key[4:7]]
    entry = models.NumberPerOption(option=key[8:], value=value)
    getattr(getattr(survey, section), question_field).append(entry)
def _save_string_per_option(form, key, value, section, survey):
    """Append a StringPerOption answer; key format is ``spo_<qid>_<option>``."""
    question_field = {
        'fws': 'furry_websites',
    }[key[4:7]]
    entry = models.StringPerOption(option=key[8:], value=value)
    getattr(getattr(survey, section), question_field).append(entry)
def _save_list_per_option(form, key, value, section, survey):
    """Append a ListPerOption holding every selected value of *key*."""
    question_field = {
        'int': 'interests',
    }[key[4:7]]
    entry = models.ListPerOption(option=key[8:], value=form.getlist(key))
    getattr(getattr(survey, section), question_field).append(entry)
def _save_list_item(form, key, value, section, survey):
    """Store all selected values of *key* as a plain list on the section."""
    question_field = {
        'con': 'conventions',
        'sds': 'self_described',
    }[key[4:7]]
    setattr(getattr(survey, section), question_field, form.getlist(key))
def _save_character(form, key, value, section, survey):
    """Save characters with metadata.

    *key* looks like ``chr_<index>_<field>``; all fields for one character
    are pulled from the form by that index, so only the first key seen for
    a given index does any work.
    """
    index = key.split('_')[1]
    # Since our form is immutable, we may wind up with additional responses;
    # If we've already saved this index, skip saving it again.
    # NOTE(review): `index` stays a string here but is compared as an int and
    # stored on the model -- confirm Character.index coerces it consistently.
    if int(index) in map(lambda x: x.index, survey.overview.characters):
        return
    # Retrieve species data; free text defaults to the joined categories.
    species_category = form.getlist('chr_{}_category'.format(index))
    species_text = form.get('chr_{}_species'.format(index),
                            ', '.join(species_category))
    reason = form.get('chr_{}_reason'.format(index), '')
    # Checkbox-style fields: any non-empty value means "checked".
    primary_character = form.get('chr_{}_primary'.format(index), '') != ''
    deprecated_character = \
        form.get('chr_{}_deprecated'.format(index), '') != ''
    # Do not save blank species.
    if not species_category and not species_text:
        return
    survey.overview.characters.append(
        models.Character(
            index=index,
            species_category=species_category,
            species_text=models.PotentiallySubjectiveResponse(
                subjective=True,
                value=species_text[:2000]
            ),
            primary_character=primary_character,
            deprecated_character=deprecated_character,
            reason=models.PotentiallySubjectiveResponse(
                subjective=True,
                value=reason[:2000]
            )
        )
    )
def _save_raw_psr(form, key, value, section, survey):
    """Save free text as an always-subjective response (truncated to 2000)."""
    field = key[4:]
    psr = models.PotentiallySubjectiveResponse(
        value=value[:2000],
        subjective=True)
    setattr(getattr(survey, section), field, psr)
if __name__ == '__main__':
    # Development-only entry point: fixed secret key, debug server,
    # and the survey forced active.
    app.secret_key = 'Development key'
    app.debug = True
    app.config['SURVEY_ACTIVE'] = True
    app.run()
| |
# awsauth.py for Interoute Object Storage
# See the repo: https://github.com/Interoute/object-storage-api
#
# Source for this file: https://pypi.python.org/pypi/requests-aws/0.1.8
# See also: https://github.com/tax/python-requests-aws
#
# This code provides a class S3Auth to generate authorisation data which can be used with
# the Python Requests module (http://docs.python-requests.org).
#
# The code uses AWS Signature Version 2 from the S3 API standard.
#
# Use of the code is explained in the Interoute Object Storage API User Guide:
# https://cloudstore.interoute.com/knowledge-centre/library/object-storage-api-user-guide
#
# The value of 'service_base_url' has been modified to set a default for
# Interoute Object Storage
# Original License Statement:
'''
Copyright (c) 2012-2013 Paul Tax <paultax@gmail.com> All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of Infrae nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INFRAE OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import hmac
from hashlib import sha1 as sha
import urllib
py3k = False
try:
from urlparse import urlparse, unquote
from base64 import encodestring
except:
py3k = True
from urllib.parse import urlparse, unquote
from base64 import encodebytes as encodestring
from email.utils import formatdate
from requests.auth import AuthBase
class S3Auth(AuthBase):
    """Attaches AWS Authentication to the given Request object.

    Implements AWS Signature Version 2 for S3-compatible endpoints: the
    request is canonicalised (method, selected headers, bucket/key, and
    sub-resource query arguments), HMAC-SHA1 signed with the secret key,
    and the base64 result placed in the ``Authorization`` header.
    """

    # Default S3 endpoint; override per instance via ``service_url``.
    service_base_url = 's3-eu.object.vdc.interoute.com'
    # List of Query String Arguments of Interest: sub-resources and response
    # overrides that must be included in the string-to-sign.
    special_params = [
        'acl', 'location', 'logging', 'partNumber', 'policy', 'requestPayment',
        'torrent', 'versioning', 'versionId', 'versions', 'website', 'uploads',
        'uploadId', 'response-content-type', 'response-content-language',
        'response-expires', 'response-cache-control', 'delete', 'lifecycle',
        'response-content-disposition', 'response-content-encoding', 'tagging',
        'notification', 'cors'
    ]

    def __init__(self, access_key, secret_key, service_url=None):
        """Store credentials and, optionally, a non-default endpoint."""
        if service_url:
            self.service_base_url = service_url
        self.access_key = str(access_key)
        self.secret_key = str(secret_key)

    def __call__(self, r):
        """Sign outgoing request *r* (requests auth hook) and return it."""
        # Create date header if it is not created yet; the signature
        # incorporates the date, so one of date/x-amz-date must be present.
        if 'date' not in r.headers and 'x-amz-date' not in r.headers:
            r.headers['date'] = formatdate(
                timeval=None,
                localtime=False,
                usegmt=True)
        signature = self.get_signature(r)
        if py3k:
            signature = signature.decode('utf-8')
        r.headers['Authorization'] = 'AWS %s:%s' % (self.access_key, signature)
        return r

    def get_signature(self, r):
        """Return the base64-encoded HMAC-SHA1 of the canonical string."""
        canonical_string = self.get_canonical_string(
            r.url, r.headers, r.method)
        if py3k:
            key = self.secret_key.encode('utf-8')
            msg = canonical_string.encode('utf-8')
        else:
            key = self.secret_key
            msg = canonical_string
        h = hmac.new(key, msg, digestmod=sha)
        return encodestring(h.digest()).strip()

    def get_canonical_string(self, url, headers, method):
        """Build the AWS V2 string-to-sign for *url*/*headers*/*method*."""
        parsedurl = urlparse(url)
        objectkey = parsedurl.path[1:]
        query_args = sorted(parsedurl.query.split('&'))

        # Anything in front of the service base URL is the bucket name
        # (virtual-hosted style); strip the joining dot.
        bucket = parsedurl.netloc[:-len(self.service_base_url)]
        if len(bucket) > 1:
            # remove last dot
            bucket = bucket[:-1]

        interesting_headers = {
            'content-md5': '',
            'content-type': '',
            'date': ''}
        for key in headers:
            lk = key.lower()
            try:
                lk = lk.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):
                # Python 3 str has no decode(); keys already text pass through.
                # (Was a bare except.)
                pass
            if headers[key] and (lk in interesting_headers.keys()
                                 or lk.startswith('x-amz-')):
                interesting_headers[lk] = headers[key].strip()

        # If x-amz-date is used it supersedes the date header.
        # (The original had byte-identical branches for py2 and py3 here;
        # collapsed to a single check.)
        if 'x-amz-date' in interesting_headers:
            interesting_headers['date'] = ''

        buf = '%s\n' % method
        for key in sorted(interesting_headers.keys()):
            val = interesting_headers[key]
            if key.startswith('x-amz-'):
                buf += '%s:%s\n' % (key, val)
            else:
                buf += '%s\n' % val

        # append the bucket if it exists
        if bucket != '':
            buf += '/%s' % bucket

        # add the objectkey. even if it doesn't exist, add the slash
        buf += '/%s' % objectkey

        params_found = False
        # handle special query string arguments
        for q in query_args:
            k = q.split('=')[0]
            if k in self.special_params:
                buf += '&' if params_found else '?'
                params_found = True
                try:
                    k, v = q.split('=', 1)
                except ValueError:
                    buf += q
                else:
                    # Requests URL-encodes query values (e.g. multipart upload
                    # ids ending in '=='), but the signature must be computed
                    # over the raw value -- unquote is a no-op when the value
                    # was not encoded.
                    buf += '{key}={value}'.format(key=k, value=unquote(v))
        return buf
| |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
# Load R's survival package once; the survfit call below depends on it.
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
# BASE_DIR = three directory levels above this script (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## There were three clinical files with nonredundant data. V4.0 was found to be most up to date.
## V2.1 was more up to date than V1.5. All three files are loaded with the more up to date file getting preference.
# Parse the v4.0 follow-up file (most up to date). The file handle is now
# closed deterministically (the original left it open).
with open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical',
                       'nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt')) as f:
    # Skip the three header lines.
    for _ in range(3):
        f.readline()
    data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical3=[['','','']]
for i in data:
    # Column 8: vital status; col 9: days to last follow up; col 10: days to death.
    if clinical3[-1][0]==i[0]:
        # Same patient as the previous row: overwrite with the newer data.
        if i[8]=='Alive':
            clinical3[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            try:
                clinical3[-1]=[i[0],int(i[10]),'Dead']
            except (ValueError, IndexError):
                # Death day missing/non-numeric: keep the previous entry.
                # (Was a bare except; only these two can occur here.)
                pass
    else:
        if i[8]=='Alive':
            clinical3.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            try:
                clinical3.append([i[0],int(i[10]),'Dead'])
            except (ValueError, IndexError):
                pass
## Removing the empty value.
clinical=clinical3[1:]
# Parse the v2.1 follow-up file; only patients absent from the newer v4.0
# data are used.
with open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical',
                       'nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt')) as f:
    # Skip the three header lines.
    for _ in range(3):
        f.readline()
    data=[i.split('\t') for i in f]
# Hoisted: the original rebuilt this ID list for every row (quadratic);
# `clinical` is not modified inside the loop, so a one-time set is identical.
known_patients=set(j[0] for j in clinical)
clinical1=[['','','']]
for i in data:
    if i[0] not in known_patients:
        # Col 6: vital status; col 7: days to last follow up; col 8: days to death.
        if clinical1[-1][0]==i[0]:
            # Same patient as the previous row: keep the newer data.
            if i[6]=='Alive':
                clinical1[-1]=[i[0],int(i[7]),'Alive']
            elif i[6]=='Dead':
                try:
                    clinical1[-1]=[i[0],int(i[8]),'Dead']
                except (ValueError, IndexError):
                    # Death day missing/non-numeric: keep the previous entry.
                    pass
        else:
            if i[6]=='Alive':
                clinical1.append([i[0],int(i[7]),'Alive'])
            elif i[6]=='Dead':
                try:
                    clinical1.append([i[0],int(i[8]),'Dead'])
                except (ValueError, IndexError):
                    pass
##merging data and removing the empty value
clinical+=clinical1[1:]
# Parse the v1.5 follow-up file; only patients absent from the newer files
# are used.
with open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical',
                       'nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt')) as f:
    # Skip the three header lines.
    for _ in range(3):
        f.readline()
    data=[i.split('\t') for i in f]
# Hoisted: `clinical` is constant during the loop, so the ID set is built once.
known_patients=set(j[0] for j in clinical)
clinical2=[['','','']]
for i in data:
    if i[0] not in known_patients:
        # v1.5 rows can be short; IndexError (or a non-numeric day, ValueError)
        # skips the row, mirroring the original's blanket try/except.
        try:
            # Col 6: vital status; col 7: days to last follow up; col 8: days to death.
            if clinical2[-1][0]==i[0]:
                if i[6]=='Alive':
                    clinical2[-1]=[i[0],int(i[7]),'Alive']
                elif i[6]=='Dead':
                    try:
                        clinical2[-1]=[i[0],int(i[8]),'Dead']
                    except (ValueError, IndexError):
                        pass
            else:
                if i[6]=='Alive':
                    clinical2.append([i[0],int(i[7]),'Alive'])
                elif i[6]=='Dead':
                    try:
                        clinical2.append([i[0],int(i[8]),'Dead'])
                    except (ValueError, IndexError):
                        pass
        except (ValueError, IndexError):
            pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
# Maps patient ID -> [grade code, sex code, age at diagnosis].
more_clinical={}
# NOTE(review): these keys are histologic subtypes rather than numeric tumour
# grades; the codes are arbitrary categorical labels (code 2 is unused).
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
# Sex is encoded 0 = male, 1 = female.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
# Parse the clinical_patient file for grade/sex/age plus any follow-up data
# for patients missing from the follow_up files.
with open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical',
                       'nationwidechildrens.org_clinical_patient_brca.txt')) as f:
    # Skip the three header lines.
    for _ in range(3):
        f.readline()
    data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    # Grade (11th column from the end), sex (col 6), age (col 20); patients
    # missing any of these are deliberately skipped (best-effort load).
    try:
        more_clinical[i[0]]=[grade_dict[i[-11]],sex_dict[i[6]],int(i[20])]
    except (KeyError, ValueError, IndexError):
        pass
    # Col 13: vital status; col 14: days to last contact; col 15: days to death.
    if i[13]=='Alive':
        clinical4.append([i[0],int(i[14]),'Alive'])
    elif i[13]=='Dead':
        try:
            clinical4.append([i[0],int(i[15]),'Dead'])
        except (ValueError, IndexError):
            # Death day missing/non-numeric: skip the patient.
            pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
# Index patient ID -> first position once; the original rebuilt the ID list
# and called .index() for every patient (quadratic). setdefault preserves the
# original first-occurrence semantics if an ID were ever duplicated.
clinical_index={}
for idx,j in enumerate(clinical):
    clinical_index.setdefault(j[0], idx)
for i in clinical4:
    if i[0] not in clinical_index:
        new_clinical.append(i)
    else:
        existing=clinical[clinical_index[i[0]]]
        # Keep whichever record has the longer (newer) follow-up time.
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
# Track seen IDs incrementally so behaviour matches the original, which
# re-checked the growing new_clinical list on every iteration.
seen_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen_ids:
        new_clinical.append(i)
        seen_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
## Need to map the mRNA files to the correct patients; the necessary
## information is in FILE_SAMPLE_MAP.txt.
with open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP.txt')) as f:
    f.readline()
    data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        # Sample-type code '01' (4th barcode field, letter suffix stripped).
        if i[1].split('-')[3][:-1]=='01':
            # Patient barcode = first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    # `in` replaces dict.has_key(), which exists only in Python 2.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
##print average age at diagnosis
# Average age at diagnosis across the analysed cohort.
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
# Event indicator: 1 = death observed, 0 = censored (alive at last contact).
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
# NOTE(review): this scrapes survfit's printed summary; the column positions
# are fragile across survival-package versions -- confirm on upgrade.
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
# Write the summary as a two-line TSV. Using a context manager guarantees the
# file is flushed and closed (the original never closed it). Column order and
# byte content match the original output exactly (no trailing newline).
header=['Average Age','Males','Females','Deaths','Median Survival']
values=[str(age),str(males),str(females),deaths,median]
with open('patient_info.txt','w') as f:
    f.write('\t'.join(header))
    f.write('\n')
    f.write('\t'.join(values))
| |
"""Config flow to configure Nest.
This configuration flow supports two APIs:
- The new Device Access program and the Smart Device Management API
- The legacy nest API
NestFlowHandler is an implementation of AbstractOAuth2FlowHandler with
some overrides to support the old APIs auth flow. That is, for the new
API this class has hardly any special config other than url parameters,
and everything else custom is for the old api. When configured with the
new api via NestFlowHandler.register_sdm_api, the custom methods just
invoke the AbstractOAuth2FlowHandler methods.
"""
import asyncio
from collections import OrderedDict
import logging
import os
from typing import Dict
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.util.json import load_json
from .const import DATA_SDM, DOMAIN, SDM_SCOPES
# Key under hass.data where legacy-API flow implementations are registered.
DATA_FLOW_IMPL = "nest_flow_implementation"

_LOGGER = logging.getLogger(__name__)
@callback
def register_flow_implementation(hass, domain, name, gen_authorize_url, convert_code):
    """Register a flow implementation for legacy api.

    domain: Domain of the component responsible for the implementation.
    name: Name of the component.
    gen_authorize_url: Coroutine function to generate the authorize url.
    convert_code: Coroutine function to convert a code to an access token.
    """
    implementations = hass.data.setdefault(DATA_FLOW_IMPL, OrderedDict())
    implementations[domain] = {
        "domain": domain,
        "name": name,
        "gen_authorize_url": gen_authorize_url,
        "convert_code": convert_code,
    }
# Root of the exception hierarchy for the legacy (non-SDM) auth flow.
class NestAuthError(HomeAssistantError):
    """Base class for Nest auth errors."""
class CodeInvalid(NestAuthError):
    """Raised when an invalid authorization code is supplied."""
# NOTE(review): not raised anywhere in the visible module — presumably
# raised by callers of this config flow; verify before removing.
class UnexpectedStateError(HomeAssistantError):
    """Raised when the config flow is invoked in a 'should not happen' case."""
@config_entries.HANDLERS.register(DOMAIN)
class NestFlowHandler(
    config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
    """Config flow to handle authentication for both APIs.

    When the SDM API has been registered (register_sdm_api) the flow
    mostly defers to AbstractOAuth2FlowHandler; otherwise the legacy
    nest API steps (init/link/import) run instead.
    """

    DOMAIN = DOMAIN
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH

    def __init__(self):
        """Initialize NestFlowHandler."""
        super().__init__()
        # When invoked for reauth, allows updating an existing config entry
        self._reauth = False

    @classmethod
    def register_sdm_api(cls, hass):
        """Configure the flow handler to use the SDM API."""
        if DOMAIN not in hass.data:
            hass.data[DOMAIN] = {}
        # Presence of the DATA_SDM key is what is_sdm_api() checks for.
        hass.data[DOMAIN][DATA_SDM] = {}

    def is_sdm_api(self):
        """Return true if this flow is setup to use SDM API."""
        return DOMAIN in self.hass.data and DATA_SDM in self.hass.data[DOMAIN]

    @property
    def logger(self) -> logging.Logger:
        """Return logger."""
        return logging.getLogger(__name__)

    @property
    def extra_authorize_data(self) -> Dict[str, str]:
        """Extra data that needs to be appended to the authorize url."""
        return {
            "scope": " ".join(SDM_SCOPES),
            # Add params to ensure we get back a refresh token
            "access_type": "offline",
            "prompt": "consent",
        }

    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Create an entry for the SDM flow."""
        assert self.is_sdm_api(), "Step only supported for SDM API"
        data[DATA_SDM] = {}
        await self.async_set_unique_id(DOMAIN)
        # Update existing config entry when in the reauth flow. This
        # integration only supports one config entry so remove any prior entries
        # added before the "single_instance_allowed" check was added
        existing_entries = self.hass.config_entries.async_entries(DOMAIN)
        if existing_entries:
            updated = False
            for entry in existing_entries:
                if updated:
                    # The first entry has already been refreshed; drop
                    # any remaining duplicates.
                    await self.hass.config_entries.async_remove(entry.entry_id)
                    continue
                updated = True
                self.hass.config_entries.async_update_entry(
                    entry, data=data, unique_id=DOMAIN
                )
                await self.hass.config_entries.async_reload(entry.entry_id)
            return self.async_abort(reason="reauth_successful")
        return await super().async_oauth_create_entry(data)

    async def async_step_reauth(self, user_input=None):
        """Perform reauth upon an API authentication error."""
        assert self.is_sdm_api(), "Step only supported for SDM API"
        self._reauth = True  # Forces update of existing config entry
        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(self, user_input=None):
        """Confirm reauth dialog."""
        assert self.is_sdm_api(), "Step only supported for SDM API"
        if user_input is None:
            # Empty schema: the form is just a confirmation dialog.
            return self.async_show_form(
                step_id="reauth_confirm",
                data_schema=vol.Schema({}),
            )
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        if self.is_sdm_api():
            # Reauth will update an existing entry
            if self.hass.config_entries.async_entries(DOMAIN) and not self._reauth:
                return self.async_abort(reason="single_instance_allowed")
            return await super().async_step_user(user_input)
        # Legacy API path.
        return await self.async_step_init(user_input)

    async def async_step_init(self, user_input=None):
        """Handle a flow start."""
        assert not self.is_sdm_api(), "Step only supported for legacy API"
        flows = self.hass.data.get(DATA_FLOW_IMPL, {})
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason="single_instance_allowed")
        if not flows:
            return self.async_abort(reason="missing_configuration")
        if len(flows) == 1:
            # Only one implementation registered: skip the picker form.
            self.flow_impl = list(flows)[0]
            return await self.async_step_link()
        if user_input is not None:
            self.flow_impl = user_input["flow_impl"]
            return await self.async_step_link()
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema({vol.Required("flow_impl"): vol.In(list(flows))}),
        )

    async def async_step_link(self, user_input=None):
        """Attempt to link with the Nest account.

        Route the user to a website to authenticate with Nest. Depending on
        implementation type we expect a pin or an external component to
        deliver the authentication code.
        """
        assert not self.is_sdm_api(), "Step only supported for legacy API"
        flow = self.hass.data[DATA_FLOW_IMPL][self.flow_impl]
        errors = {}
        if user_input is not None:
            # A code was submitted: try to exchange it for tokens.
            try:
                with async_timeout.timeout(10):
                    tokens = await flow["convert_code"](user_input["code"])
                return self._entry_from_tokens(
                    f"Nest (via {flow['name']})", flow, tokens
                )
            except asyncio.TimeoutError:
                errors["code"] = "timeout"
            except CodeInvalid:
                errors["code"] = "invalid_pin"
            except NestAuthError:
                errors["code"] = "unknown"
            except Exception:  # pylint: disable=broad-except
                errors["code"] = "internal_error"
                _LOGGER.exception("Unexpected error resolving code")
        # No code yet (or exchange failed): (re)show the link form.
        try:
            with async_timeout.timeout(10):
                url = await flow["gen_authorize_url"](self.flow_id)
        except asyncio.TimeoutError:
            return self.async_abort(reason="authorize_url_timeout")
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected error generating auth url")
            return self.async_abort(reason="unknown_authorize_url_generation")
        return self.async_show_form(
            step_id="link",
            description_placeholders={"url": url},
            data_schema=vol.Schema({vol.Required("code"): str}),
            errors=errors,
        )

    async def async_step_import(self, info):
        """Import existing auth from Nest."""
        assert not self.is_sdm_api(), "Step only supported for legacy API"
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason="single_instance_allowed")
        config_path = info["nest_conf_path"]
        if not await self.hass.async_add_executor_job(os.path.isfile, config_path):
            # No cached token file: fall back to interactive linking.
            self.flow_impl = DOMAIN
            return await self.async_step_link()
        flow = self.hass.data[DATA_FLOW_IMPL][DOMAIN]
        tokens = await self.hass.async_add_executor_job(load_json, config_path)
        return self._entry_from_tokens(
            "Nest (import from configuration.yaml)", flow, tokens
        )

    @callback
    def _entry_from_tokens(self, title, flow, tokens):
        """Create an entry from tokens."""
        return self.async_create_entry(
            title=title, data={"tokens": tokens, "impl_domain": flow["domain"]}
        )
| |
__file__ = 'IRI_v7'
__date__ = '1/28/2016'
__author__ = 'ABREZNIC'
"""
The MIT License (MIT)
Copyright (c) 2016 Texas Department of Transportation
Author: Adam Breznicky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import arcpy, os, datetime, csv
# --- Run setup --------------------------------------------------------
# Timestamp for logging; "today" is available for date-stamped names.
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay

# Tool parameters: semicolon-delimited district input tables, the
# calibrated RHINO route feature class, and the output folder.
# NOTE: "input" shadows the builtin of the same name.
input = arcpy.GetParameterAsText(0)
calRhino = arcpy.GetParameterAsText(1)
output = arcpy.GetParameterAsText(2)
# theMXD = "C:\\TxDOT\\Projects\\IRI_dan\\working\\Untitled.mxd"
inputlist = input.split(";")
inputcntr = 1
lengthinput = len(inputlist)

# Report accumulators; the first row of each is the CSV header.
issuesReport = [["DISTRICT_FILE", "ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "ERROR_DESCRIPTION"]]
statsReport = [["DISTRICT_FILE", "LG Record Count", "KG Record Count", "Total Records Count", "Input Record Count", "Lost Records Count", "LG Records Length", "KG Records Length", "Total Routed Length"]]

# Copy the calibrated RHINO network into a local file geodatabase.
arcpy.CreateFileGDB_management(output, "RhinoLines.gdb")
rhinospace = output + os.sep + "RhinoLines.gdb"
rhino_lines = rhinospace + os.sep + "rhinolines"
# arcpy.Copy_management(calRhino, rhino_lines)
arcpy.FeatureClassToFeatureClass_conversion(calRhino, rhinospace, "rhinolines")
# arcpy.AddField_management(rhino_lines, "FRM_DFO", "DOUBLE")
# arcpy.AddField_management(rhino_lines, "TO_DFO", "DOUBLE")

# Refresh FRM_DFO/TO_DFO from each line's first/last measure (M) value,
# rounded to 3 decimal places.
cursor = arcpy.da.UpdateCursor(rhino_lines, ["FRM_DFO", "TO_DFO", 'SHAPE@', "OBJECTID"])
for row in cursor:
    # arcpy.AddMessage(row[3])
    bp = row[2].firstPoint.M
    ep = row[2].lastPoint.M
    bpNew = float(format(float(bp), '.3f'))
    epNew = float(format(float(ep), '.3f'))
    row[0] = bpNew
    row[1] = epNew
    cursor.updateRow(row)
del cursor
del row  # NOTE(review): raises NameError if the feature class is empty

arcpy.AddMessage("Calibrated RHINO copied local.")

# Stamp every RHINO segment with a per-route sequence number (RTE_ORDER)
# and a FLAG key of the form "<RIA_RTE_ID>-<order>", walking the rows
# ordered by route id then ascending begin measure.
arcpy.AddField_management(rhino_lines, "RTE_ORDER", "SHORT")
arcpy.AddField_management(rhino_lines, "FLAG", "TEXT", "", "", 30)
arcpy.AddMessage("Applying RTE_ORDER.")
cursor = arcpy.da.UpdateCursor(rhino_lines, ["RIA_RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG"], "", "", "", (None, "ORDER BY RIA_RTE_ID ASC, FRM_DFO ASC"))
# cursor = arcpy.da.UpdateCursor(rhino_lines, ["RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"], "", "", "", (None, "ORDER BY RTE_ID ASC, FRM_DFO ASC"))
counter = 0
order = 1
previous = ""
for row in cursor:
    current = row[0]
    if counter == 0:
        # Very first row of the table.
        row[2] = order
    elif counter != 0 and previous == current:
        # Same route as the previous row: advance the sequence.
        order += 1
        row[2] = order
    else:
        # New route id: restart the sequence at 1.
        order = 1
        row[2] = order
    previous = current
    counter += 1
    # ru = int(row[4])
    # fs = int(row[5])
    # nhs = int(row[6])
    # row[3] = current + "-" + str(order) + "-" + str(ru) + "-" + str(fs) + "-" + str(nhs) + "-" + str(row[7])
    row[3] = current + "-" + str(order)
    cursor.updateRow(row)
del cursor
arcpy.AddMessage("RTE_ORDER applied.")

# Map the first three dash-separated tokens of FLAG (route id + order)
# to the segment's [begin, end] measures for the adjustments below.
dictionary = {}
cursor = arcpy.da.SearchCursor(rhino_lines, ["FLAG", "FRM_DFO", "TO_DFO"])
for row in cursor:
    flag = row[0]
    odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
    fDFO = row[1]
    tDFO = row[2]
    dictionary[odr] = [fDFO, tDFO]
del cursor
# --- Per-district processing ------------------------------------------
for excel in inputlist:
    # Derive a clean district name from the input table path.
    distName = str(excel).split("\\")[-1]
    if distName[-1] == "$":
        distName = distName[:-1]
    if distName[-4:] == ".dbf":
        distName = distName[:-4]
    arcpy.AddMessage("Beginning " + str(inputcntr) + " of " + str(lengthinput) + ": " + distName)
    arcpy.CreateFileGDB_management(output, "Wrkg" + str(inputcntr) + ".gdb")
    workspace = output + os.sep + "Wrkg" + str(inputcntr) + ".gdb"
    arcpy.AddMessage("Working database created.")
    # "data" collects plotted/KG rows, "lg" collects LG rows; each starts
    # with the header row.
    data = []
    lg = []
    fields = ["ROUTE_ID", "BEGIN_POIN", "END_POINT", "SECTION_LE", "IRI", "RUTTING", "DATE", "TIME", "LANE"]
    # fields = ["ROUTE_ID", "BEGIN_POIN", "END_POINT", "SECTION_LE", "IRI", "RUTTING", "DATE"]
    data.append(fields)
    lg.append(fields)
    # spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\GCS_WGS_1984.prj"
    spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\WGS 1984.prj"
    arcpy.MakeXYEventLayer_management(excel, "Long", "Lat", "pointEvents" + str(inputcntr), spref)
    arcpy.AddMessage("Event Layer created.")
    pntfeature = workspace + os.sep + "allPoints"
    arcpy.CopyFeatures_management("pointEvents" + str(inputcntr), pntfeature)
    arcpy.AddMessage("Point feature class created.")
    # Collect the distinct ROUTE_ID/LANE combinations present in the input
    # and count the input records.
    initial = 0
    ids = []
    cursor = arcpy.da.SearchCursor(pntfeature, ["ROUTE_ID", "LANE"])
    for row in cursor:
        id = row[0]
        lane = row[1]
        combo = id + "-" + lane
        initial += 1
        if combo not in ids:
            ids.append(combo)
    del cursor
    del row  # NOTE(review): raises NameError if the input table is empty
    arcpy.AddMessage("RTE_IDs compiled.")
    # Swap fresh copies of the working layers into the open map document.
    roadslayer = ""
    pointslayer = ""
    # mxd = arcpy.mapping.MapDocument(theMXD)
    mxd = arcpy.mapping.MapDocument("CURRENT")
    df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
    for lyr in arcpy.mapping.ListLayers(mxd):
        if lyr.name == "rhinolines":
            arcpy.mapping.RemoveLayer(df, lyr)
        if lyr.name == "allPoints":
            arcpy.mapping.RemoveLayer(df, lyr)
    newlayerpnt = arcpy.mapping.Layer(pntfeature)
    arcpy.mapping.AddLayer(df, newlayerpnt)
    newlayerline = arcpy.mapping.Layer(rhino_lines)
    arcpy.mapping.AddLayer(df, newlayerline)
    for lyr in arcpy.mapping.ListLayers(mxd):
        if lyr.name == "rhinolines":
            roadslayer = lyr
        if lyr.name == "allPoints":
            pointslayer = lyr
    arcpy.AddMessage("Layers acquired.")
    # For each route/lane combo, restrict both layers via definition
    # queries and locate the points along the matching "-KG" RHINO route.
    counter = 1
    total = len(ids)
    arcpy.AddMessage("Finding measures for: ")
    for combo in ids:
        id = combo.split("-")[0] + "-" + combo.split("-")[1]
        lane = combo.split("-")[2]
        rteName = combo.split("-")[0]
        roadslayer.definitionQuery = " RIA_RTE_ID = '" + rteName + "-KG' "
        pointslayer.definitionQuery = " ROUTE_ID = '" + id + "' AND LANE = '" + lane + "'"
        arcpy.RefreshActiveView()
        arcpy.AddMessage(str(counter) + "/" + str(total) + " " + combo)
        label = combo.replace("-", "")
        arcpy.LocateFeaturesAlongRoutes_lr(pointslayer, roadslayer, "FLAG", "230 Feet", workspace + os.sep + label, "FLAG POINT END_POINT")
        counter += 1
    arcpy.AddMessage("Tables created.")
    # alltables = []
    arcpy.env.workspace = workspace
    tables = arcpy.ListTables()
    for table in tables:
        arcpy.AddMessage(table)
        arcpy.AddField_management(table, "ODR_FLAG", "TEXT", "", "", 20)
        arcpy.AddMessage("Order Flag field created.")
        # Count rows per route/order key and stamp the key on each row.
        numbDict = {}
        cursor = arcpy.da.UpdateCursor(table, ["FLAG", "ODR_FLAG"])
        for row in cursor:
            flag = row[0]
            odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
            if odr not in numbDict.keys():
                numbDict[odr] = 1
            else:
                curNumb = numbDict[odr]
                curNumb += 1
                numbDict[odr] = curNumb
            row[1] = odr
            cursor.updateRow(row)
        del cursor
        # Walk each key's rows in END_POINT order, chaining BEGIN_POIN to
        # the previous END_POINT and clamping the first/last records to
        # the segment measures stored in "dictionary".
        counter = 1
        previous = ""
        last = ""
        # cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
        cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POIN", "END_POINT", "SECTION_LE"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
        for row in cursor:
            current = row[0]
            total = numbDict[current]
            if counter == 1 and counter != total:
                # First record of a multi-record key.
                values = dictionary[current]
                beginner = float(format(float(values[0]), '.3f'))
                segEnd = float(format(float(row[2]), '.3f'))
                if abs(segEnd - beginner) > 1:
                    # Too far from the segment start; back off 0.1 instead.
                    segSrt = segEnd - .1
                    row[1] = float(format(float(segSrt), '.3f'))
                    row[2] = segEnd
                    row[3] = round(row[2] - row[1], 3)
                else:
                    row[1] = beginner
                    row[2] = segEnd
                    row[3] = round(row[2] - row[1], 3)
            elif counter == 1 and counter == total:
                # Single-record key: span the entire segment.
                values = dictionary[current]
                row[1] = float(format(float(values[0]), '.3f'))
                row[2] = float(format(float(values[1]), '.3f'))
                row[3] = round(row[2] - row[1], 3)
                counter = 0
            elif previous == current and counter != total:
                # Middle record: chain from the previous end point.
                row[1] = last
                row[2] = float(format(float(row[2]), '.3f'))
                row[3] = round(row[2] - last, 3)
            elif previous == current and counter == total:
                # Last record: snap to the segment end unless too far away.
                values = dictionary[current]
                ender = float(format(float(values[1]), '.3f'))
                if abs(ender - last) > 1:
                    row[1] = last
                    row[2] = float(format(float(row[2]), '.3f'))
                    row[3] = round(row[2] - last, 3)
                else:
                    row[1] = last
                    row[2] = float(format(float(values[1]), '.3f'))
                    row[3] = round(row[2] - last, 3)
                counter = 0
            else:
                arcpy.AddMessage("problem with " + current)
            last = row[2]
            cursor.updateRow(row)
            previous = current
            counter += 1
        del cursor
    arcpy.AddMessage("Measure difference fields populated.")
    arcpy.Merge_management(tables, workspace + os.sep + "merged")
    arcpy.AddMessage("All tables merged successfully.")
    # arcpy.AddField_management(workspace + os.sep + "merged", "RU", "TEXT", "", "", 5)
    # arcpy.AddMessage("RU field created.")
    # arcpy.AddField_management(workspace + os.sep + "merged", "F_SYSTEM", "TEXT", "", "", 5)
    # arcpy.AddMessage("Functional System field created.")
    # arcpy.AddField_management(workspace + os.sep + "merged", "SEC_NHS", "TEXT", "", "", 5)
    # arcpy.AddMessage("NHS field created.")
    # arcpy.AddField_management(workspace + os.sep + "merged", "HPMS", "TEXT", "", "", 5)
    # arcpy.AddMessage("HPMS Keeper field created.")
    # arcpy.AddMessage("Fields created.")
    # cursor = arcpy.da.UpdateCursor(workspace + os.sep + "merged", ["FLAG", "RU", "F_SYSTEM", "SEC_NHS"])
    ## cursor = arcpy.da.UpdateCursor(workspace + os.sep + "merged", ["FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"])
    # for row in cursor:
    #     flag = row[0]
    #     row[1] = flag.split("-")[3]
    #     row[2] = flag.split("-")[4]
    #     row[3] = flag.split("-")[5]
    #     # row[4] = flag.split("-")[6]
    #     cursor.updateRow(row)
    # del cursor
    # Split the merged rows into LG vs KG datasets, renaming "RG" route
    # ids to "KG" and flagging abnormal section lengths for the report.
    LGcounter = 0
    KGcounter = 0
    LGlength = 0
    KGlength = 0
    cursor = arcpy.da.SearchCursor(workspace + os.sep + "merged", fields, None, None, False, (None, "ORDER BY ROUTE_ID ASC, LANE ASC, BEGIN_POIN ASC"))
    for row in cursor:
        id = row[0]
        if id[-2:] == "LG":
            lg.append(row)
            LGcounter += 1
            LGlength += float(row[3])
        elif id[-2:] == "RG":
            # Rename the "RG" suffix to "KG" before keeping the row.
            THEid = id[:-2]
            newid = THEid + "KG"
            fixed = [newid, row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]]
            # fixed = [newid, row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10]]
            data.append(fixed)
            KGcounter += 1
            KGlength += float(row[3])
            if float(row[3]) > 1:
                problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
                issuesReport.append(problem)
            if float(row[3]) == 0:
                problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
                issuesReport.append(problem)
        else:
            data.append(row)
            KGcounter += 1
            KGlength += float(row[3])
            if float(row[3]) > 1:
                problem = [distName, id, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
                issuesReport.append(problem)
            if float(row[3]) == 0:
                problem = [distName, id, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
                issuesReport.append(problem)
    del cursor
    arcpy.AddMessage("Data compiled.")
    arcpy.AddMessage("Creating CSV reports.")
    # Write the LG and plotted CSVs locally ('wb' is correct for the csv
    # module under Python 2).
    leftover = open(output + os.sep + distName + "_LG.csv", 'wb')
    writer = csv.writer(leftover)
    writer.writerows(lg)
    leftover.close()
    final = open(output + os.sep + distName + "_Plotted.csv", 'wb')
    writer = csv.writer(final)
    writer.writerows(data)
    final.close()
    arcpy.AddMessage("CSV written locally.")
    # ...and a second copy to the shared T: drive location.
    arcpy.AddMessage("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_LG.csv")
    leftover = open("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_LG.csv", 'wb')
    writer = csv.writer(leftover)
    writer.writerows(lg)
    leftover.close()
    final = open("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_Plotted.csv", 'wb')
    writer = csv.writer(final)
    writer.writerows(data)
    final.close()
    arcpy.AddMessage("CSV written to T drive.")
    pointsName = distName.split("_")[-1]
    arcpy.FeatureClassToFeatureClass_conversion(pntfeature, "T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script\\All_Points.gdb", pointsName)
    arcpy.AddMessage("allpoints feature class transferred to T drive.")
    # District totals for the statistics report.
    TOTALcounter = LGcounter + KGcounter
    TOTALlength = LGlength + KGlength
    DIFFcounter = initial - TOTALcounter
    statsReport.append([distName, LGcounter, KGcounter, TOTALcounter, initial, DIFFcounter, LGlength, KGlength, TOTALlength])
    inputcntr += 1
# --- Final reports ----------------------------------------------------
# The issues CSV is only written when a problem row was appended after
# the header row.
if len(issuesReport) > 1:
    arcpy.AddMessage("Creating errors report...")
    errors = open(output + os.sep + "00ISSUES_Investigate.csv", 'wb')
    writer = csv.writer(errors)
    writer.writerows(issuesReport)
    errors.close()
arcpy.AddMessage("Creating stats report...")
stats = open(output + os.sep + "00Statistics.csv", 'wb')
writer = csv.writer(stats)
writer.writerows(statsReport)
stats.close()
arcpy.AddMessage("that's all folks!")
arcpy.AddMessage("started: " + str(now))
now2 = datetime.datetime.now()
arcpy.AddMessage("ended: " + str(now2))
print "that's all folks!"  # Python 2 print statement
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sends email on behalf of application.
Provides functions for application developers to provide email services
for their applications. Also provides a few utility methods.
"""
import email
from email import MIMEBase
from email import MIMEMultipart
from email import MIMEText
from email import Parser
import logging
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import mail_service_pb
from google.appengine.api import users
from google.appengine.api.mail_errors import *
from google.appengine.runtime import apiproxy_errors
# Maps MailServiceError protocol buffer error codes to the exception
# type raised for them.
ERROR_MAP = {
    mail_service_pb.MailServiceError.BAD_REQUEST:
      BadRequestError,
    mail_service_pb.MailServiceError.UNAUTHORIZED_SENDER:
      InvalidSenderError,
    mail_service_pb.MailServiceError.INVALID_ATTACHMENT_TYPE:
      InvalidAttachmentTypeError,
}

# File-name extension -> MIME type for the permitted attachment types
# (see _GetMimeType).
EXTENSION_MIME_MAP = {
    'aif': 'audio/x-aiff',
    'aifc': 'audio/x-aiff',
    'aiff': 'audio/x-aiff',
    'asc': 'text/plain',
    'au': 'audio/basic',
    'avi': 'video/x-msvideo',
    'bmp': 'image/x-ms-bmp',
    'css': 'text/css',
    'csv': 'text/csv',
    'doc': 'application/msword',
    'diff': 'text/plain',
    'flac': 'audio/flac',
    'gif': 'image/gif',
    'htm': 'text/html',
    'html': 'text/html',
    'ics': 'text/calendar',
    'jpe': 'image/jpeg',
    'jpeg': 'image/jpeg',
    'jpg': 'image/jpeg',
    'kml': 'application/vnd.google-earth.kml+xml',
    'kmz': 'application/vnd.google-earth.kmz',
    'm4a': 'audio/mp4',
    'mid': 'audio/mid',
    'mov': 'video/quicktime',
    'mp3': 'audio/mpeg',
    'mp4': 'video/mp4',
    'mpe': 'video/mpeg',
    'mpeg': 'video/mpeg',
    'mpg': 'video/mpeg',
    'odp': 'application/vnd.oasis.opendocument.presentation',
    'ods': 'application/vnd.oasis.opendocument.spreadsheet',
    'odt': 'application/vnd.oasis.opendocument.text',
    'oga': 'audio/ogg',
    'ogg': 'audio/ogg',
    'ogv': 'video/ogg',
    'pdf': 'application/pdf',
    'png': 'image/png',
    'pot': 'text/plain',
    'pps': 'application/vnd.ms-powerpoint',
    'ppt': 'application/vnd.ms-powerpoint',
    'qt': 'video/quicktime',
    'rmi': 'audio/mid',
    'rss': 'text/rss+xml',
    'snd': 'audio/basic',
    'sxc': 'application/vnd.sun.xml.calc',
    'sxw': 'application/vnd.sun.xml.writer',
    'text': 'text/plain',
    'tif': 'image/tiff',
    'tiff': 'image/tiff',
    'txt': 'text/plain',
    'vcf': 'text/directory',
    'wav': 'audio/x-wav',
    'wbmp': 'image/vnd.wap.wbmp',
    'xls': 'application/vnd.ms-excel',
}

# Allowed attachment extensions (the keys of EXTENSION_MIME_MAP).
EXTENSION_WHITELIST = frozenset(EXTENSION_MIME_MAP.iterkeys())
def invalid_email_reason(email_address, field):
    """Determine reason why email is invalid.

    Args:
      email_address: Email to check.  May be a string or a users.User.
      field: Field that is invalid, used in the returned message.

    Returns:
      String indicating invalid email reason if there is one,
      else None.
    """
    if email_address is None:
        return 'None email address for %s.' % field
    # A users.User is accepted and unwrapped to its address string.
    if isinstance(email_address, users.User):
        email_address = email_address.email()
    if not isinstance(email_address, basestring):
        return 'Invalid email address type for %s.' % field
    stripped_address = email_address.strip()
    if not stripped_address:
        return 'Empty email address for %s.' % field
    return None


# Legacy CamelCase alias.
InvalidEmailReason = invalid_email_reason
def is_email_valid(email_address):
    """Determine if email is invalid.

    Args:
      email_address: Email to check.

    Returns:
      True if email is valid, else False.
    """
    # Valid exactly when no invalidity reason is reported.
    reason = invalid_email_reason(email_address, '')
    return reason is None


# Legacy CamelCase alias.
IsEmailValid = is_email_valid
def check_email_valid(email_address, field):
    """Check that email is valid.

    Args:
      email_address: Email to check.
      field: Field to check.

    Raises:
      InvalidEmailError if email_address is invalid.
    """
    reason = invalid_email_reason(email_address, field)
    if reason is None:
        return
    raise InvalidEmailError(reason)


# Legacy CamelCase alias.
CheckEmailValid = check_email_valid
def _email_check_and_list(emails, field):
    """Validate a single email or a sequence of emails.

    Args:
      emails: Single email or iterable of emails.
      field: Field name used in error messages.

    Raises:
      InvalidEmailError if any email address is invalid.
    """
    # Bug fix: the original referenced types.StringTypes without importing
    # the types module (NameError) and called check_email_valid(value)
    # with an undefined name and a missing field argument.  basestring is
    # the Python 2 equivalent of types.StringTypes membership.
    if isinstance(emails, basestring):
        check_email_valid(emails, field)
    else:
        for address in iter(emails):
            check_email_valid(address, field)
def _email_sequence(emails):
    """Forces email to be sequenceable type.

    Iterable values are returned as is. This function really just wraps the case
    where there is a single email string.

    Args:
      emails: Emails (or email) to coerce to sequence.

    Returns:
      Single tuple with email in it if only one email string provided,
      else returns emails as is.
    """
    # Wrap a lone string; anything else is assumed already iterable.
    if isinstance(emails, basestring):
        return (emails,)
    return emails
def _attachment_sequence(attachments):
    """Forces attachments to be sequenceable type.

    Iterable values are returned as is. This function really just wraps the case
    where there is a single attachment.

    Args:
      attachments: Attachments (or attachment) to coerce to sequence.

    Returns:
      Single tuple with attachment tuple in it if only one attachment provided,
      else returns attachments as is.
    """
    # A single attachment is a (filename, data) pair: length two with a
    # string first element.
    is_single_attachment = (
        len(attachments) == 2 and isinstance(attachments[0], basestring))
    if is_single_attachment:
        return (attachments,)
    return attachments
def _parse_mime_message(mime_message):
    """Helper function converts a mime_message in to email.Message.Message.

    Args:
      mime_message: MIME Message, string or file containing mime message.

    Returns:
      Instance of email.Message.Message.  Will return mime_message if already
      an instance.
    """
    # Already parsed: pass through untouched.
    if isinstance(mime_message, email.Message.Message):
        return mime_message
    # Raw text gets parsed directly.
    if isinstance(mime_message, basestring):
        return email.message_from_string(mime_message)
    # Anything else is treated as a file-like object.
    return email.message_from_file(mime_message)
def send_mail(sender,
              to,
              subject,
              body,
              make_sync_call=apiproxy_stub_map.MakeSyncCall,
              **kw):
    """Sends mail on behalf of application.

    Args:
      sender: Sender email address as appears in the 'from' email line.
      to: List of 'to' addresses or a single address.
      subject: Message subject string.
      body: Body of type text/plain.
      make_sync_call: Function used to make sync call to API proxy.
      kw: Keyword arguments compatible with EmailMessage keyword based
        constructor.

    Raises:
      InvalidEmailError when invalid email address provided.
    """
    # Fold the required fields into the keyword set and delegate to
    # EmailMessage for validation and delivery.
    kw.update(sender=sender, to=to, subject=subject, body=body)
    EmailMessage(**kw).send(make_sync_call)


# Legacy CamelCase alias.
SendMail = send_mail
def send_mail_to_admins(sender,
                        subject,
                        body,
                        make_sync_call=apiproxy_stub_map.MakeSyncCall,
                        **kw):
    """Sends mail to admins on behalf of application.

    Args:
      sender: Sender email address as appears in the 'from' email line.
      subject: Message subject string.
      body: Body of type text/plain.
      make_sync_call: Function used to make sync call to API proxy.
      kw: Keyword arguments compatible with EmailMessage keyword based
        constructor.

    Raises:
      InvalidEmailError when invalid email address provided.
    """
    # Fold the required fields into the keyword set and delegate to
    # AdminEmailMessage for validation and delivery.
    kw.update(sender=sender, subject=subject, body=body)
    AdminEmailMessage(**kw).send(make_sync_call)


# Legacy CamelCase alias.
SendMailToAdmins = send_mail_to_admins
def _GetMimeType(file_name):
    """Determine mime-type from file name.

    Parses file name and determines mime-type based on extension map.

    This method is not part of the public API and should not be used by
    applications.

    Args:
      file_name: File to determine extension for.

    Returns:
      Mime-type associated with file extension.

    Raises:
      InvalidAttachmentTypeError when the file name has no extension or an
        unsupported one.
    """
    # rpartition yields an empty separator when no '.' is present.
    _unused, dot, extension = file_name.rpartition('.')
    if not dot:
        raise InvalidAttachmentTypeError(
            "File '%s' does not have an extension" % file_name)
    extension = extension.lower()
    mime_type = EXTENSION_MIME_MAP.get(extension, None)
    if mime_type is None:
        raise InvalidAttachmentTypeError(
            "Extension '%s' is not supported." % extension)
    return mime_type
def mail_message_to_mime_message(protocol_message):
    """Generate a MIMEMultitype message from protocol buffer.

    Generates a complete MIME multi-part email object from a MailMessage
    protocol buffer.  The body fields are sent as individual alternatives
    if they are both present, otherwise, only one body part is sent.

    Multiple entry email fields such as 'To', 'Cc' and 'Bcc' are converted
    to a list of comma separated email addresses.

    Args:
      protocol_message: Message PB to convert to MIMEMultitype.

    Returns:
      MIMEMultitype representing the provided MailMessage.

    Raises:
      InvalidAttachmentTypeError when the file name of an attachment has an
        unsupported extension (see _GetMimeType).
    """
    parts = []
    if protocol_message.has_textbody():
        parts.append(MIMEText.MIMEText(protocol_message.textbody()))
    if protocol_message.has_htmlbody():
        parts.append(MIMEText.MIMEText(protocol_message.htmlbody(),
                                       _subtype='html'))

    if len(parts) == 1:
        # Only one body: attach it directly to the outer multipart.
        payload = parts
    else:
        # Both bodies present: wrap them in multipart/alternative.
        payload = [MIMEMultipart.MIMEMultipart('alternative', _subparts=parts)]

    result = MIMEMultipart.MIMEMultipart(_subparts=payload)

    # Attachment MIME type is derived from the file extension whitelist.
    for attachment in protocol_message.attachment_list():
        file_name = attachment.filename()
        mime_type = _GetMimeType(file_name)
        maintype, subtype = mime_type.split('/')
        mime_attachment = MIMEBase.MIMEBase(maintype, subtype)
        mime_attachment.add_header('Content-Disposition',
                                   'attachment',
                                   filename=attachment.filename())
        mime_attachment.set_payload(attachment.data())
        result.attach(mime_attachment)

    # Multi-valued address fields become comma-joined header values.
    if protocol_message.to_size():
        result['To'] = ', '.join(protocol_message.to_list())
    if protocol_message.cc_size():
        result['Cc'] = ', '.join(protocol_message.cc_list())
    if protocol_message.bcc_size():
        result['Bcc'] = ', '.join(protocol_message.bcc_list())

    result['From'] = protocol_message.sender()
    result['Reply-To'] = protocol_message.replyto()
    result['Subject'] = protocol_message.subject()

    return result


# Legacy CamelCase alias.
MailMessageToMIMEMessage = mail_message_to_mime_message
def _to_str(value):
    """Helper function to make sure unicode values converted to utf-8.

    Args:
      value: str or unicode to convert to utf-8.

    Returns:
      UTF-8 encoded str of value, otherwise value unchanged.
    """
    # Only unicode objects need encoding; byte strings pass through.
    if not isinstance(value, unicode):
        return value
    return value.encode('utf-8')
class EncodedPayload(object):
"""Wrapper for a payload that contains encoding information.
When an email is recieved, it is usually encoded using a certain
character set, and then possibly further encoded using a transfer
encoding in that character set. Most of the times, it is possible
to decode the encoded payload as is, however, in the case where it
is not, the encoded payload and the original encoding information
must be preserved.
Attributes:
payload: The original encoded payload.
charset: The character set of the encoded payload. None means use
default character set.
encoding: The transfer encoding of the encoded payload. None means
content not encoded.
"""
def __init__(self, payload, charset=None, encoding=None):
"""Constructor.
Args:
payload: Maps to attribute of the same name.
charset: Maps to attribute of the same name.
encoding: Maps to attribute of the same name.
"""
self.payload = payload
self.charset = charset
self.encoding = encoding
def decode(self):
"""Attempt to decode the encoded data.
Attempt to use pythons codec library to decode the payload. All
exceptions are passed back to the caller.
Returns:
Binary or unicode version of payload content.
"""
payload = self.payload
if self.encoding and self.encoding.lower() != '7bit':
try:
payload = payload.decode(self.encoding)
except LookupError:
raise UnknownEncodingError('Unknown decoding %s.' % self.encoding)
except (Exception, Error), e:
raise PayloadEncodingError('Could not decode payload: %s' % e)
if self.charset and str(self.charset).lower() != '7bit':
try:
payload = payload.decode(str(self.charset))
except LookupError:
raise UnknownCharsetError('Unknown charset %s.' % self.charset)
except (Exception, Error), e:
raise PayloadEncodingError('Could read characters: %s' % e)
return payload
def __eq__(self, other):
"""Equality operator.
Args:
other: The other EncodedPayload object to compare with. Comparison
with other object types are not implemented.
Returns:
True of payload and encodings are equal, else false.
"""
if isinstance(other, EncodedPayload):
return (self.payload == other.payload and
self.charset == other.charset and
self.encoding == other.encoding)
else:
return NotImplemented
def copy_to(self, mime_message):
    """Copy this payload into a MIME message.

    The transfer-encoding header is emitted only when one is known.  When
    no transfer encoding is specified and the character set differs from
    the over-all message encoding, set_payload() itself will base64-encode
    the content.

    Args:
        mime_message: Message instance that receives the payload.
    """
    transfer_encoding = self.encoding
    if transfer_encoding:
        mime_message['content-transfer-encoding'] = transfer_encoding
    mime_message.set_payload(self.payload, self.charset)
def to_mime_message(self):
    """Wrap this payload in a standalone MIME message.

    Returns:
        email.Message.Message instance carrying the encoded payload.
    """
    message = email.Message.Message()
    self.copy_to(message)
    return message
def __str__(self):
    """Render this payload as an independent MIME-encoded message."""
    as_mime = self.to_mime_message()
    return str(as_mime)
def __repr__(self):
    """Compact debugging representation.

    The payload itself is represented only by its hash value; charset and
    encoding are appended when present.
    """
    pieces = ['<EncodedPayload payload=#%d' % hash(self.payload)]
    if self.charset:
        pieces.append(' charset=%s' % self.charset)
    if self.encoding:
        pieces.append(' encoding=%s' % self.encoding)
    pieces.append('>')
    return ''.join(pieces)
class _EmailMessageBase(object):
    """Base class for email API service objects.

    Subclasses must define a class variable called _API_CALL with the name
    of its underlying mail sending API call.
    """

    # The complete set of assignable fields; __setattr__ rejects anything
    # that is not listed here.
    PROPERTIES = set([
        'sender',
        'reply_to',
        'subject',
        'body',
        'html',
        'attachments',
    ])
    PROPERTIES.update(('to', 'cc', 'bcc'))

    def __init__(self, mime_message=None, **kw):
        """Initialize Email message.

        Creates new MailMessage protocol buffer and initializes it with any
        keyword arguments.

        Args:
          mime_message: MIME message to initialize from.  If instance of
            email.Message.Message will take ownership as original message.
          kw: List of keyword properties as defined by PROPERTIES.
        """
        if mime_message:
            mime_message = _parse_mime_message(mime_message)
            self.update_from_mime_message(mime_message)
            # NOTE(review): __original is only assigned in this branch, so
            # reading .original on a message built without a mime_message
            # raises AttributeError — confirm that is intended.
            self.__original = mime_message
        self.initialize(**kw)

    @property
    def original(self):
        """Get original MIME message from which values were set."""
        return self.__original

    def initialize(self, **kw):
        """Keyword initialization.

        Used to set all fields of the email message using keyword arguments.

        Args:
          kw: List of keyword properties as defined by PROPERTIES.
        """
        for name, value in kw.iteritems():
            setattr(self, name, value)

    def Initialize(self, **kw):
        # CamelCase alias kept for backwards compatibility.
        self.initialize(**kw)

    def check_initialized(self):
        """Check if EmailMessage is properly initialized.

        Test used to determine if EmailMessage meets basic requirements
        for being used with the mail API.  This means that the following
        fields must be set or have at least one value in the case of
        multi value fields:
          - Subject must be set.
          - A recipient must be specified.
          - Must contain a body.
          - All bodies and attachments must decode properly.

        This check does not include determining if the sender is actually
        authorized to send email for the application.

        Raises:
          Appropriate exception for initialization failure.
            InvalidAttachmentTypeError: Use of incorrect attachment type.
            MissingRecipientsError: No recipients specified in to, cc or bcc.
            MissingSenderError: No sender specified.
            MissingSubjectError: Subject is not specified.
            MissingBodyError: No body specified.
            PayloadEncodingError: Payload is not properly encoded.
            UnknownEncodingError: Payload has unknown encoding.
            UnknownCharsetError: Payload has unknown character set.
        """
        if not hasattr(self, 'sender'):
            raise MissingSenderError()
        if not hasattr(self, 'subject'):
            raise MissingSubjectError()
        found_body = False
        try:
            body = self.body
        except AttributeError:
            pass
        else:
            # decode() is called purely for validation; failures propagate.
            if isinstance(body, EncodedPayload):
                body.decode()
            found_body = True
        try:
            html = self.html
        except AttributeError:
            pass
        else:
            if isinstance(html, EncodedPayload):
                html.decode()
            found_body = True
        if not found_body:
            raise MissingBodyError()
        if hasattr(self, 'attachments'):
            for file_name, data in _attachment_sequence(self.attachments):
                # Validates that the attachment type is permitted.
                _GetMimeType(file_name)
                if isinstance(data, EncodedPayload):
                    data.decode()

    def CheckInitialized(self):
        self.check_initialized()

    def is_initialized(self):
        """Determine if EmailMessage is properly initialized.

        Returns:
          True if message is properly initializes, otherwise False.
        """
        try:
            self.check_initialized()
            return True
        except Error:
            return False

    def IsInitialized(self):
        return self.is_initialized()

    def ToProto(self):
        """Convert mail message to protocol message.

        Unicode strings are converted to UTF-8 for all fields.

        This method is overriden by EmailMessage to support the sender fields.

        Returns:
          MailMessage protocol version of mail message.

        Raises:
          Passes through decoding errors that occur when using when decoding
          EncodedPayload objects.
        """
        self.check_initialized()
        message = mail_service_pb.MailMessage()
        message.set_sender(_to_str(self.sender))
        if hasattr(self, 'reply_to'):
            message.set_replyto(_to_str(self.reply_to))
        message.set_subject(_to_str(self.subject))
        if hasattr(self, 'body'):
            body = self.body
            if isinstance(body, EncodedPayload):
                body = body.decode()
            message.set_textbody(_to_str(body))
        if hasattr(self, 'html'):
            html = self.html
            if isinstance(html, EncodedPayload):
                html = html.decode()
            message.set_htmlbody(_to_str(html))
        if hasattr(self, 'attachments'):
            for file_name, data in _attachment_sequence(self.attachments):
                if isinstance(data, EncodedPayload):
                    data = data.decode()
                attachment = message.add_attachment()
                attachment.set_filename(_to_str(file_name))
                attachment.set_data(_to_str(data))
        return message

    def to_mime_message(self):
        """Generate a MIMEMultitype message from EmailMessage.

        Calls MailMessageToMessage after converting self to protocol
        buffer.  Protocol buffer is better at handing corner cases
        than EmailMessage class.

        Returns:
          MIMEMultitype representing the provided MailMessage.

        Raises:
          Appropriate exception for initialization failure.
            InvalidAttachmentTypeError: Use of incorrect attachment type.
            MissingSenderError: No sender specified.
            MissingSubjectError: Subject is not specified.
            MissingBodyError: No body specified.
        """
        return mail_message_to_mime_message(self.ToProto())

    def ToMIMEMessage(self):
        return self.to_mime_message()

    def send(self, make_sync_call=apiproxy_stub_map.MakeSyncCall):
        """Send email message.

        Send properly initialized email message via email API.

        Args:
          make_sync_call: Method which will make synchronous call to api proxy.

        Raises:
          Errors defined in this file above.
        """
        message = self.ToProto()
        response = api_base_pb.VoidProto()
        try:
            # _API_CALL is defined by the concrete subclass.
            make_sync_call('mail', self._API_CALL, message, response)
        except apiproxy_errors.ApplicationError, e:
            # Translate service-level errors into this module's exceptions.
            if e.application_error in ERROR_MAP:
                raise ERROR_MAP[e.application_error](e.error_detail)
            raise e

    def Send(self, *args, **kwds):
        self.send(*args, **kwds)

    def _check_attachment(self, attachment):
        # NOTE(review): with 'or', TypeError is raised only when BOTH values
        # are non-strings; an 'and' check (both must be strings) was probably
        # intended — confirm before changing.
        file_name, data = attachment
        if not (isinstance(file_name, basestring) or
                isinstance(data, basestring)):
            raise TypeError()

    def _check_attachments(self, attachments):
        """Checks values going to attachment field.

        Mainly used to check type safety of the values.  Each value of the
        list must be a pair of the form (file_name, data), and both values a
        string type.

        Args:
          attachments: Collection of attachment tuples.

        Raises:
          TypeError if values are not string type.
        """
        # A bare 2-tuple (name, data) is accepted as a single attachment.
        if len(attachments) == 2 and isinstance(attachments[0], basestring):
            self._check_attachment(attachments)
        else:
            for attachment in attachments:
                self._check_attachment(attachment)

    def __setattr__(self, attr, value):
        """Property setting access control.

        Controls write access to email fields.

        Args:
          attr: Attribute to access.
          value: New value for field.

        Raises:
          ValueError: If provided with an empty field.
          AttributeError: If not an allowed assignment field.
        """
        # Name-mangled private attributes (e.g. __original) bypass the checks.
        if not attr.startswith('_EmailMessageBase'):
            if attr in ['sender', 'reply_to']:
                check_email_valid(value, attr)
            if not value:
                raise ValueError('May not set empty value for \'%s\'' % attr)
            if attr not in self.PROPERTIES:
                raise AttributeError('\'EmailMessage\' has no attribute \'%s\'' % attr)
            if attr == 'attachments':
                self._check_attachments(value)
        super(_EmailMessageBase, self).__setattr__(attr, value)

    def _add_body(self, content_type, payload):
        """Add body to email from payload.

        Will overwrite any existing default plain or html body.

        Args:
          content_type: Content-type of body.
          payload: Payload to store body as.
        """
        if content_type == 'text/plain':
            self.body = payload
        elif content_type == 'text/html':
            self.html = payload

    def _update_payload(self, mime_message):
        """Update payload of mail message from mime_message.

        This function works recusively when it receives a multipart body.
        If it receives a non-multi mime object, it will determine whether or
        not it is an attachment by whether it has a filename or not.
        Attachments and bodies are then wrapped in EncodedPayload with the
        correct charsets and encodings.

        Args:
          mime_message: A Message MIME email object.
        """
        payload = mime_message.get_payload()
        if payload:
            if mime_message.get_content_maintype() == 'multipart':
                # Recurse into each part of the multipart body.
                for alternative in payload:
                    self._update_payload(alternative)
            else:
                # A filename (from content-disposition, or the older 'name'
                # param) marks the part as an attachment rather than a body.
                filename = mime_message.get_param('filename',
                                                 header='content-disposition')
                if not filename:
                    filename = mime_message.get_param('name')
                payload = EncodedPayload(payload,
                                         (mime_message.get_content_charset() or
                                          mime_message.get_charset()),
                                         mime_message['content-transfer-encoding'])
                if filename:
                    try:
                        attachments = self.attachments
                    except AttributeError:
                        self.attachments = [(filename, payload)]
                    else:
                        # Normalize a single bare (name, data) tuple into a
                        # list before appending.
                        if isinstance(attachments[0], basestring):
                            self.attachments = [attachments]
                            attachments = self.attachments
                        attachments.append((filename, payload))
                else:
                    self._add_body(mime_message.get_content_type(), payload)

    def update_from_mime_message(self, mime_message):
        """Copy information from a mime message.

        Set information of instance to values of mime message.  This method
        will only copy values that it finds.  Any missing values will not
        be copied, nor will they overwrite old values with blank values.

        This object is not guaranteed to be initialized after this call.

        Args:
          mime_message: email.Message instance to copy information from.

        Returns:
          MIME Message instance of mime_message argument.
        """
        mime_message = _parse_mime_message(mime_message)
        sender = mime_message['from']
        if sender:
            self.sender = sender
        reply_to = mime_message['reply-to']
        if reply_to:
            self.reply_to = reply_to
        subject = mime_message['subject']
        if subject:
            self.subject = subject
        self._update_payload(mime_message)

    def bodies(self, content_type=None):
        """Iterate over all bodies.

        Yields:
          Tuple (content_type, payload) for html and body in that order.
        """
        if (not content_type or
            content_type == 'text' or
            content_type == 'text/html'):
            try:
                yield 'text/html', self.html
            except AttributeError:
                pass
        if (not content_type or
            content_type == 'text' or
            content_type == 'text/plain'):
            try:
                yield 'text/plain', self.body
            except AttributeError:
                pass
class EmailMessage(_EmailMessageBase):
    """Main interface to email API service.

    This class is used to programmatically build an email message to send via
    the Mail API.  The usage is to construct an instance, populate its fields
    and call Send().

    Example Usage:
      An EmailMessage can be built completely by the constructor.

        EmailMessage(sender='sender@nowhere.com',
                     to='recipient@nowhere.com',
                     subject='a subject',
                     body='This is an email to you').Send()

      It might be desirable for an application to build an email in different
      places throughout the code.  For this, EmailMessage is mutable.

        message = EmailMessage()
        message.sender = 'sender@nowhere.com'
        message.to = ['recipient1@nowhere.com', 'recipient2@nowhere.com']
        message.subject = 'a subject'
        message.body = 'This is an email to you'
        message.check_initialized()
        message.send()
    """

    _API_CALL = 'Send'
    PROPERTIES = set(_EmailMessageBase.PROPERTIES)

    def check_initialized(self):
        """Provide additional checks to ensure recipients have been specified.

        Raises:
          MissingRecipientsError when no recipients specified in to, cc or
          bcc.
        """
        if (not hasattr(self, 'to') and
            not hasattr(self, 'cc') and
            not hasattr(self, 'bcc')):
            raise MissingRecipientsError()
        super(EmailMessage, self).check_initialized()

    def CheckInitialized(self):
        # CamelCase alias kept for backwards compatibility.
        self.check_initialized()

    def ToProto(self):
        """Does addition conversion of recipient fields to protocol buffer.

        Returns:
          MailMessage protocol version of mail message including sender
          fields.
        """
        message = super(EmailMessage, self).ToProto()
        for attribute, adder in (('to', message.add_to),
                                 ('cc', message.add_cc),
                                 ('bcc', message.add_bcc)):
            if hasattr(self, attribute):
                # Each recipient field may hold one address or a sequence.
                for address in _email_sequence(getattr(self, attribute)):
                    adder(_to_str(address))
        return message

    def __setattr__(self, attr, value):
        """Provides additional checks on recipient fields."""
        # Validate every address whether given singly or as an iterable.
        if attr in ['to', 'cc', 'bcc']:
            if isinstance(value, basestring):
                check_email_valid(value, attr)
            else:
                for address in value:
                    check_email_valid(address, attr)
        super(EmailMessage, self).__setattr__(attr, value)

    def update_from_mime_message(self, mime_message):
        """Copy information from a mime message.

        Update fields for recipients.

        Args:
          mime_message: email.Message instance to copy information from.
        """
        mime_message = _parse_mime_message(mime_message)
        super(EmailMessage, self).update_from_mime_message(mime_message)
        # A single header collapses to a scalar value; several stay a list.
        to = mime_message.get_all('to')
        if to:
            if len(to) == 1:
                self.to = to[0]
            else:
                self.to = to
        cc = mime_message.get_all('cc')
        if cc:
            if len(cc) == 1:
                self.cc = cc[0]
            else:
                self.cc = cc
        bcc = mime_message.get_all('bcc')
        if bcc:
            if len(bcc) == 1:
                self.bcc = bcc[0]
            else:
                self.bcc = bcc
class AdminEmailMessage(_EmailMessageBase):
    """Interface to sending email messages to all admins via the mail API.

    This class is used to programmatically build an admin email message to
    send via the Mail API.  The usage is to construct an instance, populate
    its fields and call Send().

    Unlike the normal email message, addresses in the recipient fields are
    ignored and not used for sending.

    Example Usage:
      An AdminEmailMessage can be built completely by the constructor.

        AdminEmailMessage(sender='sender@nowhere.com',
                          subject='a subject',
                          body='This is an email to you').Send()

      It might be desirable for an application to build an admin email in
      different places throughout the code.  For this, AdminEmailMessage is
      mutable.

        message = AdminEmailMessage()
        message.sender = 'sender@nowhere.com'
        message.subject = 'a subject'
        message.body = 'This is an email to you'
        message.check_initialized()
        message.send()
    """

    _API_CALL = 'SendToAdmins'
    # Recipient fields exist on the base class but are meaningless here.
    __UNUSED_PROPERTIES = set(('to', 'cc', 'bcc'))

    def __setattr__(self, attr, value):
        # Warn (but still store) when an unused recipient field is assigned.
        if attr in self.__UNUSED_PROPERTIES:
            logging.warning('\'%s\' is not a valid property to set '
                            'for AdminEmailMessage. It is unused.', attr)
        super(AdminEmailMessage, self).__setattr__(attr, value)
class InboundEmailMessage(EmailMessage):
    """Parsed email object as received from external source.

    Has a date field and can store any number of additional bodies.  These
    additional attributes make the email more flexible as required for
    incoming mail, where the developer has less control over the content.

    Example Usage:

      # Read mail message from CGI input.
      message = InboundEmailMessage(sys.stdin.read())
      logging.info('Received email message from %s at %s',
                   message.sender,
                   message.date)
      enriched_body = list(message.bodies('text/enriched'))[0]
      ... Do something with body ...
    """

    # Maps property name -> MIME header the value is copied from.
    __HEADER_PROPERTIES = {'date': 'date',
                           'message_id': 'message-id',
                           }

    PROPERTIES = frozenset(_EmailMessageBase.PROPERTIES |
                           set(('alternate_bodies',)) |
                           set(__HEADER_PROPERTIES.iterkeys()))

    def update_from_mime_message(self, mime_message):
        """Update values from MIME message.

        Copies over date values.

        Args:
          mime_message: email.Message instance to copy information from.
        """
        mime_message = _parse_mime_message(mime_message)
        super(InboundEmailMessage, self).update_from_mime_message(mime_message)
        for property, header in InboundEmailMessage.__HEADER_PROPERTIES.iteritems():
            value = mime_message[header]
            if value:
                setattr(self, property, value)

    def _add_body(self, content_type, payload):
        """Add body to inbound message.

        Method is overidden to handle incoming messages that have more than
        one plain or html bodies or has any unidentified bodies.

        This method will not overwrite existing html and body values.  This
        means that when updating, the text and html bodies that are first in
        the MIME document order are assigned to the body and html properties.

        Args:
          content_type: Content-type of additional body.
          payload: Content of additional body.
        """
        if (content_type == 'text/plain' and not hasattr(self, 'body') or
            content_type == 'text/html' and not hasattr(self, 'html')):
            super(InboundEmailMessage, self)._add_body(content_type, payload)
        else:
            # Extra bodies of any type accumulate in alternate_bodies,
            # preserving MIME document order.
            try:
                alternate_bodies = self.alternate_bodies
            except AttributeError:
                alternate_bodies = self.alternate_bodies = [(content_type, payload)]
            else:
                alternate_bodies.append((content_type, payload))

    def bodies(self, content_type=None):
        """Iterate over all bodies.

        Args:
          content_type: Content type to filter on.  Allows selection of only
            specific types of content.  Can be just the base type of the
            content type.  For example:
              content_type = 'text/html'  # Matches only HTML content.
              content_type = 'text'       # Matches text of any kind.

        Yields:
          Tuple (content_type, payload) for all bodies of message, including
          body, html and all alternate_bodies in that order.
        """
        main_bodies = super(InboundEmailMessage, self).bodies(content_type)
        for payload_type, payload in main_bodies:
            yield payload_type, payload
        # A filter without '/' matches on the base type only (e.g. 'text').
        partial_type = bool(content_type and content_type.find('/') < 0)
        try:
            for payload_type, payload in self.alternate_bodies:
                if content_type:
                    if partial_type:
                        match_type = payload_type.split('/')[0]
                    else:
                        match_type = payload_type
                    match = match_type == content_type
                else:
                    match = True
                if match:
                    yield payload_type, payload
        except AttributeError:
            # No alternate_bodies were ever recorded.
            pass

    def to_mime_message(self):
        """Convert to MIME message.

        Adds additional headers from inbound email.

        Returns:
          MIME message instance of payload.
        """
        mime_message = super(InboundEmailMessage, self).to_mime_message()
        for property, header in InboundEmailMessage.__HEADER_PROPERTIES.iteritems():
            try:
                mime_message[header] = getattr(self, property)
            except AttributeError:
                # Header property was never set on this message.
                pass
        return mime_message
Parser.Parser
| |
from ..java import (
Method as JavaMethod,
opcodes as JavaOpcodes,
)
from .blocks import Block
from .opcodes import ALOAD_name, ASTORE_name, ICONST_val
# Parameter kinds, mirroring inspect.Parameter's kind classification.
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2  # *args
KEYWORD_ONLY = 3
VAR_KEYWORD = 4  # **kwargs
# Code-object flag bits (CPython co_flags).
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
class Method(Block):
    """A Python function being transpiled to a JVM method.

    Every transpiled method shares one uniform JVM signature: it receives an
    array of positional arguments plus a Hashtable of keyword arguments, and
    returns either void or an org/python/Object.
    """

    def __init__(self, parent, name, parameters, returns=None, static=False, commands=None):
        super().__init__(parent, commands=commands)
        self.name = name
        self.parameters = parameters
        if returns is None:
            self.returns = {}
        else:
            self.returns = returns
        # Load args and kwargs, but don't expose those names into the localvars.
        self.add_self()
        self.localvars['##__args__##'] = len(self.localvars)
        self.localvars['##__kwargs__##'] = len(self.localvars)
        # Then reserve space for all the *actual* arguments.
        for i, arg in enumerate(self.parameters):
            self.localvars[arg['name']] = len(self.localvars)
        self.static = static

    @property
    def is_constructor(self):
        return False

    @property
    def is_instancemethod(self):
        return False

    @property
    def is_closuremethod(self):
        return False

    @property
    def has_void_return(self):
        # Only an explicit `-> None` annotation makes the JVM return void;
        # the object() default ensures a *missing* annotation is not None.
        return self.returns.get('annotation', object()) is None

    @property
    def callable(self):
        return 'org/python/types/Function'

    @property
    def signature(self):
        # (Object[] args, Hashtable kwargs) -> void or Python object.
        return_descriptor = 'V' if self.has_void_return else 'Lorg/python/Object;'
        return '([Lorg/python/Object;Ljava/util/Hashtable;)%s' % return_descriptor

    def add_self(self):
        # Plain functions have no implicit self; subclasses override this.
        pass

    @property
    def method_name(self):
        return self.name

    @property
    def module(self):
        return self.parent

    def add_method(self, method_name, code):
        # If a method is added to a method, it is added as an anonymous
        # inner class.
        from .klass import AnonymousInnerClass
        callable = AnonymousInnerClass(
            parent=self.parent,
            super_name='org/python/types/Object',
            interfaces=['org/python/Callable'],
            public=False,
            final=True,
        )
        method = ClosureMethod(callable, 'invoke', extract_parameters(code))
        method.extract(code)
        callable.methods.append(method.transpile())
        self.parent.classes.append(callable.transpile())
        return method

    def tweak(self):
        # Load all the arguments into locals
        setup = []
        for i, arg in enumerate(self.parameters):
            setup.extend([
                ALOAD_name(self, '##__args__##'),
                ICONST_val(i),
                JavaOpcodes.AALOAD(),
                ASTORE_name(self, arg['name']),
            ])
        # Then run the code as normal.
        self.code = setup + self.code
        # If the method has a void return, clean up the final opcodes.
        if self.has_void_return:
            self.void_return()

    def transpile(self):
        # Wrap the transpiled bytecode in a JVM method definition.
        code = super().transpile()
        return JavaMethod(
            self.method_name,
            self.signature,
            static=self.static,
            attributes=[
                code
            ]
        )
class InitMethod(Method):
    """A transpiled __init__, emitted as a JVM constructor (<init>)."""

    def __init__(self, parent, parameters, commands=None):
        super().__init__(
            parent, '__init__',
            parameters=parameters[1:],  # drop the implicit 'self'
            returns={'annotation': None},  # constructors are void
            commands=commands
        )

    @property
    def is_constructor(self):
        return True

    @property
    def method_name(self):
        # JVM constructors are always named <init>.
        return '<init>'

    @property
    def klass(self):
        return self.parent

    @property
    def module(self):
        return self.klass.module

    def add_self(self):
        # Reserve the first local slot for the implicit 'self'.
        self.localvars['self'] = len(self.localvars)

    def tweak(self):
        # If the block is an init method, make sure it invokes super().<init>
        super_found = False
        # FIXME: Search for existing calls on <init>
        # for opcode in code:
        #     if isinstance(opcode, JavaOpcodes.INVOKESPECIAL) and opcode.method.name == '<init>':
        #         super_found = True
        #         break
        # Load all the arguments into locals
        setup = []
        for i, arg in enumerate(self.parameters):
            setup.extend([
                ALOAD_name(self, '##__args__##'),
                ICONST_val(i),
                JavaOpcodes.AALOAD(),
                ASTORE_name(self, arg['name']),
            ])
        if not super_found:
            # Inject the mandatory superclass constructor call.
            setup.extend([
                JavaOpcodes.ALOAD_0(),
                JavaOpcodes.INVOKESPECIAL(self.klass.super_name, '<init>', '()V'),
            ])
        self.code = setup + self.code
        self.void_return()
        self.ignore_empty()
class InstanceMethod(Method):
    """A method bound to a class instance (takes an implicit 'self').

    The declared first Python parameter ('self') is stripped from the
    parameter list; instead, tweak() loads it from args[0] at runtime.
    """

    def __init__(self, parent, name, parameters, returns=None, static=False, commands=None):
        super().__init__(
            parent, name,
            parameters=parameters[1:],  # drop 'self'; loaded in tweak()
            returns=returns,
            static=static,
            commands=commands
        )

    @property
    def is_instancemethod(self):
        # Bug fix: this previously returned False — identical to the base
        # Method property it overrides, making the override meaningless.
        # An InstanceMethod is, by definition, an instance method.
        return True

    @property
    def callable(self):
        return 'org/python/types/Method'

    @property
    def klass(self):
        # The immediate parent of an instance method is its class.
        return self.parent

    @property
    def module(self):
        return self.klass.module

    def add_self(self):
        # Reserve the first local slot for the implicit 'self'.
        self.localvars['self'] = len(self.localvars)

    def tweak(self):
        # Load the implicit 'self' argument, then all the arguments, into locals
        super().tweak()
        self.code = [
            ALOAD_name(self, '##__args__##'),
            ICONST_val(0),
            JavaOpcodes.AALOAD(),
            ASTORE_name(self, 'self'),
        ] + self.code
class MainMethod(Method):
    """Module-level code, emitted as 'public static void main(String[])'."""

    def __init__(self, parent, commands=None):
        super().__init__(
            parent, '__main__',
            parameters=[{'name': 'args', 'annotation': 'argv'}],
            returns={'annotation': None},  # main() is void
            static=True,
            commands=commands
        )

    @property
    def method_name(self):
        return 'main'

    @property
    def module(self):
        return self.parent

    @property
    def signature(self):
        # The standard JVM entry-point signature.
        return '([Ljava/lang/String;)V'

    def tweak(self):
        # No argument unpacking here: main receives a raw String[].
        self.void_return()
        self.ignore_empty()
class ClosureMethod(Method):
    """Method generated for a closure (the invoke() of an anonymous inner class)."""

    @property
    def is_closuremethod(self):
        return True

    @property
    def callable(self):
        # The callable type is the enclosing anonymous inner class itself.
        return self.parent.descriptor
def extract_parameters(code):
    """Build parameter descriptors from a Python code object.

    Walks the code object's argument metadata in declaration order:
    positional parameters (without, then with, defaults), *args,
    keyword-only parameters, and finally **kwargs.

    Args:
        code: A Python code object (e.g. func.__code__).

    Returns:
        A list of dicts, each with at least 'name', 'annotation' and 'kind'
        keys; defaulted parameters also carry a 'default' key.
    """
    pos_count = code.co_argcount
    var_names = code.co_varnames
    kwonly_count = code.co_kwonlyargcount
    positional = var_names[:pos_count]
    keyword_only = var_names[pos_count:pos_count + kwonly_count]
    # Only the code object is available here, so function-level metadata
    # cannot be recovered.
    annotations = {}  # func.__annotations__
    defs = None  # func.__defaults__
    kwdefaults = None  # func.__kwdefaults__
    pos_default_count = len(defs) if defs else 0

    def describe(param_name, kind, **extra):
        # One descriptor dict per parameter.
        entry = {
            'name': param_name,
            'annotation': annotations.get(param_name),
            'kind': kind,
        }
        entry.update(extra)
        return entry

    non_default_count = pos_count - pos_default_count
    # Positional parameters without defaults, then those with defaults.
    parameters = [describe(name, POSITIONAL_OR_KEYWORD)
                  for name in positional[:non_default_count]]
    for offset, name in enumerate(positional[non_default_count:]):
        parameters.append(
            describe(name, POSITIONAL_OR_KEYWORD, default=defs[offset]))
    # *args sits right after the keyword-only names in co_varnames.
    if code.co_flags & CO_VARARGS:
        parameters.append(
            describe(var_names[pos_count + kwonly_count], VAR_POSITIONAL))
    # Keyword-only parameters always carry a 'default' key (possibly None).
    for name in keyword_only:
        default = kwdefaults.get(name) if kwdefaults is not None else None
        parameters.append(describe(name, KEYWORD_ONLY, default=default))
    # **kwargs follows *args (when present) in co_varnames.
    if code.co_flags & CO_VARKEYWORDS:
        index = pos_count + kwonly_count
        if code.co_flags & CO_VARARGS:
            index += 1
        parameters.append(describe(var_names[index], VAR_KEYWORD))
    return parameters
| |
import threading, time, cgi, os, re
import configuration
import helper_tools as ht
from motes import motes
import utils
import sensor_data
# True while a compile/upload child process may still be producing output;
# polled by PageUpload.serveUploadResult() to decide when the stream ends.
maybeIsInSubprocess = False
# Set to make serveUploadResult() stop waiting and report "Finished!".
forceInterrupt = False
# Serializes compile-and-upload jobs: only one build runs at a time.
subprocessLock = threading.Lock()
def emitCodeMansOS(code, config):
    """Write a MansOS application into ./build: main.c, config, Makefile.

    Args:
        code: C source text, written verbatim to build/main.c.
        config: MansOS config text, written verbatim to build/config.
    """
    # Fix: removed the redundant explicit close() calls inside each
    # 'with' block (they caused a harmless but wrong double close).
    with open(os.path.join("build", "main.c"), "w") as outFile:
        outFile.write(code)
    with open(os.path.join("build", "config"), "w") as outFile:
        outFile.write(config)
    mansosPath = configuration.c.getCfgValue("mansosDirectory")
    if not os.path.isabs(mansosPath):
        # one level up - because we are in build directory
        mansosPath = os.path.join(mansosPath, "..")
    with open(os.path.join("build", "Makefile"), "w") as outFile:
        outFile.write("SOURCES = main.c\n")
        outFile.write("APPMOD = App\n")
        outFile.write("PROJDIR = $(CURDIR)\n")
        outFile.write("ifndef MOSROOT\n")
        outFile.write(" MOSROOT = " + mansosPath + "\n")
        outFile.write("endif\n")
        outFile.write("include ${MOSROOT}/mos/make/Makefile\n")
def emitCodePlainC(code, config):
    """Write a plain-C application into ./build: main.c, config, Makefile.

    Like emitCodeMansOS(), but the generated config disables the MansOS
    kernel main loop and hardware timers so the user's own main() runs.

    Args:
        code: C source text, written verbatim to build/main.c.
        config: config text; kernel-disabling options are appended to it.
    """
    # Fix: removed the redundant explicit close() calls inside each
    # 'with' block (they caused a harmless but wrong double close).
    with open(os.path.join("build", "main.c"), "w") as outFile:
        outFile.write(code)
    # do not use mansos main in this case
    config += "\n"
    config += "USE_KERNEL_MAIN=n\n"
    config += "USE_HARDWARE_TIMERS=n\n"
    with open(os.path.join("build", "config"), "w") as outFile:
        outFile.write(config)
    mansosPath = configuration.c.getCfgValue("mansosDirectory")
    if not os.path.isabs(mansosPath):
        # one level up - because we are in build directory
        mansosPath = os.path.join(mansosPath, "..")
    with open(os.path.join("build", "Makefile"), "w") as outFile:
        outFile.write("SOURCES = main.c\n")
        outFile.write("APPMOD = App\n")
        outFile.write("PROJDIR = $(CURDIR)\n")
        outFile.write("ifndef MOSROOT\n")
        outFile.write(" MOSROOT = " + mansosPath + "\n")
        outFile.write("endif\n")
        outFile.write("include ${MOSROOT}/mos/make/Makefile\n")
def detectTinyOSAppName(code, config):
    """Guess the nesC module and configuration names from source text.

    Scans the module source for ``module <Name>`` and the configuration
    source for ``configuration <Name>``, falling back to "MyC" / "MyAppC"
    when no name is found.

    Args:
        code: nesC module source text.
        config: nesC configuration source text.

    Returns:
        Tuple (componentName, appName).
    """
    # Fix: regex patterns are now raw strings ('\s' in a plain string is an
    # invalid escape in Python 3), and 'not m is None' -> 'm is not None'.
    m = re.search(r'.*module\s+([a-zA-Z0-9_]+)\s+', code)
    componentName = m.group(1) if m is not None else "MyC"
    m = re.search(r'.*configuration\s+([a-zA-Z0-9_]+)\s+', config)
    appName = m.group(1) if m is not None else "MyAppC"
    return (componentName, appName)
def emitCodeTinyOS(code, config, componentName, appName):
    """Write a TinyOS (nesC) application into ./build.

    Args:
        code: nesC module source, written to build/<componentName>.nc.
        config: nesC configuration source, written to build/<appName>.nc.
        componentName: module name (see detectTinyOSAppName()).
        appName: top-level configuration (component) name.
    """
    # Fix: removed the redundant explicit close() calls inside each
    # 'with' block (they caused a harmless but wrong double close).
    with open(os.path.join("build", componentName + ".nc"), "w") as outFile:
        outFile.write(code)
    with open(os.path.join("build", appName + ".nc"), "w") as outFile:
        outFile.write(config)
    with open(os.path.join("build", "Makefile"), "w") as outFile:
        outFile.write("COMPONENT = " + appName + "\n")
        outFile.write("include $(MAKERULES)\n")
def emitCodeContiki(code):
    """Write a Contiki application into ./build: app.c and a Makefile.

    Args:
        code: C source text, written verbatim to build/app.c.
    """
    # Fix: removed the redundant explicit close() calls inside each
    # 'with' block (they caused a harmless but wrong double close).
    with open(os.path.join("build", "app.c"), "w") as outFile:
        outFile.write(code)
    contikiPath = configuration.c.getCfgValue("contikiDirectory")
    if not os.path.isabs(contikiPath):
        # one level up - because we are in build directory
        contikiPath = os.path.join(contikiPath, "..")
    with open(os.path.join("build", "Makefile"), "w") as outFile:
        outFile.write("CONTIKI_PROJECT = app\n")
        outFile.write("all: $(CONTIKI_PROJECT)\n")
        outFile.write("PROJDIR = $(CURDIR)\n")
        outFile.write("CONTIKI = " + contikiPath + "\n")
        outFile.write("include $(CONTIKI)/Makefile.include\n")
def emitCodeSEAL(code, config):
    """Write a SEAL application into ./build: main.sl, config, Makefile.

    Args:
        code: SEAL source text, written verbatim to build/main.sl.
        config: MansOS config text, written verbatim to build/config.
    """
    # Fix: removed the redundant explicit close() calls inside each
    # 'with' block (they caused a harmless but wrong double close).
    with open(os.path.join("build", "main.sl"), "w") as outFile:
        outFile.write(code)
    with open(os.path.join("build", "config"), "w") as outFile:
        outFile.write(config)
    mansosPath = configuration.c.getCfgValue("mansosDirectory")
    if not os.path.isabs(mansosPath):
        # one level up - because we are in build directory
        mansosPath = os.path.join(mansosPath, "..")
    with open(os.path.join("build", "Makefile"), "w") as outFile:
        outFile.write("SEAL_SOURCES = main.sl\n")
        outFile.write("APPMOD = App\n")
        outFile.write("PROJDIR = $(CURDIR)\n")
        outFile.write("ifndef MOSROOT\n")
        outFile.write(" MOSROOT = " + mansosPath + "\n")
        outFile.write("endif\n")
        outFile.write("include ${MOSROOT}/mos/make/Makefile\n")
class PageUpload():
def serveUploadGet(self, qs):  # , lastUploadCode, lastUploadConfig, lastUploadFile):
    """Serve the upload page (HTTP GET): render form with saved settings.

    Args:
        qs: Parsed query string (dict of lists), used for session handling.
    """
    # NOTE(review): 'isListening' is declared global here but never read or
    # assigned in this method — likely a leftover; confirm before removing.
    global isListening
    self.setSession(qs)
    self.send_response(200)
    self.sendDefaultHeaders()
    self.end_headers()
    # TODO: restore the last-uploaded code/config/file instead of blanks.
    lastUploadCode = ""
    lastUploadConfig = ""
    lastUploadFile = ""
    motesText = self.serveMotes("upload", "Upload", qs, None)
    # Pre-select the language option and the "slow upload" checkbox from
    # the persisted configuration.
    codeType = configuration.c.getCfgValue("codeType").lower()
    isSlow = configuration.c.getCfgValueAsBool("slowUpload")
    self.serveAnyPage("upload", qs, True, {"MOTES_TXT": motesText,
        "CCODE_SELECTED": 'selected="selected"' if codeType == "c" else "",
        "PLAINCCODE_SELECTED": 'selected="selected"' if codeType == "plain_c" else "",
        "NESCCODE_SELECTED": 'selected="selected"' if codeType == "nesc" else "",
        "CONTIKICODE_SELECTED": 'selected="selected"' if codeType == "contiki_c" else "",
        "SEALCODE_SELECTED": 'selected="selected"' if codeType == "seal" else "",
        "UPLOAD_CODE": lastUploadCode,
        "UPLOAD_CONFIG": lastUploadConfig,
        "UPLOAD_FILENAME": lastUploadFile,
        "SLOW_CHECKED": 'checked="checked"' if isSlow else ""})
def serveUploadPost(self, qs):  # , lastUploadCode, lastUploadConfig, lastUploadFile):
    """Handle the upload form (HTTP POST): compile and/or flash motes.

    Reads the posted form, persists language/slow-upload settings, records
    which motes are selected, then runs compileAndUpload() and reports the
    outcome as a chunked response.

    Args:
        qs: Parsed query string (unused beyond the handler convention).
    """
    global maybeIsInSubprocess
    global forceInterrupt
    # TODO: restore the last-uploaded code/config/file instead of blanks.
    lastUploadCode = ""
    lastUploadConfig = ""
    lastUploadFile = ""
    # Parse the form data posted
    form = cgi.FieldStorage(
        fp=self.rfile,
        headers=self.headers,
        environ={
            'REQUEST_METHOD': 'POST',
            'CONTENT_TYPE': self.headers['Content-Type']
        })
    self.send_response(200)
    self.sendDefaultHeaders()
    self.end_headers()
    # NOTE(review): file_data is assigned but never used below — confirm
    # it is dead before removing.
    file_data = None
    codeType = "C"
    if "compile" in form:
        if "language" in form:
            codeType = form["language"].value.strip().lower()
            configuration.c.setCfgValue("codeType", codeType)
    # NOTE(review): 'slow' is read further below unconditionally, so it
    # must be bound on every path through this method.
    if "slow" in form:
        slow = form["slow"].value == "on"
    else:
        slow = False
    configuration.c.setCfgValue("slowUpload", slow)
    configuration.c.save()
    code = None
    fileContents = None
    fileName = None
    if "compile" in form and "code" in form:
        code = form["code"].value
    if "upload" in form and "file" in form:
        fileContents = form["file"].file.read()
        fileName = form["file"].filename
    # check if what to upload is provided
    if not fileContents and not code:
        infoMsg = "Neither filename nor code specified!"
        maybeIsInSubprocess = False
        forceInterrupt = True
        self.writeChunk(infoMsg);
        return
    # Record the selection state of each mote from its form checkbox.
    for m in motes.getMotes():
        name = "mote" + m.getFullBasename()
        if name in form:
            isChecked = form[name].value == "on"
        else:
            isChecked = False
        if isChecked:
            m.isSelected = True
        else:
            m.isSelected = False
    # remember which motes were selected and which not
    motes.storeSelected()
    # check if any motes are selected
    if not motes.anySelected():
        infoMsg = "No motes selected!"
        maybeIsInSubprocess = False
        forceInterrupt = True
        self.writeChunk(infoMsg);
        return
    config = ""
    if "config" in form.keys():
        lastUploadConfig = form["config"].value
        config = lastUploadConfig
    if slow:
        config += "\nSLOW_UPLOAD=y\n"
    retcode = self.compileAndUpload(code, config, fileName, fileContents, codeType)
    motesText = self.serveMotes("upload", "Upload", None, form)
    codeType = configuration.c.getCfgValue("codeType")
    isSlow = configuration.c.getCfgValueAsBool("slowUpload")
    if retcode == 0:
        infoMsg = "Upload done!"
    else:
        infoMsg = "Upload failed!"
    self.writeChunk(infoMsg);
def serveUploadResult(self, qs):
    """Stream the output of the compile/upload subprocess back to the client.

    Chunked-response poll handler: reads the subprocess output file
    ("build/child_output.txt") line by line and writes each line as an HTTP
    chunk, up to `limit` lines per request (taken from the "line" query
    parameter).  The client re-polls with an increasing line count until
    "Finished!" is sent.

    Relies on the module globals `maybeIsInSubprocess` (true while the
    compiler/uploader child is running) and `forceInterrupt` (set elsewhere
    to abort the poll loop).
    """
    global maybeIsInSubprocess
    global forceInterrupt
    self.send_response(200)
    self.sendDefaultHeaders()
    self.end_headers()
    inFileName = os.path.join("build", "child_output.txt")
    inFile = None
    try:
        limit = int(qs['line'][0])
    except:
        # missing/garbled "line" parameter: effectively no limit
        limit = 100000
    if limit == 1:
        # first time in this function
        maybeIsInSubprocess = True
    uploadLine = ""
    try:
        # wait until subprocess output file appears
        while inFile == None:
            try:
                if forceInterrupt:
                    self.writeChunk("Finished!")
                    forceInterrupt = False
                    return
                inFile = open(inFileName, "rb")
            except:
                # file not there yet; spin with a tiny sleep
                inFile = None
                time.sleep(0.001)
        if inFile:
            i = 0;
            while True:
                if utils.fileIsOver(inFile):
                    # reached current end of file
                    if maybeIsInSubprocess:
                        # child may still produce output: wait for more
                        time.sleep(0.001)
                        continue
                    else:
                        # child finished and everything was streamed
                        self.writeChunk("Finished!")
                        # clean up
                        try:
                            if inFile: inFile.close()
                            os.remove(inFileName)
                        except:
                            pass
                        break
                # read one symbol
                c = inFile.read(1)
                uploadLine += c
                if c == '\n':
                    # if newline reached, print out the current line
                    self.writeChunk(uploadLine)
                    uploadLine = ""
                    i = i + 1
                    if i > limit:
                        break;
    except:
        raise
def compileAndUpload(self, code, config, fileName, fileContents, codeType):
    """Compile the posted source code (or use a prebuilt image) and upload
    it to all selected motes.

    Runs synchronously under `subprocessLock`; serial ports are closed for
    the duration of the flashing and reopened afterwards.

    :param code: source code text, or None if a prebuilt file was posted
    :param config: build configuration text (may be None)
    :param fileName: original name of the posted prebuilt image, if any
    :param fileContents: contents of the posted prebuilt image, if any
    :param codeType: language/OS selector ("c", "plain_c", "nesc",
        "contiki_c", "seal")
    :returns: 0 on success, non-zero if any mote failed (or input invalid)
    """
    # global lastUploadCode
    # global lastUploadConfig
    # global lastUploadFile
    global maybeIsInSubprocess
    global isListening
    if not os.path.exists("build"):
        os.mkdir("build")
    # do this synchronously: only one compile/upload may run at a time
    subprocessLock.acquire()
    # the programmer needs exclusive access to the serial ports
    ht.closeAllSerial()
    isListening = False
    maybeIsInSubprocess = True
    # default: failure unless one of the upload paths below runs
    # (previously an empty request would hit an unbound `retcode`)
    retcode = 1
    try:
        if fileContents:
            # a prebuilt .ihex image was posted: save it and flash directly
            # NOTE(review): the matching `global` is commented out above, so
            # this assignment only sets a local -- confirm intent
            lastUploadFile = fileName
            filename = os.path.join("build", "tmp-file.ihex")
            # `with` already closes the file; the old explicit close() inside
            # the with-block was redundant
            with open(filename, "w") as outFile:
                outFile.write(fileContents)
            retcode = 0
            for m in motes.getMotes():
                if not m.tryToOpenSerial(False):
                    continue
                r = m.tryToUpload(self, filename)
                if r != 0:
                    retcode = r
        elif code:
            # NOTE(review): local only, see comment above
            lastUploadCode = code
            if config is None:
                config = ""
            # generate the build files for the selected language/OS
            if codeType == "c":
                emitCodeMansOS(code, config)
            elif codeType == "plain_c":
                emitCodePlainC(code, config)
            elif codeType == "nesc":
                (componentName, appName) = detectTinyOSAppName(code, config)
                emitCodeTinyOS(code, config, componentName, appName)
            elif codeType == "contiki_c":
                emitCodeContiki(code)
            elif codeType == "seal":
                emitCodeSEAL(code, config)
            else:
                print("compileAndUpload: unknown code type: " + codeType)
                return 1
            retcode = 0
            for m in motes.getMotes():
                if m.isLocal():
                    if not m.tryToOpenSerial(False):
                        continue
                    r = m.tryToCompileAndUpload(self, codeType)
                    if r != 0:
                        retcode = r
    finally:
        # always restore listening state and release the lock, even on error
        maybeIsInSubprocess = False
        sensor_data.moteData.reset()
        ht.openAllSerial()
        isListening = True
        subprocessLock.release()
    return retcode
| |
'''
EffectWidget
============
.. versionadded:: 1.9.0
This code is still experimental, and its API is subject to change in a
future version.
The :class:`EffectWidget` is able to apply a variety of fancy
graphical effects to
its children. It works by rendering to a series of
:class:`~kivy.graphics.Fbo` instances with custom opengl fragment shaders.
As such, effects can freely do almost anything, from inverting the
colors of the widget, to antialiasing, to emulating the appearance of a
CRT monitor!
The basic usage is as follows::
w = EffectWidget()
w.add_widget(Button(text='Hello!'))
w.effects = [InvertEffect(), HorizontalBlurEffect(size=2.0)]
The effects can be a list of effects of any length, and they will be
applied sequentially.
The module comes with a range of prebuilt effects, but the interface
is designed to make it easy to create your own. Instead of writing a
full glsl shader, you provide a single function that takes
some inputs based on the screen (current pixel color, current widget
texture etc.). See the sections below for more information.
.. note:: It is not efficient to resize an :class:`EffectWidget`, as
each :class:`~kivy.graphics.Fbo` is recreated every time.
If you need to resize frequently, consider doing things a
different way.
.. note:: Although some effects have adjustable parameters, it is
*not* efficient to animate these, as the entire
shader is reconstructed every time. You should use glsl
uniform variables instead. The :class:`AdvancedEffectBase`
may make this easier.
.. note:: The :class:`EffectWidget` *cannot* draw outside its own
widget area (pos -> pos + size), any child widgets
overlapping the boundary will be cut off at this point.
Provided Effects
----------------
The module comes with several pre-written effects. Some have
adjustable properties (e.g. blur radius), see the individual
effect documentation for more details.
- :class:`MonochromeEffect` - makes the widget grayscale.
- :class:`InvertEffect` - inverts the widget colors.
- :class:`ChannelMixEffect` - swaps around color channels.
- :class:`ScanlinesEffect` - displays flickering scanlines.
- :class:`PixelateEffect` - pixelates the image.
- :class:`HorizontalBlurEffect` - Gaussian blurs horizontally.
- :class:`VerticalBlurEffect` - Gaussian blurs vertically.
- :class:`FXAAEffect` - applies a very basic AA.
Creating Effects
----------------
Effects are designed to make it easy to create and use your own
transformations. You do this by creating and using an instance of
:class:`EffectBase` with your own custom :attr:`EffectBase.glsl`
property.
The glsl property is a string representing part of a glsl fragment
shader. You can include as many functions as you like (the string
is simply spliced into the whole shader), but it
must implement a function :code:`effect` as below::
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
// ... your code here
return something; // must be a vec4 representing the new color
}
The full shader will calculate the normal pixel color at each point,
then call your :code:`effect` function to transform it. The
parameters are:
- **color**: The normal color of the current pixel (i.e. texture
sampled at tex_coords).
- **texture**: The texture containing the widget's normal background.
- **tex_coords**: The normal texture_coords used to access texture.
- **coords**: The pixel indices of the current pixel.
The shader code also has access to two useful uniform variables,
:code:`time` containing the time (in seconds) since the program start,
and :code:`resolution` containing the shape (x pixels, y pixels) of
the widget.
For instance, the following simple string (taken from the `InvertEffect`)
would invert the input color but set alpha to 1.0::
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return vec4(1.0 - color.xyz, 1.0);
}
You can also set the glsl by automatically loading the string from a
file, simply set the :attr:`EffectBase.source` property of an effect.
'''
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.graphics import (RenderContext, Fbo, Color, Rectangle,
                           Translate, PushMatrix, PopMatrix, ClearColor,
                           ClearBuffers)
from kivy.logger import Logger
from kivy.properties import (StringProperty, ObjectProperty, ListProperty,
                             NumericProperty, DictProperty)
from kivy.resources import resource_find
from kivy.uix.relativelayout import RelativeLayout
__all__ = ('EffectWidget', 'EffectBase', 'AdvancedEffectBase',
'MonochromeEffect', 'InvertEffect', 'ChannelMixEffect',
'ScanlinesEffect', 'PixelateEffect',
'HorizontalBlurEffect', 'VerticalBlurEffect',
'FXAAEffect')
# Common fragment-shader prelude: declares the varyings produced by the
# vertex shader plus the widget's texture sampler.
shader_header = '''
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
'''
# Extra uniforms made available to every effect: widget size in pixels and
# seconds since program start (updated in EffectWidget._update_glsl).
shader_uniforms = '''
uniform vec2 resolution;
uniform float time;
'''
# main() used when no effect function is spliced in: plain textured output.
shader_footer_trivial = '''
void main (void){
gl_FragColor = frag_color * texture2D(texture0, tex_coord0);
}
'''
# main() used with an effect: computes the normal pixel color, then runs it
# through the user-supplied effect() function (spliced in between the header
# and this footer by EffectBase.set_fbo_shader).
shader_footer_effect = '''
void main (void){
vec4 normal_color = frag_color * texture2D(texture0, tex_coord0);
vec4 effect_color = effect(normal_color, texture0, tex_coord0,
gl_FragCoord.xy);
gl_FragColor = effect_color;
}
'''
# Identity effect: returns the input color unchanged (EffectBase default).
effect_trivial = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return color;
}
'''
# Grayscale: average of the three color channels, alpha preserved.
effect_monochrome = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
float mag = 1.0/3.0 * (color.x + color.y + color.z);
return vec4(mag, mag, mag, color.w);
}
'''
# Color inversion, alpha preserved.
effect_invert = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return vec4(1.0 - color.xyz, color.w);
}
'''
# Template for ChannelMixEffect: the three {} slots are filled with channel
# letters (x/y/z) via str.format, hence the doubled {{ }} braces.
effect_mix = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
return vec4(color.{}, color.{}, color.{}, color.w);
}}
'''
# Template for HorizontalBlurEffect: 9-tap Gaussian blur along x.  The {}
# slot receives the blur width in pixels; the step dt is normalized by the
# widget width (resolution.x).  Doubled braces escape str.format.
effect_blur_h = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
float dt = ({} / 4.0) * 1.0 / resolution.x;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x - 4.0*dt, tex_coords.y))
* 0.05;
sum += texture2D(texture, vec2(tex_coords.x - 3.0*dt, tex_coords.y))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x - 2.0*dt, tex_coords.y))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x - dt, tex_coords.y))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y))
* 0.16;
sum += texture2D(texture, vec2(tex_coords.x + dt, tex_coords.y))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x + 2.0*dt, tex_coords.y))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x + 3.0*dt, tex_coords.y))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x + 4.0*dt, tex_coords.y))
* 0.05;
return vec4(sum.xyz, color.w);
}}
'''
# Template for VerticalBlurEffect: 9-tap Gaussian blur along y.  The {}
# slot receives the blur width in pixels.  Bug fix: the step dt walks
# tex_coords.y, so it must be normalized by the widget HEIGHT
# (resolution.y); the old code divided by resolution.x, giving a wrong
# blur radius on non-square widgets.
effect_blur_v = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
float dt = ({} / 4.0)
* 1.0 / resolution.y;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 4.0*dt))
* 0.05;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 3.0*dt))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 2.0*dt))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - dt))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y))
* 0.16;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + dt))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 2.0*dt))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 3.0*dt))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 4.0*dt))
* 0.05;
return vec4(sum.xyz, color.w);
}}
'''
# CRT-style post-processing: chromatic aberration, green tint and animated
# scanline flicker (driven by the `time` uniform).  Used by ScanlinesEffect.
effect_postprocessing = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
vec2 q = tex_coords * vec2(1, -1);
vec2 uv = 0.5 + (q-0.5);//*(0.9);// + 0.1*sin(0.2*time));
vec3 oricol = texture2D(texture,vec2(q.x,1.0-q.y)).xyz;
vec3 col;
col.r = texture2D(texture,vec2(uv.x+0.003,-uv.y)).x;
col.g = texture2D(texture,vec2(uv.x+0.000,-uv.y)).y;
col.b = texture2D(texture,vec2(uv.x-0.003,-uv.y)).z;
col = clamp(col*0.5+0.5*col*col*1.2,0.0,1.0);
//col *= 0.5 + 0.5*16.0*uv.x*uv.y*(1.0-uv.x)*(1.0-uv.y);
col *= vec3(0.8,1.0,0.7);
col *= 0.9+0.1*sin(10.0*time+uv.y*1000.0);
col *= 0.97+0.03*sin(110.0*time);
float comp = smoothstep( 0.2, 0.7, sin(time) );
//col = mix( col, oricol, clamp(-2.0+2.0*q.x+3.0*comp,0.0,1.0) );
return vec4(col, color.w);
}
'''
# Template for PixelateEffect: the {} slot receives the pixel size; samples
# the texture at the center of each enlarged "pixel" cell.
effect_pixelate = '''
vec4 effect(vec4 vcolor, sampler2D texture, vec2 texcoord, vec2 pixel_coords)
{{
vec2 pixelSize = {} / resolution;
vec2 xy = floor(texcoord/pixelSize)*pixelSize + pixelSize/2.0;
return texture2D(texture, xy);
}}
'''
# Basic FXAA (fast approximate anti-aliasing) pass, used by FXAAEffect.
effect_fxaa = '''
vec4 effect( vec4 color, sampler2D buf0, vec2 texCoords, vec2 coords)
{
vec2 frameBufSize = resolution;
float FXAA_SPAN_MAX = 8.0;
float FXAA_REDUCE_MUL = 1.0/8.0;
float FXAA_REDUCE_MIN = 1.0/128.0;
vec3 rgbNW=texture2D(buf0,texCoords+(vec2(-1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbNE=texture2D(buf0,texCoords+(vec2(1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbSW=texture2D(buf0,texCoords+(vec2(-1.0,1.0)/frameBufSize)).xyz;
vec3 rgbSE=texture2D(buf0,texCoords+(vec2(1.0,1.0)/frameBufSize)).xyz;
vec3 rgbM=texture2D(buf0,texCoords).xyz;
vec3 luma=vec3(0.299, 0.587, 0.114);
float lumaNW = dot(rgbNW, luma);
float lumaNE = dot(rgbNE, luma);
float lumaSW = dot(rgbSW, luma);
float lumaSE = dot(rgbSE, luma);
float lumaM = dot(rgbM, luma);
float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
vec2 dir;
dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
float dirReduce = max(
(lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL),
FXAA_REDUCE_MIN);
float rcpDirMin = 1.0/(min(abs(dir.x), abs(dir.y)) + dirReduce);
dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX),
dir * rcpDirMin)) / frameBufSize;
vec3 rgbA = (1.0/2.0) * (
texture2D(buf0, texCoords.xy + dir * (1.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (2.0/3.0 - 0.5)).xyz);
vec3 rgbB = rgbA * (1.0/2.0) + (1.0/4.0) * (
texture2D(buf0, texCoords.xy + dir * (0.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (3.0/3.0 - 0.5)).xyz);
float lumaB = dot(rgbB, luma);
vec4 return_color;
if((lumaB < lumaMin) || (lumaB > lumaMax)){
return_color = vec4(rgbA, color.w);
}else{
return_color = vec4(rgbB, color.w);
}
return return_color;
}
'''
class EffectBase(EventDispatcher):
    '''The base class for GLSL effects. It simply returns its input.

    See module documentation for more details.
    '''
    glsl = StringProperty(effect_trivial)
    '''The glsl string defining your effect function, see module
    documentation for more details.

    :attr:`glsl` is a :class:`~kivy.properties.StringProperty` and
    defaults to a trivial effect that returns its input.
    '''
    source = StringProperty('')
    '''The (optional) filename from which to load the :attr:`glsl`
    string.

    :attr:`source` is a :class:`~kivy.properties.StringProperty` and
    defaults to ''.
    '''
    fbo = ObjectProperty(None, allownone=True)
    '''The fbo currently using this effect. The :class:`EffectBase`
    automatically handles this.

    :attr:`fbo` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
    '''

    def __init__(self, *args, **kwargs):
        super(EffectBase, self).__init__(*args, **kwargs)
        fbind = self.fast_bind
        fbo_shader = self.set_fbo_shader
        # re-splice the shader whenever the target fbo or glsl changes
        fbind('fbo', fbo_shader)
        fbind('glsl', fbo_shader)
        fbind('source', self._load_from_source)

    def set_fbo_shader(self, *args):
        '''Sets the :class:`~kivy.graphics.Fbo`'s shader by splicing
        the :attr:`glsl` string into a full fragment shader.

        The full shader is made up of :code:`shader_header +
        shader_uniforms + self.glsl + shader_footer_effect`.
        '''
        if self.fbo is None:
            return
        self.fbo.set_fs(shader_header + shader_uniforms + self.glsl +
                        shader_footer_effect)

    def _load_from_source(self, *args):
        '''(internal) Loads the glsl string from a source file.'''
        source = self.source
        if not source:
            return
        filename = resource_find(source)
        if filename is None:
            # Bug fix: the old message, 'Error reading file (unknown)',
            # had no format placeholder, so the offending filename was
            # silently discarded from the log.
            return Logger.error(
                'Error reading file {filename}'.format(filename=source))
        with open(filename) as fileh:
            self.glsl = fileh.read()
class AdvancedEffectBase(EffectBase):
    '''An :class:`EffectBase` with additional behavior to easily
    set and update uniform variables in your shader.

    This class is provided for convenience if implementing your own
    effects, it is not used by any of those provided with Kivy.

    In addition to your base glsl string that must be provided as
    normal, the :class:`AdvancedEffectBase` has an extra property
    :attr:`uniforms`, a dictionary of name-value pairs. Whenever
    a value is changed, the new values for the uniform variable with
    the given name are uploaded to the shader.

    You must still manually declare your uniform variables at the top
    of your glsl string.
    '''
    uniforms = DictProperty({})
    '''A dictionary of uniform variable names and their values. These
    are automatically uploaded to the :attr:`fbo` shader if appropriate.

    uniforms is a :class:`~kivy.properties.DictProperty` and
    defaults to {}.
    '''

    def __init__(self, *args, **kwargs):
        super(AdvancedEffectBase, self).__init__(*args, **kwargs)
        # push new uniform values to the fbo whenever the dict changes
        self.fast_bind('uniforms', self._update_uniforms)

    def _update_uniforms(self, *args):
        '''(internal) Upload every entry of :attr:`uniforms` to the fbo.'''
        target_fbo = self.fbo
        if target_fbo is None:
            return
        for name, value in self.uniforms.items():
            target_fbo[name] = value

    def set_fbo_shader(self, *args):
        # after the shader is (re)built, the uniforms must be re-sent
        super(AdvancedEffectBase, self).set_fbo_shader(*args)
        self._update_uniforms()
class MonochromeEffect(EffectBase):
    '''Returns its input colors in monochrome.'''
    def __init__(self, *args, **kwargs):
        super(MonochromeEffect, self).__init__(*args, **kwargs)
        # assigning glsl triggers EffectBase's shader rebuild binding
        self.glsl = effect_monochrome
class InvertEffect(EffectBase):
    '''Inverts the colors in the input.'''
    def __init__(self, *args, **kwargs):
        super(InvertEffect, self).__init__(*args, **kwargs)
        # assigning glsl triggers EffectBase's shader rebuild binding
        self.glsl = effect_invert
class ScanlinesEffect(EffectBase):
    '''Adds scanlines to the input.'''
    def __init__(self, *args, **kwargs):
        super(ScanlinesEffect, self).__init__(*args, **kwargs)
        # uses the CRT-style postprocessing shader (tint + flicker)
        self.glsl = effect_postprocessing
class ChannelMixEffect(EffectBase):
    '''Mixes the color channels of the input according to the order
    property. Channels may be arbitrarily rearranged or repeated.'''

    order = ListProperty([1, 2, 0])
    '''The new sorted order of the rgb channels.

    order is a :class:`~kivy.properties.ListProperty` and defaults to
    [1, 2, 0], corresponding to (g, b, r).
    '''

    def __init__(self, *args, **kwargs):
        super(ChannelMixEffect, self).__init__(*args, **kwargs)
        self.do_glsl()

    def on_order(self, *args):
        # property change handler: rebuild the shader for the new order
        self.do_glsl()

    def do_glsl(self):
        '''Splice the selected channel letters into the mix template.'''
        channel_names = {0: 'x', 1: 'y', 2: 'z'}
        letters = [channel_names[channel] for channel in self.order]
        self.glsl = effect_mix.format(*letters)
class PixelateEffect(EffectBase):
    '''Pixelates the input according to its
    :attr:`~PixelateEffect.pixel_size`'''

    pixel_size = NumericProperty(10)
    '''
    Sets the size of a new 'pixel' in the effect, in terms of number of
    'real' pixels.

    pixel_size is a :class:`~kivy.properties.NumericProperty` and
    defaults to 10.
    '''

    def __init__(self, *args, **kwargs):
        super(PixelateEffect, self).__init__(*args, **kwargs)
        self.do_glsl()

    def on_pixel_size(self, *args):
        # property change handler: rebuild the shader for the new size
        self.do_glsl()

    def do_glsl(self):
        '''Splice the current pixel size into the pixelate template.'''
        cell_size = float(self.pixel_size)
        self.glsl = effect_pixelate.format(cell_size)
class HorizontalBlurEffect(EffectBase):
    '''Blurs the input horizontally, with the width given by
    :attr:`~HorizontalBlurEffect.size`.'''

    size = NumericProperty(4.0)
    '''The blur width in pixels.

    size is a :class:`~kivy.properties.NumericProperty` and defaults to
    4.0.
    '''

    def __init__(self, *args, **kwargs):
        super(HorizontalBlurEffect, self).__init__(*args, **kwargs)
        self.do_glsl()

    def on_size(self, *args):
        # property change handler: rebuild the shader for the new width
        self.do_glsl()

    def do_glsl(self):
        '''Splice the current blur width into the horizontal template.'''
        blur_width = float(self.size)
        self.glsl = effect_blur_h.format(blur_width)
class VerticalBlurEffect(EffectBase):
    '''Blurs the input vertically, with the width given by
    :attr:`~VerticalBlurEffect.size`.'''

    size = NumericProperty(4.0)
    '''The blur width in pixels.

    size is a :class:`~kivy.properties.NumericProperty` and defaults to
    4.0.
    '''

    def __init__(self, *args, **kwargs):
        super(VerticalBlurEffect, self).__init__(*args, **kwargs)
        self.do_glsl()

    def on_size(self, *args):
        # property change handler: rebuild the shader for the new width
        self.do_glsl()

    def do_glsl(self):
        '''Splice the current blur width into the vertical template.'''
        blur_width = float(self.size)
        self.glsl = effect_blur_v.format(blur_width)
class FXAAEffect(EffectBase):
    '''Applies very simple antialiasing via fxaa.'''
    def __init__(self, *args, **kwargs):
        super(FXAAEffect, self).__init__(*args, **kwargs)
        # assigning glsl triggers EffectBase's shader rebuild binding
        self.glsl = effect_fxaa
class EffectFbo(Fbo):
    '''An :class:`~kivy.graphics.Fbo` with extra facility to
    attempt setting a new shader, see :meth:`set_fs`.
    '''

    def __init__(self, *args, **kwargs):
        super(EffectFbo, self).__init__(*args, **kwargs)
        # rectangle used to draw the previous fbo's texture into this one;
        # filled in by EffectWidget.refresh_fbo_setup
        self.texture_rectangle = None

    def set_fs(self, value):
        '''Attempt to set the fragment shader to the given value.
        If setting the shader fails, resets the old one and raises an
        exception.
        '''
        shader = self.shader
        previous_fs = shader.fs
        shader.fs = value
        if shader.success:
            return
        # compilation failed: restore the working shader and complain
        shader.fs = previous_fs
        raise Exception('Setting new shader failed.')
class EffectWidget(RelativeLayout):
    '''
    Widget with the ability to apply a series of graphical effects to
    its children. See module documentation for full information on
    setting effects and creating your own.
    '''
    background_color = ListProperty((0, 0, 0, 1))
    '''This defines the background color to be used for the fbo in the
    EffectWidget.

    :attr:`background_color` is a :class:`ListProperty` defaults to
    (0, 0, 0, 1)
    '''
    texture = ObjectProperty(None)
    '''The output texture of our final :class:`~kivy.graphics.Fbo` after
    all effects have been applied.

    texture is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''
    effects = ListProperty([])
    '''List of all the effects to be applied. These should all be
    instances of :class:`EffectBase`.

    effects is a :class:`ListProperty` and defaults to [].
    '''
    fbo_list = ListProperty([])
    '''(internal) list of all the fbos that are being used to apply
    the effects.

    fbo_list is a :class:`ListProperty` and defaults to [].
    '''
    _bound_effects = ListProperty([])
    '''(internal) list of effect classes that have been given an fbo to
    manage. This is necessary so that the fbo can be removed if the
    effect is no longer in use.

    _bound_effects is a :class:`ListProperty` and defaults to [].
    '''

    def __init__(self, **kwargs):
        # Make sure opengl context exists
        EventLoop.ensure_window()
        self.canvas = RenderContext(use_parent_projection=True,
                                    use_parent_modelview=True)
        # root fbo: the children are rendered into this, then its texture
        # is passed through the chain of effect fbos
        with self.canvas:
            self.fbo = Fbo(size=self.size)
        with self.fbo.before:
            PushMatrix()
        with self.fbo:
            ClearColor(0, 0, 0, 0)
            ClearBuffers()
            self._background_color = Color(*self.background_color)
            self.fbo_rectangle = Rectangle(size=self.size)
        with self.fbo.after:
            PopMatrix()
        super(EffectWidget, self).__init__(**kwargs)
        # uniforms (time/resolution) are refreshed every frame
        Clock.schedule_interval(self._update_glsl, 0)
        fbind = self.fast_bind
        fbo_setup = self.refresh_fbo_setup
        fbind('size', fbo_setup)
        fbind('effects', fbo_setup)
        fbind('background_color', self._refresh_background_color)
        self.refresh_fbo_setup()
        self._refresh_background_color()  # In case this was changed in kwargs

    def _refresh_background_color(self, *args):
        # keep the Color instruction in the root fbo in sync with the property
        self._background_color.rgba = self.background_color

    def _update_glsl(self, *largs):
        '''(internal) Passes new time and resolution uniform
        variables to the shader.
        '''
        time = Clock.get_boottime()
        resolution = [float(size) for size in self.size]
        self.canvas['time'] = time
        self.canvas['resolution'] = resolution
        # every effect fbo receives the same uniforms
        for fbo in self.fbo_list:
            fbo['time'] = time
            fbo['resolution'] = resolution

    def refresh_fbo_setup(self, *args):
        '''(internal) Creates and assigns one :class:`~kivy.graphics.Fbo`
        per effect, and makes sure all sizes etc. are correct and
        consistent.
        '''
        # Add/remove fbos until there is one per effect
        while len(self.fbo_list) < len(self.effects):
            with self.canvas:
                new_fbo = EffectFbo(size=self.size)
            with new_fbo:
                ClearColor(0, 0, 0, 0)
                ClearBuffers()
                Color(1, 1, 1, 1)
                new_fbo.texture_rectangle = Rectangle(size=self.size)
            new_fbo.texture_rectangle.size = self.size
            self.fbo_list.append(new_fbo)
        while len(self.fbo_list) > len(self.effects):
            old_fbo = self.fbo_list.pop()
            self.canvas.remove(old_fbo)
        # Remove fbos from unused effects
        for effect in self._bound_effects:
            if effect not in self.effects:
                effect.fbo = None
        self._bound_effects = self.effects
        # Do resizing etc.
        self.fbo.size = self.size
        self.fbo_rectangle.size = self.size
        for i in range(len(self.fbo_list)):
            self.fbo_list[i].size = self.size
            self.fbo_list[i].texture_rectangle.size = self.size
        # If there are no effects, just draw our main fbo
        if len(self.fbo_list) == 0:
            self.texture = self.fbo.texture
            return
        # chain the fbos: each one draws the previous fbo's output texture
        for i in range(1, len(self.fbo_list)):
            fbo = self.fbo_list[i]
            fbo.texture_rectangle.texture = self.fbo_list[i - 1].texture
        # Build effect shaders
        for effect, fbo in zip(self.effects, self.fbo_list):
            effect.fbo = fbo
        # first effect reads the children's render; output is the last fbo
        self.fbo_list[0].texture_rectangle.texture = self.fbo.texture
        self.texture = self.fbo_list[-1].texture

    def add_widget(self, widget):
        # Add the widget to our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).add_widget(widget)
        self.canvas = c

    def remove_widget(self, widget):
        # Remove the widget from our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).remove_widget(widget)
        self.canvas = c

    def clear_widgets(self, children=None):
        # Clear widgets from our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).clear_widgets(children)
        self.canvas = c
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from nova import cache_utils
from nova import context
from nova import exception
from nova.i18n import _
from nova.i18n import _LI
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
# Module-level logger.
LOG = logging.getLogger(__name__)
# NOTE(vish): cache mapping for one week
_CACHE_TIME = 7 * 24 * 60 * 60
# Lazily-created cache client shared by @memoize below; reset via
# reset_cache().
_CACHE = None
def memoize(func):
    """Decorator caching ``func(context, reqid)`` results in the shared
    cache client, keyed by function name and request id."""
    @functools.wraps(func)
    def memoizer(context, reqid):
        global _CACHE
        if not _CACHE:
            # create the cache client lazily on first use
            _CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME)
        cache_key = str("%s:%s" % (func.__name__, reqid))
        value = _CACHE.get(cache_key)
        if value is None:
            # cache miss: compute and remember the result
            value = func(context, reqid)
            _CACHE.set(cache_key, value)
        return value
    return memoizer
def reset_cache():
    """Drop the module-level cache client so the next memoized call
    recreates it."""
    global _CACHE
    _CACHE = None
def image_type(image_type):
    """Converts to a three letter image type.

    aki, kernel => aki
    ari, ramdisk => ari
    anything else => ami
    """
    aliases = {'kernel': 'aki', 'ramdisk': 'ari'}
    if image_type in aliases:
        return aliases[image_type]
    # already a recognized three-letter type: keep it; otherwise fall back
    return image_type if image_type in ('aki', 'ari') else 'ami'
def resource_type_from_id(context, resource_id):
    """Get resource type by ID

    Returns a string representation of the Amazon resource type, if known.
    Returns None on failure.

    :param context: context under which the method is called
    :param resource_id: resource_id to evaluate
    """
    prefix_to_type = {
        'i': 'instance',
        'r': 'reservation',
        'vol': 'volume',
        'snap': 'snapshot',
        'ami': 'image',
        'aki': 'image',
        'ari': 'image',
    }
    # the type marker is everything before the first '-'
    prefix = resource_id.split('-')[0]
    return prefix_to_type.get(prefix)
@memoize
def id_to_glance_id(context, image_id):
    """Convert an internal (db) id to a glance id.

    Results are cached via @memoize; the S3ImageMapping row must exist.
    """
    return objects.S3ImageMapping.get_by_id(context, image_id).uuid
@memoize
def glance_id_to_id(context, glance_id):
    """Convert a glance id to an internal (db) id.

    Creates a new S3ImageMapping row (and hence a new internal id) the
    first time a glance id is seen. Returns None for an empty glance_id.
    Results are cached via @memoize.
    """
    if not glance_id:
        return
    try:
        return objects.S3ImageMapping.get_by_uuid(context, glance_id).id
    except exception.NotFound:
        # unseen glance id: allocate a mapping row for it
        s3imap = objects.S3ImageMapping(context, uuid=glance_id)
        s3imap.create()
        return s3imap.id
def ec2_id_to_glance_id(context, ec2_id):
    """Convert an ec2 image id (e.g. ami-xxxxxxxx) to a glance image uuid."""
    image_id = ec2_id_to_id(ec2_id)
    return id_to_glance_id(context, image_id)
def glance_id_to_ec2_id(context, glance_id, image_type='ami'):
    """Convert a glance image uuid to an ec2 id (<image_type>-xxxxxxxx).

    Returns None when the glance id cannot be mapped.
    """
    image_id = glance_id_to_id(context, glance_id)
    if image_id is None:
        return
    return image_ec2_id(image_id, image_type=image_type)
def ec2_id_to_id(ec2_id):
    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)."""
    # everything after the last '-' is the hex-encoded numeric id
    hex_part = ec2_id.rsplit('-', 1)[-1]
    try:
        return int(hex_part, 16)
    except ValueError:
        raise exception.InvalidEc2Id(ec2_id=ec2_id)
def image_ec2_id(image_id, image_type='ami'):
    """Returns image ec2_id using id and three letter type."""
    # e.g. image_type 'ami' -> template 'ami-%08x'
    return id_to_ec2_id(image_id, template=image_type + '-%08x')
def get_ip_info_for_instance_from_nw_info(nw_info):
    """Build a dict of the instance's IP addresses from its network info.

    Returns keys 'fixed_ips' (IPv4 fixed), 'fixed_ip6s' (IPv6 fixed) and
    'floating_ips', each a list of address strings.
    """
    if not isinstance(nw_info, network_model.NetworkInfo):
        # accept the raw (serialized) info-cache form as well
        nw_info = network_model.NetworkInfo.hydrate(nw_info)
    ip_info = {}
    fixed_ips = nw_info.fixed_ips()
    ip_info['fixed_ips'] = [ip['address'] for ip in fixed_ips
                            if ip['version'] == 4]
    ip_info['fixed_ip6s'] = [ip['address'] for ip in fixed_ips
                             if ip['version'] == 6]
    ip_info['floating_ips'] = [ip['address'] for ip in nw_info.floating_ips()]
    return ip_info
def get_ip_info_for_instance(context, instance):
    """Return a dictionary of IP information for an instance."""
    if isinstance(instance, obj_base.NovaObject):
        nw_info = instance.info_cache.network_info
    else:
        # FIXME(comstud): Temporary as we transition to objects.
        info_cache = instance.info_cache or {}
        nw_info = info_cache.get('network_info')
    # Make sure empty response is turned into the model
    if not nw_info:
        nw_info = []
    return get_ip_info_for_instance_from_nw_info(nw_info)
def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])."""
    numeric_id = int(instance_id)
    return template % numeric_id
def id_to_ec2_inst_id(instance_id):
    """Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
    if instance_id is None:
        return None
    elif uuidutils.is_uuid_like(instance_id):
        # map the uuid to a stable internal integer id first
        ctxt = context.get_admin_context()
        int_id = get_int_id_from_instance_uuid(ctxt, instance_id)
        return id_to_ec2_id(int_id)
    else:
        # already an integer id
        return id_to_ec2_id(instance_id)
def ec2_inst_id_to_uuid(context, ec2_id):
    """Convert an instance id to uuid."""
    int_id = ec2_id_to_id(ec2_id)
    return get_instance_uuid_from_int_id(context, int_id)
@memoize
def get_instance_uuid_from_int_id(context, int_id):
    """Look up the instance uuid for an internal integer id (cached)."""
    imap = objects.EC2InstanceMapping.get_by_id(context, int_id)
    return imap.uuid
def id_to_ec2_snap_id(snapshot_id):
    """Get or create an ec2 snapshot ID (snap-[base 16 number]) from uuid."""
    if uuidutils.is_uuid_like(snapshot_id):
        ctxt = context.get_admin_context()
        int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id)
        return id_to_ec2_id(int_id, 'snap-%08x')
    else:
        # already an integer id
        return id_to_ec2_id(snapshot_id, 'snap-%08x')
def id_to_ec2_vol_id(volume_id):
    """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
    if uuidutils.is_uuid_like(volume_id):
        ctxt = context.get_admin_context()
        int_id = get_int_id_from_volume_uuid(ctxt, volume_id)
        return id_to_ec2_id(int_id, 'vol-%08x')
    else:
        # already an integer id
        return id_to_ec2_id(volume_id, 'vol-%08x')
def ec2_vol_id_to_uuid(ec2_id):
    """Get the corresponding UUID for the given ec2-id."""
    ctxt = context.get_admin_context()
    # NOTE(jgriffith) first strip prefix to get just the numeric
    int_id = ec2_id_to_id(ec2_id)
    return get_volume_uuid_from_int_id(ctxt, int_id)
_ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$')
def status_to_ec2_attach_status(volume):
    """Get the corresponding EC2 attachment state.

    According to EC2 API, the valid attachment status in response is:
    attaching | attached | detaching | detached
    """
    volume_status = volume.get('status')
    attach_status = volume.get('attach_status')
    # transitional volume states take precedence over the attach status
    if volume_status in ('attaching', 'detaching'):
        return volume_status
    if attach_status in ('attached', 'detached'):
        return attach_status
    msg = _("Unacceptable attach status:%s for ec2 API.") % attach_status
    raise exception.Invalid(msg)
def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired.

    :param request: dict-like EC2 request parameters; may contain
        'Timestamp' or 'Expires' (mutually exclusive)
    :param expires: allowed age in seconds for 'Timestamp'-based requests
    :returns: True if expired (or if the timestamp is unparseable)
    :raises exception.InvalidRequest: if both Timestamp and Expires given
    """
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')

    def parse_strtime(strtime):
        # pick the format based on whether fractional seconds are present
        if _ms_time_regex.match(strtime):
            # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
            time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
        else:
            time_format = "%Y-%m-%dT%H:%M:%SZ"
        return timeutils.parse_strtime(strtime, time_format)

    try:
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = parse_strtime(expiry_time)
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = parse_strtime(timestamp)
            # Check if the difference between the timestamp in the request
            # and the time on our servers is larger than 5 minutes, the
            # request is too old (or too new).
            if query_time and expires:
                return timeutils.is_older_than(query_time, expires) or \
                    timeutils.is_newer_than(query_time, expires)
        return False
    except ValueError:
        LOG.info(_LI("Timestamp is invalid."))
        return True
@memoize
def get_int_id_from_instance_uuid(context, instance_uuid):
    """Get (or create) the internal integer id for an instance uuid.

    Returns None for a None uuid. Results are cached via @memoize.
    """
    if instance_uuid is None:
        return
    try:
        imap = objects.EC2InstanceMapping.get_by_uuid(context, instance_uuid)
        return imap.id
    except exception.NotFound:
        # first time this uuid is seen: allocate a mapping row
        imap = objects.EC2InstanceMapping(context)
        imap.uuid = instance_uuid
        imap.create()
        return imap.id
@memoize
def get_int_id_from_volume_uuid(context, volume_uuid):
    """Return the integer EC2 id mapped to a volume UUID.

    The mapping is created on first use; a None UUID yields None.
    """
    if volume_uuid is None:
        return
    try:
        return objects.EC2VolumeMapping.get_by_uuid(context, volume_uuid).id
    except exception.NotFound:
        # No mapping yet -- create one for this UUID.
        mapping = objects.EC2VolumeMapping(context)
        mapping.uuid = volume_uuid
        mapping.create()
        return mapping.id
@memoize
def get_volume_uuid_from_int_id(context, int_id):
    """Look up the volume UUID for an integer EC2 volume id."""
    return objects.EC2VolumeMapping.get_by_id(context, int_id).uuid
def ec2_snap_id_to_uuid(ec2_id):
    """Get the corresponding UUID for the given ec2-id."""
    admin_context = context.get_admin_context()
    # Strip the prefix first to obtain the numeric part of the ec2 id.
    numeric_id = ec2_id_to_id(ec2_id)
    return get_snapshot_uuid_from_int_id(admin_context, numeric_id)
@memoize
def get_int_id_from_snapshot_uuid(context, snapshot_uuid):
    """Return the integer EC2 id mapped to a snapshot UUID.

    The mapping is created on first use; a None UUID yields None.
    """
    if snapshot_uuid is None:
        return
    try:
        return objects.EC2SnapshotMapping.get_by_uuid(
            context, snapshot_uuid).id
    except exception.NotFound:
        # No mapping yet -- create one for this UUID.
        mapping = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid)
        mapping.create()
        return mapping.id
@memoize
def get_snapshot_uuid_from_int_id(context, int_id):
    """Look up the snapshot UUID for an integer EC2 snapshot id."""
    return objects.EC2SnapshotMapping.get_by_id(context, int_id).uuid
# A capital letter that starts a new camel-case word: either preceded by
# a lowercase letter, or followed by something other than a capital/end.
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')


def camelcase_to_underscore(str):
    """Convert a CamelCase identifier to snake_case."""
    underscored = _c2u.sub(r'_\1', str)
    return underscored.lower().strip('_')
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
def _negative_zero(value):
epsilon = 1e-7
return 0 if abs(value) < epsilon else value
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]:
try:
if lowered_value.startswith((prefix, "-" + prefix)):
return int(lowered_value, base)
except ValueError:
pass
try:
return _negative_zero(float(value))
except ValueError:
return value
def dict_from_dotted_str(items):
    """parse multi dot-separated argument into dict.
    EBS boot uses multi dot-separated arguments like
    BlockDeviceMapping.1.DeviceName=snap-id
    Convert the above into
    {'block_device_mapping': {'1': {'device_name': snap-id}}}
    """
    args = {}
    for key, value in items:
        parts = key.split(".")
        # Top-level key is normalized to snake_case.
        key = str(camelcase_to_underscore(parts[0]))
        if isinstance(value, six.string_types):
            # NOTE(vish): Automatically convert strings back
            #             into their respective values
            value = _try_convert(value)

        if len(parts) > 1:
            # Walk (and create on demand) the nested dicts for the
            # intermediate path parts; 'd' is re-bound to the innermost
            # dict at each step so the final assignment lands at depth.
            d = args.get(key, {})
            args[key] = d
            for k in parts[1:-1]:
                k = camelcase_to_underscore(k)
                v = d.get(k, {})
                d[k] = v
                d = v
            d[camelcase_to_underscore(parts[-1])] = value
        else:
            args[key] = value

    return args
def search_opts_from_filters(filters):
    """Build a search-options dict from EC2 API filter structures.

    Each filter's first value ('1') is kept when truthy; dashes in the
    filter name are replaced by underscores.
    """
    search_opts = {}
    if not filters:
        return search_opts
    for flt in filters:
        first_value = flt['value']['1']
        if first_value:
            search_opts[flt['name'].replace('-', '_')] = first_value
    return search_opts
def regex_from_ec2_regex(ec2_re):
    """Converts an EC2-style regex to a python regex.

    Approach is based on python fnmatch: '*' and '?' are wildcards, a
    backslash only escapes those two characters, and every other
    character matches literally.
    """
    iter_ec2_re = iter(ec2_re)

    py_re = ''
    for char in iter_ec2_re:
        if char == '*':
            py_re += '.*'
        elif char == '?':
            py_re += '.'
        elif char == '\\':
            # A backslash escapes a following wildcard; any other
            # following character keeps a literal backslash before it.
            try:
                next_char = next(iter_ec2_re)
            except StopIteration:
                next_char = ''
            if next_char == '*' or next_char == '?':
                py_re += '[%s]' % next_char
            else:
                py_re += '\\\\' + next_char
        else:
            py_re += re.escape(char)
    # The DOTALL flag must lead the pattern: Python 3.11+ rejects global
    # inline flags that are not at the start of the expression.
    return r'(?s)\A%s\Z' % py_re
| |
''' Helper functions for geometrical operations.
'''
from __future__ import division
import logging
import tables as tb
import numpy as np
def get_plane_normal(direction_vector_1, direction_vector_2):
    ''' Normal vector of a plane.

    The plane is defined by two non-parallel direction vectors lying
    within the plane.

    Parameters
    ----------
    direction_vector_1 : array
        Array with x, y and z.
    direction_vector_2 : array
        Array with x, y and z.

    Returns
    -------
    Array with x, y and z.
    '''
    normal = np.cross(direction_vector_1, direction_vector_2)
    return normal
def get_line_intersections_with_plane(line_origins, line_directions,
                                      position_plane, normal_plane):
    ''' Calculates the intersection of n lines with one plane.

    If there is no intersection point (the line is parallel to the plane
    or lies within the plane) the intersection point is set to nan.

    Notes
    -----
    Further information:
    http://stackoverflow.com/questions/4938332/line-plane-intersection-based-on-points

    Parameters
    ----------
    line_origins : array
        A point (x, y and z) on the line for each of the n lines.
    line_directions : array
        The direction vector of the line for n lines.
    position_plane : array
        A array (x, y and z) to the plane.
    normal_plane : array
        The normal vector (x, y and z) of the plane.

    Returns
    -------
    Array with shape (n, 3) with the intersection point.
    '''
    # Vector from each line origin to the plane anchor point
    offsets_to_plane = position_plane[np.newaxis, :] - line_origins
    numerators = np.dot(normal_plane, offsets_to_plane.T)
    # atleast_1d keeps the special n = 1 case boolean-indexable
    denominators = np.atleast_1d(np.dot(normal_plane, line_directions.T))
    # nan marks lines without a unique intersection
    line_scales = np.full_like(numerators, fill_value=np.nan)
    if np.any(denominators == 0):
        logging.warning('Some line plane intersection could not be calculated')
    # Solve for the scalar parameter of each line, avoiding division by 0
    solvable = denominators != 0
    line_scales[solvable] = numerators[solvable] / denominators[solvable]
    return line_origins + line_directions * line_scales[:, np.newaxis]
def cartesian_to_spherical(x, y, z):
    ''' Transformation from cartesian to spherical coordinates.

    Convention: r = 0 --> phi = theta = 0

    Parameters
    ----------
    x, y, z : array
        Position in cartesian space.

    Returns
    -------
    Spherical coordinates phi, theta and r.
    '''
    r = np.sqrt(x * x + y * y + z * z)
    # phi = 0 where x = 0 and theta = 0 where r = 0; avoids division by
    # zero (https://en.wikipedia.org/wiki/Atan2)
    phi = np.zeros_like(r)
    theta = np.zeros_like(r)
    x_nonzero = x != 0
    phi[x_nonzero] = np.arctan2(y[x_nonzero], x[x_nonzero])
    # Map negative angles into [0 .. 2 pi[
    phi[phi < 0] += 2. * np.pi
    r_nonzero = r != 0
    theta[r_nonzero] = np.arccos(z[r_nonzero] / r[r_nonzero])
    return phi, theta, r
def spherical_to_cartesian(phi, theta, r):
    ''' Transformation from spherical to cartesian coordinates.

    The input ranges are checked: r >= 0, theta in [0, Pi[ and
    phi in [0, 2*Pi[.

    Parameters
    ----------
    phi, theta, r : array
        Position in spherical space.

    Returns
    -------
    Cartesian coordinates x, y and z.
    '''
    if np.any(r < 0):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because r < 0')
    if np.any(theta < 0) or np.any(theta >= np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because theta exceeds [0, Pi[')
    if np.any(phi < 0) or np.any(phi >= 2 * np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because phi exceeds [0, 2*Pi[')
    sin_theta = np.sin(theta)
    return (r * np.cos(phi) * sin_theta,
            r * np.sin(phi) * sin_theta,
            r * np.cos(theta))
def rotation_matrix_x(alpha):
    ''' Rotation matrix around the x axis by an angle alpha.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    alpha : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_a, sin_a = np.cos(alpha), np.sin(alpha)
    return np.array([[1, 0, 0],
                     [0, cos_a, sin_a],
                     [0, -sin_a, cos_a]])
def rotation_matrix_y(beta):
    ''' Rotation matrix around the y axis by an angle beta.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    beta : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_b, sin_b = np.cos(beta), np.sin(beta)
    return np.array([[cos_b, 0, -sin_b],
                     [0, 1, 0],
                     [sin_b, 0, cos_b]])
def rotation_matrix_z(gamma):
    ''' Rotation matrix around the z axis by an angle gamma.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    gamma : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_g, sin_g = np.cos(gamma), np.sin(gamma)
    return np.array([[cos_g, sin_g, 0],
                     [-sin_g, cos_g, 0],
                     [0, 0, 1]])
def rotation_matrix(alpha, beta, gamma):
    ''' Combined rotation matrix around the three cartesian axes.

    Note
    ----
    In a right-handed system. The rotation is done around x then y then z.
    Remember:
    - Transform to the local coordinate system before applying rotations
    - Rotations are associative but not commutative

    Usage
    -----
    A rotation by (alpha, beta, gamma) of the vector (x, y, z) in the
    local coordinate system can be done by:
    np.dot(rotation_matrix(alpha, beta, gamma), np.array([x, y, z]))

    Parameters
    ----------
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (3, 3).
    '''
    yz_rotation = np.dot(rotation_matrix_y(beta=beta),
                         rotation_matrix_z(gamma=gamma))
    return np.dot(rotation_matrix_x(alpha=alpha), yz_rotation)
def translation_matrix(x, y, z):
    ''' Homogeneous translation matrix for a translation in x, y, z.

    Note
    ----
    Remember: Translations are associative and commutative

    Usage
    -----
    A translation of a vector (x, y, z) by dx, dy, dz can be done by:
    np.dot(translation_matrix(dx, dy, dz), np.array([x, y, z, 1]))

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Identity with the translation in the last column
    matrix = np.eye(4, 4, 0)
    matrix[:3, 3] = np.array([x, y, z])
    return matrix
def global_to_local_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies a translation and rotation.

    Translation is T=(-x, -y, -z) to the local coordinate system followed
    by a rotation = R(alpha, beta, gamma).T in the local coordinate system.

    Note
    ----
    - This function is the inverse of
      local_to_global_transformation_matrix()
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Inverse (= transposed) rotation embedded into a 4 x 4 matrix
    rotation = np.eye(4, 4, 0)
    rotation[:3, :3] = rotation_matrix(alpha=alpha, beta=beta, gamma=gamma).T
    # Translate into the local system first, then rotate
    translation = translation_matrix(x=-x, y=-y, z=-z)
    return np.dot(rotation, translation)
def local_to_global_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies an inverse translation and rotation.

    Inverse rotation in the local coordinate system followed by an inverse
    translation by x, y, z to the global coordinate system.

    Note
    ----
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Rotation embedded into a 4 x 4 matrix
    rotation = np.eye(4, 4, 0)
    rotation[:3, :3] = rotation_matrix(alpha=alpha, beta=beta, gamma=gamma)
    translation = translation_matrix(x=x, y=y, z=z)
    # Rotate first, then translate back to the global system
    return np.dot(translation, rotation)
def apply_transformation_matrix(x, y, z, transformation_matrix):
    ''' Applies a homogeneous transformation matrix (4 x 4) to coordinates.

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    transformation_matrix : array
        Homogeneous transformation with shape (4, 4).

    Returns
    -------
    Array with transformed coordinates.
    '''
    # Homogeneous coordinates: append a 4th component of 1 per point
    homogeneous = np.column_stack((x, y, z, np.ones_like(x))).T
    # Transform, then drop the homogeneous component again
    transformed = np.dot(transformation_matrix, homogeneous).T[:, :-1]
    return transformed[:, 0], transformed[:, 1], transformed[:, 2]
def apply_rotation_matrix(x, y, z, rotation_matrix):
    ''' Applies a rotation matrix (3 x 3) to coordinate arrays.

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    rotation_matrix : array
        Rotation with shape (3, 3).

    Returns
    -------
    Array with rotated coordinates.
    '''
    stacked = np.column_stack((x, y, z)).T
    rotated = np.dot(rotation_matrix, stacked).T
    return rotated[:, 0], rotated[:, 1], rotated[:, 2]
def apply_alignment(hits_x, hits_y, hits_z, dut_index,
                    hits_xerr=None, hits_yerr=None, hits_zerr=None,
                    alignment=None, prealignment=None, inverse=False):
    ''' Takes hits with errors and applies a transformation according to the alignment data.

    If alignment data with rotations and translations are given the hits are
    transformed according to the rotations and translations.
    If pre-alignment data with offsets and slopes are given the hits are
    transformed according to the slopes and offsets.
    If both are given alignment data is taken.
    The transformation can be inverted.

    NOTE(review): contrary to the sentence above, the code below raises
    a RuntimeError when BOTH alignment and prealignment are given --
    confirm which behavior is intended.

    Parameters
    ---------
    hits_x, hits_y, hits_z : array
        Array(s) with hit positions.
    dut_index : int
        Needed to select the correct alignment info.
    hits_xerr, hits_yerr, hits_zerr : array
        Array(s) with hit errors.
    alignment : array
        Alignment information with rotations and translations.
    prealignment : array
        Pre-alignment information with offsets and slopes.
    inverse : bool
        Apply inverse transformation if True.

    Returns
    -------
    hits_x, hits_y, hits_z : array
        Array with transformed hit positions.
    '''
    # Exactly one of alignment / prealignment must be provided.
    if (alignment is None and prealignment is None) or \
            (alignment is not None and prealignment is not None):
        raise RuntimeError('Neither pre-alignment or alignment data given.')

    if alignment is not None:
        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using alignment data')
            transformation_matrix = global_to_local_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
            # Rotation-only matrix (zero translation) for the hit errors
            rotation_matrix = global_to_local_transformation_matrix(
                x=0.,
                y=0.,
                z=0.,
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using alignment data')
            transformation_matrix = local_to_global_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
            # Rotation-only matrix (zero translation) for the hit errors
            rotation_matrix = local_to_global_transformation_matrix(
                x=0.,
                y=0.,
                z=0.,
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])

        hits_x, hits_y, hits_z = apply_transformation_matrix(
            x=hits_x,
            y=hits_y,
            z=hits_z,
            transformation_matrix=transformation_matrix)
        if hits_xerr is not None and hits_yerr is not None and hits_zerr is not None:
            # Errors need only rotation but no translation
            hits_xerr, hits_yerr, hits_zerr = apply_transformation_matrix(
                x=hits_xerr,
                y=hits_yerr,
                z=hits_zerr,
                transformation_matrix=rotation_matrix)
    else:
        # Pre-alignment: per-axis linear correction (slope and offset)
        c0_column = prealignment[dut_index]['column_c0']
        c1_column = prealignment[dut_index]['column_c1']
        c0_row = prealignment[dut_index]['row_c0']
        c1_row = prealignment[dut_index]['row_c1']
        z = prealignment[dut_index]['z']

        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using pre-alignment data')
            hits_x = (hits_x - c0_column) / c1_column
            hits_y = (hits_y - c0_row) / c1_row
            # NOTE(review): -= mutates the caller's hits_z array in place
            # when it is a numpy array -- confirm this is intended.
            hits_z -= z
            if hits_xerr is not None and hits_yerr is not None and hits_zerr is not None:
                hits_xerr = hits_xerr / c1_column
                hits_yerr = hits_yerr / c1_row
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using pre-alignment data')
            hits_x = c1_column * hits_x + c0_column
            hits_y = c1_row * hits_y + c0_row
            hits_z += z
            if hits_xerr is not None and hits_yerr is not None and hits_zerr is not None:
                hits_xerr = c1_column * hits_xerr
                hits_yerr = c1_row * hits_yerr

    # Return errors only when all three error arrays were provided
    if hits_xerr is not None and hits_yerr is not None and hits_zerr is not None:
        return hits_x, hits_y, hits_z, hits_xerr, hits_yerr, hits_zerr
    return hits_x, hits_y, hits_z
def merge_alignment_parameters(old_alignment, new_alignment, mode='relative',
                               select_duts=None):
    ''' Merge new alignment data into existing alignment data.

    Parameters
    ----------
    old_alignment, new_alignment : recarray
        Alignment arrays with per-DUT translation and rotation fields.
    mode : string
        'absolute' replaces the selected entries with the new values;
        'relative' adds the new values to the old ones and re-centers
        the selected DUTs around 0 (except translation_z).
    select_duts : iterable
        Indices of the DUTs to change; all DUTs if None.

    Returns
    -------
    recarray with the merged alignment parameters.

    Raises
    ------
    RuntimeError
        If mode is neither 'absolute' nor 'relative'.
    '''
    if select_duts is None:  # Select all DUTs
        # NOTE: the builtin bool is used; the np.bool alias was removed
        # in NumPy 1.24.
        select_duts = np.ones(old_alignment.shape[0], dtype=bool)
    else:
        select = np.zeros(old_alignment.shape[0], dtype=bool)
        select[np.array(select_duts)] = True
        select_duts = select

    # Do not change input parameters
    align_pars = old_alignment.copy()

    if mode == 'absolute':
        logging.info('Set alignment')
        align_pars[select_duts] = new_alignment[select_duts]
        return align_pars
    elif mode == 'relative':
        logging.info('Merge new alignment with old alignment')
        for field in ('translation_x', 'translation_y', 'translation_z',
                      'alpha', 'beta', 'gamma'):
            align_pars[field][select_duts] += new_alignment[field][select_duts]

        # TODO: Is this always a good idea? Usually works, but what if one
        # heavily tilted device?
        # All alignments are relative, thus center them around 0 by
        # subtracting the mean (exception: z position)
        if np.count_nonzero(select_duts) > 1:
            for field in ('alpha', 'beta', 'gamma',
                          'translation_x', 'translation_y'):
                align_pars[field][select_duts] -= np.mean(
                    align_pars[field][select_duts])
        return align_pars
    else:
        # %-interpolate the message; passing it as a second argument to
        # RuntimeError would leave the placeholder unformatted.
        raise RuntimeError('Unknown mode %s' % str(mode))
def store_alignment_parameters(alignment_file, alignment_parameters,
                               mode='absolute', select_duts=None):
    ''' Stores alignment parameters (rotations, translations) into file.

    Absolute (overwriting) and relative (add angles, translations) supported.

    Parameters
    ---------
    alignment_file : string
        The pytables file name containing the alignment.
    alignment_parameters : recarray
        An array with the alignment values.
    mode : string
        Select relative or absolute alignment. The strings 'relative' and 'absolute' are supported.
    select_duts : iterable
        In relative mode only change specified DUTs.
    '''
    # Open file with alignment data
    with tb.open_file(alignment_file, mode="r+") as out_file_h5:
        try:
            # First attempt assumes no alignment table exists yet; the
            # NodeError below handles the already-existing case.
            align_tab = out_file_h5.create_table(out_file_h5.root, name='Alignment',
                                                 title='Table containing the '
                                                       'alignment geometry parameters '
                                                       '(translations and rotations)',
                                                 description=alignment_parameters.dtype,
                                                 filters=tb.Filters(
                                                     complib='blosc',
                                                     complevel=5,
                                                     fletcher32=False))
            align_tab.append(alignment_parameters)
        except tb.NodeError:
            # Table exists: merge the new values with the stored ones
            # according to mode, then replace the node.
            alignment_parameters = merge_alignment_parameters(
                old_alignment=out_file_h5.root.Alignment[:],
                new_alignment=alignment_parameters,
                mode=mode,
                select_duts=select_duts)

            logging.info('Overwrite existing alignment!')
            # Remove old node
            out_file_h5.root.Alignment._f_remove()
            align_tab = out_file_h5.create_table(out_file_h5.root, name='Alignment',
                                                 title='Table containing the '
                                                       'alignment geometry parameters '
                                                       '(translations and rotations)',
                                                 description=alignment_parameters.dtype,
                                                 filters=tb.Filters(
                                                     complib='blosc',
                                                     complevel=5,
                                                     fletcher32=False))
            align_tab.append(alignment_parameters)

    # Log a human-readable summary of the stored parameters, one DUT per line.
    string = "\n".join(['DUT%d: alpha=%1.4f, beta=%1.4f, gamma=%1.4f Rad, '
                        'x/y/z=%d/%d/%d um' % (dut_values['DUT'],
                                               dut_values['alpha'],
                                               dut_values['beta'],
                                               dut_values['gamma'],
                                               dut_values['translation_x'],
                                               dut_values['translation_y'],
                                               dut_values['translation_z'])
                        for dut_values in alignment_parameters])
    logging.info('Set alignment parameters to:\n%s' % string)
| |
import unittest
import pickle, copy
import keyword
import re
import sys
import gc
import weakref
from recordclass import make_dataclass, datatype, DataclassStorage
from recordclass import dataobject, datatuple
from recordclass import asdict, clsconfig, enable_gc
# Probe CPython object layout sizes once at import time.  sys.getsizeof
# includes the GC header when present, while __sizeof__ does not, so the
# differences below isolate the individual layout components.
_empty_tuple = ()
_one_tuple = (1,)
_plain_object = object()

headgc_size = sys.getsizeof(_empty_tuple) - _empty_tuple.__sizeof__()
ptr_size = sys.getsizeof(_one_tuple) - sys.getsizeof(_empty_tuple)
pyobject_size = _plain_object.__sizeof__()
pyvarobject_size = _empty_tuple.__sizeof__()

del _empty_tuple, _one_tuple, _plain_object
# Fixture classes for the pickle round-trip tests below.  They must be
# defined at module level so that pickle can locate them by name.


class TestPickle2(dataobject):
    __fields__ = 'x', 'y', 'z'


class TestPickle3(dataobject):
    # The extra '__dict__' field allows arbitrary attributes, used to
    # check that instance dicts survive pickling.
    __fields__ = 'x', 'y', 'z', '__dict__'

# class TestPickleVar2(datatuple):
#     __fields__ = 'x', 'y', 'z'

# class TestPickleVar3(datatuple):
#     __fields__ = 'x', 'y', 'z', '__dict__'


class TestPickle22(dataobject):
    # Same shape as TestPickle2 but declared via annotations.
    x: int
    y: int
    z: int


class TestPickle33(dataobject):
    # Same shape as TestPickle3 but declared via annotations.
    x: int
    y: int
    z: int
    __dict__: dict
class DataObjectTest3(unittest.TestCase):
    """Tests for recordclass.dataobject subclasses: construction via
    __fields__ and/or annotations, defaults, subclassing, the optional
    sequence/mapping/iterable protocols, __dict__/__weakref__ support,
    GC enabling, pickling, copying and signature introspection."""

    def test_datatype_tp(self):
        class A(dataobject):
            __fields__ = 'x', 'y'
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        self.assertEqual(asdict(a), {'x': 1, 'y': 2})
        self.assertEqual(A.__annotations__, {'x': int, 'y': int})
        # self.assertEqual(sys.getsizeof(a), 32)
        # Without a '__weakref__'/'__dict__' field neither weak
        # references nor ad-hoc attributes are possible.
        with self.assertRaises(TypeError):
            weakref.ref(a)
        with self.assertRaises(AttributeError):
            a.__dict__
        with self.assertRaises(AttributeError):
            a.z = 3
        with self.assertRaises(AttributeError):
            a.z
        a = None

    def test_datatype2_tp(self):
        class A(dataobject):
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        self.assertEqual(asdict(a), {'x': 1, 'y': 2})
        self.assertEqual(A.__annotations__, {'x': int, 'y': int})
        self.assertEqual(A.__fields__, ('x', 'y'))
        # self.assertEqual(sys.getsizeof(a), 32)
        with self.assertRaises(TypeError):
            weakref.ref(a)
        with self.assertRaises(AttributeError):
            a.__dict__
        with self.assertRaises(AttributeError):
            a.z = 3
        with self.assertRaises(AttributeError):
            a.z
        a = None

    def test_datatype3_tp(self):
        # Methods must not be picked up as fields.
        class A(dataobject):
            x: int
            y: int

            def dummy(self):
                pass

        self.assertEqual(A.__fields__, ('x', 'y'))

    def test_datatype4_tp(self):
        @clsconfig(sequence=True)
        class A(dataobject):
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)

    def test_datatype5_tp(self):
        @clsconfig(mapping=True)
        class A(dataobject):
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a['x'], 1)
        self.assertEqual(a['y'], 2)

    def test_datatype6_tp(self):
        # Sequence and mapping access can be enabled at the same time.
        @clsconfig(sequence=True, mapping=True)
        class A(dataobject):
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(A.__weakrefoffset__, 0)
        self.assertEqual(A.__dictoffset__, 0)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
        self.assertEqual(a['x'], 1)
        self.assertEqual(a['y'], 2)

    def test_datatype7_tp(self):
        class A(dataobject):
            __fields__ = 'x', 'y'

        a = A(1, 2)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        # self.assertEqual(sys.getsizeof(a), 32)
        with self.assertRaises(TypeError):
            weakref.ref(a)
        with self.assertRaises(AttributeError):
            a.__dict__
        with self.assertRaises(AttributeError):
            a.z = 3
        with self.assertRaises(AttributeError):
            a.z
        a = None

    def test_datatype_dict_tp(self):
        # '__dict__' and '__weakref__' pseudo-fields enable ad-hoc
        # attributes and weak references.
        class A(dataobject):
            __fields__ = 'x', 'y', '__dict__', '__weakref__'
            x: int
            y: int

        a = A(1, 2)
        self.assertEqual(repr(a), "A(x=1, y=2)")
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        self.assertEqual(asdict(a), {'x': 1, 'y': 2})
        self.assertEqual(A.__annotations__, {'x': int, 'y': int})
        # self.assertEqual(sys.getsizeof(a), 48)
        self.assertNotEqual(A.__dictoffset__, 0)
        self.assertNotEqual(A.__weakrefoffset__, 0)
        weakref.ref(a)
        self.assertEqual(a.__dict__, {})
        a.z = 3
        self.assertEqual(a.z, a.__dict__['z'])
        # a = None

    def test_subclass_tp(self):
        class A(dataobject):
            x: int
            y: int

        class B(A):
            pass

        self.assertEqual(type(A), type(B))
        self.assertEqual(B.__dictoffset__, 0)
        self.assertEqual(B.__weakrefoffset__, 0)
        b = B(1, 2)
        self.assertEqual(repr(b), "B(x=1, y=2)")
        self.assertEqual(b.x, 1)
        self.assertEqual(b.y, 2)
        self.assertEqual(asdict(b), {'x': 1, 'y': 2})
        self.assertEqual(B.__annotations__, {'x': int, 'y': int})
        # self.assertEqual(sys.getsizeof(a), 32)
        self.assertEqual(A.__basicsize__, B.__basicsize__)
        with self.assertRaises(TypeError):
            weakref.ref(b)
        with self.assertRaises(AttributeError):
            b.__dict__
        # a = None

    def test_subclass2_tp(self):
        # Fields of a subclass are appended after the base class fields.
        class A(dataobject):
            x: int
            y: int

        class B(A):
            z: int

        class C(B):
            pass

        self.assertEqual(type(A), type(B))
        self.assertEqual(type(C), type(B))
        self.assertEqual(C.__dictoffset__, 0)
        self.assertEqual(C.__weakrefoffset__, 0)
        c = C(1, 2, 3)
        self.assertEqual(repr(c), "C(x=1, y=2, z=3)")
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 2)
        self.assertEqual(c.z, 3)
        self.assertEqual(asdict(c), {'x': 1, 'y': 2, 'z': 3})
        self.assertEqual(C.__annotations__, {'x': int, 'y': int, 'z': int})
        # self.assertEqual(sys.getsizeof(c), 40)
        with self.assertRaises(TypeError):
            weakref.ref(c)
        with self.assertRaises(AttributeError):
            c.__dict__

    def test_subclass3_tp(self):
        # Mixing in a plain class keeps the dataobject layout.
        class A(dataobject):
            x: int
            y: int

        class B:
            def norm_1(self):
                return abs(self.x) + abs(self.y)

        class C(A, B):
            pass

        self.assertEqual(type(C), type(A))
        self.assertEqual(C.__dictoffset__, 0)
        self.assertEqual(C.__weakrefoffset__, 0)
        c = C(1, 2)
        self.assertEqual(repr(c), "C(x=1, y=2)")
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 2)
        self.assertEqual(c.norm_1(), 3)
        self.assertEqual(asdict(c), {'x': 1, 'y': 2})
        self.assertEqual(C.__annotations__, {'x': int, 'y': int})
        with self.assertRaises(TypeError):
            weakref.ref(c)
        with self.assertRaises(AttributeError):
            c.__dict__

    def test_subclass4_tp(self):
        class A(dataobject):
            x: int
            y: int

        class B(A):
            z: int

        class N:
            def norm_1(self):
                return abs(self.x) + abs(self.y) + abs(self.z)

        class C(B, N):
            pass

        self.assertEqual(type(A), type(B))
        self.assertEqual(type(C), type(B))
        self.assertEqual(C.__dictoffset__, 0)
        self.assertEqual(C.__weakrefoffset__, 0)
        c = C(1, 2, 3)
        self.assertEqual(repr(c), "C(x=1, y=2, z=3)")
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 2)
        self.assertEqual(c.z, 3)
        self.assertEqual(c.norm_1(), 6)
        self.assertEqual(asdict(c), {'x': 1, 'y': 2, 'z': 3})
        self.assertEqual(C.__annotations__, {'x': int, 'y': int, 'z': int})
        # self.assertEqual(sys.getsizeof(c), 40)
        with self.assertRaises(TypeError):
            weakref.ref(c)
        with self.assertRaises(AttributeError):
            c.__dict__

    def test_defaults_tp(self):
        # Defaults declared via annotation assignments.
        class A(dataobject):
            x: int = 100
            y: int = 200
            z: int = 300

        a1 = A()
        self.assertEqual(repr(a1), "A(x=100, y=200, z=300)")
        self.assertEqual(a1.x, 100)
        self.assertEqual(a1.y, 200)
        self.assertEqual(a1.z, 300)
        a2 = A(1)
        self.assertEqual(repr(a2), "A(x=1, y=200, z=300)")
        self.assertEqual(a2.x, 1)
        self.assertEqual(a2.y, 200)
        self.assertEqual(a2.z, 300)
        a3 = A(1, 2)
        self.assertEqual(repr(a3), "A(x=1, y=2, z=300)")
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, 2)
        self.assertEqual(a3.z, 300)

    def test_subclass_defaults_tp(self):
        # A subclass may add a default for an inherited field.
        class A(dataobject):
            x: int
            y: int

        class B(A):
            x: int = 0

        b = B(1)
        self.assertEqual(b.x, 0)
        self.assertEqual(b.y, 1)
        self.assertEqual(repr(b), "B(x=0, y=1)")

    def test_keyword_args_tp(self):
        class A(dataobject):
            x: int
            y: int
            z: int

        class B(dataobject):
            x: int
            y: int
            z: int

        a = A(1, 2, 3)
        self.assertEqual(repr(a), "A(x=1, y=2, z=3)")
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        self.assertEqual(a.z, 3)
        b = B(1, 2, 3)
        self.assertEqual(repr(b), "B(x=1, y=2, z=3)")
        self.assertEqual(b.x, 1)
        self.assertEqual(b.y, 2)
        self.assertEqual(b.z, 3)
        c = B(1, 2, 3)
        self.assertEqual(repr(c), "B(x=1, y=2, z=3)")
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 2)
        self.assertEqual(c.z, 3)

    def test_keyword_args2_tp(self):
        class A(dataobject):
            __fields__ = 'x', 'y', 'z'

        a1 = A(x=1, y=2, z=3)
        self.assertEqual(a1.x, 1)
        self.assertEqual(a1.y, 2)
        self.assertEqual(a1.z, 3)
        a3 = A(1, "a", 3)
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, "a")
        self.assertEqual(a3.z, 3)

    def test_keyword_args_defaults2_tp(self):
        # Defaults declared via plain class attributes with __fields__.
        class A(dataobject):
            __fields__ = ('x', 'y', 'z')
            x = 100
            y = 200
            z = 300

        a1 = A(x=1)
        self.assertEqual(a1.x, 1)
        self.assertEqual(a1.y, 200)
        self.assertEqual(a1.z, 300)
        a2 = A(x=1, y=2.0)
        self.assertEqual(a2.x, 1)
        self.assertEqual(a2.y, 2.0)
        self.assertEqual(a2.z, 300)
        a3 = A(x=1, y=2.0, z="a")
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, 2.0)
        self.assertEqual(a3.z, "a")

    def test_keyword_args_defaults_tp(self):
        class A(dataobject):
            x: int = 100
            y: int = 200
            z: int = 300

        a1 = A(x=1)
        self.assertEqual(repr(a1), "A(x=1, y=200, z=300)")
        self.assertEqual(a1.x, 1)
        self.assertEqual(a1.y, 200)
        self.assertEqual(a1.z, 300)
        a2 = A(x=1, y=2)
        self.assertEqual(repr(a2), "A(x=1, y=2, z=300)")
        self.assertEqual(a2.x, 1)
        self.assertEqual(a2.y, 2)
        self.assertEqual(a2.z, 300)
        a3 = A(x=1, y=2, z=3)
        self.assertEqual(repr(a3), "A(x=1, y=2, z=3)")
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, 2)
        self.assertEqual(a3.z, 3)

    def test_datatype_dict2_tp(self):
        # __dict__/__weakref__ can also be requested via clsconfig.
        @clsconfig(use_dict=True, use_weakref=True)
        class A(dataobject):
            __fields__ = 'x', 'y'

        a = A(1, 2)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        # self.assertEqual(sys.getsizeof(a), 48)
        # self.assertEqual(A.__dictoffset__, 32)
        # self.assertEqual(A.__weakrefoffset__, 40)
        weakref.ref(a)
        self.assertEqual(a.__dict__, {})
        a.z = 3
        self.assertEqual(a.z, a.__dict__['z'])
        a = None

    def test_defaults2_tp(self):
        class A(dataobject):
            __fields__ = ('x', 'y', 'z')
            x = 100
            y = 200
            z = 300

        a1 = A()
        self.assertEqual(a1.x, 100)
        self.assertEqual(a1.y, 200)
        self.assertEqual(a1.z, 300)
        a2 = A(1)
        self.assertEqual(a2.x, 1)
        self.assertEqual(a2.y, 200)
        self.assertEqual(a2.z, 300)
        a3 = A(1, 2)
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, 2)
        self.assertEqual(a3.z, 300)

    def test_defaults3_tp(self):
        # A subclass may override a default of an inherited field.
        class A(dataobject):
            __fields__ = ('x', 'y', 'z')
            x = 100
            y = 200
            z = 300

        class B(A):
            __fields__ = 'z',
            z = 400

        a1 = B()
        self.assertEqual(a1.x, 100)
        self.assertEqual(a1.y, 200)
        self.assertEqual(a1.z, 400)
        a2 = B(1)
        self.assertEqual(a2.x, 1)
        self.assertEqual(a2.y, 200)
        self.assertEqual(a2.z, 400)
        a3 = B(1, 2)
        self.assertEqual(a3.x, 1)
        self.assertEqual(a3.y, 2)
        self.assertEqual(a3.z, 400)

    def test_iter2_tp(self):
        # __fields__ given as an int declares unnamed fields.
        class A(dataobject):
            __fields__ = 3

        a = A(1, 2.0, "a")
        self.assertEqual(list(iter(a)), [1, 2.0, "a"])

    def test_iter3_tp(self):
        @clsconfig(iterable=True)
        class A(dataobject):
            __fields__ = ('x', 'y', 'z')

        a = A(1, 2.0, "a")
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2.0)
        self.assertEqual(a.z, "a")
        self.assertEqual(list(iter(a)), [1, 2.0, "a"])

    def test_enable_gc_tp(self):
        # enable_gc adds the cyclic-GC header to instances; otherwise
        # both classes behave the same.
        class A(dataobject):
            __fields__ = 'x', 'y', 'z'

        @enable_gc
        class B(dataobject):
            __fields__ = 'x', 'y', 'z'

        a = A(1, 2, 3)
        b = B(1, 2, 3)
        self.assertEqual(a.x, b.x)
        self.assertEqual(a.y, b.y)
        self.assertEqual(a.z, b.z)
        self.assertEqual(sys.getsizeof(b) - sys.getsizeof(a), headgc_size)

    def test_pickle2_tp(self):
        # Round-trip through every supported pickle protocol.
        p = TestPickle2(10, 20, 30)
        for module in (pickle,):
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
                tmp = dumps(p, protocol)
                q = loads(tmp)
                self.assertEqual(p, q)

    def test_pickle3_tp(self):
        # Extra __dict__ attributes must survive the round-trip too.
        p = TestPickle3(10, 20, 30)
        p.a = 1
        p.b = 2
        for module in (pickle,):
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
                tmp = dumps(p, protocol)
                q = loads(tmp)
                self.assertEqual(p, q)

    def test_pickle22_tp(self):
        p = TestPickle22(10, 20, 30)
        for module in (pickle,):
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
                tmp = dumps(p, protocol)
                q = loads(tmp)
                self.assertEqual(p, q)

    def test_pickle33_tp(self):
        p = TestPickle33(10, 20, 30)
        p.a = 1
        p.b = 2
        for module in (pickle,):
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
                tmp = dumps(p, protocol)
                q = loads(tmp)
                self.assertEqual(p, q)

    def test_copy_defaults_tp(self):
        import copy

        class A(dataobject):
            x: int = 0
            y: int

        a = A(x=1, y=2)
        b = copy.copy(a)
        self.assertEqual(a, b)
        c = copy.deepcopy(a)
        self.assertEqual(a, c)

    def test_signature_tp(self):
        # The generated __new__ must expose fields via inspect.signature.
        class A(dataobject):
            x: int
            y: int = 2

        import inspect
        s = inspect.signature(A)
        px = s.parameters['x']
        self.assertEqual(px.name, 'x')
        self.assertEqual(px.annotation, int)
        self.assertEqual(px.default, px.empty)
        py = s.parameters['y']
        self.assertEqual(py.name, 'y')
        self.assertEqual(py.annotation, int)
        self.assertEqual(py.default, 2)

    def test_fast_new_tp(self):
        # fast_new skips generating a Python-level __new__.
        class A(dataobject):
            __fields__ = 'x', 'y'
            __options__ = {'fast_new': True}

        self.assertTrue('__new__' not in A.__dict__)
        a = A(1, 2)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        a = A(1, y=2)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        a = A(1, **{'y': 2})
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        a = A(x=1, y=2)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
        a = A(**{'x': 1, 'y': 2})
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, 2)
def main():
    """Return a TestSuite holding all DataObjectTest3 tests."""
    cases = unittest.makeSuite(DataObjectTest3)
    result = unittest.TestSuite()
    result.addTest(cases)
    return result
| |
import os, sys, inspect
import h5py
import numpy as np
import random
import math
import multiprocessing
from Crypto.Random.random import randint
import gc
import resource
# Visualization
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
# from mayavi import mlab
# from mayavi.core.ui.mayavi_scene import MayaviScene
# import volume_slicer
# Load the configuration file
import config
from numpy import float32, int32, uint8, dtype
# Make this script's own directory importable.
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
    sys.path.append(cmd_folder)
# Make the pycaffe bindings importable (both the resolved and raw paths).
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], config.caffe_path + "/python")))
if cmd_subfolder not in sys.path:
    sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path + "/python")
# Make the malis bindings importable (expected as a sibling checkout).
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../malis")))
if cmd_subfolder not in sys.path:
    sys.path.append(cmd_subfolder)
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
    cpus = multiprocessing.cpu_count()
    cwd = os.getcwd()
    os.chdir(config.caffe_path)
    # Build caffe and pycaffe in parallel; abort with make's exit code on failure.
    result = os.system("make all -j %s" % cpus)
    if result != 0:
        sys.exit(result)
    result = os.system("make pycaffe -j %s" % cpus)
    if result != 0:
        sys.exit(result)
    os.chdir(cwd)
# Import pycaffe
import caffe
import malis as malis
# Import visualization and display
# import visualizer
# Fix up OpenCL variables. Can interfere with the
# frame buffer if the GPU is also a display driver
# Allow the OpenCL runtime to use the full GPU memory for a single allocation/heap.
os.environ["GPU_MAX_ALLOC_PERCENT"] = "100"
os.environ["GPU_SINGLE_ALLOC_PERCENT"] = "100"
os.environ["GPU_MAX_HEAP_SIZE"] = "100"
os.environ["GPU_FORCE_64BIT_PTR"] = "1"
# Number of spatial dimensions of the network output, taken from the config.
dims = len(config.output_dims)
def normalize(dataset, newmin=-1, newmax=1):
    """Affinely map the array's global value range onto [newmin, newmax]."""
    # Reduce one axis at a time until scalars remain (works for any rank).
    hi = dataset
    while len(hi.shape) > 0:
        hi = hi.max(0)
    lo = dataset
    while len(lo.shape) > 0:
        lo = lo.min(0)
    span = hi - lo
    return ((dataset - lo) / span) * (newmax - newmin) + newmin
def error_scale(data, factor_low, factor_high):
    """Per-element loss weight: factor_high where data >= 0.5, else factor_low."""
    high_mask = data >= 0.5
    return high_mask * factor_high + (~high_mask) * factor_low
def count_affinity(dataset):
    """Return (count of elements >= 0.5, count of elements < 0.5)."""
    above = np.sum(dataset >= 0.5)
    below = np.sum(dataset < 0.5)
    return above, below
def border_reflect(dataset, border):
    """Pad every axis with *border* reflected samples on each side."""
    pad_width = (border, border)
    return np.pad(dataset, pad_width, 'reflect')
def inspect_2D_hdf5(hdf5_file):
    """Print keys, shape, dtype and value range of the first 2-D dataset."""
    print 'HDF5 keys: %s' % hdf5_file.keys()
    # Only the first dataset in the file is inspected.
    dset = hdf5_file[hdf5_file.keys()[0]]
    print 'HDF5 shape: X: %s Y: %s' % dset.shape
    print 'HDF5 data type: %s' % dset.dtype
    print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0), np.asarray(dset).min(0).min(0)]

def inspect_3D_hdf5(hdf5_file):
    """Print keys, shape, dtype and value range of the first 3-D dataset."""
    print 'HDF5 keys: %s' % hdf5_file.keys()
    dset = hdf5_file[hdf5_file.keys()[0]]
    print 'HDF5 shape: X: %s Y: %s Z: %s' % dset.shape
    print 'HDF5 data type: %s' % dset.dtype
    print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0)]

def inspect_4D_hdf5(hdf5_file):
    """Print keys, shape, dtype and value range of the first 4-D dataset."""
    print 'HDF5 keys: %s' % hdf5_file.keys()
    dset = hdf5_file[hdf5_file.keys()[0]]
    print 'HDF5 shape: T: %s X: %s Y: %s Z: %s' % dset.shape
    print 'HDF5 data type: %s' % dset.dtype
    print 'Max/Min: %s' % [np.asarray(dset).max(0).max(0).max(0).max(0), np.asarray(dset).min(0).min(0).min(0).min(0)]
def display_raw(raw_ds, index):
    """Show Z-slice *index* of a raw volume, min/max-stretched to 0..255."""
    slice = raw_ds[0:raw_ds.shape[0], 0:raw_ds.shape[1], index]
    minval = np.min(np.min(slice, axis=1), axis=0)
    maxval = np.max(np.max(slice, axis=1), axis=0)
    img = Image.fromarray((slice - minval) / (maxval - minval) * 255)
    img.show()

def display_con(con_ds, index):
    """Show Z-slice *index* of a connected-component volume as a color image.

    NOTE(review): relies on module-level colorsr/colorsg/colorsb lookup tables
    that are not defined in this file excerpt — confirm they exist at runtime.
    """
    slice = con_ds[0:con_ds.shape[0], 0:con_ds.shape[1], index]
    rgbArray = np.zeros((con_ds.shape[0], con_ds.shape[1], 3), 'uint8')
    rgbArray[..., 0] = colorsr[slice] * 256
    rgbArray[..., 1] = colorsg[slice] * 256
    rgbArray[..., 2] = colorsb[slice] * 256
    img = Image.fromarray(rgbArray, 'RGB')
    img.show()

def display_aff(aff_ds, index):
    """Show Z-slice *index* of a 3-channel affinity volume (AND of x/y/z edges)."""
    sliceX = aff_ds[0, 0:520, 0:520, index]
    sliceY = aff_ds[1, 0:520, 0:520, index]
    sliceZ = aff_ds[2, 0:520, 0:520, index]
    img = Image.fromarray((sliceX & sliceY & sliceZ) * 255)
    img.show()

def display_binary(bin_ds, index):
    """Show Z-slice *index* of a binary volume scaled to 0/255."""
    slice = bin_ds[0:bin_ds.shape[0], 0:bin_ds.shape[1], index]
    img = Image.fromarray(np.uint8(slice * 255))
    img.show()
def slice_data(data, offsets, sizes):
    """Return the hyper-rectangular sub-block data[offsets : offsets + sizes].

    Generalized from the original hard-coded 1-4 dimensional cases to any
    number of dimensions via a tuple of slice objects (numpy basic indexing).
    For the 1-D case a plain slice is used so Python lists keep working.

    Args:
        data: array-like supporting slice indexing (numpy array, or list in 1-D).
        offsets: per-dimension start indices.
        sizes: per-dimension extents.
    """
    region = tuple(slice(off, off + size) for off, size in zip(offsets, sizes))
    if len(region) == 1:
        # Lists do not accept tuple indices; keep 1-D behavior identical.
        return data[region[0]]
    return data[region]
def set_slice_data(data, insert_data, offsets, sizes):
    """Assign insert_data into data[offsets : offsets + sizes] in place.

    Generalized from the original hard-coded 1-4 dimensional cases to any
    number of dimensions via a tuple of slice objects (numpy basic indexing).
    For the 1-D case a plain slice is used so Python lists keep working.

    Args:
        data: mutable array-like supporting slice assignment.
        insert_data: block whose shape matches *sizes*.
        offsets: per-dimension start indices.
        sizes: per-dimension extents.
    """
    region = tuple(slice(off, off + size) for off, size in zip(offsets, sizes))
    if len(region) == 1:
        # Lists do not accept tuple indices; keep 1-D behavior identical.
        data[region[0]] = insert_data
    else:
        data[region] = insert_data
def sanity_check_net_blobs(net):
    """Scan every blob of *net* for exploded activations (|value| > 1e5).

    Prints the first failing location per blob and stops at the first
    failing blob.
    """
    for key in net.blobs.keys():
        dst = net.blobs[key]
        # Inspect only the first item of the batch.
        data = np.ndarray.flatten(dst.data[0].copy())
        print 'Blob: %s; %s' % (key, data.shape)
        failure = False
        first = -1
        for i in range(0,data.shape[0]):
            if abs(data[i]) > 100000:
                failure = True
                if first == -1:
                    first = i
                print 'Failure, location %d; objective %d' % (i, data[i])
        print 'Failure: %s, first at %d' % (failure,first)
        if failure:
            break;
def process(net, data_arrays, output_folder):
    """Run the net over each input volume in sliding blocks and write the
    predicted 3-channel affinity maps to <output_folder>/<i>.h5.
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    dst = net.blobs['prob']
    # Placeholder label input required by the memory-data layer.
    dummy_slice = [0]
    for i in range(0, len(data_arrays)):
        data_array = data_arrays[i]
        dims = len(data_array.shape)
        offsets = []
        in_dims = []
        out_dims = []
        for d in range(0, dims):
            offsets += [0]
            in_dims += [data_array.shape[d]]
            # The valid output volume is smaller than the input by the padding.
            out_dims += [data_array.shape[d] - config.input_padding[d]]
        # 3 affinity channels (x, y, z) per output voxel.
        pred_array = np.zeros(tuple([3] + out_dims))
        while(True):
            data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
            net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
            net.forward()
            output = dst.data[0].copy()
            print offsets
            # while(True):
            # blob = raw_input('Blob:')
            # fmap = int(raw_input('Enter the feature map:'))
            # m = volume_slicer.VolumeSlicer(data=np.squeeze(net.blobs[blob].data[0])[fmap,:,:])
            # m.configure_traits()
            set_slice_data(pred_array, output, [0] + offsets, [3] + config.output_dims)
            # Advance the block offsets odometer-style, last dimension fastest.
            incremented = False
            for d in range(0, dims):
                if (offsets[dims - 1 - d] == out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d]):
                    # Reset direction
                    offsets[dims - 1 - d] = 0
                else:
                    # Increment direction
                    offsets[dims - 1 - d] = min(offsets[dims - 1 - d] + config.output_dims[dims - 1 - d], out_dims[dims - 1 - d] - config.output_dims[dims - 1 - d])
                    incremented = True
                    break
            # Processed the whole input block
            if not incremented:
                break
        # Save the output
        outhdf5 = h5py.File(output_folder+'/'+str(i)+'.h5', 'w')
        outdset = outhdf5.create_dataset('main', tuple([3]+out_dims), np.float32, data=pred_array)
        outdset.attrs['edges'] = np.string_('-1,0,0;0,-1,0;0,0,-1')
        outhdf5.close()
def train(solver, data_arrays, label_arrays, mode='malis'):
    """Training loop: sample random sub-volumes, feed them to the net and
    step the solver once per iteration.

    mode selects the loss inputs: 'malis'/'malis_aniso' feed raw, labels,
    affinities and the neighborhood; 'euclid'/'euclid_aniso' feed raw,
    affinities and a class-balancing error scale; 'softmax' feeds raw and labels.
    """
    losses = []
    net = solver.net
    # Choose the affinity neighborhood (isotropic or anisotropic).
    if mode == 'malis':
        nhood = malis.mknhood3d()
    if mode == 'euclid':
        nhood = malis.mknhood3d()
    if mode == 'malis_aniso':
        nhood = malis.mknhood3d_aniso()
    if mode == 'euclid_aniso':
        nhood = malis.mknhood3d_aniso()
    # Preallocated C-contiguous buffers reused each iteration for the
    # memory-data layers (shapes match the hard-wired 132^3 -> 44^3 net).
    data_slice_cont = np.zeros((1,1,132,132,132), dtype=float32)
    label_slice_cont = np.zeros((1,1,44,44,44), dtype=float32)
    aff_slice_cont = np.zeros((1,3,44,44,44), dtype=float32)
    nhood_cont = np.zeros((1,1,3,3), dtype=float32)
    error_scale_cont = np.zeros((1,1,44,44,44), dtype=float32)
    dummy_slice = np.ascontiguousarray([0]).astype(float32)
    # Loop from current iteration to last iteration
    for i in range(solver.iter, solver.max_iter):
        # First pick the dataset to train with
        dataset = randint(0, len(data_arrays) - 1)
        data_array = data_arrays[dataset]
        label_array = label_arrays[dataset]
        # affinity_array = affinity_arrays[dataset]
        # Random block position such that input (output + padding) fits.
        offsets = []
        for j in range(0, dims):
            offsets.append(randint(0, data_array.shape[j] - (config.output_dims[j] + config.input_padding[j])))
        # These are the raw data elements
        data_slice = slice_data(data_array, offsets, [config.output_dims[di] + config.input_padding[di] for di in range(0, dims)])
        # These are the labels (connected components)
        label_slice = slice_data(label_array, [offsets[di] + int(math.ceil(config.input_padding[di] / float(2))) for di in range(0, dims)], config.output_dims)
        # These are the affinity edge values
        # Also recomputing the corresponding labels (connected components)
        aff_slice = malis.seg_to_affgraph(label_slice,nhood)
        label_slice,ccSizes = malis.connected_components_affgraph(aff_slice,nhood)
        print (data_slice[None, None, :]).shape
        print (label_slice[None, None, :]).shape
        print (aff_slice[None, :]).shape
        print (nhood).shape
        if mode == 'malis':
            # Copy into the persistent buffers so Caffe keeps valid pointers.
            np.copyto(data_slice_cont, np.ascontiguousarray(data_slice[None, None, :]).astype(float32))
            np.copyto(label_slice_cont, np.ascontiguousarray(label_slice[None, None, :]).astype(float32))
            np.copyto(aff_slice_cont, np.ascontiguousarray(aff_slice[None, :]).astype(float32))
            np.copyto(nhood_cont, np.ascontiguousarray(nhood[None, None, :]).astype(float32))
            net.set_input_arrays(0, data_slice_cont, dummy_slice)
            net.set_input_arrays(1, label_slice_cont, dummy_slice)
            net.set_input_arrays(2, aff_slice_cont, dummy_slice)
            net.set_input_arrays(3, nhood_cont, dummy_slice)
        # We pass the raw and affinity array only
        if mode == 'euclid':
            net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
            net.set_input_arrays(1, np.ascontiguousarray(aff_slice[None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
            net.set_input_arrays(2, np.ascontiguousarray(error_scale(aff_slice[None, :],1.0,0.045)).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
        if mode == 'softmax':
            net.set_input_arrays(0, np.ascontiguousarray(data_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
            net.set_input_arrays(1, np.ascontiguousarray(label_slice[None, None, :]).astype(float32), np.ascontiguousarray(dummy_slice).astype(float32))
        # Single step
        loss = solver.step(1)
        # Memory clean up and report
        print("Memory usage (before GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
        while gc.collect():
            pass
        print("Memory usage (after GC): %d MiB" % ((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024)))
        # m = volume_slicer.VolumeSlicer(data=np.squeeze((net.blobs['Convolution18'].data[0])[0,:,:]))
        # m.configure_traits()
        print("Loss: %s" % loss)
        losses += [loss]
# Paths to the raw EM volume and its ground-truth segmentation.
hdf5_raw_file = 'fibsem_medulla_7col/tstvol-520-1-h5/img_normalized.h5'
hdf5_gt_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_seg.h5'
# hdf5_aff_file = 'fibsem_medulla_7col/tstvol-520-1-h5/groundtruth_aff.h5'
#hdf5_raw_file = 'zebrafish_friedrich/raw.hdf5'
#hdf5_gt_file = 'zebrafish_friedrich/labels_2.hdf5'
hdf5_raw = h5py.File(hdf5_raw_file, 'r')
hdf5_gt = h5py.File(hdf5_gt_file, 'r')
# hdf5_aff = h5py.File(hdf5_aff_file, 'r')
#inspect_3D_hdf5(hdf5_raw)
#inspect_3D_hdf5(hdf5_gt)
#inspect_4D_hdf5(hdf5_aff)
# Make the dataset ready for the network
hdf5_raw_ds = normalize(np.asarray(hdf5_raw[hdf5_raw.keys()[0]]).astype(float32), -1, 1)
hdf5_gt_ds = np.asarray(hdf5_gt[hdf5_gt.keys()[0]]).astype(float32)
# hdf5_aff_ds = np.asarray(hdf5_aff[hdf5_aff.keys()[0]])
#display_aff(hdf5_aff_ds, 1)
#display_con(hdf5_gt_ds, 0)
#display_raw(hdf5_raw_ds, 0)
#display_binary(hdf5_gt_ds, 0)
#Initialize caffe
caffe.set_mode_gpu()
caffe.set_device(config.device_id)
# Dispatch on the configured mode: train a solver or run inference.
if(config.mode == "train"):
    solver = caffe.get_solver_from_file(config.solver_proto)
    #solver.restore("net__iter_8000.solverstate")
    net = solver.net
    train(solver, [normalize(hdf5_raw_ds)], [hdf5_gt_ds])
if(config.mode == "process"):
    net = caffe.Net(config.test_net, config.trained_model, caffe.TEST)
    process(net, [normalize(hdf5_raw_ds)], config.output_folder)
| |
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import ipaddress
import common
import net_crypto
import mle
master_key = bytearray([0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff])
def convert_aux_sec_hdr_to_bytearray(aux_sec_hdr):
    """Serialize an auxiliary security header: control byte, LE frame counter, key id."""
    control = aux_sec_hdr.security_level | ((aux_sec_hdr.key_id_mode & 0x03) << 3)
    result = bytearray([control])
    result.extend(struct.pack("<L", aux_sec_hdr.frame_counter))
    result.extend(aux_sec_hdr.key_id)
    return result
def any_eui64():
    """Random 8-byte EUI-64 address."""
    return bytearray(random.getrandbits(8) for _ in range(8))
def any_security_level():
    """Pick an arbitrary 3-bit security level (0-7)."""
    level = random.getrandbits(3)
    return level
def any_key_id_mode():
    """These tests exercise key id mode 2 only."""
    return 2
def any_key_id(key_id_mode):
    """Random key id for the given mode; only mode 2 (5 bytes) is supported."""
    if key_id_mode != 2:
        # Mirrors the original's implicit None for unsupported modes.
        return None
    return bytearray(random.getrandbits(8) for _ in range(5))
def any_auxiliary_security_header():
    """Build an AuxiliarySecurityHeader with random content (key id mode 2)."""
    mode = any_key_id_mode()
    key_id = any_key_id(mode)
    return net_crypto.AuxiliarySecurityHeader(
        mode, any_security_level(), any_frame_counter(), key_id)
def any_frame_counter():
    """Random 32-bit frame counter."""
    counter = random.getrandbits(32)
    return counter
def any_ip_address():
    """Random IPv6 address built from 16 random bytes."""
    raw = bytes(bytearray(random.getrandbits(8) for _ in range(16)))
    return ipaddress.ip_address(raw)
def any_data(length=None):
    """Random byte payload; length defaults to a random value in [0, 128]."""
    if length is None:
        length = random.randint(0, 128)
    return bytearray(random.getrandbits(8) for _ in range(length))
def any_master_key():
    """Random 16-byte Thread master key."""
    return bytearray(random.getrandbits(8) for _ in range(16))
class TestCryptoEngine(unittest.TestCase):
    """Encrypt/decrypt tests for CryptoEngine using fixed MLE test vectors
    plus a random round-trip check."""
    def test_should_decrypt_bytearray_to_mle_message_when_decrypt_method_is_called(self):
        # GIVEN
        message_info = common.MessageInfo()
        message_info.source_mac_address = common.MacAddress.from_eui64(
            bytearray([0x00, 0x35, 0xcc, 0x94, 0xd7, 0x7a, 0x07, 0xe8]))
        message_info.source_ipv6 = "fe80::235:cc94:d77a:07e8"
        message_info.destination_ipv6 = "ff02::2"
        message_info.aux_sec_hdr = net_crypto.AuxiliarySecurityHeader(key_id_mode=2,
                                                                     security_level=5,
                                                                     frame_counter=262165,
                                                                     key_id=bytearray([0x00, 0x00, 0x00, 0x00, 0x01]))
        message_info.aux_sec_hdr_bytes = convert_aux_sec_hdr_to_bytearray(message_info.aux_sec_hdr)
        # Known ciphertext and MIC for the vector above.
        data = bytearray([0x9a, 0x5a, 0x9a, 0x5b, 0xba, 0x25, 0x9c, 0x5e,
                          0x58, 0xa2, 0x7e, 0x75, 0x74, 0xef, 0x79, 0xbc,
                          0x4f, 0xa3, 0xf9, 0xae, 0xa8, 0x34, 0xf6, 0xf2,
                          0x37, 0x21, 0x93, 0x60])
        mic = bytearray([0xe1, 0xb5, 0xa2, 0x53])
        net_crypto_engine = net_crypto.CryptoEngine(net_crypto.MleCryptoMaterialCreator(master_key))
        # WHEN
        mle_msg = net_crypto_engine.decrypt(data, mic, message_info)
        # THEN
        expected_mle_msg = bytearray([0x04, 0x00, 0x02, 0x00, 0x00, 0x09, 0x0b, 0x8f,
                                      0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00,
                                      0x01, 0xf1, 0x0b, 0x08, 0x65, 0x5e, 0x0f, 0x83,
                                      0x40, 0xc7, 0x83, 0x31])
        self.assertEqual(expected_mle_msg, mle_msg)
    def test_should_encrypt_mle_message_to_bytearray_when_encrypt_method_is_called(self):
        # GIVEN
        message_info = common.MessageInfo()
        message_info.source_mac_address = common.MacAddress.from_eui64(
            bytearray([0x00, 0x35, 0xcc, 0x94, 0xd7, 0x7a, 0x07, 0xe8]))
        message_info.source_ipv6 = "fe80::235:cc94:d77a:07e8"
        message_info.destination_ipv6 = "ff02::2"
        message_info.aux_sec_hdr = net_crypto.AuxiliarySecurityHeader(key_id_mode=2,
                                                                     security_level=5,
                                                                     frame_counter=262165,
                                                                     key_id=bytearray([0x00, 0x00, 0x00, 0x00, 0x01]))
        message_info.aux_sec_hdr_bytes = convert_aux_sec_hdr_to_bytearray(message_info.aux_sec_hdr)
        mle_msg = bytearray([0x04, 0x00, 0x02, 0x00, 0x00, 0x09, 0x0b, 0x8f,
                             0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00,
                             0x01, 0xf1, 0x0b, 0x08, 0x65, 0x5e, 0x0f, 0x83,
                             0x40, 0xc7, 0x83, 0x31])
        net_crypto_engine = net_crypto.CryptoEngine(net_crypto.MleCryptoMaterialCreator(master_key))
        # WHEN
        encrypted_data, mic = net_crypto_engine.encrypt(mle_msg, message_info)
        # THEN
        # Expected value is ciphertext with the 4-byte MIC appended.
        expected_encrypted_data = bytearray([0x9a, 0x5a, 0x9a, 0x5b, 0xba, 0x25, 0x9c, 0x5e,
                                             0x58, 0xa2, 0x7e, 0x75, 0x74, 0xef, 0x79, 0xbc,
                                             0x4f, 0xa3, 0xf9, 0xae, 0xa8, 0x34, 0xf6, 0xf2,
                                             0x37, 0x21, 0x93, 0x60, 0xe1, 0xb5, 0xa2, 0x53])
        self.assertEqual(expected_encrypted_data, encrypted_data + mic)
    def test_should_encrypt_and_decrypt_random_data_content_when_proper_methods_are_called(self):
        # GIVEN
        data = any_data()
        master_key = any_master_key()
        key_id_mode = 2
        security_level = 5
        message_info = common.MessageInfo()
        message_info.source_mac_address = common.MacAddress.from_eui64(any_eui64())
        message_info.source_ipv6 = any_ip_address()
        message_info.destination_ipv6 = any_ip_address()
        message_info.aux_sec_hdr = net_crypto.AuxiliarySecurityHeader(key_id_mode=key_id_mode,
                                                                     security_level=security_level,
                                                                     frame_counter=any_frame_counter(),
                                                                     key_id=any_key_id(key_id_mode))
        message_info.aux_sec_hdr_bytes = convert_aux_sec_hdr_to_bytearray(message_info.aux_sec_hdr)
        net_crypto_engine = net_crypto.CryptoEngine(net_crypto.MleCryptoMaterialCreator(master_key))
        # WHEN
        enc_data, mic = net_crypto_engine.encrypt(data, message_info)
        dec_data = net_crypto_engine.decrypt(enc_data, mic, message_info)
        # THEN
        self.assertEqual(data, dec_data)
class TestCryptoMaterialCreator(unittest.TestCase):
    """Key generation as described in the Thread specification.

    Read more: Thread 1.1.0 Specification Candidate Final - 7.1.4 Key Generation
    Test vectors were taken from the Thread specification.
    """
    def test_should_generate_mle_and_mac_key_when_generate_keys_method_is_called_with_sequence_counter_equal_0(self):
        """
        7.1.4.1 Test Vector 1
        """
        # GIVEN
        sequence_counter = 0
        creator = net_crypto.CryptoMaterialCreator(master_key)
        # WHEN
        mle_key, mac_key = creator._generate_keys(sequence_counter)
        # THEN
        self.assertEqual(mle_key, bytearray([0x54, 0x45, 0xf4, 0x15, 0x8f, 0xd7, 0x59, 0x12,
                                             0x17, 0x58, 0x09, 0xf8, 0xb5, 0x7a, 0x66, 0xa4]))
        self.assertEqual(mac_key, bytearray([0xde, 0x89, 0xc5, 0x3a, 0xf3, 0x82, 0xb4, 0x21,
                                             0xe0, 0xfd, 0xe5, 0xa9, 0xba, 0xe3, 0xbe, 0xf0]))
    def test_should_generate_mle_and_mac_key_when_generate_keys_method_is_called_with_sequence_counter_equal_1(self):
        """
        7.1.4.2 Test Vector 2
        """
        # GIVEN
        sequence_counter = 1
        creator = net_crypto.CryptoMaterialCreator(master_key)
        # WHEN
        mle_key, mac_key = creator._generate_keys(sequence_counter)
        # THEN
        self.assertEqual(mle_key, bytearray([0x8f, 0x4c, 0xd1, 0xa2, 0x7d, 0x95, 0xc0, 0x7d,
                                             0x12, 0xdb, 0x89, 0x74, 0xbd, 0x61, 0x5c, 0x13]))
        self.assertEqual(mac_key, bytearray([0x9b, 0xe0, 0xd1, 0xaf, 0x7b, 0xd8, 0x73, 0x50,
                                             0xde, 0xab, 0xcd, 0xd0, 0x7f, 0xeb, 0xb9, 0xd5]))
    def test_should_generate_mle_and_mac_key_when_generate_keys_method_is_called_with_sequence_counter_equal_2(self):
        """
        7.1.4.3 Test Vector 3
        """
        # GIVEN
        sequence_counter = 2
        creator = net_crypto.CryptoMaterialCreator(master_key)
        # WHEN
        mle_key, mac_key = creator._generate_keys(sequence_counter)
        # THEN
        self.assertEqual(mle_key, bytearray([0x01, 0x6e, 0x2a, 0xb8, 0xec, 0x88, 0x87, 0x96,
                                             0x87, 0xa7, 0x2e, 0x0a, 0x35, 0x7e, 0xcf, 0x2a]))
        self.assertEqual(mac_key, bytearray([0x56, 0x41, 0x09, 0xe9, 0xd2, 0xaa, 0xd7, 0xf7,
                                             0x23, 0xec, 0x3b, 0x96, 0x11, 0x0e, 0xef, 0xa3]))
class TestMleCryptoMaterialCreator(unittest.TestCase):
    """Tests for the MLE-specific nonce and authenticated-data construction."""
    def test_should_create_nonce_when_create_nonce_method_is_called(self):
        # GIVEN
        source_eui64 = any_eui64()
        frame_counter = any_frame_counter()
        security_level = any_security_level()
        creator = net_crypto.MleCryptoMaterialCreator(master_key)
        # WHEN
        nonce = creator._create_nonce(source_eui64, frame_counter, security_level)
        # THEN
        # Nonce layout: 8-byte EUI-64, 4-byte big-endian frame counter, 1-byte level.
        nonce_bytes = io.BytesIO(nonce)
        self.assertEqual(source_eui64, nonce_bytes.read(8))
        self.assertEqual(struct.pack(">L", frame_counter), nonce_bytes.read(4))
        self.assertEqual(security_level, ord(nonce_bytes.read(1)))
    def test_should_create_authenticated_data_when_create_authenticated_data_method_is_called(self):
        """
        Only Key id mode 2.
        Length of the Auxiliary Security Header is constantly equal 10.
        """
        # GIVEN
        source_address = any_ip_address()
        destination_address = any_ip_address()
        auxiliary_security_header_bytes = convert_aux_sec_hdr_to_bytearray(any_auxiliary_security_header())
        creator = net_crypto.MleCryptoMaterialCreator(master_key)
        # WHEN
        authenticated_data = creator._create_authenticated_data(
            source_address, destination_address, auxiliary_security_header_bytes)
        # THEN
        # Auth data layout: 16-byte source IP, 16-byte destination IP, 10-byte header.
        authenticated_data_bytes = io.BytesIO(authenticated_data)
        self.assertEqual(source_address.packed, authenticated_data_bytes.read(16))
        self.assertEqual(destination_address.packed, authenticated_data_bytes.read(16))
        self.assertEqual(auxiliary_security_header_bytes, authenticated_data_bytes.read(10))
    def test_should_create_key_and_nonce_and_authenticated_data_when_create_key_and_nonce_and_authenticated_data_is_called(self):
        # GIVEN
        message_info = common.MessageInfo()
        message_info.source_mac_address = common.MacAddress.from_eui64(any_eui64())
        message_info.source_ipv6 = any_ip_address()
        message_info.destination_ipv6 = any_ip_address()
        message_info.aux_sec_hdr = any_auxiliary_security_header()
        message_info.aux_sec_hdr_bytes = convert_aux_sec_hdr_to_bytearray(message_info.aux_sec_hdr)
        creator = net_crypto.MleCryptoMaterialCreator(master_key)
        # WHEN
        key, nonce, auth_data = creator.create_key_and_nonce_and_authenticated_data(message_info)
        # THEN
        self.assertEqual(message_info.source_mac_address.mac_address +
                         struct.pack(">LB",
                                     message_info.aux_sec_hdr.frame_counter,
                                     message_info.aux_sec_hdr.security_level), nonce)
        self.assertEqual(message_info.source_ipv6.packed +
                         message_info.destination_ipv6.packed +
                         message_info.aux_sec_hdr_bytes, auth_data)
class TestAuxiliarySecurityHeader(unittest.TestCase):
    """Property accessor tests for AuxiliarySecurityHeader."""
    def test_should_return_key_id_mode_value_when_key_id_mode_property_is_called(self):
        # GIVEN
        key_id_mode = any_key_id_mode()
        aux_sec_hdr_obj = net_crypto.AuxiliarySecurityHeader(
            key_id_mode, any_security_level(), any_frame_counter(), any_key_id(key_id_mode))
        # WHEN
        actual_key_id_mode = aux_sec_hdr_obj.key_id_mode
        # THEN
        self.assertEqual(key_id_mode, actual_key_id_mode)
    def test_should_return_security_level_value_when_security_level_property_is_called(self):
        # GIVEN
        security_level = any_security_level()
        key_id_mode = any_key_id_mode()
        aux_sec_hdr_obj = net_crypto.AuxiliarySecurityHeader(
            key_id_mode, security_level, any_frame_counter(), any_key_id(key_id_mode))
        # WHEN
        actual_security_level = aux_sec_hdr_obj.security_level
        # THEN
        self.assertEqual(security_level, actual_security_level)
    def test_should_return_frame_counter_value_when_frame_counter_property_is_called(self):
        # GIVEN
        frame_counter = any_frame_counter()
        key_id_mode = any_key_id_mode()
        aux_sec_hdr_obj = net_crypto.AuxiliarySecurityHeader(
            key_id_mode, any_security_level(), frame_counter, any_key_id(key_id_mode))
        # WHEN
        actual_frame_counter = aux_sec_hdr_obj.frame_counter
        # THEN
        self.assertEqual(frame_counter, actual_frame_counter)
    def test_should_return_key_id_value_when_key_id_property_is_called(self):
        # GIVEN
        key_id_mode = any_key_id_mode()
        key_id = any_key_id(key_id_mode)
        aux_sec_hdr_obj = net_crypto.AuxiliarySecurityHeader(
            key_id_mode, any_security_level(), any_frame_counter(), key_id)
        # WHEN
        actual_key_id = aux_sec_hdr_obj.key_id
        # THEN
        self.assertEqual(key_id, actual_key_id)
    def test_should_return_sequence_counter_value_when_sequence_counter_property_is_called(self):
        # GIVEN
        key_id_mode = 2
        key_id = any_key_id(key_id_mode)
        aux_sec_hdr_obj = net_crypto.AuxiliarySecurityHeader(
            key_id_mode, any_security_level(), any_frame_counter(), key_id)
        # WHEN
        actual_sequence_counter = aux_sec_hdr_obj.sequence_counter
        # THEN
        # In mode 2 the sequence counter is the first 4 key-id bytes, big-endian.
        self.assertEqual(struct.unpack(">I", key_id[:4])[0], actual_sequence_counter)
class TestAuxiliarySecurityHeaderFactory(unittest.TestCase):
    """Parsing tests for AuxiliarySecurityHeaderFactory."""
    def test_should_create_AuxiliarySecurityHeader_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        key_id_mode = any_key_id_mode()
        sec_lvl = any_security_level()
        frame_counter = any_frame_counter()
        key_id = any_key_id(key_id_mode)
        factory = net_crypto.AuxiliarySecurityHeaderFactory()
        # Wire format: control byte, little-endian frame counter, key id.
        data = bytearray([sec_lvl | key_id_mode << 3]) + struct.pack("<I", frame_counter) + key_id
        # WHEN
        aux_sec_hdr = factory.parse(io.BytesIO(data), common.MessageInfo())
        # THEN
        self.assertTrue(isinstance(aux_sec_hdr, net_crypto.AuxiliarySecurityHeader))
        self.assertEqual(key_id_mode, aux_sec_hdr.key_id_mode)
        self.assertEqual(sec_lvl, aux_sec_hdr.security_level)
        self.assertEqual(frame_counter, aux_sec_hdr.frame_counter)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
class LexerActionType(object):
    """Enumeration of the lexer action kinds; values match the serialized ATN."""
    CHANNEL = 0     #The type of a {@link LexerChannelAction} action.
    CUSTOM = 1      #The type of a {@link LexerCustomAction} action.
    MODE = 2        #The type of a {@link LexerModeAction} action.
    MORE = 3        #The type of a {@link LexerMoreAction} action.
    POP_MODE = 4    #The type of a {@link LexerPopModeAction} action.
    PUSH_MODE = 5   #The type of a {@link LexerPushModeAction} action.
    SKIP = 6        #The type of a {@link LexerSkipAction} action.
    TYPE = 7        #The type of a {@link LexerTypeAction} action.
class LexerAction(object):
    """Base class for all lexer actions executed against a Lexer instance."""
    def __init__(self, action):
        # One of the LexerActionType constants.
        self.actionType = action
        # Position-dependent actions must be executed at the exact input offset.
        self.isPositionDependent = False

    def __hash__(self):
        return hash(self.actionType)

    def __eq__(self, other):
        # Default is identity equality; subclasses with state override this.
        return self is other

    def __str__(self):
        # Python 2 idiom: delegate str() to the unicode representation.
        return unicode(self)

    def __unicode__(self):
        return unicode(super(LexerAction, self))
#
# Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
#
# <p>The {@code skip} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
class LexerSkipAction(LexerAction ):
    """Implements the {@code skip} command by calling {@link Lexer#skip}."""
    # Provides a singleton instance of this parameterless lexer action.
    INSTANCE = None

    def __init__(self):
        super(LexerSkipAction, self).__init__(LexerActionType.SKIP)

    def execute(self, lexer):
        lexer.skip()

    def __unicode__(self):
        return u"skip"

LexerSkipAction.INSTANCE = LexerSkipAction()
# Implements the {@code type} lexer action by calling {@link Lexer#setType}
# with the assigned type.
class LexerTypeAction(LexerAction):
    """Implements the {@code type} command by assigning the configured token type."""

    def __init__(self, type):
        super(LexerTypeAction, self).__init__(LexerActionType.TYPE)
        self.type = type

    def execute(self, lexer):
        # Set the token type produced by the current rule.
        lexer.type = self.type

    def __hash__(self):
        return hash((self.actionType, self.type))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, LexerTypeAction):
            return False
        return self.type == other.type

    def __unicode__(self):
        return u"type(" + unicode(self.type) + u")"
# Implements the {@code pushMode} lexer action by calling
# {@link Lexer#pushMode} with the assigned mode.
class LexerPushModeAction(LexerAction):
    """Implements the {@code pushMode} command with the assigned mode."""

    def __init__(self, mode):
        super(LexerPushModeAction, self).__init__(LexerActionType.PUSH_MODE)
        self.mode = mode

    # <p>This action is implemented by calling {@link Lexer#pushMode} with the
    # value provided by {@link #getMode}.</p>
    def execute(self, lexer):
        lexer.pushMode(self.mode)

    def __hash__(self):
        return hash((self.actionType, self.mode))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, LexerPushModeAction):
            return False
        return self.mode == other.mode

    def __unicode__(self):
        return u"pushMode(" + unicode(self.mode) + u")"
# Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
#
# <p>The {@code popMode} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
class LexerPopModeAction(LexerAction):
    """Implements the {@code popMode} command by calling {@link Lexer#popMode}.

    The command takes no parameters, so a singleton is exposed via INSTANCE.
    """
    INSTANCE = None

    def __init__(self):
        super(LexerPopModeAction, self).__init__(LexerActionType.POP_MODE)

    # <p>This action is implemented by calling {@link Lexer#popMode}.</p>
    def execute(self, lexer):
        lexer.popMode()

    def __unicode__(self):
        # Return a unicode literal for consistency with the other actions
        # (previously returned a bytestring).
        return u"popMode"

LexerPopModeAction.INSTANCE = LexerPopModeAction()
# Implements the {@code more} lexer action by calling {@link Lexer#more}.
#
# <p>The {@code more} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
class LexerMoreAction(LexerAction):
    """Implements the {@code more} command by calling {@link Lexer#more}.

    The command takes no parameters, so a singleton is exposed via INSTANCE.
    """
    INSTANCE = None

    def __init__(self):
        super(LexerMoreAction, self).__init__(LexerActionType.MORE)

    # <p>This action is implemented by calling {@link Lexer#more}.</p>
    # (The original comment said popMode — a copy/paste error.)
    def execute(self, lexer):
        lexer.more()

    def __unicode__(self):
        # Return a unicode literal for consistency with the other actions
        # (previously returned a bytestring).
        return u"more"

LexerMoreAction.INSTANCE = LexerMoreAction()
# Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
# the assigned mode.
class LexerModeAction(LexerAction):

    def __init__(self, mode):
        super(LexerModeAction, self).__init__(LexerActionType.MODE)
        self.mode = mode

    # Switches the lexer into the mode captured at construction time by
    # delegating to {@link Lexer#mode}.
    def execute(self, lexer):
        lexer.mode(self.mode)

    def __hash__(self):
        return hash((self.actionType, self.mode))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, LexerModeAction):
            return False
        return self.mode == other.mode

    def __unicode__(self):
        return u"mode(" + unicode(self.mode) + u")"
# Executes a custom lexer action by calling {@link Recognizer#action} with the
# rule and action indexes assigned to the custom action. The implementation of
# a custom action is added to the generated code for the lexer in an override
# of {@link Recognizer#action} when the grammar is compiled.
#
# <p>This class may represent embedded actions created with the <code>{...}</code>
# syntax in ANTLR 4, as well as actions created for lexer commands where the
# command argument could not be evaluated when the grammar was compiled.</p>
class LexerCustomAction(LexerAction):
    # Constructs a custom lexer action with the specified rule and action
    # indexes.
    #
    # @param ruleIndex The rule index to use for calls to
    # {@link Recognizer#action}.
    # @param actionIndex The action index to use for calls to
    # {@link Recognizer#action}.
    #/
    def __init__(self, ruleIndex, actionIndex):
        super(LexerCustomAction, self).__init__(LexerActionType.CUSTOM)
        self.ruleIndex = ruleIndex
        self.actionIndex = actionIndex
        # Custom code may inspect the input stream, so the execution position
        # of this action matters.
        self.isPositionDependent = True

    # <p>Custom actions are implemented by calling {@link Lexer#action} with the
    # appropriate rule and action indexes.</p>
    def execute(self, lexer):
        lexer.action(None, self.ruleIndex, self.actionIndex)

    def __hash__(self):
        return hash((self.actionType, self.ruleIndex, self.actionIndex))

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerCustomAction):
            return False
        else:
            return self.ruleIndex == other.ruleIndex and self.actionIndex == other.actionIndex
# Implements the {@code channel} lexer action by calling
# {@link Lexer#setChannel} with the assigned channel.
class LexerChannelAction(LexerAction):
    # Constructs a new {@code channel} action with the specified channel value.
    # @param channel The channel value to pass to {@link Lexer#setChannel}.
    def __init__(self, channel):
        super(LexerChannelAction, self).__init__(LexerActionType.CHANNEL)
        self.channel = channel

    # Assigns the channel on the lexer. Note this writes lexer._channel
    # directly rather than going through a setter method.
    def execute(self, lexer):
        lexer._channel = self.channel

    def __hash__(self):
        return hash((self.actionType, self.channel))

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerChannelAction):
            return False
        else:
            return self.channel == other.channel

    def __unicode__(self):
        return u"channel(" + unicode(self.channel) + u")"
# This implementation of {@link LexerAction} is used for tracking input offsets
# for position-dependent actions within a {@link LexerActionExecutor}.
#
# <p>This action is not serialized as part of the ATN, and is only required for
# position-dependent lexer actions which appear at a location other than the
# end of a rule. For more information about DFA optimizations employed for
# lexer actions, see {@link LexerActionExecutor#append} and
# {@link LexerActionExecutor#fixOffsetBeforeMatch}.</p>
class LexerIndexedCustomAction(LexerAction):
    # Constructs a new indexed custom action by associating a character offset
    # with a {@link LexerAction}.
    #
    # <p>Note: This class is only required for lexer actions for which
    # {@link LexerAction#isPositionDependent} returns {@code true}.</p>
    #
    # @param offset The offset into the input {@link CharStream}, relative to
    # the token start index, at which the specified lexer action should be
    # executed.
    # @param action The lexer action to execute at a particular offset in the
    # input {@link CharStream}.
    def __init__(self, offset, action):
        # Mirror the wrapped action's type so executors treat this wrapper
        # the same way they would treat the underlying action.
        super(LexerIndexedCustomAction, self).__init__(action.actionType)
        self.offset = offset
        self.action = action
        self.isPositionDependent = True

    # <p>This method calls {@link #execute} on the result of {@link #getAction}
    # using the provided {@code lexer}.</p>
    def execute(self, lexer):
        # assume the input stream position was properly set by the calling code
        self.action.execute(lexer)

    def __hash__(self):
        return hash((self.actionType, self.offset, self.action))

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerIndexedCustomAction):
            return False
        else:
            return self.offset == other.offset and self.action == other.action
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy for deleting node(s) from a cluster.
NOTE: For full documentation about how the deletion policy works, check:
https://docs.openstack.org/senlin/latest/contributor/policies/deletion_v1.html
"""
from oslo_log import log as logging
from senlin.common import constraints
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import scaleutils as su
from senlin.common import schema
from senlin.policies import base
LOG = logging.getLogger(__name__)
class DeletionPolicy(base.Policy):
    """Policy for choosing victim node(s) from a cluster for deletion.

    This policy is enforced when nodes are to be removed from a cluster.
    It will yield an ordered list of candidates for deletion based on user
    specified criteria.
    """

    VERSION = '1.1'
    VERSIONS = {
        '1.0': [
            {'status': consts.SUPPORTED, 'since': '2016.04'}
        ],
        '1.1': [
            {'status': consts.SUPPORTED, 'since': '2018.01'}
        ],
    }
    PRIORITY = 400

    KEYS = (
        CRITERIA, DESTROY_AFTER_DELETION, GRACE_PERIOD,
        REDUCE_DESIRED_CAPACITY, HOOKS, TYPE, PARAMS, QUEUE, URL, TIMEOUT
    ) = (
        'criteria', 'destroy_after_deletion', 'grace_period',
        'reduce_desired_capacity', 'hooks', 'type', 'params', 'queue', 'url',
        'timeout'
    )

    CRITERIA_VALUES = (
        OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM,
    ) = (
        'OLDEST_FIRST', 'OLDEST_PROFILE_FIRST', 'YOUNGEST_FIRST', 'RANDOM',
    )

    HOOK_VALUES = (
        ZAQAR, WEBHOOK
    ) = (
        'zaqar', 'webhook',
    )

    TARGET = [
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_DEL_NODES),
        ('BEFORE', consts.CLUSTER_RESIZE),
        ('BEFORE', consts.NODE_DELETE),
    ]

    PROFILE_TYPE = [
        'ANY'
    ]

    properties_schema = {
        CRITERIA: schema.String(
            _('Criteria used in selecting candidates for deletion'),
            default=RANDOM,
            constraints=[
                constraints.AllowedValues(CRITERIA_VALUES),
            ]
        ),
        DESTROY_AFTER_DELETION: schema.Boolean(
            _('Whether a node should be completely destroyed after '
              'deletion. Default to True'),
            default=True,
        ),
        GRACE_PERIOD: schema.Integer(
            _('Number of seconds before real deletion happens.'),
            default=0,
        ),
        REDUCE_DESIRED_CAPACITY: schema.Boolean(
            _('Whether the desired capacity of the cluster should be '
              'reduced along the deletion. Default to True.'),
            default=True,
        ),
        HOOKS: schema.Map(
            _("Lifecycle hook properties"),
            schema={
                TYPE: schema.String(
                    _("Type of lifecycle hook"),
                    default=ZAQAR,
                    constraints=[
                        constraints.AllowedValues(HOOK_VALUES),
                    ]
                ),
                PARAMS: schema.Map(
                    schema={
                        QUEUE: schema.String(
                            _("Zaqar queue to receive lifecycle hook message"),
                            default="",
                        ),
                        URL: schema.String(
                            _("Url sink to which to send lifecycle hook "
                              "message"),
                            default="",
                        ),
                    },
                    default={}
                ),
                TIMEOUT: schema.Integer(
                    _('Number of seconds before actual deletion happens.'),
                    default=0,
                ),
            },
            default={}
        )
    }

    def __init__(self, name, spec, **kwargs):
        super(DeletionPolicy, self).__init__(name, spec, **kwargs)

        # Cache frequently used properties as attributes.
        self.criteria = self.properties[self.CRITERIA]
        self.grace_period = self.properties[self.GRACE_PERIOD]
        self.destroy_after_deletion = self.properties[
            self.DESTROY_AFTER_DELETION]
        self.reduce_desired_capacity = self.properties[
            self.REDUCE_DESIRED_CAPACITY]
        self.hooks = self.properties[self.HOOKS]

    def _select_candidates(self, nodes, count):
        """Pick ``count`` victims from ``nodes`` per the configured criteria.

        Factored out of _victims_by_regions, _victims_by_zones and pre_op,
        which previously repeated this dispatch three times.

        :param nodes: list of candidate node objects.
        :param count: number of victims to select.
        :returns: list of selected candidates.
        """
        if self.criteria == self.RANDOM:
            return su.nodes_by_random(nodes, count)
        if self.criteria == self.OLDEST_PROFILE_FIRST:
            return su.nodes_by_profile_age(nodes, count)
        if self.criteria == self.OLDEST_FIRST:
            return su.nodes_by_age(nodes, count, True)
        # YOUNGEST_FIRST is the only remaining criteria value
        return su.nodes_by_age(nodes, count, False)

    def _victims_by_regions(self, cluster, regions):
        """Collect victim nodes region by region.

        :param cluster: the cluster object to select victims from.
        :param regions: dict mapping region name to the number of nodes to
                        remove from that region.
        :returns: list of victim nodes.
        """
        victims = []
        for region in sorted(regions.keys()):
            nodes = cluster.nodes_by_region(region)
            victims.extend(self._select_candidates(nodes, regions[region]))

        return victims

    def _victims_by_zones(self, cluster, zones):
        """Collect victim nodes availability zone by availability zone.

        :param cluster: the cluster object to select victims from.
        :param zones: dict mapping zone name to the number of nodes to
                      remove from that zone.
        :returns: list of victim nodes.
        """
        victims = []
        for zone in sorted(zones.keys()):
            nodes = cluster.nodes_by_zone(zone)
            victims.extend(self._select_candidates(nodes, zones[zone]))

        return victims

    def _update_action(self, action, victims):
        """Record the deletion decision into ``action.data`` and persist it.

        :param action: the action object to update.
        :param victims: list of node IDs chosen for deletion.
        """
        pd = action.data.get('deletion', {})
        pd['count'] = len(victims)
        pd['candidates'] = victims
        pd['destroy_after_deletion'] = self.destroy_after_deletion
        pd['grace_period'] = self.grace_period
        pd['reduce_desired_capacity'] = self.reduce_desired_capacity
        action.data.update({
            'status': base.CHECK_OK,
            'reason': _('Candidates generated'),
            'deletion': pd
        })
        action.store(action.context)

    def pre_op(self, cluster_id, action):
        """Choose victims that can be deleted.

        :param cluster_id: ID of the cluster to be handled.
        :param action: The action object that triggered this policy.
        """
        # Explicit candidates provided by the caller win outright.
        victims = action.inputs.get('candidates', [])
        if len(victims) > 0:
            self._update_action(action, victims)
            return

        # For a single-node delete the victim is the node itself.
        if action.action == consts.NODE_DELETE:
            self._update_action(action, [action.entity.id])
            return

        cluster = action.entity
        regions = None
        zones = None

        # Always expose the lifecycle hook configuration to the action.
        hooks_data = self.hooks
        action.data.update({'status': base.CHECK_OK,
                            'reason': _('lifecycle hook parameters saved'),
                            'hooks': hooks_data})
        action.store(action.context)

        deletion = action.data.get('deletion', {})
        if deletion:
            # there are policy decisions
            count = deletion['count']
            regions = deletion.get('regions', None)
            zones = deletion.get('zones', None)
        # No policy decision, check action itself: SCALE_IN
        elif action.action == consts.CLUSTER_SCALE_IN:
            count = action.inputs.get('count', 1)
        # No policy decision, check action itself: RESIZE
        else:
            current = len(cluster.nodes)
            res, reason = su.parse_resize_params(action, cluster, current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                LOG.error(reason)
                return

            # a resize that does not shrink the cluster needs no victims
            if 'deletion' not in action.data:
                return
            count = action.data['deletion']['count']

        # Cross-region
        if regions:
            victims = self._victims_by_regions(cluster, regions)
            self._update_action(action, victims)
            return

        # Cross-AZ
        if zones:
            victims = self._victims_by_zones(cluster, zones)
            self._update_action(action, victims)
            return

        # Cap the victim count at the current cluster size.
        if count > len(cluster.nodes):
            count = len(cluster.nodes)

        victims = self._select_candidates(cluster.nodes, count)
        self._update_action(action, victims)
        return
| |
# encoding: utf-8
"""
flow.py
Created by Thomas Mangin on 2010-01-14.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
# Do not use __slots__ here, we never create enough of them to be worth it
# And it really break complex inheritance
from struct import pack
from struct import unpack
from exabgp.protocol.ip import IP
from exabgp.protocol.ip import NoNextHop
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.direction import OUT
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.nlri.cidr import CIDR
from exabgp.protocol import Protocol
from exabgp.protocol.ip.icmp import ICMPType
from exabgp.protocol.ip.icmp import ICMPCode
from exabgp.protocol.ip.fragment import Fragment
from exabgp.protocol.ip.tcp.flag import TCPFlag
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
# =================================================================== Flow Components

class IComponent (object):
    """Marker base class for every flowspec component.

    All components carry an ID; a shared serialisation interface could
    eventually live here.
    """
    pass
class CommonOperator (object):
    """Bit-layout helpers shared by flowspec operator bytes.

    Per the RFC encoding, a value length is power(2, x), i.e. 1 << x,
    where x is the 2-bit "len" field of the operator byte.
    """
    # length-exponent tables: power maps exponent -> byte length,
    # rewop is the inverse mapping (byte length -> exponent)
    power = dict((_exp, 1 << _exp) for _exp in range(4))  # {0:1, 1:2, 2:4, 3:8}
    rewop = dict((1 << _exp, _exp) for _exp in range(4))  # {1:0, 2:1, 4:2, 8:3}
    len_position = 0x30

    EOL = 0x80       # 0b10000000 end-of-list bit
    AND = 0x40       # 0b01000000
    LEN = 0x30       # 0b00110000 2-bit value-length field
    NOP = 0x00

    # every bit except EOL and LEN belongs to the operator itself
    OPERATOR = 0xFF ^ (EOL | LEN)

    @staticmethod
    def eol (data):
        return data & CommonOperator.EOL

    @staticmethod
    def operator (data):
        return data & CommonOperator.OPERATOR

    @staticmethod
    def length (data):
        return CommonOperator.power[(data & CommonOperator.LEN) >> 4]
class NumericOperator (CommonOperator):
    # Comparison bits for flowspec numeric operators.
    # reserved= 0x08  # 0b00001000
    LT = 0x04  # 0b00000100 less-than
    GT = 0x02  # 0b00000010 greater-than
    EQ = 0x01  # 0b00000001 equal

class BinaryOperator (CommonOperator):
    # Bitmask-match bits for flowspec binary operators.
    # reserved= 0x0C  # 0b00001100
    NOT = 0x02    # 0b00000010 logical negation of the match
    MATCH = 0x01  # 0b00000001 exact bitmask match
def _len_to_bit (value):
    # Encode a value length (1/2/4/8 bytes) into the operator byte's
    # 2-bit "len" field (bits 4-5).
    return NumericOperator.rewop[value] << 4

def _bit_to_len (value):
    # Inverse of _len_to_bit: recover the value length from an operator byte.
    return NumericOperator.power[(value & CommonOperator.len_position) >> 4]
def _number (string):
value = 0
for c in string:
value = (value << 8) + ord(c)
return value
# def short (value):
# return (ord(value[0]) << 8) + ord(value[1])
# Interface ..................

class IPv4 (object):
    # Address-family marker mixed into IPv4-capable components.
    afi = AFI.ipv4

class IPv6 (object):
    # Address-family marker mixed into IPv6-capable components.
    afi = AFI.ipv6

class IPrefix (object):
    # Marker base for prefix-style components (source/destination prefix).
    pass
# Prefix components

class IPrefix4 (IPrefix,IComponent,IPv4):
    """IPv4 prefix component (flowspec source/destination prefix)."""
    # Must be defined in subclasses
    CODE = -1
    NAME = ''
    # not used, just present for simplifying the nlri generation
    operations = 0x0

    def __init__ (self, raw, netmask):
        self.nlri = CIDR(raw,netmask)

    def pack (self):
        # Python 2 byte string: one type byte followed by the packed CIDR.
        raw = self.nlri.pack()
        # ID is defined in subclasses
        return "%s%s" % (chr(self.ID),raw)  # pylint: disable=E1101

    def __str__ (self):
        return str(self.nlri)
class IPrefix6 (IPrefix,IComponent,IPv6):
    """IPv6 prefix component, carrying an extra offset byte on the wire."""
    # Must be defined in subclasses
    CODE = -1
    NAME = ''
    # not used, just present for simplifying the nlri generation
    operations = 0x0

    def __init__ (self, raw, netmask, offset):
        self.nlri = CIDR(raw,netmask)
        self.offset = offset

    def pack (self):
        # Python 2 byte string: type byte, mask, offset, then the raw address.
        raw = self.nlri.packed_ip()
        # ID is defined in subclasses
        return "%s%s%s%s" % (chr(self.ID),chr(self.nlri.mask),chr(self.offset),raw)  # pylint: disable=E1101

    def __str__ (self):
        return "%s/%s" % (self.nlri,self.offset)
class IOperation (IComponent):
    # need to implement encode which encodes the value of the operator

    def __init__ (self, operations, value):
        self.operations = operations
        self.value = value
        self.first = None  # handled by pack/str

    def pack (self):
        # operator byte (with the length bits filled in) followed by the
        # encoded value
        l,v = self.encode(self.value)
        op = self.operations | _len_to_bit(l)
        return "%s%s" % (chr(op),v)

    def encode (self, value):
        raise NotImplementedError('this method must be implemented by subclasses')

    def decode (self, value):
        raise NotImplementedError('this method must be implemented by subclasses')
# class IOperationIPv4 (IOperation):
# 	def encode (self, value):
# 		return 4, socket.pton(socket.AF_INET,value)

class IOperationByte (IOperation):
    # Operation whose value is always encoded on a single byte.
    def encode (self, value):
        return 1,chr(value)

    def decode (self, bgp):
        # returns (value, remaining-bytes)
        return ord(bgp[0]),bgp[1:]
class IOperationByteShort (IOperation):
    # Operation encoded on one byte when the value fits, two bytes otherwise.
    def encode (self, value):
        if value < (1 << 8):
            return 1,chr(value)
        return 2,pack('!H',value)

    # NOTE(review): decode always consumes two bytes even though encode may
    # emit one; the unpack path appears to use klass.decoder instead — confirm
    # this method is not used for single-byte values.
    def decode (self, bgp):
        return unpack('!H',bgp[:2])[0],bgp[2:]
# String representation for Numeric and Binary Tests

class NumericString (object):
    # Mixin rendering a numeric test ("<", ">=", "&=", ...) as text;
    # subclasses provide operations and value.
    operations = None
    value = None
    _string = {
        NumericOperator.LT: '<',
        NumericOperator.GT: '>',
        NumericOperator.EQ: '=',
        NumericOperator.LT | NumericOperator.EQ: '<=',
        NumericOperator.GT | NumericOperator.EQ: '>=',

        NumericOperator.AND | NumericOperator.LT: '&<',
        NumericOperator.AND | NumericOperator.GT: '&>',
        NumericOperator.AND | NumericOperator.EQ: '&=',
        NumericOperator.AND | NumericOperator.LT | NumericOperator.EQ: '&<=',
        NumericOperator.AND | NumericOperator.GT | NumericOperator.EQ: '&>=',
    }

    def __str__ (self):
        # mask off the EOL bit before looking up the operator symbol
        return "%s%s" % (self._string[self.operations & (CommonOperator.EOL ^ 0xFF)], self.value)
class BinaryString (object):
    # Mixin rendering a binary (bitmask) test ("!", "=", "&!", "&=") as text;
    # subclasses provide operations and value.
    operations = None
    value = None
    _string = {
        BinaryOperator.NOT: '!',
        BinaryOperator.MATCH: '=',
        BinaryOperator.AND | BinaryOperator.NOT: '&!',
        BinaryOperator.AND | BinaryOperator.MATCH: '&=',
    }

    def __str__ (self):
        # mask off the EOL bit before looking up the operator symbol
        return "%s%s" % (self._string[self.operations & (CommonOperator.EOL ^ 0xFF)], self.value)
# Components ..............................

def converter (function, klass=None):
    """Build a value parser: try klass(value) first, falling back to function.

    With no klass, the parser is simply function itself applied per call.
    """
    def _convert (value):
        if klass is not None:
            try:
                return klass(value)
            except ValueError:
                pass
        return function(value)
    return _convert
def decoder (function, klass=int):
    """Build a wire decoder: apply function, then wrap the result in klass."""
    def _decode (value):
        return klass(function(value))
    return _decode
def PacketLength (data):
    """Validate a packet-length value parsed from configuration.

    The packet-length component is a 16-bit quantity, so the valid range is
    0..65535. Raises ValueError otherwise.
    """
    # fixed duplicated "for for" in the original message
    _str_bad_length = "cloudflare already found that invalid max-packet length for you .."
    number = int(data)
    # negative lengths are just as unencodable as oversized ones (the
    # original check only rejected values above 0xFFFF)
    if number < 0 or number > 0xFFFF:
        raise ValueError(_str_bad_length)
    return number
def PortValue (data):
    """Validate a TCP/UDP port number (0..65535); raises ValueError."""
    _str_bad_port = "you tried to set an invalid port number .."
    number = int(data)
    if not 0 <= number <= 0xFFFF:
        raise ValueError(_str_bad_port)
    return number
def DSCPValue (data):
    """Validate a DSCP value; raises ValueError when out of range.

    The DSCP field is 6 bits wide (RFC 2474), so valid values are 0..63.
    The previous upper bound (0xFFFF) accepted values that cannot be
    encoded in the IP header.
    """
    _str_bad_dscp = "you tried to filter a flow using an invalid dscp for a component .."
    number = int(data)
    if number < 0 or number > 0x3F:
        raise ValueError(_str_bad_dscp)
    return number
def ClassValue (data):
    """Validate an IPv6 Traffic Class value; raises ValueError out of range.

    The Traffic Class field is 8 bits wide (RFC 2460), so valid values are
    0..255. The previous upper bound (0xFFFF) accepted unencodable values.
    """
    _str_bad_class = "you tried to filter a flow using an invalid traffic class for a component .."
    number = int(data)
    if number < 0 or number > 0xFF:
        raise ValueError(_str_bad_class)
    return number
def LabelValue (data):
    # Validate an IPv6 flow label; the field is 20 bits wide (RFC 2460),
    # hence the 0xFFFFF bound. (The original comment claimed "5 bytes".)
    _str_bad_label = "you tried to filter a flow using an invalid traffic label for a component .."
    number = int(data)
    if number < 0 or number > 0xFFFFF:  # 20 bits
        raise ValueError(_str_bad_label)
    return number
# Protocol Shared

class FlowDestination (object):
    # Component ID/name shared by the IPv4 and IPv6 destination prefixes.
    ID = 0x01
    NAME = 'destination'

class FlowSource (object):
    # Component ID/name shared by the IPv4 and IPv6 source prefixes.
    ID = 0x02
    NAME = 'source'
# Prefix
class Flow4Destination (IPrefix4,FlowDestination):
    NAME = 'destination-ipv4'

# Prefix
class Flow4Source (IPrefix4,FlowSource):
    NAME = 'source-ipv4'

# Prefix
class Flow6Destination (IPrefix6,FlowDestination):
    # BUG FIX: was 'destination-ipv4' (copy/paste from the v4 class);
    # this is the IPv6 destination prefix component.
    NAME = 'destination-ipv6'

# Prefix
class Flow6Source (IPrefix6,FlowSource):
    NAME = 'source-ipv6'
class FlowIPProtocol (IOperationByte,NumericString,IPv4):
    """IPv4 flowspec component: IP protocol (type 3)."""
    ID = 0x03
    NAME = 'protocol'
    converter = staticmethod(converter(Protocol.named,Protocol))
    decoder = staticmethod(decoder(ord,Protocol))

class FlowNextHeader (IOperationByte,NumericString,IPv6):
    """IPv6 flowspec component: Next Header (same code point as protocol)."""
    ID = 0x03
    NAME = 'next-header'
    converter = staticmethod(converter(Protocol.named,Protocol))
    decoder = staticmethod(decoder(ord,Protocol))

class FlowAnyPort (IOperationByteShort,NumericString,IPv4,IPv6):
    """Flowspec component: source-or-destination port (type 4)."""
    ID = 0x04
    NAME = 'port'
    converter = staticmethod(converter(PortValue))
    decoder = staticmethod(_number)
class FlowDestinationPort (IOperationByteShort,NumericString,IPv4,IPv6):
    """Flowspec component: destination port (type 5)."""
    ID = 0x05
    NAME = 'destination-port'
    converter = staticmethod(converter(PortValue))
    decoder = staticmethod(_number)

class FlowSourcePort (IOperationByteShort,NumericString,IPv4,IPv6):
    """Flowspec component: source port (type 6)."""
    ID = 0x06
    NAME = 'source-port'
    converter = staticmethod(converter(PortValue))
    decoder = staticmethod(_number)

class FlowICMPType (IOperationByte,BinaryString,IPv4,IPv6):
    """Flowspec component: ICMP type (type 7)."""
    ID = 0x07
    NAME = 'icmp-type'
    converter = staticmethod(converter(ICMPType.named))
    decoder = staticmethod(decoder(_number,ICMPType))

class FlowICMPCode (IOperationByte,BinaryString,IPv4,IPv6):
    """Flowspec component: ICMP code (type 8)."""
    ID = 0x08
    NAME = 'icmp-code'
    converter = staticmethod(converter(ICMPCode.named))
    decoder = staticmethod(decoder(_number,ICMPCode))

class FlowTCPFlag (IOperationByte,BinaryString,IPv4,IPv6):
    """Flowspec component: TCP flags bitmask (type 9)."""
    ID = 0x09
    NAME = 'tcp-flags'
    converter = staticmethod(converter(TCPFlag.named))
    decoder = staticmethod(decoder(ord,TCPFlag))
class FlowPacketLength (IOperationByteShort,NumericString,IPv4,IPv6):
    """Flowspec component: total packet length (type 10)."""
    ID = 0x0A
    NAME = 'packet-length'
    converter = staticmethod(converter(PacketLength))
    decoder = staticmethod(_number)

# RFC2474
class FlowDSCP (IOperationByteShort,NumericString,IPv4):
    """IPv4 flowspec component: DSCP (type 11, RFC 2474)."""
    ID = 0x0B
    NAME = 'dscp'
    converter = staticmethod(converter(DSCPValue))
    decoder = staticmethod(_number)

# RFC2460
class FlowTrafficClass (IOperationByte,NumericString,IPv6):
    """IPv6 flowspec component: Traffic Class (same code point as DSCP)."""
    ID = 0x0B
    NAME = 'traffic-class'
    converter = staticmethod(converter(ClassValue))
    decoder = staticmethod(_number)

# BinaryOperator
class FlowFragment (IOperationByteShort,NumericString,IPv4):
    """IPv4 flowspec component: fragment bits (type 12).

    NOTE(review): the comment above says BinaryOperator while the class
    mixes in NumericString; RFC 5575 matches fragment bits as a bitmask —
    verify whether BinaryString was intended here.
    """
    ID = 0x0C
    NAME = 'fragment'
    converter = staticmethod(converter(Fragment.named))
    decoder = staticmethod(decoder(ord,Fragment))

# draft-raszuk-idr-flow-spec-v6-01
class FlowFlowLabel (IOperationByteShort,NumericString,IPv6):
    """IPv6 flowspec component: flow label (type 13, flowspec-v6 draft)."""
    ID = 0x0D
    NAME = 'flow-label'
    converter = staticmethod(converter(LabelValue))
    decoder = staticmethod(_number)
# ..........................................................

# Build, for each address family, the lookup tables used by Flow.unpack:
#   decode[afi][ID]  -> 'prefix' | 'binary' | 'numeric' (how to parse)
#   factory[afi][ID] -> the component class itself
decode = {AFI.ipv4: {}, AFI.ipv6: {}}
factory = {AFI.ipv4: {}, AFI.ipv6: {}}

for content in dir():
    kls = globals().get(content,None)
    if not isinstance(kls,type(IComponent)):
        continue
    if not issubclass(kls,IComponent):
        continue
    # NOTE(review): classes mixing in both IPv4 and IPv6 (e.g. the port
    # components) match the first branch only and are registered under
    # ipv4 alone — verify that ipv6 parsing of those components is handled.
    if issubclass(kls,IPv4):
        _afi = AFI.ipv4
    elif issubclass(kls,IPv6):
        _afi = AFI.ipv6
    else:
        continue
    _ID = getattr(kls,'ID',None)
    if not _ID:
        continue
    factory[_afi][_ID] = kls
    # getattr without a default doubles as a sanity check: every registered
    # component must define NAME
    name = getattr(kls,'NAME')
    if issubclass(kls, IOperation):
        if issubclass(kls, BinaryString):
            decode[_afi][_ID] = 'binary'
        elif issubclass(kls, NumericString):
            decode[_afi][_ID] = 'numeric'
        else:
            raise RuntimeError('invalid class defined (string)')
    elif issubclass(kls, IPrefix):
        decode[_afi][_ID] = 'prefix'
    else:
        # fixed typo in the message: was "unvalid"
        raise RuntimeError('invalid class defined (type)')
# ..........................................................
def _unique ():
value = 0
while True:
yield value
value += 1
unique = _unique()
@NLRI.register(AFI.ipv4,SAFI.flow_ip)
@NLRI.register(AFI.ipv6,SAFI.flow_ip)
@NLRI.register(AFI.ipv4,SAFI.flow_vpn)
@NLRI.register(AFI.ipv6,SAFI.flow_vpn)
class Flow (NLRI):
    """A flowspec NLRI: a set of traffic-match components plus an action.

    self.rules maps component ID -> list of rule objects; pack() serialises
    them in the strictly increasing component-ID order the RFC requires.
    All wire handling below operates on Python 2 byte strings (str).
    """

    def __init__ (self, afi=AFI.ipv4,safi=SAFI.flow_ip,nexthop=None,rd=None):
        NLRI.__init__(self,afi,safi)
        self.rules = {}
        self.action = OUT.ANNOUNCE
        self.nexthop = IP.unpack(nexthop) if nexthop else NoNextHop
        self.rd = rd
        # Python 2 generator protocol; only used to build a unique json key.
        self.unique = unique.next()

    def __eq__ (self, other):
        return \
            self.rules == other.rules and \
            self.action == other.action and \
            self.nexthop == other.nexthop and \
            self.rd == other.rd

    def __ne__ (self, other):
        return not self.__eq__(other)

    # Flowspec NLRI have no natural total order; refuse all comparisons.
    def __lt__ (self, other):
        raise RuntimeError('comparing Flow for ordering does not make sense')

    def __le__ (self, other):
        raise RuntimeError('comparing Flow for ordering does not make sense')

    def __gt__ (self, other):
        raise RuntimeError('comparing Flow for ordering does not make sense')

    def __ge__ (self, other):
        raise RuntimeError('comparing Flow for ordering does not make sense')

    def __len__ (self):
        # length of the wire encoding, header included
        return len(self.pack())

    def add (self, rule):
        """Add a rule; return False when it conflicts with existing rules.

        Only one source and one destination prefix are allowed, and both
        must belong to the same address family.
        """
        ID = rule.ID
        if ID in (FlowDestination.ID,FlowSource.ID):
            if ID in self.rules:
                return False
            if ID == FlowDestination.ID:
                pair = self.rules.get(FlowSource.ID,[])
            else:
                pair = self.rules.get(FlowDestination.ID,[])
            if pair:
                if rule.afi != pair[0].afi:
                    return False
        self.rules.setdefault(ID,[]).append(rule)
        return True

    # The API requires addpath, but it is irrelevant here.
    def pack (self, addpath=None):
        """Serialise this flow to its wire format (Python 2 byte string)."""
        ordered_rules = []
        # the order is a RFC requirement
        for ID in sorted(self.rules.keys()):
            rules = self.rules[ID]
            # for each component get all the operation to do
            # the format use does not prevent two opposing rules meaning that no packet can ever match
            for rule in rules:
                rule.operations &= (CommonOperator.EOL ^ 0xFF)
            rules[-1].operations |= CommonOperator.EOL
            # and add it to the last rule
            if ID not in (FlowDestination.ID,FlowSource.ID):
                ordered_rules.append(chr(ID))
            ordered_rules.append(''.join(rule.pack() for rule in rules))

        components = ''.join(ordered_rules)

        if self.safi == SAFI.flow_vpn:
            components = self.rd.pack() + components

        l = len(components)
        if l < 0xF0:
            # single length byte
            data = "%s%s" % (chr(l),components)
        elif l < 0x0FFF:
            # two-byte length with the top nibble forced to 0xF
            data = "%s%s" % (pack('!H',l | 0xF000),components)
        else:
            raise Notify(3,0,"rule too big for NLRI - how to handle this - does this work ?")
            # data = "%s" % chr(0)

        return data

    def extensive (self):
        """Return a human-readable single-line rendering of the flow."""
        string = []
        for index in sorted(self.rules):
            rules = self.rules[index]
            s = []
            for idx,rule in enumerate(rules):
                # only add ' ' after the first element
                if idx and not rule.operations & NumericOperator.AND:
                    s.append(' ')
                s.append(rule)
            string.append(' %s %s' % (rules[0].NAME,''.join(str(_) for _ in s)))
        nexthop = ' next-hop %s' % self.nexthop if self.nexthop is not NoNextHop else ''
        rd = str(self.rd) if self.rd else ''
        return 'flow' + rd + ''.join(string) + nexthop

    def __str__ (self):
        return self.extensive()

    def _json (self):
        # JSON object body, without the "flow-N" key wrapper added by json()
        string = []
        for index in sorted(self.rules):
            rules = self.rules[index]
            s = []
            for idx,rule in enumerate(rules):
                # only add ', ' after the first element
                if idx and not rule.operations & NumericOperator.AND:
                    s.append(', ')
                s.append('"%s"' % rule)
            string.append(' "%s": [ %s ]' % (rules[0].NAME,''.join(str(_) for _ in s)))
        nexthop = ', "next-hop": "%s"' % self.nexthop if self.nexthop is not NoNextHop else ''
        rd = ', %s' % self.rd.json() if self.rd else ''
        compatibility = ', "string": "%s"' % self.extensive()
        return '{' + rd + ','.join(string) + nexthop + compatibility + ' }'

    def json (self):
        # this is a stop gap so flow route parsing does not crash exabgp
        # delete unique when this is fixed
        return '"flow-%d": %s' % (self.unique,self._json())

    def index (self):
        # the packed form doubles as the RIB index key
        return self.pack()

    @classmethod
    def unpack (cls, afi, safi, bgp, has_multiple_path, nexthop, action):
        """Parse one flowspec NLRI from the wire.

        Returns (number-of-bytes-consumed, Flow instance); raises Notify on
        malformed input.
        """
        total = len(bgp)
        # one-byte length, or two bytes when the top nibble is 0xF
        length,bgp = ord(bgp[0]),bgp[1:]

        if length & 0xF0 == 0xF0:  # bigger than 240
            extra,bgp = ord(bgp[0]),bgp[1:]
            length = ((length & 0x0F) << 16) + extra

        if length > len(bgp):
            raise Notify(3,10,'invalid length at the start of the the flow')

        bgp = bgp[:length]
        nlri = Flow(afi,safi,nexthop)
        nlri.action = action

        if safi == SAFI.flow_vpn:
            # VPN flows are prefixed with an 8-byte route distinguisher
            nlri.rd = RouteDistinguisher(bgp[:8])
            bgp = bgp[8:]

        seen = []

        while bgp:
            what,bgp = ord(bgp[0]),bgp[1:]

            if what not in decode.get(afi,{}):
                raise Notify(3,10,'unknown flowspec component received for address family %d' % what)

            seen.append(what)
            # components must appear in strictly increasing ID order
            if sorted(seen) != seen:
                raise Notify(3,10,'components are not sent in the right order %s' % seen)

            decoded = decode[afi][what]
            klass = factory[afi][what]

            if decoded == 'prefix':
                if afi == AFI.ipv4:
                    _,rd,_,mask,size,prefix,left = NLRI._nlri(afi,safi,bgp,action,False)
                    adding = klass(prefix,mask)
                    if not nlri.add(adding):
                        raise Notify(3,10,'components are incompatible (two sources, two destinations, mix ipv4/ipv6) %s' % seen)
                    # logger.parser(LazyFormat("added flow %s (%s) payload " % (klass.NAME,adding),bgp[:-len(left)]))
                    bgp = left
                else:
                    # IPv6 prefixes carry an extra offset byte after the mask;
                    # swap it out of the way before the generic prefix parse
                    byte,bgp = bgp[1],bgp[0]+bgp[2:]
                    offset = ord(byte)
                    _,rd,_,mask,size,prefix,left = NLRI._nlri(afi,safi,bgp,action,False)
                    adding = klass(prefix,mask,offset)
                    if not nlri.add(adding):
                        raise Notify(3,10,'components are incompatible (two sources, two destinations, mix ipv4/ipv6) %s' % seen)
                    # logger.parser(LazyFormat("added flow %s (%s) payload " % (klass.NAME,adding),bgp[:-len(left)]))
                    bgp = left
            else:
                # operator/value pairs repeat until the EOL bit is set
                end = False
                while not end:
                    byte,bgp = ord(bgp[0]),bgp[1:]
                    end = CommonOperator.eol(byte)
                    operator = CommonOperator.operator(byte)
                    length = CommonOperator.length(byte)
                    value,bgp = bgp[:length],bgp[length:]
                    adding = klass.decoder(value)
                    nlri.add(klass(operator,adding))
                    # logger.parser(LazyFormat("added flow %s (%s) operator %d len %d payload " % (klass.NAME,adding,byte,length),value))

        return total-len(bgp),nlri
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import random
from quantum.db import api as db_api
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import exceptions as nexc
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec.db import models as nmodels # noqa
from quantum.tests import base
class NECPluginV2DBTestBase(base.BaseTestCase):
    """Base class providing fixtures for NECPluginV2 DB unit tests."""

    def setUp(self):
        """Initialize the NEC plugin DB and register its cleanup."""
        super(NECPluginV2DBTestBase, self).setUp()
        ndb.initialize()
        self.session = db_api.get_session()
        self.addCleanup(ndb.clear_db)

    def get_ofc_item_random_params(self):
        """Return random (ofc_id, quantum_id, nonexistent_id) UUIDs."""
        ofc_id = uuidutils.generate_uuid()
        quantum_id = uuidutils.generate_uuid()
        nonexistent = uuidutils.generate_uuid()
        return ofc_id, quantum_id, nonexistent

    def get_portinfo_random_params(self):
        """Return random (port_id, datapath_id, port_no, vlan_id, mac, none)."""
        port_id = uuidutils.generate_uuid()
        datapath_id = hex(random.randint(0, 0xffffffff))
        port_no = random.randint(1, 100)
        vlan_id = random.randint(0, 4095)
        mac = ':'.join("%02x" % random.randint(0, 0xff) for _ in range(6))
        nonexistent = uuidutils.generate_uuid()
        return port_id, datapath_id, port_no, vlan_id, mac, nonexistent
class NECPluginV2DBTest(NECPluginV2DBTestBase):
    """CRUD tests for the ofc_item and portinfo DB helper functions."""

    def testa_add_ofc_item(self):
        """test add OFC item."""
        o, q, n = self.get_ofc_item_random_params()
        tenant = ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        self.assertEqual(tenant.ofc_id, o)
        self.assertEqual(tenant.quantum_id, q)

        # adding the same mapping twice must raise
        self.assertRaises(nexc.NECDBException,
                          ndb.add_ofc_item,
                          self.session, 'ofc_tenant', q, o)

    def testb_get_ofc_item(self):
        """test get OFC item."""
        o, q, n = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        tenant = ndb.get_ofc_item(self.session, 'ofc_tenant', q)
        self.assertEqual(tenant.ofc_id, o)
        self.assertEqual(tenant.quantum_id, q)

        tenant_none = ndb.get_ofc_item(self.session, 'ofc_tenant', n)
        self.assertIsNone(tenant_none)

    def testb_get_ofc_id(self):
        """test get OFC id."""
        o, q, n = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        tenant_id = ndb.get_ofc_id(self.session, 'ofc_tenant', q)
        self.assertEqual(tenant_id, o)

        tenant_none = ndb.get_ofc_item(self.session, 'ofc_tenant', n)
        self.assertIsNone(tenant_none)

    def testb_exists_ofc_item(self):
        """test exists OFC item."""
        o, q, n = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        ret = ndb.exists_ofc_item(self.session, 'ofc_tenant', q)
        self.assertTrue(ret)

        tenant_none = ndb.get_ofc_item(self.session, 'ofc_tenant', n)
        self.assertIsNone(tenant_none)

    def testc_find_ofc_item(self):
        """test find OFC item."""
        o, q, n = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        tenant = ndb.find_ofc_item(self.session, 'ofc_tenant', o)
        self.assertEqual(tenant.ofc_id, o)
        self.assertEqual(tenant.quantum_id, q)

        tenant_none = ndb.find_ofc_item(self.session, 'ofc_tenant', n)
        self.assertIsNone(tenant_none)

    def testc_del_ofc_item(self):
        """test delete OFC item."""
        o, q, n = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
        ndb.del_ofc_item(self.session, 'ofc_tenant', q)

        tenant_none = ndb.get_ofc_item(self.session,
                                       'ofc_tenant', q)
        self.assertIsNone(tenant_none)
        tenant_none = ndb.find_ofc_item(self.session,
                                        'ofc_tenant', o)
        self.assertIsNone(tenant_none)

    def testd_add_portinfo(self):
        """test add portinfo."""
        i, d, p, v, m, n = self.get_portinfo_random_params()
        portinfo = ndb.add_portinfo(self.session, i, d, p, v, m)
        self.assertEqual(portinfo.id, i)
        self.assertEqual(portinfo.datapath_id, d)
        self.assertEqual(portinfo.port_no, p)
        self.assertEqual(portinfo.vlan_id, v)
        self.assertEqual(portinfo.mac, m)

        # consistent with testa_add_ofc_item: use assertRaises instead of
        # the manual try/except flag the original used here
        self.assertRaises(nexc.NECDBException,
                          ndb.add_portinfo,
                          self.session, i, d, p, v, m)

    def teste_get_portinfo(self):
        """test get portinfo."""
        i, d, p, v, m, n = self.get_portinfo_random_params()
        ndb.add_portinfo(self.session, i, d, p, v, m)
        portinfo = ndb.get_portinfo(self.session, i)
        self.assertEqual(portinfo.id, i)
        self.assertEqual(portinfo.datapath_id, d)
        self.assertEqual(portinfo.port_no, p)
        self.assertEqual(portinfo.vlan_id, v)
        self.assertEqual(portinfo.mac, m)

        portinfo_none = ndb.get_portinfo(self.session, n)
        self.assertIsNone(portinfo_none)

    def testf_del_portinfo(self):
        """test delete portinfo."""
        i, d, p, v, m, n = self.get_portinfo_random_params()
        ndb.add_portinfo(self.session, i, d, p, v, m)
        portinfo = ndb.get_portinfo(self.session, i)
        self.assertEqual(portinfo.id, i)
        ndb.del_portinfo(self.session, i)
        portinfo_none = ndb.get_portinfo(self.session, i)
        self.assertIsNone(portinfo_none)
class NECPluginV2DBOldMappingTest(NECPluginV2DBTestBase):
    """Test related to old ID mapping."""

    # Mapping-table mode flags: True selects the legacy ("old") mapping
    # table, False selects the current ("new") one.
    OLD = True
    NEW = False

    def test_add_ofc_item_new(self):
        """An item added in new mode is visible only through new mode."""
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        added = ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 ofc_id, self.NEW)
        self.assertEqual(added.ofc_id, ofc_id)
        self.assertEqual(added.quantum_id, quantum_id)
        found = ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 self.NEW)
        self.assertEqual(found.ofc_id, ofc_id)
        self.assertEqual(found.quantum_id, quantum_id)
        self.assertEqual(
            None,
            ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                             self.OLD))

    def test_add_ofc_item_old(self):
        """An item added in old mode is visible only through old mode."""
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        added = ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 ofc_id, self.OLD)
        # Note the legacy table exposes the OFC id as ".id", not ".ofc_id".
        self.assertEqual(added.id, ofc_id)
        self.assertEqual(added.quantum_id, quantum_id)
        self.assertEqual(
            None,
            ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                             self.NEW))
        found = ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 self.OLD)
        self.assertEqual(found.id, ofc_id)
        self.assertEqual(found.quantum_id, quantum_id)

    def _check_new_old_item(self, method, q_id, exp_new, exp_old):
        # Helper: verify what *method* returns in new mode and in old mode.
        self.assertEqual(
            method(self.session, 'ofc_tenant', q_id, self.NEW), exp_new)
        self.assertEqual(
            method(self.session, 'ofc_tenant', q_id, self.OLD), exp_old)

    def test_get_ofc_id_new(self):
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id, ofc_id,
                         self.NEW)
        self._check_new_old_item(ndb.get_ofc_id, quantum_id, ofc_id, None)
        # The "lookup both" variant finds the item regardless of mode.
        self.assertEqual(
            ndb.get_ofc_id_lookup_both(self.session, 'ofc_tenant',
                                       quantum_id),
            ofc_id)

    def test_get_ofc_id_old(self):
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id, ofc_id,
                         self.OLD)
        self._check_new_old_item(ndb.get_ofc_id, quantum_id, None, ofc_id)
        self.assertEqual(
            ndb.get_ofc_id_lookup_both(self.session, 'ofc_tenant',
                                       quantum_id),
            ofc_id)

    def _check_exists_ofc_item(self, mode, exp_new, exp_old):
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        # Nothing registered yet: both modes report absence.
        self._check_new_old_item(ndb.exists_ofc_item, quantum_id,
                                 False, False)
        self.assertFalse(ndb.exists_ofc_item_lookup_both(
            self.session, 'ofc_tenant', quantum_id))
        # After adding, only the selected mode reports the item.
        ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id, ofc_id,
                         mode)
        self._check_new_old_item(ndb.exists_ofc_item, quantum_id,
                                 exp_new, exp_old)
        self.assertTrue(ndb.exists_ofc_item_lookup_both(
            self.session, 'ofc_tenant', quantum_id))
        # After deleting, both modes report absence again.
        ndb.del_ofc_item(self.session, 'ofc_tenant', quantum_id, mode)
        self._check_new_old_item(ndb.exists_ofc_item, quantum_id,
                                 False, False)
        self.assertFalse(ndb.exists_ofc_item_lookup_both(
            self.session, 'ofc_tenant', quantum_id))

    def test_exists_ofc_item_new(self):
        self._check_exists_ofc_item(self.NEW, True, False)

    def test_exists_ofc_item_old(self):
        self._check_exists_ofc_item(self.OLD, False, True)

    def _check_delete_ofc_item(self, mode, detect_mode=False):
        ofc_id, quantum_id, _unused = self.get_ofc_item_random_params()
        added = ndb.add_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 ofc_id, mode)
        # The OFC id attribute name differs between the two table layouts.
        self.assertEqual(
            added.ofc_id if mode == self.NEW else added.id, ofc_id)
        self.assertEqual(added.quantum_id, quantum_id)
        found = ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 mode)
        self.assertEqual(
            found.ofc_id if mode == self.NEW else found.id, ofc_id)
        self.assertEqual(found.quantum_id, quantum_id)
        if detect_mode:
            ndb.del_ofc_item_lookup_both(self.session, 'ofc_tenant',
                                         quantum_id)
        else:
            ndb.del_ofc_item(self.session, 'ofc_tenant', quantum_id, mode)
        # Regardless of how it was deleted, neither mode can see it now.
        for lookup_mode in (self.NEW, self.OLD):
            self.assertEqual(
                None,
                ndb.get_ofc_item(self.session, 'ofc_tenant', quantum_id,
                                 lookup_mode))

    def test_delete_ofc_item_new(self):
        self._check_delete_ofc_item(self.NEW)

    def test_delete_ofc_item_old(self):
        self._check_delete_ofc_item(self.OLD)

    def test_delete_ofc_item_with_auto_detect_new(self):
        self._check_delete_ofc_item(self.NEW, detect_mode=True)

    def test_delete_ofc_item_old_auto_detect_new(self):
        self._check_delete_ofc_item(self.OLD, detect_mode=True)
| |
from __future__ import absolute_import
from typing import Any, Callable, Iterable, Tuple, Text
from collections import defaultdict
import datetime
import six
from django.db.models import Q, QuerySet
from django.template import loader
from django.conf import settings
from zerver.lib.notifications import build_message_list, hashchange_encode, \
send_future_email, one_click_unsubscribe_link
from zerver.models import UserProfile, UserMessage, Recipient, Stream, \
Subscription, get_active_streams
from zerver.context_processors import common_context
import logging
# Digest emails get their own dedicated log file in addition to whatever
# the default logging configuration provides.
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
# Mirror every record from this module's logger to DIGEST_LOG_PATH.
file_handler = logging.FileHandler(settings.DIGEST_LOG_PATH)
file_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
# Digests accumulate 4 types of interesting traffic for a user:
# 1. Missed PMs
# 2. New streams
# 3. New users
# 4. Interesting stream traffic, as determined by the longest and most
#    diversely commented-upon topics.
def gather_hot_conversations(user_profile, stream_messages):
    # type: (UserProfile, QuerySet) -> List[Dict[str, Any]]
    """Pick up to 4 "hot" stream conversations for a digest email.

    Gather stream conversations of 2 types:
      1. long conversations
      2. conversations where many different people participated

    Returns a list of dictionaries containing the templating
    information for each hot conversation.
    """
    conversation_length = defaultdict(int)  # type: Dict[Tuple[int, Text], int]
    conversation_diversity = defaultdict(set)  # type: Dict[Tuple[int, Text], Set[Text]]
    for user_message in stream_messages:
        if not user_message.message.sent_by_human():
            # Don't include automated messages in the count.
            continue
        # A conversation is identified by (stream id, topic).
        key = (user_message.message.recipient.type_id,
               user_message.message.subject)
        conversation_diversity[key].add(
            user_message.message.sender.full_name)
        conversation_length[key] += 1
    diversity_list = list(conversation_diversity.items())
    diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)
    length_list = list(conversation_length.items())
    length_list.sort(key=lambda entry: entry[1], reverse=True)
    # Get up to the 4 best conversations from the diversity list
    # and length list, filtering out overlapping conversations.
    hot_conversations = [elt[0] for elt in diversity_list[:2]]
    for candidate, _ in length_list:
        if candidate not in hot_conversations:
            hot_conversations.append(candidate)
        if len(hot_conversations) >= 4:
            break
    # There was so much overlap between the diversity and length lists that we
    # still have < 4 conversations. Try to use remaining diversity items to pad
    # out the hot conversations.  Skip any conversation already chosen --
    # the old unconditional extend could list the same conversation twice.
    num_convos = len(hot_conversations)
    if num_convos < 4:
        hot_conversations.extend(
            [elt[0] for elt in diversity_list[num_convos:4]
             if elt[0] not in hot_conversations])
    hot_conversation_render_payloads = []
    for h in hot_conversations:
        stream_id, subject = h
        users = list(conversation_diversity[h])
        count = conversation_length[h]
        # We'll display up to 2 messages from the conversation.
        first_few_messages = [user_message.message for user_message in
                              stream_messages.filter(
                                  message__recipient__type_id=stream_id,
                                  message__subject=subject)[:2]]
        teaser_data = {"participants": users,
                       "count": count - len(first_few_messages),
                       "first_few_messages": build_message_list(
                           user_profile, first_few_messages)}
        hot_conversation_render_payloads.append(teaser_data)
    return hot_conversation_render_payloads
def gather_new_users(user_profile, threshold):
    # type: (UserProfile, datetime.datetime) -> Tuple[int, List[Text]]
    """Return the count and full names of non-bot realm users who joined
    after *threshold*."""
    if user_profile.realm.is_zephyr_mirror_realm:
        # Zephyr mirror realms don't have meaningful signups to report.
        user_names = []  # type: List[Text]
    else:
        recent_joiners = UserProfile.objects.filter(
            realm=user_profile.realm, date_joined__gt=threshold,
            is_bot=False)
        user_names = [joiner.full_name for joiner in recent_joiners]
    return len(user_names), user_names
def gather_new_streams(user_profile, threshold):
    # type: (UserProfile, datetime.datetime) -> Tuple[int, Dict[str, List[Text]]]
    """Return the count of public streams created after *threshold*, plus
    HTML and plain-text rendered lists of their names."""
    if user_profile.realm.is_zephyr_mirror_realm:
        new_streams = []  # type: List[Stream]
    else:
        new_streams = list(get_active_streams(user_profile.realm).filter(
            invite_only=False, date_created__gt=threshold))
    base_url = u"https://%s/#narrow/stream/" % (settings.EXTERNAL_HOST,)
    # Plain names for the text part, narrow links for the HTML part.
    streams_plain = [stream.name for stream in new_streams]
    streams_html = [
        u"<a href='%s'>%s</a>" % (base_url + hashchange_encode(stream.name),
                                  stream.name)
        for stream in new_streams]
    return len(new_streams), {"html": streams_html, "plain": streams_plain}
def enough_traffic(unread_pms, hot_conversations, new_streams, new_users):
    # type: (Text, Text, int, int) -> bool
    """Decide whether the digest has enough content to be worth sending."""
    # Any unread PM traffic or hot conversation alone justifies the email.
    has_personal_traffic = bool(unread_pms or hot_conversations)
    # Otherwise, require that the realm gained both new streams and new users.
    realm_grew = bool(new_streams and new_users)
    return has_personal_traffic or realm_grew
def send_digest_email(user_profile, html_content, text_content):
    # type: (UserProfile, Text, Text) -> None
    """Queue the rendered digest for immediate delivery to *user_profile*."""
    sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
    recipients = [{'email': user_profile.email, 'name': user_profile.full_name}]
    subject = "While you've been gone - Zulip"
    # Send now, through Mandrill; a zero delay means "as soon as possible".
    send_future_email(recipients, html_content, text_content, subject,
                      delay=datetime.timedelta(0), sender=sender,
                      tags=["digest-emails"])
def handle_digest_email(user_profile_id, cutoff):
    # type: (int, float) -> None
    """Build a digest for one user and send it if there is enough traffic.

    *cutoff* is an epoch timestamp; only messages after it are considered.
    """
    user_profile = UserProfile.objects.get(id=user_profile_id)
    # Convert from epoch seconds to a datetime object.
    cutoff_date = datetime.datetime.utcfromtimestamp(int(cutoff))
    all_messages = UserMessage.objects.filter(
        user_profile=user_profile,
        message__pub_date__gt=cutoff_date).order_by("message__pub_date")
    template_payload = common_context(user_profile)
    # Start building email template data.
    template_payload.update({
        'name': user_profile.full_name,
        'unsubscribe_link': one_click_unsubscribe_link(user_profile, "digest")
    })
    # Gather recent missed PMs, re-using the missed PM email logic.
    # You can't have an unread message that you sent, but when testing
    # this causes confusion so filter your messages out.
    pms = all_messages.filter(
        ~Q(message__recipient__type=Recipient.STREAM) &
        ~Q(message__sender=user_profile))
    # Show up to 4 missed PMs.
    pms_limit = 4
    template_payload['unread_pms'] = build_message_list(
        user_profile, [pm.message for pm in pms[:pms_limit]])
    # Bug fix: this previously used min(), which can never be positive and
    # so always reported 0 (or a negative count).  The number of PMs beyond
    # the displayed limit is max(0, total - limit).
    template_payload['remaining_unread_pms_count'] = max(0, len(pms) - pms_limit)
    home_view_recipients = [sub.recipient for sub in
                            Subscription.objects.filter(
                                user_profile=user_profile,
                                active=True,
                                in_home_view=True)]
    stream_messages = all_messages.filter(
        message__recipient__type=Recipient.STREAM,
        message__recipient__in=home_view_recipients)
    # Gather hot conversations.
    template_payload["hot_conversations"] = gather_hot_conversations(
        user_profile, stream_messages)
    # Gather new streams.
    new_streams_count, new_streams = gather_new_streams(
        user_profile, cutoff_date)
    template_payload["new_streams"] = new_streams
    template_payload["new_streams_count"] = new_streams_count
    # Gather users who signed up recently.
    new_users_count, new_users = gather_new_users(
        user_profile, cutoff_date)
    template_payload["new_users"] = new_users
    text_content = loader.render_to_string(
        'zerver/emails/digest/digest_email.txt', template_payload)
    html_content = loader.render_to_string(
        'zerver/emails/digest/digest_email_html.txt', template_payload)
    # We don't want to send emails containing almost no information.
    if enough_traffic(template_payload["unread_pms"],
                      template_payload["hot_conversations"],
                      new_streams_count, new_users_count):
        logger.info("Sending digest email for %s" % (user_profile.email,))
        send_digest_email(user_profile, html_content, text_content)
| |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2019-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import argparse
import sys
import numpy as np
import rapidtide.io as tide_io
import rapidtide.workflows.parser_funcs as pf
def _get_parser():
"""
Argument parser for happy
"""
parser = argparse.ArgumentParser(
prog="happy",
description="Hypersampling by Analytic Phase Projection - Yay!.",
)
# Required arguments
parser.add_argument(
"fmrifilename",
type=lambda x: pf.is_valid_file(parser, x),
help="The input data file (BOLD fmri file or NIRS text file)",
)
parser.add_argument(
"slicetimename",
type=lambda x: pf.is_valid_file(parser, x),
help=(
"Text file containing the offset time in seconds of each slice relative "
"to the start of the TR, one value per line, OR the BIDS sidecar JSON file."
),
)
parser.add_argument("outputroot", help="The root name for the output files")
# Processing steps
processing_steps = parser.add_argument_group("Processing steps")
processing_steps.add_argument(
"--cardcalconly",
dest="cardcalconly",
action="store_true",
help="Stop after all cardiac regressor calculation steps (before phase projection). ",
default=False,
)
processing_steps.add_argument(
"--skipdlfilter",
dest="dodlfilter",
action="store_false",
help="Disable deep learning cardiac waveform filter. ",
default=True,
)
processing_steps.add_argument(
"--usesuperdangerousworkaround",
dest="mpfix",
action="store_true",
help=(
"Some versions of tensorflow seem to have some weird conflict with MKL which"
"I don't seem to be able to fix. If the dl filter bombs complaining about "
"multiple openmp libraries, try rerunning with the secret and inadvisable "
"'--usesuperdangerousworkaround' flag. Good luck! "
),
default=False,
)
processing_steps.add_argument(
"--model",
dest="modelname",
metavar="MODELNAME",
help=(
"Use model MODELNAME for dl filter (default is model_revised - "
"from the revised NeuroImage paper. "
),
default="model_revised",
)
# Performance
performance_opts = parser.add_argument_group("Performance")
performance_opts.add_argument(
"--mklthreads",
dest="mklthreads",
action="store",
metavar="NTHREADS",
type=lambda x: pf.is_int(parser, x),
help=(
"Use NTHREADS MKL threads to accelerate processing (defaults to 1 - more "
"threads up to the number of cores can accelerate processing a lot, but "
"can really kill you on clusters unless you're very careful. Use at your own risk"
),
default=1,
)
# Preprocessing
preprocessing_opts = parser.add_argument_group("Preprocessing")
preprocessing_opts.add_argument(
"--numskip",
dest="numskip",
action="store",
metavar="SKIP",
type=lambda x: pf.is_int(parser, x),
help="Skip SKIP tr's at the beginning of the fMRI file (default is 0). ",
default=0,
)
preprocessing_opts.add_argument(
"--motskip",
dest="motskip",
action="store",
metavar="SKIP",
type=lambda x: pf.is_int(parser, x),
help="Skip SKIP tr's at the beginning of the motion regressor file (default is 0). ",
default=0,
)
preprocessing_opts.add_argument(
"--motionfile",
dest="motionfilename",
metavar="MOTFILE",
help=(
"Read 6 columns of motion regressors out of MOTFILE file (.par or BIDS .json) "
"(with timepoints rows) and regress them, their derivatives, "
"and delayed derivatives out of the data prior to analysis. "
),
default=None,
)
preprocessing_opts.add_argument(
"--motionhp",
dest="motionhp",
action="store",
metavar="HPFREQ",
type=lambda x: pf.is_float(parser, x),
help="Highpass filter motion regressors to HPFREQ Hz prior to regression. ",
default=None,
)
preprocessing_opts.add_argument(
"--motionlp",
dest="motionlp",
action="store",
metavar="LPFREQ",
type=lambda x: pf.is_float(parser, x),
help="Lowpass filter motion regressors to LPFREQ Hz prior to regression. ",
default=None,
)
preprocessing_opts.add_argument(
"--nomotorthogonalize",
dest="orthogonalize",
action="store_false",
help=(
"Do not orthogonalize motion regressors prior to regressing them out of the " "data. "
),
default=True,
)
preprocessing_opts.add_argument(
"--motpos",
dest="motfilt_pos",
action="store_true",
help=("Include motion position regressors. "),
default=False,
)
preprocessing_opts.add_argument(
"--nomotderiv",
dest="motfilt_deriv",
action="store_false",
help=("Do not use motion derivative regressors. "),
default=True,
)
preprocessing_opts.add_argument(
"--nomotderivdelayed",
dest="motfilt_derivdelayed",
action="store_false",
help=("Do not use motion derivative regressors. "),
default=True,
)
preprocessing_opts.add_argument(
"--discardmotionfiltered",
dest="savemotionglmfilt",
action="store_false",
help=("Do not save data after motion filtering. "),
default=True,
)
# Cardiac estimation tuning
cardiac_est_tuning = parser.add_argument_group("Cardiac estimation tuning")
cardiac_est_tuning.add_argument(
"--estmask",
dest="estmaskname",
action="store",
metavar="MASKNAME",
help=(
"Generation of cardiac waveform from data will be restricted to "
"voxels in MASKNAME and weighted by the mask intensity. If this is "
"selected, happy will only make a single pass through the data (the "
"initial vessel mask generation pass will be skipped)."
),
default=None,
)
cardiac_est_tuning.add_argument(
"--minhr",
dest="minhr",
action="store",
metavar="MINHR",
type=lambda x: pf.is_float(parser, x),
help="Limit lower cardiac frequency search range to MINHR BPM (default is 40). ",
default=40.0,
)
cardiac_est_tuning.add_argument(
"--maxhr",
dest="maxhr",
action="store",
metavar="MAXHR",
type=lambda x: pf.is_float(parser, x),
help="Limit upper cardiac frequency search range to MAXHR BPM (default is 140). ",
default=140.0,
)
cardiac_est_tuning.add_argument(
"--minhrfilt",
dest="minhrfilt",
action="store",
metavar="MINHR",
type=lambda x: pf.is_float(parser, x),
help="Highpass filter cardiac waveform estimate to MINHR BPM (default is 40). ",
default=40.0,
)
cardiac_est_tuning.add_argument(
"--maxhrfilt",
dest="maxhrfilt",
action="store",
metavar="MAXHR",
type=lambda x: pf.is_float(parser, x),
help="Lowpass filter cardiac waveform estimate to MAXHR BPM (default is 1000). ",
default=1000.0,
)
cardiac_est_tuning.add_argument(
"--envcutoff",
dest="envcutoff",
action="store",
metavar="CUTOFF",
type=lambda x: pf.is_float(parser, x),
help="Lowpass filter cardiac normalization envelope to CUTOFF Hz (default is 0.4 Hz). ",
default=0.4,
)
cardiac_est_tuning.add_argument(
"--notchwidth",
dest="notchpct",
action="store",
metavar="WIDTH",
type=lambda x: pf.is_float(parser, x),
help="Set the width of the notch filter, in percent of the notch frequency (default is 1.5). ",
default=1.5,
)
cardiac_est_tuning.add_argument(
"--invertphysiosign",
dest="invertphysiosign",
action="store_true",
help=(
"Invert the waveform extracted from the physiological signal. "
"Use this if there is a contrast agent in the blood. "
),
default=False,
)
# External cardiac waveform options
external_cardiac_opts = parser.add_argument_group("External cardiac waveform options")
external_cardiac_opts.add_argument(
"--cardiacfile",
dest="cardiacfilename",
metavar="FILE[:COL]",
help=(
"Read the cardiac waveform from file FILE. If COL is an integer, "
"and FILE is a text file, use the COL'th column. If FILE is a BIDS "
"format json file, use column named COL. If no file is specified, "
"estimate the cardiac signal from the fMRI data."
),
default=None,
)
cardiac_freq = external_cardiac_opts.add_mutually_exclusive_group()
cardiac_freq.add_argument(
"--cardiacfreq",
dest="inputfreq",
action="store",
metavar="FREQ",
type=lambda x: pf.is_float(parser, x),
help=(
"Cardiac waveform in cardiacfile has sample frequency FREQ "
"(default is 32Hz). NB: --cardiacfreq and --cardiactstep "
"are two ways to specify the same thing. "
),
default=-32.0,
)
cardiac_freq.add_argument(
"--cardiactstep",
dest="inputfreq",
action="store",
metavar="TSTEP",
type=lambda x: pf.invert_float(parser, x),
help=(
"Cardiac waveform in cardiacfile has time step TSTEP "
"(default is 1/32 sec). NB: --cardiacfreq and --cardiactstep "
"are two ways to specify the same thing. "
),
default=-32.0,
)
external_cardiac_opts.add_argument(
"--cardiacstart",
dest="inputstart",
metavar="START",
action="store",
type=float,
help=(
"The time delay in seconds into the cardiac file, corresponding "
"to the first TR of the fMRI file (default is 0.0) "
),
default=None,
)
external_cardiac_opts.add_argument(
"--stdfreq",
dest="stdfreq",
metavar="FREQ",
action="store",
type=float,
help=(
"Frequency to which the cardiac signals are resampled for output. " "Default is 25. "
),
default=25.0,
)
external_cardiac_opts.add_argument(
"--forcehr",
dest="forcedhr",
metavar="BPM",
action="store",
type=lambda x: pf.is_float(parser, x) / 60.0,
help=(
"Force heart rate fundamental detector to be centered at BPM "
"(overrides peak frequencies found from spectrum). Useful"
"if there is structured noise that confuses the peak finder. "
),
default=None,
)
# Output processing
output_proc = parser.add_argument_group("Output processing")
output_proc.add_argument(
"--spatialglm",
dest="dospatialglm",
action="store_true",
help="Generate framewise cardiac signal maps and filter them out of the input data. ",
default=False,
)
output_proc.add_argument(
"--temporalglm",
dest="dotemporalglm",
action="store_true",
help="Generate voxelwise aliased synthetic cardiac regressors and filter them out of the input data. ",
default=False,
)
# Output options
output = parser.add_argument_group("Output options")
output.add_argument(
"--legacyoutput",
dest="bidsoutput",
action="store_false",
help=(
"Use legacy file naming and formats rather than BIDS naming and "
"format conventions for output files."
),
default=True,
)
# Phase projection tuning
phase_proj_tuning = parser.add_argument_group("Phase projection tuning")
phase_proj_tuning.add_argument(
"--outputbins",
dest="destpoints",
metavar="BINS",
action="store",
type=lambda x: pf.is_int(parser, x),
help="Number of output phase bins (default is 32). ",
default=32,
)
phase_proj_tuning.add_argument(
"--gridbins",
dest="congridbins",
metavar="BINS",
action="store",
type=lambda x: pf.is_float(parser, x),
help="Width of the gridding kernel in output phase bins (default is 3.0). ",
default=3.0,
)
phase_proj_tuning.add_argument(
"--gridkernel",
dest="gridkernel",
action="store",
type=str,
choices=["old", "gauss", "kaiser"],
help="Convolution gridding kernel. Default is kaiser",
default="kaiser",
)
phase_proj_tuning.add_argument(
"--projmask",
dest="projmaskname",
metavar="MASKNAME",
help=(
"Phase projection will be restricted to voxels in MASKNAME "
"(overrides normal intensity mask.) "
),
default=None,
)
phase_proj_tuning.add_argument(
"--projectwithraw",
dest="projectwithraw",
action="store_true",
help="Use fMRI derived cardiac waveform as phase source for projection, even if a plethysmogram is supplied.",
default=False,
)
phase_proj_tuning.add_argument(
"--fliparteries",
dest="fliparteries",
action="store_true",
help=(
"Attempt to detect arterial signals and flip over the timecourses after phase projection "
"(since relative arterial blood susceptibility is inverted relative to venous blood)."
),
default=False,
)
phase_proj_tuning.add_argument(
"--arteriesonly",
dest="arteriesonly",
action="store_true",
help="Restrict cardiac waveform estimation to putative arteries only.",
default=False,
)
# Debugging options
debug_opts = parser.add_argument_group("Debugging options (probably not of interest to users)")
debug_opts.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Turn on debugging information.",
default=False,
)
debug_opts.add_argument(
"--aliasedcorrelation",
dest="doaliasedcorrelation",
action="store_true",
help="Attempt to calculate absolute delay using an aliased correlation (experimental).",
default=False,
)
debug_opts.add_argument(
"--noprogressbar",
dest="showprogressbar",
action="store_false",
help="Will disable showing progress bars (helpful if stdout is going to a file). ",
default=True,
)
debug_opts.add_argument(
"--nodetrend",
dest="detrendorder",
action="store",
type=lambda x: pf.is_int(parser, 0),
help="Disable data detrending. ",
default=3,
)
debug_opts.add_argument(
"--noorthog",
dest="orthogonalize",
action="store_false",
help="Disable orthogonalization of motion confound regressors. ",
default=True,
)
debug_opts.add_argument(
"--disablenotch",
dest="disablenotch",
action="store_true",
help="Disable subharmonic notch filter. ",
default=False,
)
debug_opts.add_argument(
"--nomask",
dest="usemaskcardfromfmri",
action="store_false",
help="Disable data masking for calculating cardiac waveform. ",
default=True,
)
debug_opts.add_argument(
"--nocensor",
dest="censorbadpts",
action="store_false",
help="Bad points will not be excluded from analytic phase projection. ",
default=True,
)
debug_opts.add_argument(
"--noappsmooth",
dest="smoothapp",
action="store_false",
help="Disable smoothing app file in the phase direction. ",
default=True,
)
debug_opts.add_argument(
"--nophasefilt",
dest="filtphase",
action="store_false",
help="Disable the phase trend filter (probably not a good idea). ",
default=True,
)
debug_opts.add_argument(
"--nocardiacalign",
dest="aligncardiac",
action="store_false",
help="Disable alignment of pleth signal to fMRI derived cardiac signal. ",
default=True,
)
debug_opts.add_argument(
"--saveinfoastext",
dest="saveinfoasjson",
action="store_false",
help="Save the info file in text format rather than json. ",
default=True,
)
debug_opts.add_argument(
"--saveintermediate",
dest="saveintermediate",
action="store_true",
help="Save some data from intermediate passes to help debugging. ",
default=False,
)
debug_opts.add_argument(
"--increaseoutputlevel",
dest="inc_outputlevel",
action="count",
help="Increase the number of intermediate output files. ",
default=0,
)
debug_opts.add_argument(
"--decreaseoutputlevel",
dest="dec_outputlevel",
action="count",
help="Decrease the number of intermediate output files. ",
default=0,
)
return parser
def process_args(inputargs=None):
    """
    Compile arguments for rapidtide workflow.

    Parses the command line (or *inputargs* if given), writes the raw and
    formatted command lines to files next to the output root, and attaches
    a set of hard-coded tunable parameters to the returned namespace.

    Parameters
    ----------
    inputargs : list of str, optional
        Argument list to parse instead of sys.argv (useful for testing).

    Returns
    -------
    args : argparse.Namespace
        The fully post-processed argument namespace.
    """
    if inputargs is None:
        print("processing command line arguments")
        # write out the command used
        try:
            args = _get_parser().parse_args()
            argstowrite = sys.argv
        except SystemExit:
            # argparse already printed its error; show full help before exiting.
            _get_parser().print_help()
            raise
    else:
        print("processing passed argument list:")
        try:
            args = _get_parser().parse_args(inputargs)
            argstowrite = inputargs
        except SystemExit:
            _get_parser().print_help()
            raise
    # save the raw and formatted command lines
    args.commandline = " ".join(argstowrite)
    tide_io.writevec([args.commandline], args.outputroot + "_commandline.txt")
    # Build a shell-friendly version: the program name and the positional
    # arguments each start a line; each "--" option starts a new line and
    # any values that follow it are appended to that same line.
    formattedcommandline = []
    for thetoken in argstowrite[0:3]:
        formattedcommandline.append(thetoken)
    for thetoken in argstowrite[3:]:
        if thetoken[0:2] == "--":
            formattedcommandline.append(thetoken)
        else:
            formattedcommandline[-1] += " " + thetoken
    # Indent continuation lines and add trailing backslashes so the saved
    # file can be pasted back into a shell as a multi-line command.
    for i in range(len(formattedcommandline)):
        if i > 0:
            prefix = "    "
        else:
            prefix = ""
        if i < len(formattedcommandline) - 1:
            suffix = " \\"
        else:
            suffix = ""
        formattedcommandline[i] = prefix + formattedcommandline[i] + suffix
    tide_io.writevec(formattedcommandline, args.outputroot + "_formattedcommandline.txt")
    if args.debug:
        print()
        print("before postprocessing")
        print(args)
    # some tunable parameters
    args.outputlevel = 1
    args.maskthreshpct = 10.0
    args.domadnorm = True
    args.nprocs = 1
    args.verbose = False
    args.smoothlen = 101
    args.envthresh = 0.2
    args.upsamplefac = 100
    args.centric = True
    args.pulsereconstepsize = 0.01
    args.aliasedcorrelationwidth = 5
    args.aliasedcorrelationpts = 201
    args.unnormvesselmap = True
    args.histlen = 100
    args.softvesselfrac = 0.4
    args.savecardiacnoise = True
    args.colnum = None
    args.colname = None
    # Additional argument parsing not handled by argparse
    # deal with notch filter logic
    if args.disablenotch:
        args.notchpct = None
    # determine the outputlevel
    # Combine the count-style --increaseoutputlevel/--decreaseoutputlevel
    # flags into a single non-negative output level.
    args.outputlevel = np.max([0, args.outputlevel + args.inc_outputlevel - args.dec_outputlevel])
    if args.debug:
        print()
        print("after postprocessing")
        print(args)
    # start the clock!
    # tide_util.checkimports(args)
    return args
| |
from __future__ import unicode_literals
import copy
from collections import OrderedDict
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import do_pending_lookups
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
class InvalidBasesError(ValueError):
    """Raised when a model's base classes can't be resolved (presumably
    during state rendering -- see StateApps below)."""
    pass
class ProjectState(object):
    """
    Represents the entire project's overall state.
    This is the item that is passed around - we do it here rather than at the
    app level so that cross-app FKs/etc. resolve properly.
    """
    def __init__(self, models=None, real_apps=None):
        # Mapping of (app_label, lower-cased model name) -> ModelState.
        self.models = models or {}
        # Apps to include from main registry, usually unmigrated ones
        self.real_apps = real_apps or []
    def add_model(self, model_state):
        """Add *model_state* to this state, re-rendering it into the app
        registry if one has already been built."""
        app_label, model_name = model_state.app_label, model_state.name_lower
        self.models[(app_label, model_name)] = model_state
        if 'apps' in self.__dict__: # hasattr would cache the property
            self.reload_model(app_label, model_name)
    def remove_model(self, app_label, model_name):
        """Remove the model from this state and from the rendered registry."""
        del self.models[app_label, model_name]
        if 'apps' in self.__dict__: # hasattr would cache the property
            self.apps.unregister_model(app_label, model_name)
    def reload_model(self, app_label, model_name):
        """Re-render one model, plus any related models whose rendered
        classes may still reference the stale version."""
        if 'apps' in self.__dict__: # hasattr would cache the property
            # Get relations before reloading the models, as _meta.apps may change
            try:
                related_old = {
                    f.related_model for f in
                    self.apps.get_model(app_label, model_name)._meta.related_objects
                }
            except LookupError:
                # Model not rendered yet; nothing points at it.
                related_old = set()
            self._reload_one_model(app_label, model_name)
            # Reload models if there are relations
            model = self.apps.get_model(app_label, model_name)
            related_m2m = {f.related_model for f in model._meta.many_to_many}
            for rel_model in related_old.union(related_m2m):
                self._reload_one_model(rel_model._meta.app_label, rel_model._meta.model_name)
            if related_m2m:
                # Re-render this model after related models have been reloaded
                self._reload_one_model(app_label, model_name)
    def _reload_one_model(self, app_label, model_name):
        # Drop the stale rendered class, then render a fresh one from the
        # stored ModelState.
        self.apps.unregister_model(app_label, model_name)
        self.models[app_label, model_name].render(self.apps)
    def clone(self):
        "Returns an exact copy of this ProjectState"
        new_state = ProjectState(
            models={k: v.clone() for k, v in self.models.items()},
            real_apps=self.real_apps,
        )
        if 'apps' in self.__dict__:
            # Clone the rendered registry too, so mutations of the copy
            # don't leak back into this state.
            new_state.apps = self.apps.clone()
        return new_state
    @cached_property
    def apps(self):
        # Rendered lazily and cached; see the 'apps' in self.__dict__
        # checks above for how the cache is detected.
        return StateApps(self.real_apps, self.models)
    @property
    def concrete_apps(self):
        # Re-render with ignore_swappable=True and replace the cached
        # 'apps' value with the result.
        self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
        return self.apps
    @classmethod
    def from_apps(cls, apps):
        "Takes in an Apps and returns a ProjectState matching it"
        app_models = {}
        for model in apps.get_models(include_swapped=True):
            model_state = ModelState.from_model(model)
            app_models[(model_state.app_label, model_state.name_lower)] = model_state
        return cls(app_models)
    def __eq__(self, other):
        # States are equal iff they know the same models and real apps, and
        # every individual ModelState compares equal.
        if set(self.models.keys()) != set(other.models.keys()):
            return False
        if set(self.real_apps) != set(other.real_apps):
            return False
        return all(model == other.models[key] for key, model in self.models.items())
    def __ne__(self, other):
        return not (self == other)
class AppConfigStub(AppConfig):
    """
    Stubs a Django AppConfig. Only provides a label, and a dict of models.
    """
    # Not used, but required by AppConfig.__init__
    path = ''

    def __init__(self, label):
        self.label = label
        # App-label and app-name are not the same thing, so technically passing
        # in the label here is wrong. In practice, migrations don't care about
        # the app name, but we need something unique, and the label works fine.
        super(AppConfigStub, self).__init__(label, None)

    def import_models(self, all_models):
        # Skip the filesystem import machinery; the models dict is supplied
        # directly by the registry.
        self.models = all_models
class StateApps(Apps):
    """
    Subclass of the global Apps registry class to better handle dynamic model
    additions and removals.
    """

    def __init__(self, real_apps, models, ignore_swappable=False):
        # Any apps in self.real_apps should have all their models included
        # in the render. We don't use the original model instances as there
        # are some variables that refer to the Apps object.
        # FKs/M2Ms from real apps are also not included as they just
        # mess things up with partial states (due to lack of dependencies)
        real_models = []
        for app_label in real_apps:
            app = global_apps.get_app_config(app_label)
            for model in app.get_models():
                real_models.append(ModelState.from_model(model, exclude_rels=True))
        # Populate the app registry with a stub for each application.
        app_labels = {model_state.app_label for model_state in models.values()}
        app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
        super(StateApps, self).__init__(app_configs)
        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the size of the unrendered models doesn't
        # decrease by at least one, meaning there's a base dependency loop/
        # missing base.
        unrendered_models = list(models.values()) + real_models
        while unrendered_models:
            new_unrendered_models = []
            for model in unrendered_models:
                try:
                    model.render(self)
                except InvalidBasesError:
                    new_unrendered_models.append(model)
            if len(new_unrendered_models) == len(unrendered_models):
                raise InvalidBasesError(
                    "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                    "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
                    "for more" % (new_unrendered_models, get_docs_version())
                )
            unrendered_models = new_unrendered_models
        # If there are some lookups left, see if we can first resolve them
        # ourselves - sometimes fields are added after class_prepared is sent.
        # NOTE: do_pending_lookups() pops resolved entries out of
        # self._pending_lookups, and mutating a dict while iterating over its
        # items() view raises RuntimeError on Python 3 - iterate a snapshot.
        for lookup_model, operations in list(self._pending_lookups.items()):
            try:
                model = self.get_model(lookup_model[0], lookup_model[1])
            except LookupError:
                app_label = "%s.%s" % (lookup_model[0], lookup_model[1])
                if app_label == settings.AUTH_USER_MODEL and ignore_swappable:
                    continue
                # Raise an error with a best-effort helpful message
                # (only for the first issue). Error message should look like:
                # "ValueError: Lookup failed for model referenced by
                # field migrations.Book.author: migrations.Author"
                msg = "Lookup failed for model referenced by field {field}: {model[0]}.{model[1]}"
                raise ValueError(msg.format(field=operations[0][1], model=lookup_model))
            else:
                do_pending_lookups(model)

    def clone(self):
        """
        Return a clone of this registry, mainly used by the migration framework.
        """
        clone = StateApps([], {})
        clone.all_models = copy.deepcopy(self.all_models)
        clone.app_configs = copy.deepcopy(self.app_configs)
        return clone

    def register_model(self, app_label, model):
        """Register a rendered model class, creating a stub app config for
        its app if one does not exist yet."""
        self.all_models[app_label][model._meta.model_name] = model
        if app_label not in self.app_configs:
            self.app_configs[app_label] = AppConfigStub(app_label)
            self.app_configs[app_label].models = OrderedDict()
        self.app_configs[app_label].models[model._meta.model_name] = model
        self.clear_cache()

    def unregister_model(self, app_label, model_name):
        """Remove a model from the registry; missing entries are ignored."""
        try:
            del self.all_models[app_label][model_name]
            del self.app_configs[app_label].models[model_name]
        except KeyError:
            pass
        self.clear_cache()
class ModelState(object):
    """
    Represents a Django Model. We don't use the actual Model class
    as it's not designed to have its options changed - instead, we
    mutate this one and then render it into a Model as required.
    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """

    def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
        self.app_label = app_label
        self.name = force_text(name)
        # Ordered list of (field_name, unbound_field_instance) 2-tuples.
        self.fields = fields
        self.options = options or {}
        self.bases = bases or (models.Model, )
        self.managers = managers or []
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
        # Sanity-check that fields are NOT already bound to a model.
        for name, field in fields:
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )

    @cached_property
    def name_lower(self):
        # Lower-cased model name; used as the registry lookup key.
        return self.name.lower()

    @classmethod
    def from_model(cls, model, exclude_rels=False):
        """
        Feed me a model, get a ModelState representing it out.
        """
        # Deconstruct the fields
        fields = []
        for field in model._meta.local_fields:
            if getattr(field, "rel", None) and exclude_rels:
                continue
            if isinstance(field, OrderWrt):
                continue
            name, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            try:
                fields.append((name, field_class(*args, **kwargs)))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
                    name,
                    model._meta.app_label,
                    model._meta.object_name,
                    e,
                ))
        if not exclude_rels:
            for field in model._meta.local_many_to_many:
                name, path, args, kwargs = field.deconstruct()
                field_class = import_string(path)
                try:
                    fields.append((name, field_class(*args, **kwargs)))
                except TypeError as e:
                    raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                        name,
                        model._meta.object_name,
                        e,
                    ))
        # Extract the options
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                if name == "unique_together":
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_together(ut))
                elif name == "index_together":
                    it = model._meta.original_attrs["index_together"]
                    options[name] = set(normalize_together(it))
                else:
                    options[name] = model._meta.original_attrs[name]
        # Force-convert all options to text_type (#23226)
        options = cls.force_text_recursive(options)
        # If we're ignoring relationships, remove all field-listing model
        # options (that option basically just means "make a stub model")
        if exclude_rels:
            for key in ["unique_together", "index_together", "order_with_respect_to"]:
                if key in options:
                    del options[key]

        def flatten_bases(model):
            # Recursively expand abstract bases into their own bases, keeping
            # concrete bases as-is.
            bases = []
            for base in model.__bases__:
                if hasattr(base, "_meta") and base._meta.abstract:
                    bases.extend(flatten_bases(base))
                else:
                    bases.append(base)
            return bases

        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However by recursing on
        # __bases__ we may end up with duplicates and ordering issues, we
        # therefore discard any duplicates and reorder the bases according
        # to their index in the MRO.
        flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
        # Make our record: model bases become "app_label.model_name" strings,
        # anything without _meta (e.g. plain mixins) is kept as the class.
        bases = tuple(
            (
                "%s.%s" % (base._meta.app_label, base._meta.model_name)
                if hasattr(base, "_meta") else
                base
            )
            for base in flattened_bases
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model,)
        # Constructs all managers on the model
        managers = {}

        def reconstruct_manager(mgr):
            # Rebuild a manager instance from its deconstructed form and
            # record it by name together with its original creation_counter.
            as_manager, manager_path, qs_path, args, kwargs = mgr.deconstruct()
            if as_manager:
                qs_class = import_string(qs_path)
                instance = qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                instance = manager_class(*args, **kwargs)
            # We rely on the ordering of the creation_counter of the original
            # instance
            managers[mgr.name] = (mgr.creation_counter, instance)

        default_manager_name = model._default_manager.name
        # Make sure the default manager is always the first
        if model._default_manager.use_in_migrations:
            reconstruct_manager(model._default_manager)
        else:
            # Force this manager to be the first and thus default
            managers[default_manager_name] = (0, models.Manager())
        # Sort all managers by their creation counter
        for _, manager, _ in sorted(model._meta.managers):
            if manager.name == '_base_manager' or not manager.use_in_migrations:
                continue
            reconstruct_manager(manager)
        # Sort all managers by their creation counter but take only name and
        # instance for further processing
        managers = [
            (name, instance) for name, (cc, instance) in
            sorted(managers.items(), key=lambda v: v[1])
        ]
        if managers == [(default_manager_name, models.Manager())]:
            managers = []
        # Construct the new ModelState
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
            managers,
        )

    @classmethod
    def force_text_recursive(cls, value):
        # Recursively coerce all strings in (possibly nested) containers to
        # text, preserving the container type (list/tuple/set/dict).
        if isinstance(value, six.string_types):
            return smart_text(value)
        elif isinstance(value, list):
            return [cls.force_text_recursive(x) for x in value]
        elif isinstance(value, tuple):
            return tuple(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, set):
            return set(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, dict):
            return {
                cls.force_text_recursive(k): cls.force_text_recursive(v)
                for k, v in value.items()
            }
        return value

    def construct_fields(self):
        "Deep-clone the fields using deconstruction"
        for name, field in self.fields:
            _, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            yield name, field_class(*args, **kwargs)

    def construct_managers(self):
        "Deep-clone the managers using deconstruction"
        # Sort all managers by their creation counter
        sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
        for mgr_name, manager in sorted_managers:
            as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
            if as_manager:
                qs_class = import_string(qs_path)
                yield mgr_name, qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                yield mgr_name, manager_class(*args, **kwargs)

    def clone(self):
        "Returns an exact copy of this ModelState"
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=list(self.construct_fields()),
            options=dict(self.options),
            bases=self.bases,
            managers=list(self.construct_managers()),
        )

    def render(self, apps):
        "Creates a Model object from our current state into the given apps"
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, "apps": apps}
        meta_contents.update(self.options)
        meta = type(str("Meta"), tuple(), meta_contents)
        # Then, work out our bases
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, six.string_types) else base)
                for base in self.bases
            )
        except LookupError:
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Turn fields into a dict for the body, add other bits
        body = dict(self.construct_fields())
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Restore managers
        body.update(self.construct_managers())
        # Then, make a Model object (apps.register_model is called in __new__)
        return type(
            str(self.name),
            bases,
            body,
        )

    def get_field_by_name(self, name):
        # Linear scan; self.fields is an ordered list of (name, field) pairs.
        for fname, field in self.fields:
            if fname == name:
                return field
        raise ValueError("No field called %s on model %s" % (name, self.name))

    def __repr__(self):
        return "<ModelState: '%s.%s'>" % (self.app_label, self.name)

    def __eq__(self, other):
        # Fields are compared by their deconstructed form (minus the name
        # element), not by object identity.
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
                for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
            (self.options == other.options) and
            (self.bases == other.bases) and
            (self.managers == other.managers)
        )

    def __ne__(self, other):
        return not (self == other)
| |
import logging
import os
from abc import ABC
from collections import defaultdict, deque
from collections.abc import Mapping
from typing import TYPE_CHECKING, Deque, Dict, Generator, Optional, Tuple, Type
from dvc.proc.manager import ProcessManager
from ..base import (
EXEC_BASELINE,
EXEC_BRANCH,
EXEC_HEAD,
EXEC_MERGE,
CheckpointExistsError,
ExperimentExistsError,
ExpRefInfo,
ExpStashEntry,
)
from .base import EXEC_PID_DIR, BaseExecutor
from .local import TempDirExecutor, WorkspaceExecutor
if TYPE_CHECKING:
from scmrepo.git import Git
from dvc.repo import Repo
logger = logging.getLogger(__name__)
class BaseExecutorManager(ABC, Mapping):
    """Manages executors for a collection of experiments to be run."""

    # Executor class instantiated for each queued experiment.
    EXECUTOR_CLS: Type = BaseExecutor

    def __init__(
        self,
        scm: "Git",
        wdir: str,
    ):
        from dvc.utils.fs import makedirs

        self.scm = scm
        makedirs(wdir, exist_ok=True)
        self.wdir = wdir
        self.proc = ProcessManager(self.pid_dir)
        # Executors running inside this process.
        self._attached: Dict[str, "BaseExecutor"] = {}
        # Executors reconstructed from on-disk info files.
        self._detached: Dict[str, "BaseExecutor"] = dict(self._load_infos())
        self._queue: Deque[Tuple[str, "BaseExecutor"]] = deque()

    def __getitem__(self, key: str) -> "BaseExecutor":
        # Attached executors take precedence over detached ones.
        try:
            return self._attached[key]
        except KeyError:
            pass
        return self._detached[key]

    def __iter__(self):
        yield from self._attached
        yield from self._detached

    def __len__(self):
        return len(self._attached) + len(self._detached)

    @property
    def pid_dir(self) -> str:
        return os.path.join(self.wdir, EXEC_PID_DIR)

    def enqueue(self, rev: str, executor: "BaseExecutor"):
        """Queue an executor for a stash rev not already managed here."""
        assert rev not in self
        self._queue.append((rev, executor))

    def _load_infos(self) -> Generator[Tuple[str, "BaseExecutor"], None, None]:
        """Yield (name, executor) pairs reconstructed from the info files of
        known processes; unreadable info files are skipped."""
        import json
        from urllib.parse import urlparse

        from .base import ExecutorInfo
        from .ssh import SSHExecutor

        def make_executor(info: "ExecutorInfo"):
            # Pick the executor class from the git URL scheme recorded in
            # the info file; no URL means a workspace executor.
            if info.git_url:
                scheme = urlparse(info.git_url).scheme
                if scheme == "file":
                    cls: Type = TempDirExecutor
                elif scheme == "ssh":
                    cls = SSHExecutor
                else:
                    raise NotImplementedError
            else:
                cls = WorkspaceExecutor
            return cls.from_info(info)

        for name in self.proc:
            infofile = self.get_infofile_path(name)
            try:
                with open(infofile, encoding="utf-8") as fobj:
                    info = ExecutorInfo.from_dict(json.load(fobj))
                yield name, make_executor(info)
            except OSError:
                continue

    def get_infofile_path(self, name: str) -> str:
        return os.path.join(
            self.pid_dir,
            name,
            f"{name}{BaseExecutor.INFOFILE_EXT}",
        )

    @classmethod
    def from_stash_entries(
        cls,
        scm: "Git",
        wdir: str,
        repo: "Repo",
        to_run: Dict[str, ExpStashEntry],
        **kwargs,
    ):
        """Build a manager with one queued executor per stash entry.

        Temporarily sets EXEC_HEAD/EXEC_MERGE/EXEC_BASELINE refs while each
        executor is initialized; all three are removed afterwards.
        """
        manager = cls(scm, wdir)
        try:
            for stash_rev, entry in to_run.items():
                scm.set_ref(EXEC_HEAD, entry.head_rev)
                scm.set_ref(EXEC_MERGE, stash_rev)
                scm.set_ref(EXEC_BASELINE, entry.baseline_rev)
                # Executor will be initialized with an empty git repo that
                # we populate by pushing:
                #   EXEC_HEAD - the base commit for this experiment
                #   EXEC_MERGE - the unmerged changes (from our stash)
                #       to be reproduced
                #   EXEC_BASELINE - the baseline commit for this experiment
                executor = cls.EXECUTOR_CLS.from_stash_entry(
                    repo,
                    stash_rev,
                    entry,
                    **kwargs,
                )
                manager.enqueue(stash_rev, executor)
        finally:
            for ref in (EXEC_HEAD, EXEC_MERGE, EXEC_BASELINE):
                scm.remove_ref(ref)
        return manager

    def exec_queue(self, jobs: Optional[int] = 1, detach: bool = False):
        """Run dvc repro for queued executors in parallel."""
        if detach:
            raise NotImplementedError
            # TODO use ProcessManager.spawn() to support detached runs
        return self._exec_attached(jobs=jobs)

    def _exec_attached(self, jobs: Optional[int] = 1):
        """Run queued executors in a process pool, attached to this process.

        Returns a dict mapping stash rev -> {exp_rev: exp_hash} for
        successfully collected experiments.
        """
        import signal
        from concurrent.futures import (
            CancelledError,
            ProcessPoolExecutor,
            wait,
        )
        from multiprocessing import Manager

        from dvc.stage.monitor import CheckpointKilledError

        result: Dict[str, Dict[str, str]] = defaultdict(dict)
        manager = Manager()
        # Workers report their (rev, pid) on this queue so we can signal them.
        pid_q = manager.Queue()
        with ProcessPoolExecutor(max_workers=jobs) as workers:
            futures = {}
            while self._queue:
                rev, executor = self._queue.popleft()
                infofile = self.get_infofile_path(rev)
                future = workers.submit(
                    executor.reproduce,
                    info=executor.info,
                    rev=rev,
                    queue=pid_q,
                    infofile=infofile,
                    log_level=logger.getEffectiveLevel(),
                )
                futures[future] = (rev, executor)
                self._attached[rev] = executor
            try:
                wait(futures)
            except KeyboardInterrupt:
                # forward SIGINT to any running executor processes and
                # cancel any remaining futures
                workers.shutdown(wait=False)
                pids = {}
                for future, (rev, _) in futures.items():
                    if future.running():
                        # if future has already been started by the scheduler
                        # we still have to wait until it tells us its PID
                        while rev not in pids:
                            # NOTE: drain into a separate name. Reassigning
                            # `rev` here would clobber the rev we are waiting
                            # for and could SIGINT the wrong process.
                            queued_rev, pid = pid_q.get()
                            pids[queued_rev] = pid
                        os.kill(pids[rev], signal.SIGINT)
                    elif not future.done():
                        future.cancel()
            for future, (rev, executor) in futures.items():
                try:
                    exc = future.exception()
                    if exc is None:
                        exec_result = future.result()
                        result[rev].update(
                            self._collect_executor(executor, exec_result)
                        )
                    elif not isinstance(exc, CheckpointKilledError):
                        logger.error(
                            "Failed to reproduce experiment '%s'", rev[:7]
                        )
                except CancelledError:
                    logger.error(
                        "Cancelled before attempting to reproduce experiment "
                        "'%s'",
                        rev[:7],
                    )
                finally:
                    self.cleanup_executor(rev, executor)
        return result

    def _collect_executor(self, executor, exec_result) -> Dict[str, str]:
        """Fetch experiment refs produced by an executor and return a mapping
        of collected exp_rev -> exp_hash."""
        # NOTE: GitPython Repo instances cannot be re-used
        # after process has received SIGINT or SIGTERM, so we
        # need this hack to re-instantiate git instances after
        # checkpoint runs. See:
        # https://github.com/gitpython-developers/GitPython/issues/427
        # del self.repo.scm
        results = {}

        def on_diverged(ref: str, checkpoint: bool):
            ref_info = ExpRefInfo.from_ref(ref)
            if checkpoint:
                raise CheckpointExistsError(ref_info.name)
            raise ExperimentExistsError(ref_info.name)

        for ref in executor.fetch_exps(
            self.scm,
            executor.git_url,
            force=exec_result.force,
            on_diverged=on_diverged,
        ):
            exp_rev = self.scm.get_ref(ref)
            if exp_rev:
                logger.debug("Collected experiment '%s'.", exp_rev[:7])
                results[exp_rev] = exec_result.exp_hash
        return results

    def cleanup_executor(self, rev: str, executor: "BaseExecutor"):
        """Clean up an executor and remove its process/pid state on disk."""
        from dvc.utils.fs import remove

        executor.cleanup()
        try:
            self.proc.remove(rev)
        except KeyError:
            pass
        remove(os.path.join(self.pid_dir, rev))
class TempDirExecutorManager(BaseExecutorManager):
    """Executor manager whose executors run in temporary directories."""
    EXECUTOR_CLS = TempDirExecutor
class WorkspaceExecutorManager(BaseExecutorManager):
    """Executor manager which runs a single experiment in the user's
    workspace, within the main DVC process."""
    EXECUTOR_CLS = WorkspaceExecutor

    @classmethod
    def from_stash_entries(
        cls,
        scm: "Git",
        wdir: str,
        repo: "Repo",
        to_run: Dict[str, ExpStashEntry],
        **kwargs,
    ):
        """Build a manager with exactly one queued workspace executor.

        Sets EXEC_HEAD/EXEC_MERGE/EXEC_BASELINE while the executor is
        initialized; unlike the base class, only EXEC_MERGE is removed
        afterwards.
        """
        manager = cls(scm, wdir)
        try:
            # Workspace execution only supports a single experiment at a time.
            assert len(to_run) == 1
            for stash_rev, entry in to_run.items():
                scm.set_ref(EXEC_HEAD, entry.head_rev)
                scm.set_ref(EXEC_MERGE, stash_rev)
                scm.set_ref(EXEC_BASELINE, entry.baseline_rev)
                executor = cls.EXECUTOR_CLS.from_stash_entry(
                    repo,
                    stash_rev,
                    entry,
                    **kwargs,
                )
                manager.enqueue(stash_rev, executor)
        finally:
            for ref in (EXEC_MERGE,):
                scm.remove_ref(ref)
        return manager

    def _collect_executor(self, executor, exec_result) -> Dict[str, str]:
        # Workspace experiments are collected from the local EXEC_BRANCH ref
        # rather than by fetching from a remote executor repo.
        results = {}
        exp_rev = self.scm.get_ref(EXEC_BRANCH)
        if exp_rev:
            logger.debug("Collected experiment '%s'.", exp_rev[:7])
            results[exp_rev] = exec_result.exp_hash
        return results

    def exec_queue(self, jobs: Optional[int] = 1, detach: bool = False):
        """Run a single WorkspaceExecutor.
        Workspace execution is done within the main DVC process
        (rather than in multiprocessing context)
        """
        from dvc.exceptions import DvcException
        from dvc.stage.monitor import CheckpointKilledError

        assert len(self._queue) == 1
        assert not detach
        result: Dict[str, Dict[str, str]] = defaultdict(dict)
        rev, executor = self._queue.popleft()
        # Info/pid files for workspace runs live under this fixed name.
        exec_name = "workspace"
        infofile = self.get_infofile_path(exec_name)
        try:
            exec_result = executor.reproduce(
                info=executor.info,
                rev=rev,
                infofile=infofile,
                log_level=logger.getEffectiveLevel(),
            )
            if not exec_result.exp_hash:
                raise DvcException(
                    f"Failed to reproduce experiment '{rev[:7]}'"
                )
            if exec_result.ref_info:
                result[rev].update(
                    self._collect_executor(executor, exec_result)
                )
        except CheckpointKilledError:
            # Checkpoint errors have already been logged
            return {}
        except DvcException:
            raise
        except Exception as exc:
            # Wrap unexpected failures, chaining the original cause.
            raise DvcException(
                f"Failed to reproduce experiment '{rev[:7]}'"
            ) from exc
        finally:
            self.cleanup_executor(exec_name, executor)
        return result
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.contrib.sites.models import get_current_site
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.views.generic import (ListView, DetailView, UpdateView, CreateView,
DeleteView, FormView)
from .backends import invitation_backend, registration_backend
from .forms import (OrganizationForm, OrganizationUserForm,
OrganizationUserAddForm, OrganizationAddForm, SignUpForm)
from .mixins import (OrganizationMixin, OrganizationUserMixin,
MembershipRequiredMixin, AdminRequiredMixin, OwnerRequiredMixin)
from .models import Organization
from .utils import create_organization
class BaseOrganizationList(ListView):
    """List the active organizations that include the requesting user."""
    # TODO change this to query on the specified model
    queryset = Organization.active.all()
    context_object_name = "organizations"

    def get_queryset(self):
        """Restrict the queryset to organizations the current user belongs to."""
        base_qs = super(BaseOrganizationList, self).get_queryset()
        return base_qs.filter(users=self.request.user)
class BaseOrganizationDetail(OrganizationMixin, DetailView):
    """Display a single organization together with its membership list."""

    def get_context_data(self, **kwargs):
        """Add the organization and its users to the template context."""
        context = super(BaseOrganizationDetail, self).get_context_data(**kwargs)
        context.update(
            organization_users=self.organization.organization_users.all(),
            organization=self.organization,
        )
        return context
class BaseOrganizationCreate(CreateView):
    """Create a new organization."""
    model = Organization
    form_class = OrganizationAddForm
    template_name = 'organizations/organization_form.html'

    def get_success_url(self):
        """Return to the organization list after creation."""
        return reverse("organization_list")

    def get_form_kwargs(self):
        """Pass the current request through to the form."""
        form_kwargs = super(BaseOrganizationCreate, self).get_form_kwargs()
        form_kwargs['request'] = self.request
        return form_kwargs
class BaseOrganizationUpdate(OrganizationMixin, UpdateView):
    """Edit an existing organization."""
    form_class = OrganizationForm

    def get_form_kwargs(self):
        """Pass the current request through to the form."""
        form_kwargs = super(BaseOrganizationUpdate, self).get_form_kwargs()
        form_kwargs['request'] = self.request
        return form_kwargs
class BaseOrganizationDelete(OrganizationMixin, DeleteView):
    """Delete an organization, returning to the organization list."""

    def get_success_url(self):
        return reverse("organization_list")
class BaseOrganizationUserList(OrganizationMixin, ListView):
    """List every user belonging to a single organization."""

    def get(self, request, *args, **kwargs):
        """Resolve the organization, then render its membership list."""
        self.organization = self.get_organization()
        self.object_list = self.organization.organization_users.all()
        context = self.get_context_data(
            object_list=self.object_list,
            organization_users=self.object_list,
            organization=self.organization,
        )
        return self.render_to_response(context)
class BaseOrganizationUserDetail(OrganizationUserMixin, DetailView):
    """Display a single organization user; behavior comes from the mixin."""
    pass
class BaseOrganizationUserCreate(OrganizationMixin, CreateView):
    """Add a user to an organization."""
    form_class = OrganizationUserAddForm
    template_name = 'organizations/organizationuser_form.html'

    def get_success_url(self):
        """Return to the organization's user list after creation."""
        url_kwargs = {'organization_pk': self.object.organization.pk}
        return reverse('organization_user_list', kwargs=url_kwargs)

    def get_form_kwargs(self):
        """Pass the organization and current request through to the form."""
        form_kwargs = super(BaseOrganizationUserCreate, self).get_form_kwargs()
        form_kwargs['organization'] = self.organization
        form_kwargs['request'] = self.request
        return form_kwargs

    def get(self, request, *args, **kwargs):
        # Resolve the organization before delegating to CreateView.
        self.organization = self.get_object()
        return super(BaseOrganizationUserCreate, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # Resolve the organization before delegating to CreateView.
        self.organization = self.get_object()
        return super(BaseOrganizationUserCreate, self).post(request, *args, **kwargs)
class BaseOrganizationUserRemind(OrganizationUserMixin, DetailView):
    """Resend an invitation reminder to a not-yet-active organization user."""
    template_name = 'organizations/organizationuser_remind.html'
    # TODO move to invitations backend?

    def get_object(self, **kwargs):
        """Return the organization user, 404ing if already active."""
        self.organization_user = super(BaseOrganizationUserRemind, self).get_object()
        if self.organization_user.user.is_active:
            raise Http404(_("Already active"))  # TODO add better error
        return self.organization_user

    def post(self, request, *args, **kwargs):
        """Send the reminder via the invitation backend, then redirect."""
        self.object = self.get_object()
        invitation_backend().send_reminder(
            self.object.user,
            domain=get_current_site(self.request),
            organization=self.organization,
            sender=request.user,
        )
        return redirect(self.object)
class BaseOrganizationUserUpdate(OrganizationUserMixin, UpdateView):
    """Edit an organization user via OrganizationUserForm."""
    form_class = OrganizationUserForm
class BaseOrganizationUserDelete(OrganizationUserMixin, DeleteView):
    """Remove a user from an organization."""

    def get_success_url(self):
        """Return to the organization's user list after deletion."""
        url_kwargs = {'organization_pk': self.object.organization.pk}
        return reverse('organization_user_list', kwargs=url_kwargs)
class OrganizationSignup(FormView):
    """
    View that allows unregistered users to create an organization account.
    It simply processes the form and then calls the specified registration
    backend.
    """
    form_class = SignUpForm
    template_name = "organizations/signup_form.html"
    # TODO get success from backend, because some backends may do something
    # else, like require verification
    # NOTE(review): evaluated once at class-definition (import) time, not per
    # request - confirm this is intended.
    backend = registration_backend()

    def dispatch(self, request, *args, **kwargs):
        # Authenticated users don't need to sign up; send them to the
        # organization creation view instead.
        if request.user.is_authenticated():
            return redirect('organization_add')
        return super(OrganizationSignup, self).dispatch(request, *args,
                                                        **kwargs)

    def get_success_url(self):
        # An explicitly configured success_url takes precedence.
        if hasattr(self, 'success_url'):
            return self.success_url
        return reverse('organization_signup_success')

    def form_valid(self, form):
        """
        Register the submitted email via the backend and create an inactive
        organization for the new user, then redirect to the success URL.
        """
        user = self.backend.register_by_email(form.cleaned_data['email'])
        create_organization(user=user, name=form.cleaned_data['name'],
                            slug=form.cleaned_data['slug'], is_active=False)
        return redirect(self.get_success_url())

    def signup_success(self, request):
        return render(request, "organizations/signup_success.html", {})
class OrganizationList(BaseOrganizationList):
    """Concrete organization list view (no extra access control)."""
    pass


class OrganizationCreate(BaseOrganizationCreate):
    """
    Allows any user to create a new organization.
    """
    pass


class OrganizationDetail(MembershipRequiredMixin, BaseOrganizationDetail):
    """Organization detail, restricted to organization members."""
    pass


class OrganizationUpdate(AdminRequiredMixin, BaseOrganizationUpdate):
    """Organization update, restricted to organization admins."""
    pass


class OrganizationDelete(OwnerRequiredMixin, BaseOrganizationDelete):
    """Organization deletion, restricted to the organization owner."""
    pass


class OrganizationUserList(MembershipRequiredMixin, BaseOrganizationUserList):
    """User list, restricted to organization members."""
    pass


class OrganizationUserDetail(AdminRequiredMixin, BaseOrganizationUserDetail):
    """User detail, restricted to organization admins."""
    pass


class OrganizationUserUpdate(AdminRequiredMixin, BaseOrganizationUserUpdate):
    """User update, restricted to organization admins."""
    pass


class OrganizationUserCreate(AdminRequiredMixin, BaseOrganizationUserCreate):
    """User creation, restricted to organization admins."""
    pass


class OrganizationUserRemind(AdminRequiredMixin, BaseOrganizationUserRemind):
    """Invitation reminder, restricted to organization admins."""
    pass


class OrganizationUserDelete(AdminRequiredMixin, BaseOrganizationUserDelete):
    """User removal, restricted to organization admins."""
    pass
| |
"""Tests on the examples in the specification documents
"""
import re
import sys
import unittest
from resync.resource import Resource
from resync.source_description import SourceDescription
from resync.capability_list import CapabilityList
from resync.resource_list import ResourceList,ResourceListOrdered
from resync.resource_dump import ResourceDump
from resync.resource_dump_manifest import ResourceDumpManifest
from resync.change_list import ChangeList
from resync.change_dump import ChangeDump
from resync.change_dump_manifest import ChangeDumpManifest
from resync.archives import ResourceListArchive,ResourceDumpArchive,ChangeListArchive,ChangeDumpArchive
from resync.sitemap import Sitemap
class TestExamplesFromSpec(unittest.TestCase):
def test_all_simple_read(self):
"""Just try to read each one"""
for ex in ('archives_ex_2_1','archives_ex_2_2',
'archives_ex_3_1','archives_ex_3_2',
'archives_ex_4_1',
'archives_ex_5_1',
'archives_ex_6_1',
'resourcesync_ex_1','resourcesync_ex_2','resourcesync_ex_3',
'resourcesync_ex_4','resourcesync_ex_5','resourcesync_ex_6',
'resourcesync_ex_7','resourcesync_ex_8','resourcesync_ex_12',
'resourcesync_ex_13','resourcesync_ex_14','resourcesync_ex_15',
'resourcesync_ex_16','resourcesync_ex_17','resourcesync_ex_18',
'resourcesync_ex_19','resourcesync_ex_20','resourcesync_ex_21',
'resourcesync_ex_22','resourcesync_ex_23','resourcesync_ex_24',
'resourcesync_ex_25','resourcesync_ex_26','resourcesync_ex_27',
'resourcesync_ex_28','resourcesync_ex_29','resourcesync_ex_30',
'resourcesync_ex_31','resourcesync_ex_32','resourcesync_ex_33'):
s=Sitemap()
fh = self._open_ex(ex)
si = s.parse_xml( fh=fh )
def test_ex_01(self):
"""resourcesync_ex_1 is a simple resource_list with 2 resources, no metadata"""
rl=ResourceList()
rl.parse(uri='resync/test/testdata/examples_from_spec/resourcesync_ex_1.xml')
self.assertEqual( rl.capability, 'resourcelist' )
self.assertEqual( len(rl.resources), 2, '2 resources')
sms = sorted(rl.uris())
self.assertEqual( sms, ['http://example.com/res1','http://example.com/res2'] )
self.assertEqual( rl.resources['http://example.com/res1'].lastmod, None )
    def test_ex_02(self):
        """resourcesync_ex_2 is a simple resource_list with 2 resources, some metadata"""
        rl=ResourceList()
        rl.parse(uri='resync/test/testdata/examples_from_spec/resourcesync_ex_2.xml')
        self.assertEqual( len(rl.resources), 2, '2 resources')
        sms = sorted(rl.uris())
        self.assertEqual( sms, ['http://example.com/res1','http://example.com/res2'] )
        # Each resource carries both a lastmod timestamp and an md5 digest
        self.assertEqual( rl.resources['http://example.com/res1'].lastmod, '2013-01-02T13:00:00Z' )
        self.assertEqual( rl.resources['http://example.com/res2'].lastmod, '2013-01-02T14:00:00Z' )
        self.assertEqual( rl.resources['http://example.com/res1'].md5, '1584abdf8ebdc9802ac0c6a7402c03b6' )
        self.assertEqual( rl.resources['http://example.com/res2'].md5, '1e0d5cb8ef6ba40c99b14c0237be735e' )
def test_ex_03(self):
    """Parse resourcesync_ex_3: a Change List with two changes."""
    cl = ChangeList()
    cl.parse('resync/test/testdata/examples_from_spec/resourcesync_ex_3.xml')
    self.assertEqual(len(cl.resources), 2, '2 resources')
    self.assertEqual(sorted(cl.uris()),
                     ['http://example.com/res2.pdf', 'http://example.com/res3.tiff'])
    # change lists keep document order, indexable by position
    first = cl.resources[0]
    second = cl.resources[1]
    self.assertEqual(first.lastmod, '2013-01-02T13:00:00Z')
    self.assertEqual(second.lastmod, '2013-01-02T18:00:00Z')
    self.assertEqual(first.change, 'updated')
    self.assertEqual(second.change, 'deleted')
def test_ex_04(self):
    """Parse resourcesync_ex_4: a Resource Dump listing a single ZIP."""
    rd = ResourceDump()
    rd.parse('resync/test/testdata/examples_from_spec/resourcesync_ex_4.xml')
    self.assertEqual(len(rd.resources), 1, '1 resources')
    zip_uri = 'http://example.com/resourcedump.zip'
    self.assertTrue(zip_uri in rd.resources)
    self.assertEqual(rd.resources[zip_uri].lastmod, '2013-01-03T09:00:00Z')
def test_ex_05(self):
    """Parse resourcesync_ex_5: a Resource Dump Manifest with two files."""
    rdm = ResourceDumpManifest()
    rdm.parse('resync/test/testdata/examples_from_spec/resourcesync_ex_5.xml')
    self.assertEqual(len(rdm.resources), 2, '2 resources')
    self.assertEqual(sorted(rdm.uris()),
                     ['http://example.com/res1', 'http://example.com/res2'])
    res1 = rdm.resources['http://example.com/res1']
    self.assertEqual(res1.lastmod, '2013-01-03T03:00:00Z')
    self.assertEqual(res1.md5, '1584abdf8ebdc9802ac0c6a7402c03b6')
    self.assertEqual(res1.path, '/resources/res1')
    res2 = rdm.resources['http://example.com/res2']
    self.assertEqual(res2.lastmod, '2013-01-03T04:00:00Z')
    self.assertEqual(res2.md5, '1e0d5cb8ef6ba40c99b14c0237be735e')
    self.assertEqual(res2.path, '/resources/res2')
def test_ex_06(self):
    """Parse resourcesync_ex_6: a Capability List advertising three capabilities."""
    capl = CapabilityList()
    capl.parse('resync/test/testdata/examples_from_spec/resourcesync_ex_6.xml')
    self.assertEqual(len(capl.resources), 3, '3 capabilities')
    # Each expected capability is present and points at the right document
    for name in ('resourcelist', 'resourcedump', 'changelist'):
        self.assertTrue(capl.has_capability(name))
        self.assertEqual(capl.capability_info(name).uri,
                         'http://example.com/dataset1/%s.xml' % (name))
    # Capabilities that are not listed are reported as absent
    self.assertFalse(capl.has_capability())
    self.assertFalse(capl.has_capability('bogus'))
    self.assertFalse(capl.has_capability('capabilitylist'))
def test_ex_07(self):
    """resourcesync_ex_7 is a source description that lists a single Capability List

    NOTE(review): the method was named 'text_ex_07', which the unittest
    loader never picks up, so this test silently did not run; renamed to
    'test_ex_07'. Per spec Example 7 the entry in a Source Description
    carries capability 'capabilitylist' (not 'resourcelist'), so the
    expected value is corrected accordingly.
    """
    sd = SourceDescription()
    sd.read(uri='resync/test/testdata/examples_from_spec/resourcesync_ex_7.xml')
    self.assertEqual(len(sd.resources), 1, '1 capability list')
    cl = sd.resources[0]
    self.assertEqual(cl.uri, 'http://example.com/dataset1/capabilitylist.xml')
    self.assertEqual(cl.capability, 'capabilitylist')
    self.assertEqual(cl.describedby, 'http://example.com/info_about_set1_of_resources.xml')
def test_ex_08(self):
    """Parse resourcesync_ex_8: a Resource List Index over two Resource Lists."""
    rl = ResourceList()
    rl.read(uri='resync/test/testdata/examples_from_spec/resourcesync_ex_8.xml', index_only=True)
    self.assertEqual(rl.capability, 'resourcelist')
    self.assertEqual(rl.md_at, '2013-01-03T09:00:00Z')
    self.assertEqual(len(rl.resources), 2, '2 resources')
    expected = ['http://example.com/resourcelist-part1.xml',
                'http://example.com/resourcelist-part2.xml']
    self.assertEqual(sorted(rl.uris()), expected)
# Examples 9, 10, 11 in the spec are not XML documents
def test_ex_12(self):
    """Parse resourcesync_ex_12: a Source Description covering 3 resource sets."""
    sd = SourceDescription()
    sd.read(uri='resync/test/testdata/examples_from_spec/resourcesync_ex_12.xml')
    self.assertEqual(len(sd), 3)
    expected = ['http://example.com/capabilitylist%d.xml' % (n) for n in (1, 2, 3)]
    self.assertEqual(sd.uris(), expected)
    cl1 = sd['http://example.com/capabilitylist1.xml']
    self.assertEqual(cl1.capability, 'capabilitylist')
    self.assertEqual(cl1.describedby,
                     'http://example.com/info_about_set1_of_resources.xml')
##### BUILD EXAMPLES #####
def test_build_ex_01(self):
    """Build the simple Resource List document of example 1."""
    rl = ResourceList()
    rl.md_at = '2013-01-03T09:00:00Z'
    for uri in ('http://example.com/res1', 'http://example.com/res2'):
        rl.add(Resource(uri))
    self._assert_xml_equal_ex(rl.as_xml(), 'resourcesync_ex_1')
def test_build_ex_02(self):
    """Build the Resource List of example 2 (checksums plus a mirror link)."""
    r1 = Resource(uri='http://example.com/res1',
                  lastmod='2013-01-02T13:00:00Z',
                  md5='1584abdf8ebdc9802ac0c6a7402c03b6')
    r2 = Resource(uri='http://example.com/res2',
                  lastmod='2013-01-02T14:00:00Z',
                  md5='1e0d5cb8ef6ba40c99b14c0237be735e')
    r2.link_set(rel="duplicate", href="http://mirror.example.com/res2")
    rl = ResourceList()
    rl.md_at = '2013-01-03T09:00:00Z'
    rl.add(r1)
    rl.add(r2)
    self._assert_xml_equal_ex(rl.as_xml(), 'resourcesync_ex_2')
def test_build_ex_03(self):
    """Build the simple Change List document of example 3."""
    cl = ChangeList()
    cl.md_from = '2013-01-02T00:00:00Z'
    cl.md_until = '2013-01-03T00:00:00Z'
    cl.add(Resource(uri='http://example.com/res2.pdf',
                    lastmod='2013-01-02T13:00:00Z',
                    change="updated"))
    cl.add(Resource(uri='http://example.com/res3.tiff',
                    lastmod='2013-01-02T18:00:00Z',
                    change='deleted'))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_3')
def test_build_ex_04(self):
    """Build the Resource Dump document of example 4 (one ZIP file)."""
    rd = ResourceDump()
    rd.md_at = '2013-01-03T09:00:00Z'
    zip_entry = Resource(uri='http://example.com/resourcedump.zip',
                         lastmod='2013-01-03T09:00:00Z')
    rd.add(zip_entry)
    self._assert_xml_equal_ex(rd.as_xml(), 'resourcesync_ex_4')
def test_build_ex_05(self):
    """Build the Resource Dump Manifest of example 5 (two packaged files)."""
    rdm = ResourceDumpManifest()
    rdm.md_at = '2013-01-03T09:00:00Z'
    entries = [('http://example.com/res1', '2013-01-03T03:00:00Z',
                '1584abdf8ebdc9802ac0c6a7402c03b6', '/resources/res1'),
               ('http://example.com/res2', '2013-01-03T04:00:00Z',
                '1e0d5cb8ef6ba40c99b14c0237be735e', '/resources/res2')]
    for uri, lastmod, md5, path in entries:
        rdm.add(Resource(uri=uri, lastmod=lastmod, md5=md5, path=path))
    self._assert_xml_equal_ex(rdm.as_xml(), 'resourcesync_ex_5')
def test_build_ex_06(self):
    """Build the simple Capability List document of example 6."""
    cl = CapabilityList()
    cl.describedby = 'http://example.com/info_about_set1_of_resources.xml'
    cl.up = 'http://example.com/resourcesync_description.xml'
    for name in ('resourcelist', 'resourcedump', 'changelist'):
        cl.add_capability(uri='http://example.com/dataset1/%s.xml' % (name),
                          name=name)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_6')
def test_build_ex_07(self):
    """Build the Source Description document of example 7."""
    sd = SourceDescription()
    sd.describedby = 'http://example.com/info-about-source.xml'
    caplist = Resource(uri='http://example.com/dataset1/capabilitylist.xml',
                       capability='capabilitylist')
    caplist.link_set(rel='describedby',
                     href='http://example.com/info_about_set1_of_resources.xml')
    sd.add(caplist)
    self._assert_xml_equal_ex(sd.as_xml(), 'resourcesync_ex_7')
def test_build_ex_08(self):
    """Build the Resource List Index document of example 8.

    An index is usually produced as a side effect of writing a large
    Resource List across multiple files, but it can also be constructed
    by hand as done here.
    """
    rli = ResourceList()
    rli.sitemapindex = True
    rli.md_at = '2013-01-03T09:00:00Z'
    for part in ('http://example.com/resourcelist-part1.xml',
                 'http://example.com/resourcelist-part2.xml'):
        rli.add(Resource(uri=part))
    self._assert_xml_equal_ex(rli.as_xml(), 'resourcesync_ex_8')
# Examples 9, 10, 11 in the spec are not XML documents
def test_build_ex_12(self):
    """Build the Source Description of example 12 with describedby links."""
    sd = SourceDescription()
    sd.describedby = 'http://example.com/info_about_source.xml'
    for n in (1, 2, 3):
        cap = CapabilityList(uri='http://example.com/capabilitylist%d.xml' % (n))
        cap.describedby = 'http://example.com/info_about_set%d_of_resources.xml' % (n)
        sd.add_capability_list(cap)
    self._assert_xml_equal_ex(sd.as_xml(), 'resourcesync_ex_12')
def test_build_ex_13(self):
    """Build the Capability List of example 13 (four capability documents)."""
    cl = CapabilityList()
    cl.describedby = 'http://example.com/info_about_set1_of_resources.xml'
    cl.up = 'http://example.com/resourcesync_description.xml'
    # capability document URIs follow the lowercased class names
    for doc_class in (ResourceList, ResourceDump, ChangeList, ChangeDump):
        uri = 'http://example.com/dataset1/%s.xml' % (doc_class.__name__.lower())
        cl.add_capability(capability=doc_class(uri=uri))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_13')
def test_build_ex_14(self):
    """Build the Resource List of example 14 (rich per-resource metadata)."""
    rl = ResourceList()
    rl.up = 'http://example.com/dataset1/capabilitylist.xml'
    rl.md_at = "2013-01-03T09:00:00Z"
    rl.md_completed = "2013-01-03T09:01:00Z"
    res1 = Resource(uri='http://example.com/res1',
                    lastmod='2013-01-02T13:00:00Z',
                    md5='1584abdf8ebdc9802ac0c6a7402c03b6',
                    length=8876,
                    mime_type="text/html")
    res2 = Resource(uri='http://example.com/res2',
                    lastmod='2013-01-02T14:00:00Z',
                    md5='1e0d5cb8ef6ba40c99b14c0237be735e',
                    sha256='854f61290e2e197a11bc91063afce22e43f8ccc655237050ace766adc68dc784',
                    length=14599,
                    mime_type="application/pdf")
    rl.add(res1)
    rl.add(res2)
    self._assert_xml_equal_ex(rl.as_xml(), 'resourcesync_ex_14')
def test_build_ex_15(self):
    """Build the Resource List Index of example 15 with per-part metadata."""
    # ResourceListOrdered preserves the (non-canonical) order of the example
    rl = ResourceList(resources_class=ResourceListOrdered)
    rl.sitemapindex = True
    rl.up = 'http://example.com/dataset1/capabilitylist.xml'
    rl.md_at = "2013-01-03T09:00:00Z"
    rl.md_completed = "2013-01-03T09:10:00Z"
    parts = [('http://example.com/resourcelist1.xml', '2013-01-03T09:00:00Z'),
             ('http://example.com/resourcelist2.xml', '2013-01-03T09:03:00Z'),
             ('http://example.com/resourcelist3.xml', '2013-01-03T09:07:00Z')]
    for uri, at in parts:
        rl.add(Resource(uri=uri, md_at=at))
    self._assert_xml_equal_ex(rl.as_xml(), 'resourcesync_ex_15')
def test_build_ex_16(self):
    """Build the Resource List part of example 16 that points back to its index."""
    rl = ResourceList()
    rl.up = 'http://example.com/dataset1/capabilitylist.xml'
    rl.index = 'http://example.com/dataset1/resourcelist-index.xml'
    rl.md_at = "2013-01-03T09:00:00Z"
    rl.add(Resource(uri='http://example.com/res3',
                    lastmod='2013-01-02T13:00:00Z',
                    md5='1584abdf8ebdc9802ac0c6a7402c8753',
                    length=4385,
                    mime_type="application/pdf"))
    rl.add(Resource(uri='http://example.com/res4',
                    lastmod='2013-01-02T14:00:00Z',
                    md5='4556abdf8ebdc9802ac0c6a7402c9881',
                    length=883,
                    mime_type="image/png"))
    self._assert_xml_equal_ex(rl.as_xml(), 'resourcesync_ex_16')
def test_build_ex_17(self):
    """Build the Resource Dump of example 17 (three ZIPs with manifest links)."""
    rd = ResourceDump()
    rd.up = 'http://example.com/dataset1/capabilitylist.xml'
    rd.md_at = "2013-01-03T09:00:00Z"
    rd.md_completed = "2013-01-03T09:04:00Z"
    # (part number, length, md_at, md_completed) for each ZIP
    parts = [(1, 4765, "2013-01-03T09:00:00Z", "2013-01-03T09:02:00Z"),
             (2, 9875, "2013-01-03T09:01:00Z", "2013-01-03T09:03:00Z"),
             (3, 2298, "2013-01-03T09:03:00Z", "2013-01-03T09:04:00Z")]
    for num, length, at, completed in parts:
        zip_entry = Resource(uri='http://example.com/resourcedump-part%d.zip' % (num),
                             mime_type="application/zip",
                             length=length,
                             md_at=at,
                             md_completed=completed)
        zip_entry.link_set(rel="contents",
                           href="http://example.com/resourcedump_manifest-part%d.xml" % (num),
                           mime_type="application/xml")
        rd.add(zip_entry)
    self._assert_xml_equal_ex(rd.as_xml(), 'resourcesync_ex_17')
def test_build_ex_18(self):
    """Build the Resource Dump Manifest of example 18 (two packaged files)."""
    rdm = ResourceDumpManifest()
    rdm.up = 'http://example.com/dataset1/capabilitylist.xml'
    rdm.md_at = "2013-01-03T09:00:00Z"
    rdm.md_completed = "2013-01-03T09:02:00Z"
    rdm.add(Resource(uri='http://example.com/res1',
                     lastmod='2013-01-02T13:00:00Z',
                     md5='1584abdf8ebdc9802ac0c6a7402c03b6',
                     length=8876,
                     mime_type='text/html',
                     path='/resources/res1'))
    rdm.add(Resource(uri='http://example.com/res2',
                     lastmod='2013-01-02T14:00:00Z',
                     md5='1e0d5cb8ef6ba40c99b14c0237be735e',
                     sha256='854f61290e2e197a11bc91063afce22e43f8ccc655237050ace766adc68dc784',
                     length=14599,
                     mime_type='application/pdf',
                     path='/resources/res2'))
    self._assert_xml_equal_ex(rdm.as_xml(), 'resourcesync_ex_18')
def test_build_ex_19(self):
    """Build the Change List of example 19: four changes, no md_until (open)."""
    cl = ChangeList()
    cl.up = 'http://example.com/dataset1/capabilitylist.xml'
    cl.md_from = "2013-01-03T00:00:00Z"
    changes = [('http://example.com/res1.html', '2013-01-03T11:00:00Z', 'created'),
               ('http://example.com/res2.pdf', '2013-01-03T13:00:00Z', 'updated'),
               ('http://example.com/res3.tiff', '2013-01-03T18:00:00Z', 'deleted'),
               ('http://example.com/res2.pdf', '2013-01-03T21:00:00Z', 'updated')]
    for uri, lastmod, change in changes:
        cl.add(Resource(uri=uri, lastmod=lastmod, change=change))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_19')
def test_build_ex_20(self):
    """Build the Change List Index of example 20: three lists, last still open."""
    # ResourceListOrdered preserves the (non-canonical) order of the example
    cl = ChangeListArchive(resources_class=ResourceListOrdered)
    cl.sitemapindex = True
    cl.capability_name = 'changelist'
    cl.up = 'http://example.com/dataset1/capabilitylist.xml'
    cl.md_from = "2013-01-01T00:00:00Z"
    cl.add(Resource(uri='http://example.com/20130101-changelist.xml',
                    md_from='2013-01-01T00:00:00Z',
                    md_until='2013-01-02T00:00:00Z'))
    cl.add(Resource(uri='http://example.com/20130102-changelist.xml',
                    md_from='2013-01-02T00:00:00Z',
                    md_until='2013-01-03T00:00:00Z'))
    # last list has no md_until: it is still accumulating changes
    cl.add(Resource(uri='http://example.com/20130103-changelist.xml',
                    md_from='2013-01-03T00:00:00Z'))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_20')
def test_build_ex_21(self):
    """Build the Change List of example 21 that points back to its index."""
    cl = ChangeList()
    cl.up = 'http://example.com/dataset1/capabilitylist.xml'
    cl.index = 'http://example.com/dataset1/changelist.xml'
    cl.md_from = "2013-01-02T00:00:00Z"
    cl.md_until = "2013-01-03T00:00:00Z"
    changes = [('http://example.com/res7.html', '2013-01-02T12:00:00Z', 'created'),
               ('http://example.com/res9.pdf', '2013-01-02T13:00:00Z', 'updated'),
               ('http://example.com/res5.tiff', '2013-01-02T19:00:00Z', 'deleted'),
               ('http://example.com/res7.html', '2013-01-02T20:00:00Z', 'updated')]
    for uri, lastmod, change in changes:
        cl.add(Resource(uri=uri, lastmod=lastmod, change=change))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_21')
def test_build_ex_22(self):
    """Build the Change Dump of example 22: three daily dump files."""
    cd = ChangeDump()
    cd.up = 'http://example.com/dataset1/capabilitylist.xml'
    cd.md_from = "2013-01-01T00:00:00Z"
    # (date stamp, lastmod, length, md_from, md_until) per daily ZIP
    days = [('20130101', '2013-01-01T23:59:59Z', 3109,
             "2013-01-01T00:00:00Z", "2013-01-02T00:00:00Z"),
            ('20130102', '2013-01-02T23:59:59Z', 6629,
             "2013-01-02T00:00:00Z", "2013-01-03T00:00:00Z"),
            ('20130103', '2013-01-03T23:59:59Z', 8124,
             "2013-01-03T00:00:00Z", "2013-01-04T00:00:00Z")]
    for day, lastmod, length, md_from, md_until in days:
        zip_entry = Resource(uri='http://example.com/%s-changedump.zip' % (day),
                             lastmod=lastmod,
                             length=length,
                             md_from=md_from,
                             md_until=md_until,
                             mime_type="application/zip")
        zip_entry.contents = 'http://example.com/%s-changedump-manifest.xml' % (day)
        cd.add(zip_entry)
    self._assert_xml_equal_ex(cd.as_xml(), 'resourcesync_ex_22')
def test_build_ex_23(self):
    """Build the Change Dump Manifest of example 23 (four changes)."""
    cdm = ChangeDumpManifest()
    cdm.up = "http://example.com/dataset1/capabilitylist.xml"
    cdm.md_from = "2013-01-02T00:00:00Z"
    cdm.md_until = "2013-01-03T00:00:00Z"
    cdm.add(Resource(uri="http://example.com/res7.html",
                     lastmod="2013-01-02T12:00:00Z",
                     change="created",
                     md5="1c1b0e264fa9b7e1e9aa6f9db8d6362b",
                     length=4339,
                     mime_type="text/html",
                     path="/changes/res7.html"))
    cdm.add(Resource(uri="http://example.com/res9.pdf",
                     lastmod="2013-01-02T13:00:00Z",
                     change="updated",
                     md5="f906610c3d4aa745cb2b986f25b37c5a",
                     length=38297,
                     mime_type="application/pdf",
                     path="/changes/res9.pdf"))
    # a deletion carries no content, so no checksum/length/path
    cdm.add(Resource(uri="http://example.com/res5.tiff",
                     lastmod="2013-01-02T19:00:00Z",
                     change="deleted"))
    # length deliberately given as a string: Resource should accept that too
    cdm.add(Resource(uri="http://example.com/res7.html",
                     lastmod="2013-01-02T20:00:00Z",
                     change="updated",
                     md5="0988647082c8bc51778894a48ec3b576",
                     length="5426",
                     mime_type="text/html",
                     path="/changes/res7-v2.html"))
    self._assert_xml_equal_ex(cdm.as_xml(), 'resourcesync_ex_23')
def test_build_ex_24(self):
    """Change List of example 24: one 'updated' change carrying mirror links.

    Exercises both link APIs; the call order matters because link_set()
    replaces an existing link with the same rel unless
    allow_duplicates=True, while link_add() always appends.
    """
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    c1 = Resource(uri="http://example.com/res1",
                  lastmod="2013-01-03T18:00:00Z",
                  change="updated",
                  md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                  length=8876,
                  mime_type="text/html")
    # Resource.link_set with add or change link depending on one with
    # the particular rel exists unless allow_duplicates=True.
    # Resource.link_add will always add. Test both here...
    c1.link_set(rel="duplicate",
                href="http://mirror1.example.com/res1",
                pri="1",
                modified="2013-01-03T18:00:00Z")
    # allow_duplicates=True keeps the first 'duplicate' link as well
    c1.link_set(rel="duplicate",
                href="http://mirror2.example.com/res1",
                pri="2",
                modified="2013-01-03T18:00:00Z",
                allow_duplicates=True)
    c1.link_add(rel="duplicate",
                href="gsiftp://gridftp.example.com/res1",
                pri="3",
                modified="2013-01-03T18:00:00Z")
    cl.add( c1 )
    self._assert_xml_equal_ex( cl.as_xml(), 'resourcesync_ex_24' )
def test_build_ex_25(self):
    """Build the Change List of example 25: alternate representations."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T11:00:00Z"
    change = Resource(uri="http://example.com/res1",
                      lastmod="2013-01-03T18:00:00Z",
                      change="updated")
    for href, mime in (("http://example.com/res1.html", "text/html"),
                       ("http://example.com/res1.pdf", "application/pdf")):
        # FIXME - 'type' kwarg is inconsistent with mime_type used elsewhere
        change.link_add(rel="alternate",
                        href=href,
                        modified="2013-01-03T18:00:00Z",
                        type=mime)
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_25')
def test_build_ex_26(self):
    """Build the Change List of example 26: change with a canonical link."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    change = Resource(uri="http://example.com/res1.html",
                      lastmod="2013-01-03T18:00:00Z",
                      change="updated",
                      md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                      length=8876)
    change.link_add(rel="canonical",
                    href="http://example.com/res1",
                    modified="2013-01-03T18:00:00Z")
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_26')
def test_build_ex_27(self):
    """Change List of example 27: changes advertising patch/diff links.

    Each updated resource carries a link with rel
    http://www.openarchives.org/rs/terms/patch pointing at a patch
    (JSON-patch / tiff-diff) alternative to retransferring the whole
    resource.
    """
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    c1 = Resource(uri="http://example.com/res4",
                  lastmod="2013-01-03T17:00:00Z",
                  change="updated",
                  sha256="f4OxZX_x_DFGFDgghgdfb6rtSx-iosjf6735432nklj",
                  length=56778,
                  mime_type="application/json" )
    c1.link_set(rel="http://www.openarchives.org/rs/terms/patch",
                href="http://example.com/res4-json-patch",
                modified="2013-01-03T17:00:00Z",
                hash="sha-256:y66dER_t_HWEIKpesdkeb7rtSc-ippjf9823742opld", #FIXME - inconsistent
                length=73,
                type="application/json-patch")
    # length as a string here, int above -- both accepted by Resource
    c2 = Resource(uri="http://example.com/res5-full.tiff",
                  lastmod="2013-01-03T18:00:00Z",
                  change="updated",
                  sha256="f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk",
                  length="9788456778",
                  mime_type="image/tiff")
    c2.link_set(rel="http://www.openarchives.org/rs/terms/patch",
                href="http://example.com/res5-diff",
                modified="2013-01-03T18:00:00Z",
                hash="sha-256:h986gT_t_87HTkjHYE76G558hY-jdfgy76t55sadJUYT",
                length=4533,
                type="application/x-tiff-diff" )
    cl.add( [c1,c2] )
    self._assert_xml_equal_ex( cl.as_xml(), 'resourcesync_ex_27' )
def test_build_ex_28(self):
    """Build the Change List of example 28: resource plus its metadata record."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    pdf = Resource(uri="http://example.com/res2.pdf",
                   lastmod="2013-01-03T18:00:00Z",
                   change="updated",
                   md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                   length=8876,
                   mime_type="application/pdf")
    pdf.link_set(rel="describedby",
                 href="http://example.com/res2_dublin-core_metadata.xml",
                 modified="2013-01-01T12:00:00Z",
                 type="application/xml")
    meta = Resource(uri="http://example.com/res2_dublin-core_metadata.xml",
                    lastmod="2013-01-03T19:00:00Z",
                    change="updated",
                    mime_type="application/xml")
    meta.link_set(rel="describes",
                  href="http://example.com/res2.pdf",
                  modified="2013-01-03T18:00:00Z",
                  hash="md5:1584abdf8ebdc9802ac0c6a7402c03b6",
                  length="8876",
                  type="application/pdf")
    meta.link_set(rel="profile",
                  href="http://purl.org/dc/elements/1.1/")
    cl.add([pdf, meta])
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_28')
def test_build_ex_29(self):
    """Build the Change List of example 29: Memento-style time links."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    change = Resource(uri="http://example.com/res1",
                      lastmod="2013-01-03T18:00:00Z",
                      change="updated",
                      md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                      length=8876,
                      mime_type="text/html")
    change.link_add(rel="memento",
                    href="http://example.com/20130103070000/res1",
                    modified="2013-01-02T18:00:00Z",
                    hash="md5:1584abdf8ebdc9802ac0c6a7402c03b6",
                    length="8876",
                    type="text/html")
    change.link_add(rel="timegate",
                    href="http://example.com/timegate/http://example.com/res1")
    change.link_add(rel="timemap",
                    href="http://example.com/timemap/http://example.com/res1",
                    type="application/link-format")
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_29')
def test_build_ex_30(self):
    """Build the Change List of example 30: change linked to a collection."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    change = Resource(uri="http://example.com/res1",
                      lastmod="2013-01-03T07:00:00Z",
                      change="updated",
                      md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                      length=8876,
                      mime_type="text/html")
    change.link_add(rel="collection",
                    href="http://example.com/aggregation/0601007")
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_30')
def test_build_ex_31(self):
    """Build the Change List of example 31: a change at the original source."""
    cl = ChangeList()
    cl.up = "http://example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T00:00:00Z"
    cl.add(Resource(uri="http://original.example.com/res1.html",
                    lastmod="2013-01-03T07:00:00Z",
                    change="updated",
                    md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                    length=8876,
                    mime_type="text/html"))
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_31')
def test_build_ex_32(self):
    """Build the Change List of example 32: aggregator change with a 'via' link."""
    cl = ChangeList()
    cl.up = "http://aggregator1.example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T11:00:00Z"
    change = Resource(uri="http://aggregator1.example.com/res1.html",
                      lastmod="2013-01-03T20:00:00Z",
                      change="updated",
                      md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                      length=8876,
                      mime_type="text/html")
    # 'via' points back at the resource on the original source
    change.link_add(rel="via",
                    href="http://original.example.com/res1.html",
                    modified="2013-01-03T07:00:00Z",
                    hash="md5:1584abdf8ebdc9802ac0c6a7402c03b6",
                    length="8876",
                    type="text/html")
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_32')
def test_build_ex_33(self):
    """Build the Change List of example 33: second aggregator, same 'via' link."""
    cl = ChangeList()
    cl.up = "http://aggregator2.example.com/dataset1/capabilitylist.xml"
    cl.md_from = "2013-01-03T12:00:00Z"
    change = Resource(uri="http://aggregator2.example.com/res1.html",
                      lastmod="2013-01-04T09:00:00Z",
                      change="updated",
                      md5="1584abdf8ebdc9802ac0c6a7402c03b6",
                      length=8876,
                      mime_type="text/html")
    change.link_add(rel="via",
                    href="http://original.example.com/res1.html",
                    modified="2013-01-03T07:00:00Z",
                    hash="md5:1584abdf8ebdc9802ac0c6a7402c03b6",
                    length="8876",
                    type="text/html")
    cl.add(change)
    self._assert_xml_equal_ex(cl.as_xml(), 'resourcesync_ex_33')
##### Archives tests
def test_build_archives_ex_3_1(self):
    """Build the Resource List Archive of archives example 3.1."""
    rla = ResourceListArchive()
    rla.up = 'http://example.com/dataset1/capabilitylist.xml'
    snapshots = [('http://example.com/resourcelist1.xml', '2012-11-03T09:00:00Z'),
                 ('http://example.com/resourcelist2.xml', '2012-12-03T09:00:00Z'),
                 ('http://example.com/resourcelist3.xml', '2013-01-03T09:00:00Z')]
    for uri, at in snapshots:
        rla.add(Resource(uri=uri, md_at=at))
    self._assert_xml_equal_ex(rla.as_xml(), 'archives_ex_3_1')
def test_build_archives_ex_3_2(self):
    """Build the Resource List Archive Index of archives example 3.2."""
    rlai = ResourceListArchive()
    rlai.sitemapindex = True
    rlai.up = 'http://example.com/dataset1/capabilitylist.xml'
    for uri in ('http://example.com/resourcelistarchive00001.xml',
                'http://example.com/resourcelistarchive00002.xml'):
        rlai.add(Resource(uri=uri))
    self._assert_xml_equal_ex(rlai.as_xml(), 'archives_ex_3_2')
def test_build_archives_ex_4_1(self):
    """Build the Resource Dump Archive of archives example 4.1."""
    rda = ResourceDumpArchive()
    rda.up = 'http://example.com/dataset1/capabilitylist.xml'
    dumps = [('http://example.com/resourcedump1.xml', '2012-11-03T09:05:42Z',
              "2012-11-03T09:00:00Z", "2012-11-03T09:05:01Z"),
             ('http://example.com/resourcedump2.xml', '2012-12-03T09:06:12Z',
              "2012-12-03T09:00:00Z", "2012-12-03T09:05:17Z")]
    for uri, lastmod, at, completed in dumps:
        rda.add(Resource(uri=uri, lastmod=lastmod,
                         md_at=at, md_completed=completed))
    self._assert_xml_equal_ex(rda.as_xml(), 'archives_ex_4_1')
def test_build_archives_ex_5_1(self):
    """Build the Change List Archive of archives example 5.1."""
    cla = ChangeListArchive()
    cla.up = 'http://example.com/dataset1/capabilitylist.xml'
    spans = [('http://example.com/changelist1.xml',
              '2013-01-01T09:00:00Z', '2013-01-02T09:00:00Z'),
             ('http://example.com/changelist2.xml',
              '2013-01-02T09:00:00Z', '2013-01-03T09:00:00Z'),
             ('http://example.com/changelist3.xml',
              '2013-01-03T09:00:00Z', '2013-01-04T09:00:00Z')]
    for uri, md_from, md_until in spans:
        cla.add(Resource(uri=uri, md_from=md_from, md_until=md_until))
    self._assert_xml_equal_ex(cla.as_xml(), 'archives_ex_5_1')
def test_build_archives_ex_6_1(self):
    """Build the Change Dump Archive of archives example 6.1."""
    cda = ChangeDumpArchive()
    cda.up = 'http://example.com/dataset1/capabilitylist.xml'
    weeks = [('http://example.com/changedump-w1.xml', '2012-12-20T09:02:43Z',
              "2012-01-13T09:00:00Z", "2013-01-20T09:00:00Z"),
             ('http://example.com/changedump-w2.xml', '2012-12-27T09:01:57Z',
              "2012-01-20T09:00:00Z", "2013-01-27T09:00:00Z")]
    for uri, lastmod, md_from, md_until in weeks:
        cda.add(Resource(uri=uri, lastmod=lastmod,
                         md_from=md_from, md_until=md_until))
    self._assert_xml_equal_ex(cda.as_xml(), 'archives_ex_6_1')
##### UTILITIES FOR (APPROX) COMPARISON OF XML IN EXAMPLES AND OUTPUT
def _assert_xml_equal_ex(self, xml, ex):
    """Assert that xml matches the content of spec example file ex."""
    expected = self._open_ex(ex).read()
    self._assert_xml_equal(xml, expected)
def _assert_xml_equal(self, a, b):
    """Compare two XML documents element-by-element, ignoring formatting.

    Both documents are normalized with _xml_massage_split() and the
    resulting element strings (attributes sorted alphabetically) are
    compared pairwise; a final check requires the same element count.
    """
    context = "Element mismatch in\n%s\nvs\n%s\n" % (a, b)
    aa = self._xml_massage_split(a)
    bb = self._xml_massage_split(b)
    ia = iter(aa)
    ib = iter(bb)
    try:
        while (1):
            # next() builtin instead of the Python-2-only .next() method,
            # so this also works under Python 3
            self._assert_xml_elements_equal(self._xml_reorder_attributes(next(ia)),
                                            self._xml_reorder_attributes(next(ib)),
                                            context)
    except StopIteration:
        # all is good provided there were the same number of elements
        pass
    self.assertEqual(len(aa), len(bb), "Same length check\n%s" % (context))
def _assert_xml_elements_equal(self, a, b, context):
    """Assert two massaged element strings match, with context in the message."""
    self.assertEqual(a, b, "Elements %s != %s\n%s" % (a, b, context))
def _xml_reorder_attributes(self,xml):
"""Manipulate string for single element with atts in alpha order
This is a bit of a fudge because of pattern matching. Should give
correct match for all matches, but might give matches in rare cases
that should not.
"""
return(' '.join( sorted( xml.split(' ') ) ) )
def _xml_massage_split(self,xml):
"""Massage XML for comparison and split by elements (on >)"""
xml = re.sub( r'\s+$', '', xml)
xml = re.sub( r'^\s+', '', xml)
xml = re.sub( r'\s+', ' ', xml)
xml = re.sub( r'\s*/>', ' />', xml) #always one space before end of self-closing element
xml = re.sub( r'>\s+<', '><', xml) #remove space between elements
# FUDGES, need to check these are OK
xml = re.sub( r"version='1.0'", 'version="1.0"', xml )
xml = re.sub( r"encoding='UTF-8'", 'encoding="UTF-8"', xml )
# return self.assertEqual( x, 'xx' )
return( xml.split('>') )
def _open_ex(self, ex):
    """Open spec example file ex from the testdata directory for reading."""
    path = 'resync/test/testdata/examples_from_spec/%s.xml' % (ex)
    return open(path, 'r')
if __name__ == '__main__':
    # Allow running this test module directly; unittest and
    # TestExamplesFromSpec are imported/defined earlier in the file.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestExamplesFromSpec)
    unittest.TextTestRunner(verbosity=2).run(suite)
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
from airflow.utils.log.logging_mixin import LoggingMixin
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from datetime import datetime
from urllib.parse import urlparse
from time import sleep
import re
import sys
from airflow import settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout, AirflowSkipException
from airflow.models import BaseOperator, TaskInstance
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.hdfs_hook import HDFSHook
from airflow.hooks.http_hook import HttpHook
from airflow.utils.state import State
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
    """
    Base class from which sensor operators derive their shared behavior.

    A sensor repeatedly invokes poke() at ``poke_interval`` seconds and
    succeeds once it returns True; if the total runtime exceeds
    ``timeout`` the task fails (or is skipped when ``soft_fail`` is set).

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    """
    ui_color = '#e6f1f2'

    @apply_defaults
    def __init__(
            self,
            poke_interval=60,
            timeout=60*60*24*7,
            soft_fail=False,
            *args, **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout

    def poke(self, context):
        """Criteria check; subclasses must override and return True when met."""
        raise AirflowException('Override me.')

    def execute(self, context):
        """Poll poke() until it succeeds or the timeout elapses."""
        started_at = datetime.utcnow()
        while True:
            if self.poke(context):
                break
            if (datetime.utcnow() - started_at).total_seconds() > self.timeout:
                # soft_fail turns a timeout into a SKIPPED task state
                exc_class = AirflowSkipException if self.soft_fail else AirflowSensorTimeout
                raise exc_class('Snap. Time is OUT.')
            sleep(self.poke_interval)
        self.log.info("Success criteria met. Exiting.")
class SqlSensor(BaseSensorOperator):
    """
    Runs a sql statement until a criteria is met. It will keep trying while
    sql returns no row, or if the first cell in (0, '0', '').

    :param conn_id: The connection to run the sensor against
    :type conn_id: string
    :param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / empty string value.
    """
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#7c7287'

    @apply_defaults
    def __init__(self, conn_id, sql, *args, **kwargs):
        self.sql = sql
        self.conn_id = conn_id
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        # Resolve the connection to a concrete DB hook at poke time.
        hook = BaseHook.get_connection(self.conn_id).get_hook()
        self.log.info('Poking: %s', self.sql)
        records = hook.get_records(self.sql)
        if not records:
            # No rows at all: criteria not met yet.
            return False
        # Succeed only when the first cell is a "truthy" value.
        first_cell = str(records[0][0])
        return first_cell not in ('0', '')
class MetastorePartitionSensor(SqlSensor):
    """
    An alternative to the HivePartitionSensor that talks directly to the
    MySQL db. This was created as a result of observing sub optimal
    queries generated by the Metastore thrift service when hitting
    subpartitioned tables. The Thrift service's queries were written in a
    way that wouldn't leverage the indexes.

    :param schema: the schema
    :type schema: str
    :param table: the table
    :type table: str
    :param partition_name: the partition name, as defined in the PARTITIONS
        table of the Metastore. Order of the fields does matter.
        Examples: ``ds=2016-01-01`` or
        ``ds=2016-01-01/sub=foo`` for a sub partitioned table
    :type partition_name: str
    :param mysql_conn_id: a reference to the MySQL conn_id for the metastore
    :type mysql_conn_id: str
    """
    template_fields = ('partition_name', 'table', 'schema')
    ui_color = '#8da7be'

    @apply_defaults
    def __init__(
            self, table, partition_name, schema="default",
            mysql_conn_id="metastore_mysql",
            *args, **kwargs):
        self.partition_name = partition_name
        self.table = table
        self.schema = schema
        # Defer building self.sql until the first poke() so that the
        # templated fields (table/partition_name/schema) are rendered first.
        self.first_poke = True
        self.conn_id = mysql_conn_id
        # TODO(aoen): We shouldn't be using SqlSensor here but MetastorePartitionSensor.
        # The problem is the way apply_defaults works isn't compatible with inheritance.
        # The inheritance model needs to be reworked in order to support overriding args/
        # kwargs with arguments here, then 'conn_id' and 'sql' can be passed into the
        # constructor below and apply_defaults will no longer throw an exception.
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        if self.first_poke:
            self.first_poke = False
            if '.' in self.table:
                # Support dotted "schema.table" notation.
                self.schema, self.table = self.table.split('.')
            # Query the Metastore-backing MySQL tables directly; this
            # leverages indexes the Thrift service's queries would miss.
            self.sql = """
            SELECT 'X'
            FROM PARTITIONS A0
            LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
            LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
            WHERE
                B0.TBL_NAME = '{self.table}' AND
                C0.NAME = '{self.schema}' AND
                A0.PART_NAME = '{self.partition_name}';
            """.format(self=self)
        return super(MetastorePartitionSensor, self).poke(context)
class ExternalTaskSensor(BaseSensorOperator):
    """
    Waits for a task to complete in a different DAG.

    :param external_dag_id: The dag_id that contains the task you want to
        wait for
    :type external_dag_id: string
    :param external_task_id: The task_id that contains the task you want to
        wait for
    :type external_task_id: string
    :param allowed_states: list of allowed states, default is ``['success']``
    :type allowed_states: list
    :param execution_delta: time difference with the previous execution to
        look at, the default is the same execution_date as the current task.
        For yesterday, use [positive!] datetime.timedelta(days=1). Either
        execution_delta or execution_date_fn can be passed to
        ExternalTaskSensor, but not both.
    :type execution_delta: datetime.timedelta
    :param execution_date_fn: function that receives the current execution date
        and returns the desired execution dates to query. Either execution_delta
        or execution_date_fn can be passed to ExternalTaskSensor, but not both.
    :type execution_date_fn: callable
    """
    ui_color = '#19647e'

    @apply_defaults
    def __init__(
            self,
            external_dag_id,
            external_task_id,
            allowed_states=None,
            execution_delta=None,
            execution_date_fn=None,
            *args, **kwargs):
        super(ExternalTaskSensor, self).__init__(*args, **kwargs)
        self.allowed_states = allowed_states or [State.SUCCESS]
        if execution_delta is not None and execution_date_fn is not None:
            # Fixed message: it previously referenced the non-existent
            # `execution_date` parameter and was missing a space
            # ("may"+"be" rendered as "maybe").
            raise ValueError(
                'Only one of `execution_delta` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.')
        self.execution_delta = execution_delta
        self.execution_date_fn = execution_date_fn
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id

    def poke(self, context):
        # Determine which execution date(s) of the external DAG to check.
        if self.execution_delta:
            dttm = context['execution_date'] - self.execution_delta
        elif self.execution_date_fn:
            dttm = self.execution_date_fn(context['execution_date'])
        else:
            dttm = context['execution_date']

        dttm_filter = dttm if isinstance(dttm, list) else [dttm]
        # NOTE: the loop variable was previously named `datetime`, shadowing
        # the imported datetime class inside the comprehension.
        serialized_dttm_filter = ','.join(
            [dt.isoformat() for dt in dttm_filter])
        self.log.info(
            'Poking for '
            '{self.external_dag_id}.'
            '{self.external_task_id} on '
            '{} ... '.format(serialized_dttm_filter, **locals()))
        TI = TaskInstance
        session = settings.Session()
        try:
            # Succeed only when every requested execution date has the
            # external task in one of the allowed states.
            count = session.query(TI).filter(
                TI.dag_id == self.external_dag_id,
                TI.task_id == self.external_task_id,
                TI.state.in_(self.allowed_states),
                TI.execution_date.in_(dttm_filter),
            ).count()
            session.commit()
        finally:
            # Always release the session, even if the query raises.
            session.close()
        return count == len(dttm_filter)
class NamedHivePartitionSensor(BaseSensorOperator):
    """
    Waits for a set of partitions to show up in Hive.

    :param partition_names: List of fully qualified names of the
        partitions to wait for. A fully qualified name is of the
        form ``schema.table/pk1=pv1/pk2=pv2``, for example,
        default.users/ds=2016-01-01. This is passed as is to the metastore
        Thrift client ``get_partitions_by_name`` method. Note that
        you cannot use logical or comparison operators as in
        HivePartitionSensor.
    :type partition_names: list of strings
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('partition_names', )
    ui_color = '#8d99ae'

    @apply_defaults
    def __init__(
            self,
            partition_names,
            metastore_conn_id='metastore_default',
            poke_interval=60 * 3,
            *args,
            **kwargs):
        super(NamedHivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # A single string is almost certainly a caller mistake (each char
        # would be treated as a partition name), so reject it explicitly.
        if isinstance(partition_names, basestring):
            raise TypeError('partition_names must be an array of strings')
        self.metastore_conn_id = metastore_conn_id
        self.partition_names = partition_names
        # Index of the first partition not yet confirmed; already-seen
        # partitions are not re-checked on subsequent pokes.
        self.next_poke_idx = 0

    @classmethod
    def parse_partition_name(cls, partition):
        """Split ``schema.table/partition`` into (schema, table, partition).

        :raises ValueError: if the name is not fully qualified.
        """
        # Fixed: the first parameter of this classmethod was misleadingly
        # named `self`; the bound exception variable was also unused.
        try:
            schema, table_partition = partition.split('.', 1)
            table, partition = table_partition.split('/', 1)
            return schema, table, partition
        except ValueError:
            raise ValueError('Could not parse ' + partition)

    def poke(self, context):
        # Lazily create the metastore hook on first poke so constructing
        # the operator doesn't require a live connection.
        if not hasattr(self, 'hook'):
            from airflow.hooks.hive_hooks import HiveMetastoreHook
            self.hook = HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)

        def poke_partition(partition):
            schema, table, partition = self.parse_partition_name(partition)
            self.log.info(
                'Poking for {schema}.{table}/{partition}'.format(**locals())
            )
            return self.hook.check_for_named_partition(
                schema, table, partition)

        # Advance through the list; stop at the first missing partition.
        while self.next_poke_idx < len(self.partition_names):
            if poke_partition(self.partition_names[self.next_poke_idx]):
                self.next_poke_idx += 1
            else:
                return False
        return True
class HivePartitionSensor(BaseSensorOperator):
    """
    Waits for a partition to show up in Hive.

    Note: Because ``partition`` supports general logical operators, it
    can be inefficient. Consider using NamedHivePartitionSensor instead if
    you don't need the full flexibility of HivePartitionSensor.

    :param table: The name of the table to wait for, supports the dot
        notation (my_database.my_table)
    :type table: string
    :param partition: The partition clause to wait for. This is passed as
        is to the metastore Thrift client ``get_partitions_by_filter`` method,
        and apparently supports SQL like notation as in ``ds='2015-01-01'
        AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
    :type partition: string
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('schema', 'table', 'partition',)
    ui_color = '#C5CAE9'

    @apply_defaults
    def __init__(
            self,
            table, partition="ds='{{ ds }}'",
            metastore_conn_id='metastore_default',
            schema='default',
            poke_interval=60*3,
            *args, **kwargs):
        super(HivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # Fall back to the default clause if an explicit falsy value
        # (e.g. None or '') was passed for partition.
        if not partition:
            partition = "ds='{{ ds }}'"
        self.metastore_conn_id = metastore_conn_id
        self.table = table
        self.partition = partition
        self.schema = schema

    def poke(self, context):
        # Dotted "db.table" notation overrides the schema attribute.
        if '.' in self.table:
            self.schema, self.table = self.table.split('.')
        self.log.info(
            'Poking for table {self.schema}.{self.table}, '
            'partition {self.partition}'.format(**locals()))
        # Lazily create the metastore hook on first poke.
        if not hasattr(self, 'hook'):
            from airflow.hooks.hive_hooks import HiveMetastoreHook
            self.hook = HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)
        return self.hook.check_for_partition(
            self.schema, self.table, self.partition)
class HdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS.

    :param filepath: the HDFS path (or glob) to poke for
    :param hdfs_conn_id: reference to the HDFS connection to use
    :param ignored_ext: file extensions to ignore, defaults to
        ``['_COPYING_']`` so files still being copied are not matched
    :param ignore_copying: whether to apply the ``ignored_ext`` filter
    :param file_size: minimum file size in MB for a match, or None to skip
        the size filter
    :param hook: hook class used to reach HDFS (overridable for testing)
    """
    template_fields = ('filepath',)
    ui_color = settings.WEB_COLORS['LIGHTBLUE']

    @apply_defaults
    def __init__(
            self,
            filepath,
            hdfs_conn_id='hdfs_default',
            ignored_ext=None,
            ignore_copying=True,
            file_size=None,
            hook=HDFSHook,
            *args, **kwargs):
        super(HdfsSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.hdfs_conn_id = hdfs_conn_id
        self.file_size = file_size
        # Fixed mutable default argument: the previous ['_COPYING_'] default
        # list was shared across all instances of the operator.
        self.ignored_ext = ['_COPYING_'] if ignored_ext is None else ignored_ext
        self.ignore_copying = ignore_copying
        self.hook = hook

    @staticmethod
    def filter_for_filesize(result, size=None):
        """
        Will test the filepath result and test if its size is at least `size`.

        :param result: a list of dicts returned by Snakebite ls
        :param size: the file size in MB a file should be at least to trigger True
        :return: (list) of dicts which passed the size filter
        """
        if size:
            log = LoggingMixin().log
            log.debug('Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result))
            # Convert MB to bytes before comparing against the 'length' field.
            size *= settings.MEGABYTE
            result = [x for x in result if x['length'] >= size]
            log.debug('HdfsSensor.poke: after size filter result is %s', result)
        return result

    @staticmethod
    def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
        """
        Will filter if instructed to do so the result to remove matching criteria.

        :param result: (list) of dicts returned by Snakebite ls
        :param ignored_ext: (list) of ignored extensions
        :param ignore_copying: (bool) shall we ignore ?
        :return: (list) of dicts which were not removed
        """
        if ignore_copying:
            log = LoggingMixin().log
            # Raw string avoids the invalid '\.' escape-sequence warning.
            regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
            ignored_extensions_regex = re.compile(regex_builder)
            log.debug(
                'Filtering result for ignored extensions: %s in files %s',
                ignored_extensions_regex.pattern, map(lambda x: x['path'], result)
            )
            result = [x for x in result if not ignored_extensions_regex.match(x['path'])]
            log.debug('HdfsSensor.poke: after ext filter result is %s', result)
        return result

    def poke(self, context):
        sb = self.hook(self.hdfs_conn_id).get_conn()
        self.log.info('Poking for file {self.filepath}'.format(**locals()))
        try:
            # ls() does not raise for an existing directory matched by a
            # glob, so an empty/filtered-out listing also means "not yet".
            result = [f for f in sb.ls([self.filepath], include_toplevel=False)]
            self.log.debug('HdfsSensor.poke: result is %s', result)
            result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
            result = self.filter_for_filesize(result, self.file_size)
            return bool(result)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any HDFS error simply means "not there yet".
            e = sys.exc_info()
            self.log.debug("Caught an exception !: %s", str(e))
            return False
class WebHdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS.
    """
    template_fields = ('filepath',)

    @apply_defaults
    def __init__(
            self,
            filepath,
            webhdfs_conn_id='webhdfs_default',
            *args, **kwargs):
        super(WebHdfsSensor, self).__init__(*args, **kwargs)
        self.webhdfs_conn_id = webhdfs_conn_id
        self.filepath = filepath

    def poke(self, context):
        # Import here so the webhdfs dependency is only needed at poke time.
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        c = WebHDFSHook(self.webhdfs_conn_id)
        self.log.info('Poking for file {self.filepath}'.format(**locals()))
        return c.check_for_path(hdfs_path=self.filepath)
class S3KeySensor(BaseSensorOperator):
    """
    Waits for a key (a file-like instance on S3) to be present in a S3 bucket.
    S3 being a key/value it does not support folders. The path is just a key
    a resource.

    :param bucket_key: The key being waited on. Supports full s3:// style url
        or relative path from root level.
    :type bucket_key: str
    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param wildcard_match: whether the bucket_key should be interpreted as a
        Unix wildcard pattern
    :type wildcard_match: bool
    :param aws_conn_id: a reference to the s3 connection
    :type aws_conn_id: str
    """
    template_fields = ('bucket_key', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_key,
            bucket_name=None,
            wildcard_match=False,
            aws_conn_id='aws_default',
            *args, **kwargs):
        super(S3KeySensor, self).__init__(*args, **kwargs)
        # When no bucket_name is given, bucket_key must be a full s3:// URL;
        # split it into bucket and key.
        if bucket_name is None:
            parsed_url = urlparse(bucket_key)
            if parsed_url.netloc == '':
                raise AirflowException('Please provide a bucket_name')
            else:
                bucket_name = parsed_url.netloc
                # Fixed: indexing parsed_url.path[0] raised IndexError for
                # URLs with an empty path such as "s3://bucket".
                if parsed_url.path.startswith('/'):
                    bucket_key = parsed_url.path[1:]
                else:
                    bucket_key = parsed_url.path
        self.bucket_name = bucket_name
        self.bucket_key = bucket_key
        self.wildcard_match = wildcard_match
        self.aws_conn_id = aws_conn_id

    def poke(self, context):
        from airflow.hooks.S3_hook import S3Hook
        hook = S3Hook(aws_conn_id=self.aws_conn_id)
        full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
        self.log.info('Poking for key : {full_url}'.format(**locals()))
        if self.wildcard_match:
            return hook.check_for_wildcard_key(self.bucket_key,
                                               self.bucket_name)
        else:
            return hook.check_for_key(self.bucket_key, self.bucket_name)
class S3PrefixSensor(BaseSensorOperator):
    """
    Waits for a prefix to exist. A prefix is the first part of a key,
    thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. There is the possibility to precise a delimiter to
    indicate the hierarchy or keys, meaning that the match will stop at that
    delimiter. Current code accepts sane delimiters, i.e. characters that
    are NOT special characters in the Python regex engine.

    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_name,
            prefix, delimiter='/',
            aws_conn_id='aws_default',
            *args, **kwargs):
        super(S3PrefixSensor, self).__init__(*args, **kwargs)
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        # Kept for informational/debugging purposes only.
        self.full_url = "s3://" + bucket_name + '/' + prefix
        self.aws_conn_id = aws_conn_id

    def poke(self, context):
        self.log.info('Poking for prefix : {self.prefix}\n'
                      'in bucket s3://{self.bucket_name}'.format(**locals()))
        # Import lazily so the S3 dependency is only needed at poke time.
        from airflow.hooks.S3_hook import S3Hook
        hook = S3Hook(aws_conn_id=self.aws_conn_id)
        return hook.check_for_prefix(
            prefix=self.prefix,
            delimiter=self.delimiter,
            bucket_name=self.bucket_name)
class TimeSensor(BaseSensorOperator):
    """
    Waits until the specified time of the day.

    :param target_time: time after which the job succeeds
    :type target_time: datetime.time
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, target_time, *args, **kwargs):
        super(TimeSensor, self).__init__(*args, **kwargs)
        self.target_time = target_time

    def poke(self, context):
        self.log.info('Checking if the time (%s) has come', self.target_time)
        # Compare wall-clock (UTC) time of day against the target.
        now = datetime.utcnow().time()
        return now > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
    """
    Waits for a timedelta after the task's execution_date + schedule_interval.
    In Airflow, the daily task stamped with ``execution_date``
    2016-01-01 can only start running on 2016-01-02. The timedelta here
    represents the time after the execution period has closed.

    :param delta: time length to wait after execution_date before succeeding
    :type delta: datetime.timedelta
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, delta, *args, **kwargs):
        super(TimeDeltaSensor, self).__init__(*args, **kwargs)
        self.delta = delta

    def poke(self, context):
        dag = context['dag']
        # The end of the execution period is the *following* schedule,
        # plus the configured delta.
        target_dttm = dag.following_schedule(context['execution_date'])
        target_dttm += self.delta
        self.log.info('Checking if the time (%s) has come', target_dttm)
        return datetime.utcnow() > target_dttm
class HttpSensor(BaseSensorOperator):
    """
    Executes a HTTP get statement and returns False on failure:
    404 not found or response_check function returned False.

    :param http_conn_id: The connection to run the sensor against
    :type http_conn_id: string
    :param method: The HTTP request method to use
    :type method: string
    :param endpoint: The relative part of the full url
    :type endpoint: string
    :param request_params: The parameters to be added to the GET url
    :type request_params: a dictionary of string key/value pairs
    :param headers: The HTTP headers to be added to the GET request
    :type headers: a dictionary of string key/value pairs
    :param response_check: A check against the 'requests' response object.
        Returns True for 'pass' and False otherwise.
    :type response_check: A lambda or defined function.
    :param extra_options: Extra options for the 'requests' library, see the
        'requests' documentation (options to modify timeout, ssl, etc.)
    :type extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    """
    template_fields = ('endpoint', 'request_params')

    @apply_defaults
    def __init__(self,
                 endpoint,
                 http_conn_id='http_default',
                 method='GET',
                 request_params=None,
                 headers=None,
                 response_check=None,
                 extra_options=None, *args, **kwargs):
        super(HttpSensor, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.http_conn_id = http_conn_id
        self.request_params = request_params or {}
        self.headers = headers or {}
        self.extra_options = extra_options or {}
        self.response_check = response_check
        # The HTTP hook is created once at construction time and reused on
        # every poke.
        self.hook = HttpHook(
            method=method,
            http_conn_id=http_conn_id)

    def poke(self, context):
        self.log.info('Poking: %s', self.endpoint)
        try:
            response = self.hook.run(self.endpoint,
                                     data=self.request_params,
                                     headers=self.headers,
                                     extra_options=self.extra_options)
            if self.response_check:
                # run content check on response
                return self.response_check(response)
        except AirflowException as ae:
            # A 404 means "not there yet"; any other HTTP error propagates.
            if str(ae).startswith("404"):
                return False
            raise ae
        # No response_check configured: any non-error response passes.
        return True
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy, ReplicaContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class _TestReplicaContext(distribute_lib.ReplicaContext):
  """ReplicaContext stub whose merge_call() just echoes the `test_arg` kwarg."""

  def merge_call(self, fn, *args, **kwargs):
    # `fn` and positional args are ignored; tests use this to verify that
    # kwargs are threaded through merge_call().
    return kwargs["test_arg"]
def _get_test_variable(name, synchronization, aggregation):
return {
"name": name,
"synchronization": synchronization,
"aggregation": aggregation
}
def _test_input_fn(input_context):
  """Input fn yielding an infinite dataset of the scalar 1.0."""
  del input_context  # unused
  return dataset_ops.DatasetV2.from_tensors(1.).repeat()
class _TestStrategy(distribute_lib.Strategy):
  """Minimal Strategy wired to _TestExtended, for exercising the base APIs."""

  def __init__(self):
    super(_TestStrategy, self).__init__(_TestExtended(self))
class _TestExtended(distribute_lib.StrategyExtendedV1):
  """StrategyExtended with just enough overridden to run the tests below.

  Uses a single local CPU device; variable creation returns a plain dict
  (via _get_test_variable) instead of a real variable.
  """

  def __init__(self, distribute):
    super(_TestExtended, self).__init__(distribute)
    device_map = values.ReplicaDeviceMap(["/device:CPU:0"])
    worker_device_pairs = [("", ["/device:CPU:0"])]
    self._input_workers = input_lib.InputWorkers(device_map,
                                                 worker_device_pairs)

  def _call_for_each_replica(self, fn, args, kwargs):
    # Single replica: run fn once inside a _TestReplicaContext.
    with _TestReplicaContext(
        self._container_strategy(),
        replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
      return fn(*args, **kwargs)

  def _create_variable(self, next_creator, *args, **kwargs):
    # Intentionally does NOT call next_creator: tests assert on the dict.
    return _get_test_variable(kwargs["name"], kwargs["synchronization"],
                              kwargs["aggregation"])

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    return input_lib.InputFunctionIterator(input_fn, self._input_workers,
                                           [distribute_lib.InputContext()],
                                           self._container_strategy())

  def _experimental_distribute_datasets_from_function(self, dataset_fn):
    return dataset_fn(distribute_lib.InputContext())

  def _local_results(self, value):
    return (value,)

  def _reduce_to(self, reduce_op, value, destinations):
    # Single replica: reduction is the identity.
    del reduce_op, destinations
    return value

  def _experimental_make_numpy_dataset(self, numpy_input, session):
    del session
    return dataset_ops.DatasetV2.from_tensor_slices(numpy_input)

  def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
                                          initial_loop_values=None):
    # TODO(tomhennigan) This is missing many things (e.g. ctx.run_op).
    ctx = input_lib.MultiStepContext()
    for _ in range(iterations):
      fn(ctx, iterator.get_next())
    return ctx

  def _update(self, var, fn, args, kwargs, group):
    # The implementations of _update() and _update_non_slot() are identical
    # except _update() passes `var` as the first argument to `fn()`.
    return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)

  def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
    del colocate_with
    result = fn(*args, **kwargs)
    if group:
      return result
    else:
      return nest.map_structure(self._unwrap, result)
def _assert_in_default_state(t):
  """Asserts (via TestCase `t`) that no tf.distribute strategy is in effect."""
  t.assertIs(ds_context._get_default_replica_context(),
             ds_context.get_replica_context())
  t.assertIs(None, ds_context.get_cross_replica_context())
  t.assertFalse(ds_context.in_cross_replica_context())
  t.assertIs(ds_context._get_default_strategy(), ds_context.get_strategy())
  t.assertFalse(ds_context.has_strategy())
def _run_in_and_out_of_scope(unbound_test_method):
  """Decorator: runs a test outside, inside, and under a foreign scope.

  The wrapped method receives (test_case, dist) and is expected to pass in
  the first two settings and raise under a different strategy's scope.
  """

  def wrapper(test_case):
    dist = _TestStrategy()
    # Running in the default (replica) scope should be supported.
    _assert_in_default_state(test_case)
    unbound_test_method(test_case, dist)
    # As well as running in the strategy scope.
    with dist.scope():
      unbound_test_method(test_case, dist)
    _assert_in_default_state(test_case)
    # When run under a different strategy the test method should fail.
    another_strategy = _TestStrategy()
    msg = "Mixing different .*Strategy objects"
    with test_case.assertRaisesRegexp(RuntimeError, msg):
      with another_strategy.scope():
        unbound_test_method(test_case, dist)
  return wrapper
class TestStrategyTest(test.TestCase):
def testCallForEachReplica(self):
_assert_in_default_state(self)
dist = _TestStrategy()
def run_fn():
replica_context = ds_context.get_replica_context()
self.assertTrue(replica_context is not None)
self.assertIs(None, ds_context.get_cross_replica_context())
self.assertFalse(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertEqual("foo", replica_context.merge_call(None, test_arg="foo"))
expected_value = _get_test_variable(
"bar", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="bar"))
dist.extended.call_for_each_replica(run_fn)
with dist.scope():
dist.extended.call_for_each_replica(run_fn)
_assert_in_default_state(self)
def testScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
_assert_in_default_state(self)
def testScopeDeviceNestingError(self):
_assert_in_default_state(self)
dist = _TestStrategy()
# Open a device scope with dist.scope().
dist.extended._default_device = "/device:GPU:0"
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with ops.device("/device:CPU:0"):
with self.assertRaisesRegexp(RuntimeError, "Device scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_creator_scope(creator):
with self.assertRaisesRegexp(RuntimeError,
"Variable creator scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarScopeNestingError(self):
# We create a new graph here to simplify clean-up, since the error
# we are triggering happens in the middle of scope.__exit__() and
# leaves us in a weird state.
with ops.Graph().as_default():
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_scope("AA"):
with self.assertRaisesRegexp(RuntimeError,
"Variable scope nesting error"):
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testSettingSynchronizationAndAggregation(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.ON_WRITE,
variable_scope.VariableAggregation.MEAN)
self.assertDictEqual(
expected_value,
variable_scope.variable(
1.0,
name="baz",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN))
_assert_in_default_state(self)
def testSetStrategy(self):
_assert_in_default_state(self)
dist = _TestStrategy()
dist2 = _TestStrategy()
ds_context.experimental_set_strategy(dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
ds_context.experimental_set_strategy(dist2)
self.assertIs(dist2, ds_context.get_strategy())
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSetStrategyInScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(_TestStrategy())
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(dist)
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSameScopeNesting(self):
_assert_in_default_state(self)
dist = _TestStrategy()
scope_a = dist.scope()
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
scope_b = dist.scope()
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
dist2 = _TestStrategy()
scope2 = dist2.scope()
with self.assertRaisesRegexp(
RuntimeError,
"Mixing different tf.distribute.Strategy objects"):
with scope2:
pass
_assert_in_default_state(self)
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
_assert_in_default_state(self)
@_run_in_and_out_of_scope
def testMakeInputFnIterator(self, dist):
self.assertIsNotNone(dist.make_input_fn_iterator(_test_input_fn))
@_run_in_and_out_of_scope
def testReduce(self, dist):
x = constant_op.constant(1.)
x_r = dist.reduce(reduce_util.ReduceOp.MEAN, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
def testReductions_acceptStringOps(self):
dist = _TestStrategy()
for op in ("mean", "MEAN", "sum", "SUM"):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r = dist.reduce(op, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r = dist.extended.reduce_to(op, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r, y_r = dist.extended.batch_reduce_to(op,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testExperimentalMakeNumpyDataset(self, dist):
numpy_input = np.ones([10], dtype=np.float32)
dataset = dist.experimental_make_numpy_dataset(numpy_input)
self.assertEqual(
self.evaluate(dataset.reduce(0., lambda a, b: a + b)), 10.)
@_run_in_and_out_of_scope
def testExperimentalRunStepsOnIterator(self, dist):
all_inputs = []
dataset = dataset_ops.Dataset.from_tensors(1.).repeat()
dist.extended.experimental_run_steps_on_iterator(
lambda _, inputs: all_inputs.append(self.evaluate(inputs)),
dataset_ops.make_one_shot_iterator(dataset))
self.assertEqual(all_inputs, [1.])
@_run_in_and_out_of_scope
def testReduceTo(self, dist):
x = constant_op.constant(1.)
x_r = dist.extended.reduce_to(reduce_util.ReduceOp.MEAN, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
@_run_in_and_out_of_scope
def testBatchReduceTo(self, dist):
    """batch_reduce_to reduces each (value, destination) pair independently."""
    first = constant_op.constant(1.)
    second = constant_op.constant(1.)
    first_r, second_r = dist.extended.batch_reduce_to(
        reduce_util.ReduceOp.MEAN, ((first, "/CPU:0"), (second, "/CPU:0")))
    self.assertEqual(self.evaluate(first), self.evaluate(first_r))
    self.assertEqual(self.evaluate(second), self.evaluate(second_r))
@_run_in_and_out_of_scope
def testUpdate(self, dist):
    """extended.update forwards the exact variable and args to the fn."""
    with dist.scope():
        var = variables.Variable(1.)
    tensor = constant_op.constant(2.)

    def assign_fn(received_var, received_tensor):
        # The identical objects must be forwarded, not copies.
        self.assertIs(received_var, var)
        self.assertIs(received_tensor, tensor)

    dist.extended.update(var, assign_fn, (tensor,))
@_run_in_and_out_of_scope
def testUpdateNonSlot(self, dist):
    """update_non_slot invokes the given fn exactly once."""
    colocate_with = constant_op.constant(2.)
    calls = []
    dist.extended.update_non_slot(colocate_with, lambda: calls.append(1))
    self.assertEqual(len(calls), 1)
# _TestStrategy2 is like _TestStrategy, except it doesn't change variable
# creation.
class _TestStrategy2(distribute_lib.Strategy):
    """Test strategy wired to _TestExtended2 (no variable-creation override)."""

    def __init__(self):
        super(_TestStrategy2, self).__init__(_TestExtended2(self))
class _TestExtended2(_TestExtended):
    """Extended impl whose _create_variable defers entirely to the next creator."""

    def _create_variable(self, next_creator, *args, **kwargs):
        # Pass-through: behaves as if no strategy-specific creator existed.
        return next_creator(*args, **kwargs)
class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
    """Tests for the implicit "default" strategy used outside any scope."""

    def testMergeCall(self):
        """merge_call on the default replica context enters cross-replica
        context with the default strategy."""
        _assert_in_default_state(self)

        def merge_fn(dist, s):
            # Inside merge_call: cross-replica context, default strategy...
            self.assertIs(ds_context._get_default_strategy(), dist)
            self.assertIs(None, ds_context.get_replica_context())
            self.assertIs(dist, ds_context.get_cross_replica_context())
            self.assertTrue(ds_context.in_cross_replica_context())
            self.assertIs(dist, ds_context.get_strategy())
            # ...yet the default strategy does not count as an explicit one.
            self.assertFalse(ds_context.has_strategy())
            return "foo_" + s

        replica_ctx = ds_context.get_replica_context()
        self.assertIs(ds_context._get_default_replica_context(), replica_ctx)
        self.assertEqual("foo_bar", replica_ctx.merge_call(merge_fn, args=("bar",)))
        _assert_in_default_state(self)

    def testScopeMostlyNoOp(self):
        """Entering the default strategy's scope is (mostly) a no-op, and it
        is re-entrant; mixing in another strategy's scope raises."""
        _assert_in_default_state(self)

        test_strategy = _TestStrategy2()
        with test_strategy.scope():
            variable_scope.variable(1.0, name="before")

        default_strategy = ds_context._get_default_strategy()
        scope = default_strategy.scope()
        with scope:
            _assert_in_default_state(self)

            # Creating a variable in a different strategy's scope while the
            # default scope is active must fail.
            with test_strategy.scope():
                with self.assertRaisesRegexp(
                    RuntimeError, "Mixing different tf.distribute.Strategy objects"):
                    variable_scope.variable(1.0, name="error")

            # The default scope is re-entrant; mixing still fails inside.
            with scope:
                _assert_in_default_state(self)

                with test_strategy.scope():
                    with self.assertRaisesRegexp(
                        RuntimeError, "Mixing different tf.distribute.Strategy objects"):
                        variable_scope.variable(1.0, name="also_error")

            _assert_in_default_state(self)

        _assert_in_default_state(self)
        # Once fully out of the default scope, other strategies work again.
        with test_strategy.scope():
            variable_scope.variable(1.0, name="after")

    def testExperimentalRunV2(self):
        """experimental_run_v2 on the default strategy simply runs the fn."""
        default_strategy = ds_context._get_default_strategy()
        dataset = dataset_ops.Dataset.range(10).batch(2)
        iterator = default_strategy.extended._make_dataset_iterator(dataset)
        next_val = iterator.get_next()

        def train_step(input_data):
            return input_data

        for _ in range(2):
            default_strategy.experimental_run_v2(train_step, args=(next_val,))

    @combinations.generate(combinations.combine(mode=["graph", "eager"]))
    def testDistributedDatasets(self):
        """experimental_distribute_dataset yields the underlying batches."""
        default_strategy = ds_context._get_default_strategy()
        if context.executing_eagerly():
            dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
            dist_dataset = default_strategy.experimental_distribute_dataset(
                dataset_fn(distribute_lib.InputContext()))
            next_val = next(iter(dist_dataset))
        else:
            # Graph mode needs an explicitly initialized V1 iterator.
            dataset_fn = lambda _: dataset_ops.DatasetV1.range(10).batch(2)
            dist_dataset = default_strategy.experimental_distribute_dataset(
                dataset_fn(distribute_lib.InputContext()))
            iterator = dist_dataset.make_initializable_iterator()
            self.evaluate(iterator.initializer)
            next_val = iterator.get_next()
        self.assertAllEqual([0, 1], self.evaluate(next_val))

    @combinations.generate(combinations.combine(mode=["graph", "eager"]))
    def testDistributedDatasetsFromFunction(self):
        """experimental_distribute_datasets_from_function works both modes."""
        default_strategy = ds_context._get_default_strategy()
        if context.executing_eagerly():
            dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
            dist_dataset_from_func = \
                default_strategy.experimental_distribute_datasets_from_function(
                    dataset_fn)
            next_val = next(iter(dist_dataset_from_func))
            self.assertAllEqual([0, 1], self.evaluate(next_val))
        else:
            dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
            dist_dataset_from_func = \
                default_strategy.experimental_distribute_datasets_from_function(
                    dataset_fn)
            # Graph mode: just verify an initializable iterator can be built.
            dataset_ops.make_initializable_iterator(dist_dataset_from_func)
class InputContextTest(test.TestCase):
    """Unit tests for distribute_lib.InputContext."""

    def testProperties(self):
        """Constructor arguments are exposed verbatim as properties."""
        ctx = distribute_lib.InputContext(
            num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
        self.assertEqual(ctx.num_replicas_in_sync, 6)
        self.assertEqual(ctx.input_pipeline_id, 1)
        self.assertEqual(ctx.num_input_pipelines, 2)

    def testPerReplicaBatchSize(self):
        """Global batch must divide evenly across replicas, else ValueError."""
        ctx = distribute_lib.InputContext(
            num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
        self.assertEqual(ctx.get_per_replica_batch_size(12), 2)
        with self.assertRaises(ValueError):
            ctx.get_per_replica_batch_size(13)
if __name__ == "__main__":
    # Defer to the TensorFlow test runner when executed as a script.
    test.main()
| |
# PhoPlay.py
# Copyright (c) <2012, 2015>, <Ben Sampson> All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""PhoPlay.py Entry point for the application"""
import signal
import sys
import os
import argparse
from PyQt4.QtGui import QMainWindow, QApplication, QFileDialog, qApp
from PyQt4.QtGui import QFileDialog, QMessageBox
from PyQt4.phonon import Phonon
from ui_MainWindow import Ui_MainWindow
class PhoPlay(QMainWindow, Ui_MainWindow):
    """Main class for the application, inherits class generated from pyuic.

    Wraps a Phonon MediaObject/AudioOutput pair behind the pyuic-generated
    main window; supports an optional CLI mode with the window hidden.
    """

    # Glob patterns offered in the "Open" file dialog.
    AUDIO_FILE_TYPES = '*.aac *.aiff *.au *.bwf *.flac *.m4a *.m4p *.mp4 \
*.mp3 *.ogg *.ra *.raw *.rm *.wav *.wma *.wv'

    def __init__(self, fileName=None, disableGui=False, quitOnFinish=False):
        """Build the window; optionally start playing fileName right away.

        disableGui hides the window (CLI mode); quitOnFinish exits the app
        once the current track finishes.
        """
        super().__init__()
        super().setupUi(self)
        # Copy config options to the object
        self.fileName = fileName
        self.disableGui = disableGui
        self.quitOnFinish = quitOnFinish
        # Setup the GUI
        self.setupGui()
        # Process commandline args
        if self.fileName:
            self.playNew(self.fileName)

    def setupGui(self):
        """Set up the GUI: wire signals and build the Phonon pipeline."""
        self.setWindowTitle('PhoPlay')
        # Cache available mime types (shown later via the Info dialog)
        self.availableMimeTypes = \
            Phonon.BackendCapabilities.availableMimeTypes()
        #print(type(self.availableMimeTypes))
        #print("Available Mime Types")
        #print(self.availableMimeTypes)
        # Print available Audio Output Devices
        #for device in Phonon.BackendCapabilities.availableAudioOutputDevices():
        #print("Available Output Devices")
        #print(device.index(), device.name(), device.description())
        # Connect some slots
        # Menus and buttons
        self.openAction.triggered.connect(self.openFile)
        self.exitAction.triggered.connect(qApp.quit)
        self.infoAction.triggered.connect(self.showInfoDialog)
        self.aboutAction.triggered.connect(self.showAboutDialog)
        self.stopButton.clicked.connect(self.stop)
        self.playButton.clicked.connect(self.play)
        self.pauseButton.clicked.connect(self.pause)
        # Setup phonon player
        self.mediaObject = Phonon.MediaObject(self)
        self.mediaObject.setTickInterval(100)  # tick every 100 ms
        self.mediaObject.tick.connect(self.tick)
        self.mediaObject.finished.connect(self.finished)
        self.mediaObject.stateChanged.connect(self.catchStateChanged)
        self.mediaObject.totalTimeChanged.connect(self.totalTime)
        # Bind AudioOutput with MediaObject
        self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
        Phonon.createPath(self.mediaObject, self.audioOutput)
        # Setup the seek slider
        self.seekSlider.setMediaObject(self.mediaObject)
        # Setup the volume slider
        self.volumeSlider.setAudioOutput(self.audioOutput)
        #self.statusbar.showMessage('hello')
        # Don't show the GUI if called that way, AKA CLI mode
        if not self.disableGui:
            self.show()

    def openFile(self):
        """Open the file browser and play the chosen file, if any."""
        fileName = QFileDialog.getOpenFileName(
            self, 'Open File', os.getcwd(),
            'Audio (' + self.AUDIO_FILE_TYPES + ');; \ All Files (*.*)')
        if fileName:
            print('Atempting to play' + str(fileName))
            self.playNew(fileName)

    def stop(self):
        """Stop playing"""
        #print('Calling stop')
        self.mediaObject.stop()

    def playNew(self, url):
        """Play a new track"""
        # Set media object to play url
        # Set window title to URL
        # Update track time
        self.mediaObject.setCurrentSource(Phonon.MediaSource(url))
        self.setWindowTitle(os.path.basename(url))
        self.mediaObject.play()
        #print(self.mediaObject.metaData())

    def play(self):
        """Play / Resume Current playback"""
        #print('Calling play')
        self.mediaObject.play()

    def pause(self):
        """Pause current playback"""
        #print('Calling pause')
        self.mediaObject.pause()

    def tick(self, time):
        """Catch the signal from the media object and update the time"""
        # time is received as time in milliseconds, convert to h m s
        h, m, s = self.msToHms(time)
        self.timeLabel.setText('%02d:%02d:%02d' % (h, m, s))

    def finished(self):
        """Catch the signal emitted when the track has finished;
        if the app is in CLI mode then quit after the track has finished.
        """
        #print('Caught finished')
        if self.disableGui or self.quitOnFinish:
            qApp.quit()

    def totalTime(self, newTotalTime):
        """Catch the signal emitted when the total time is known or
        updated and update the totalLabel.
        """
        h, m, s = self.msToHms(newTotalTime)
        self.totalLabel.setText('%02d:%02d:%02d' % (h, m, s))

    def catchStateChanged(self, newstate, oldstate):
        """Catch the stateChanged signal to check for errors; quit app
        if in CLI mode.
        """
        #print('State = ' + str(newstate))
        #print('Meta Info')
        #print(self.mediaObject.metaData())
        if newstate == Phonon.ErrorState:
            print('Error playing back file')
            if self.disableGui:
                qApp.quit()

    def showInfoDialog(self):
        """Show the Mime types dialog"""
        #print('Info Button Clicked')
        QMessageBox.information(
            self, 'Available Mime Types', str(self.availableMimeTypes))

    def showAboutDialog(self):
        """Show the about application dialog"""
        QMessageBox.about(self, 'About PhoPlay', 'PhoPlay\n \
(c) 2012, 2015 Ben Sampson\n \
License: BSD (http://www.opensource.org/licenses/bsd-license.php) \n \
Icons (c) Jojo Mendoza (http://www.deleket.com)')

    def msToHms(self, timeMs):
        """Convert timeMS in milliseconds to h m s format"""
        # NOTE(review): '/' is true division on Python 3, so h/m/s come back
        # as floats; the '%02d' formats in callers truncate them for display.
        # Consider integer division (timeMs // 1000) — confirm callers first.
        s = timeMs / 1000
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        return h, m, s
if __name__ == "__main__":
    # Let Ctrl-C terminate the Qt application immediately.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # Set up the commandline args.
    parser = argparse.ArgumentParser(description='Simple Audio Player')
    parser.add_argument(
        'fileName', metavar='file', nargs='?', help='filename to play')
    # FIX: the short and long option strings must be passed as SEPARATE
    # arguments; the previous single string '-x, --no-gui' registered one
    # literal, unusable option name.
    parser.add_argument(
        '-x', '--no-gui', dest='nogui', action='store_true',
        help='disable the GUI / CLI mode (requires a file)')
    parser.add_argument(
        '-q', '--quit-finished', dest='quitOnFinish', action='store_true',
        help='quit when finished playing (no effect when used with -x)')
    args = parser.parse_args()
    # CLI mode needs a file on the command line — there is no GUI to pick one.
    if not args.fileName and args.nogui:
        parser.error('fileName must be specified with -x, --no-gui mode')
    app = QApplication(sys.argv)
    # Need this for dbus on linux.
    app.setApplicationName('PhoPlay')
    main = PhoPlay(args.fileName, args.nogui, args.quitOnFinish)
    sys.exit(app.exec_())
| |
"""Container classes for extraction method components.
"""
import sys
import os
from gridd import (cellfeatures, rowfeatures, estimators, classifiers,
colproperties, annotations, evaluate, loaders, results,
util)
# Shorthand for the shared verbose-print helper used throughout this module.
_v = util.verbose_print
class Extractor(object):
    """Coordinates the extraction pipeline: load a spreadsheet file,
    classify its rows, and derive per-column properties.

    Subclasses plug in concrete pipeline stages by overriding the class
    attributes at the bottom (cell_extractor, row_extractor, row_estimator,
    row_classifier, col_extractor).
    """

    def __init__(self, verbose=False,
                 estimator_file=None, classifier_file=None,
                 show_estimates=False):
        # Default file type; updated on every load_file() call.
        self.filetype = 'XLS'
        self.verbose = verbose
        # Optional paths to pre-trained estimator/classifier models.
        self.estimator_file = estimator_file
        self.classifier_file = classifier_file
        self.show_estimates = show_estimates

    def extract(self, filename, extract_cols=False):
        """Run the full pipeline on filename; one Results per table.

        NOTE(review): extract_cols is currently unused.
        """
        cells = self.load_file(filename).get_cells_by_table()
        result_list = []
        for _, table_cells in cells:
            row_classes = self.classify(table_cells)
            col_properties = self.get_col_properties(table_cells, row_classes)
            res = results.Results(table_cells, row_classes, col_properties)
            result_list.append(res)
        return result_list

    def load_file(self, filename, *args, **kwargs):
        """Load filename via loaders and record its detected file type."""
        loaded_file = loaders.load_file(filename, *args, **kwargs)
        self.filetype = loaded_file.filetype
        return loaded_file

    def classify(self, cells, estimator_file=None):
        """Classify each row of cells; returns the row classes."""
        estimator_file = estimator_file or self.estimator_file
        # initialize processing stages
        c_ex = self.cell_extractor()
        r_ex = self.row_extractor(self.filetype)
        r_est = self.row_estimator(self.filetype, estimator_file,
                                   self.show_estimates)
        r_cl = self.row_classifier(self.filetype, self.classifier_file)
        # TODO: add relational/nonrelational classifier step
        # perform processing: cells -> cell features -> row features
        # -> per-class estimates -> final row classes
        cell_features = c_ex.get_features(cells)
        row_features = r_ex.get_features(cell_features)
        row_estimates = r_est.get_estimates(row_features)
        row_classes = r_cl.classify(row_estimates)
        return row_classes

    def get_col_properties(self, table_cells, row_classes):
        """Derive per-column properties given the classified rows."""
        col_extractor = self.col_extractor()
        col_properties = col_extractor.get_properties(table_cells, row_classes)
        return col_properties

    def train(self, annotation_list, estimator_file=None):
        """Train the row estimator from annotated relational tables."""
        # train relational/nonrelational classifier
        # train row class estimator
        # train row class classifier
        estimator_file = estimator_file or self.estimator_file
        train_set = []
        for ann in annotation_list:
            # Only relational tables carry usable row annotations.
            if ann['tableType'] != 'relational':
                continue
            row_classes = ann['rowClasses']
            row_features = self.get_features(ann['file'], ann['tableId'])
            # Pair features with labels for rows present in both.
            f_list = [(row_features[row], row_classes[row])
                      for row in row_features if row in row_classes]
            train_set.append(f_list)
        _v('Training classifier')
        self.row_estimator(self.filetype).train(train_set, estimator_file)
        _v('Done training classifier')

    def get_features(self, filename, table):
        """Compute row features for a single table of filename."""
        _v('loading file %s, table %s' % (filename, table))
        cells = self.load_file(filename, table=table).get_cells()
        c_ex = self.cell_extractor()
        r_ex = self.row_extractor(self.filetype)
        cell_features = c_ex.get_features(cells)
        row_features = r_ex.get_features(cell_features)
        return row_features

    def test(self, annotation_list, estimator_file=None, out=sys.stdout):
        """Performs evaluation of an extractor by comparing extractor
        performance with true schema information from annotation_list
        parameter.
        """
        estimator_file = estimator_file or self.estimator_file
        result_list = []
        for ann in annotation_list:
            if ann['tableType'] != 'relational':
                # TODO: ???
                continue
            true_classes = ann['rowClasses']
            filename = ann['file']
            table = int(ann['tableId'])
            _v('loading file %s, table %s' % (filename, table))
            cells = self.load_file(filename, table=table).get_cells()
            row_classes = self.classify(cells, estimator_file)
            # Compare only rows present in both annotation and output.
            row_list = sorted(set(true_classes).intersection(set(row_classes)))
            # print message if any annotated rows are not in our output
            missing_rows = [r for r in true_classes if r not in row_classes]
            if len(missing_rows) > 0:
                _v('row mismatch: %r' % (sorted(missing_rows),))
            true_str = ''.join(true_classes[r] for r in row_list)
            test_str = ''.join(row_classes[r] for r in row_list)
            assert len(true_str) == len(test_str)
            result_list.append((true_str, test_str))
        out.write(str(evaluate.evaluate_results(result_list)))
        return result_list

    # overwritten by child extractors
    cell_extractor = staticmethod(lambda *x: None)
    row_extractor = staticmethod(lambda *x: None)
    row_estimator = staticmethod(lambda *x: None)
    row_classifier = staticmethod(lambda *x: None)
    col_extractor = staticmethod(lambda *x: None)
#####################
# Extractor Classes #
#####################
class DefaultExtractor(Extractor):
    """Baseline pipeline: cell features, similarity row features, Bayes
    estimator, max-estimate classifier, standard column properties."""
    cell_extractor = cellfeatures.CellFeatureExtractor
    row_extractor = rowfeatures.SimFeatureExtractor
    row_estimator = estimators.BayesEstimator
    row_classifier = classifiers.MaxEstClassifier
    col_extractor = colproperties.ColPropertyExtractor
# Equivalent to DefaultExtractor
class Bayes(DefaultExtractor):
    """Alias of DefaultExtractor, kept for name-based lookup."""
    pass
class Grammar(DefaultExtractor):
    """Default pipeline with a grammar-based row classifier."""
    row_classifier = classifiers.GrammarClassifier
class DFA(DefaultExtractor):
    """Default pipeline with a DFA row classifier."""
    row_classifier = classifiers.DFAClassifier
class WebTables(DefaultExtractor):
    """WebTables-style row features and estimator."""
    row_extractor = rowfeatures.WebTablesFeatureExtractor
    row_estimator = estimators.WebTablesEstimator
class WebTablesRidor(WebTables):
    """WebTables features with a Ridor estimator."""
    row_estimator = estimators.WebTablesRidorEstimator
class WebTablesPart(WebTables):
    """WebTables features with a PART estimator."""
    row_estimator = estimators.WebTablesPartEstimator
class WebTablesRandomForest(WebTables):
    """WebTables features with a random-forest estimator."""
    row_estimator = estimators.WebTablesRandomForestEstimator
class RandomForest(DefaultExtractor):
    """Default pipeline with a random-forest estimator."""
    row_estimator = estimators.RandomForestEstimator
class RandomForestDFA(DefaultExtractor):
    """Random-forest estimator combined with a DFA classifier."""
    row_estimator = estimators.RandomForestEstimator
    row_classifier = classifiers.DFAClassifier
class C45(DefaultExtractor):
    """Default pipeline with a C4.5 estimator."""
    row_estimator = estimators.C45Estimator
class ConjunctiveRule(DefaultExtractor):
    """WebTables features with a conjunctive-rule estimator."""
    row_extractor = rowfeatures.WebTablesFeatureExtractor
    row_estimator = estimators.ConjunctiveRuleEstimator
class CRF(DefaultExtractor):
    """Default pipeline with a CRFSuite estimator."""
    row_estimator = estimators.CRFSuiteEstimator
class CRFDFA(DefaultExtractor):
    """CRFSuite (SWIG binding) estimator with a DFA classifier."""
    row_estimator = estimators.CRFSuiteEstimatorSWIG
    row_classifier = classifiers.DFAClassifier
class CRFBinary(DefaultExtractor):
    """Default pipeline with a binary-feature CRFSuite estimator."""
    row_estimator = estimators.CRFSuiteEstimatorBinary
class CRFBinaryDFA(DefaultExtractor):
    """Binary-feature CRFSuite (SWIG) estimator with a DFA classifier."""
    row_estimator = estimators.CRFSuiteEstimatorBinarySWIG
    row_classifier = classifiers.DFAClassifier
class CRFBinning(DefaultExtractor):
    """Default pipeline with a binning CRFSuite estimator."""
    row_estimator = estimators.CRFSuiteEstimatorBinning
class CRFBinary2(DefaultExtractor):
    """Default pipeline with the second binary CRFSuite estimator variant."""
    row_estimator = estimators.CRFSuiteEstimatorBinary2
class CRFContinuous(DefaultExtractor):
    """Default pipeline with a continuous-feature CRFSuite estimator."""
    row_estimator = estimators.CRFSuiteEstimatorContinuous
class BayesBin(Bayes):
    """Bayes pipeline with a binned Bayes estimator."""
    row_estimator = estimators.BayesEstimatorBin
# Used by command runners.
# Maps the lowercase CLI extractor-type name to its Extractor subclass;
# consumed by get_extractor() below.
EXTRACTOR_TYPES = {
    'grammar': Grammar,
    'bayes': Bayes,
    'bayesbin': BayesBin,
    'webtables': WebTables,
    'conjunctiverule': ConjunctiveRule,
    'webtables.ridor': WebTablesRidor,
    'webtables.part': WebTablesPart,
    'webtables.randomforest': WebTablesRandomForest,
    'dfa': DFA,
    'randomforest': RandomForest,
    'randomforestdfa': RandomForestDFA,
    'c4.5': C45,
    'crf': CRF,
    'crfdfa': CRFDFA,
    'crfbinary': CRFBinary,
    'crfbinarydfa': CRFBinaryDFA,
    'crfbinary2': CRFBinary2,
    'crfbinning': CRFBinning,
    'crfcontinuous': CRFContinuous,
}
def get_extractor(extractor_type, estimator_model, classifier_model,
                  show_estimates=False):
    """Instantiate the Extractor subclass registered under extractor_type.

    Raises KeyError for unknown extractor types.
    """
    extractor_class = EXTRACTOR_TYPES[extractor_type.lower()]
    return extractor_class(
        estimator_file=estimator_model,
        classifier_file=classifier_model,
        show_estimates=show_estimates,
    )
###################
# Command Runners #
###################
def run_extraction(file_list, fmt, out, *args, **kwargs):
    """Performs schema extraction of specified file(s), in format specified
    by fmt. Other arguments are passed directly to the extractor
    constructor.
    """
    extractor = get_extractor(*args, **kwargs)
    for source_file in file_list:
        for table_result in extractor.extract(source_file):
            table_result.dump_output(fmt=fmt, out=out)
def run_training(filename=None, dirname=None, *args, **kwargs):
    """Performs training of estimator/classifier, based on supplied
    annotations.

    Annotations can be supplied from a single file (using filename) or,
    for use in cross validation training, a directory (using dirname).
    """
    extractor = get_extractor(*args, **kwargs)
    if filename:
        extractor.train(annotations.load(filename))
    elif dirname:
        _v('Training from annotations in %s' % dirname)
        partitions = annotations.load_training_sets(dirname)
        for index, annotation_list in enumerate(partitions):
            # One estimator model per cross-validation partition.
            estimator_name = '%s_%d_estimator' % (
                kwargs['extractor_type'], index)
            extractor.train(annotation_list,
                            os.path.join(dirname, estimator_name))
    else:
        raise Exception('A filename or directory for annotations must be ' +
                        'specified')
def run_tests(filename=None, dirname=None, *args, **kwargs):
    """Performs testing of estimator/classifier, based on supplied
    annotations.

    Annotations can be supplied from a single file (using filename) or,
    for use in cross validation training, a directory (using dirname).
    Returns the per-partition result list in the directory case.
    """
    extractor = get_extractor(*args, **kwargs)
    if filename:
        annotation_list = annotations.load(filename)
        extractor.test(annotation_list)
    elif dirname:
        _v('Testing classifiers in %s' % dirname)
        partitions = annotations.load_testing_sets(dirname)
        result_list = []
        for i, annotation_list in enumerate(partitions):
            estimator_file = '%s_%d_estimator' % (kwargs['extractor_type'], i)
            estimator_file = os.path.join(dirname, estimator_file)
            # Suppress output for individual tests; only aggregate results
            # are printed. (Fix: the devnull handle used to be opened every
            # iteration and never closed, leaking file descriptors.)
            with open(os.devnull, 'w') as out:
                res = extractor.test(annotation_list,
                                     estimator_file=estimator_file,
                                     out=out)
            result_list.extend(res)
        print('Aggregate results')
        print('=================')
        print(str(evaluate.evaluate_results(result_list)))
        return result_list
    else:
        raise Exception('A filename or directory for annotations must be ' +
                        'specified')
| |
from datetime import datetime, timedelta
import random
from django.db.models import Prefetch
from django.db.models.query import QuerySet
from django.contrib.auth.models import User
from django.db import models, transaction
from django.db.models import Q, F
from django.utils.functional import cached_property
from natto import MeCab
from .constants import MAX_NEW_CARD_ORDINAL
from manabi.apps.flashcards.models.constants import (
GRADE_NONE, MIN_CARD_SPACE, CARD_SPACE_FACTOR)
from manabi.apps.flashcards.models.synchronization import (
copy_facts_to_subscribers)
from manabi.apps.reader_sources.models import ReaderSource
from manabi.apps.twitter_usages.jobs import harvest_tweets
#TODO-OLD
# separate the cards of this fact initially
# not used for child fact types (?)
#min_card_space = models.FloatField(default=seconds_to_days(600),
# help_text='Duration expressed in (partial) days.')
#TODO-OLD
# minimal interval multiplier between two cards of the same fact
#space_factor = models.FloatField(default=.1)
class FactQuerySet(QuerySet):
    """QuerySet for Fact: deck scoping, sibling-burial filtering, and the
    Manabi Reader "I Want to Learn" helper."""

    def deck_facts(self, deck):
        """Facts belonging to the given deck."""
        return self.filter(deck=deck)

    def buried(self, user, review_time=None, excluded_card_ids=None):
        '''
        Facts with cards buried due to siblings.
        '''
        # Fix: use None as the default instead of a mutable list literal.
        if excluded_card_ids is None:
            excluded_card_ids = []
        if review_time is None:
            review_time = datetime.utcnow()

        return self.filter(
            Q(card__owner=user) & (
                # Sibling was reviewed too recently.
                (
                    Q(card__last_reviewed_at__gte=(
                        review_time - MIN_CARD_SPACE))
                    & Q(card__last_reviewed_at__gte=(
                        review_time - F('card__interval') * CARD_SPACE_FACTOR))
                )

                # Sibling is currently in the client-side review queue.
                | Q(card__id__in=excluded_card_ids)

                # Sibling is failed. (Either sibling's due, or it's shown
                # before new cards.)
                | Q(card__last_review_grade=GRADE_NONE)
            )
        )

    def prefetch_active_card_templates(self):
        '''
        Puts the active card templates into `available_cards`.
        '''
        from manabi.apps.flashcards.models import Card

        return self.prefetch_related(
            Prefetch(
                'card_set',
                queryset=Card.objects.available(),
                to_attr='available_cards',
            )
        )

    def _filter_for_suspend_toggling_matching(self, reading, jmdict_id=None):
        """Facts matching by reading (and, when given, by JMdict ID)."""
        if jmdict_id is None:
            return self.filter(reading=reading)
        else:
            return self.filter(
                (Q(reading=reading) & Q(jmdict_id__isnull=True))
                | Q(jmdict_id=jmdict_id)
            )

    def suspend_matching(self, reading, jmdict_id=None):
        """Suspend every matching fact; returns the matched queryset."""
        matches = self._filter_for_suspend_toggling_matching(
            reading, jmdict_id=jmdict_id)
        for match in matches:
            match.suspend()
        return matches

    @transaction.atomic
    def update_or_create_for_manabi_reader(
        self, user, expression, reading, meaning, active_card_templates,
        jmdict_id=None,
        example_sentence=None,
        reader_source_data=None,
    ):
        '''
        Corresponds to the "I Want to Learn" button in Manabi Reader, and thus
        has some particular behaviors built in to provide a nice UX.
        '''
        from manabi.apps.flashcards.models import Deck

        reader_source = None
        if reader_source_data is not None:
            (reader_source, _) = ReaderSource.objects.get_or_create(
                source_url=reader_source_data['source_url'],
                defaults={
                    'title': reader_source_data['title'],
                    'thumbnail_url': reader_source_data.get('thumbnail_url'),
                },
            )

        # Reuse an existing matching fact from the user's active decks.
        fact = Fact.objects.filter(
            deck__owner=user,
            deck__active=True,
            expression=expression,
            reading=reading,
            meaning=meaning,
            active=True,
        ).first()

        if fact is not None:
            # Unsuspend and refresh optional metadata on the existing fact.
            fact.suspended = False
            if example_sentence is not None:
                fact.example_sentence = example_sentence
            if reader_source is not None:
                fact.reader_source = reader_source
            if jmdict_id is not None:
                fact.jmdict_id = jmdict_id
            fact.save(update_fields=[
                'suspended', 'jmdict_id', 'reader_source', 'example_sentence'])
        else:
            deck = Deck.objects.get_or_create_manabi_reader_deck(user)
            fact = Fact.objects.create(
                deck=deck,
                expression=expression,
                reading=reading,
                meaning=meaning,
                reader_source=reader_source,
                example_sentence=example_sentence or '',
                jmdict_id=jmdict_id,
            )

        fact.set_active_card_templates(active_card_templates)

        return fact
def _card_template_id_to_string(card_template_id):
    """Translate a card-template ID constant into its string name.

    Raises KeyError for unknown IDs.
    """
    from manabi.apps.flashcards.models import (
        PRODUCTION, RECOGNITION, KANJI_READING, KANJI_WRITING)
    id_to_name = {
        PRODUCTION: 'production',
        RECOGNITION: 'recognition',
        KANJI_READING: 'kanji_reading',
        KANJI_WRITING: 'kanji_writing',
    }
    return id_to_name[card_template_id]
def _card_template_string_to_id(card_template):
    """Translate a card-template string name into its ID constant.

    Raises KeyError for unknown names.
    """
    from manabi.apps.flashcards.models import (
        PRODUCTION, RECOGNITION, KANJI_READING, KANJI_WRITING)
    name_to_id = {
        'production': PRODUCTION,
        'recognition': RECOGNITION,
        'kanji_reading': KANJI_READING,
        'kanji_writing': KANJI_WRITING,
    }
    return name_to_id[card_template]
class Fact(models.Model):
    objects = FactQuerySet.as_manager()

    # Owning deck; deleting the deck cascades to its facts.
    deck = models.ForeignKey(
        'flashcards.Deck', models.CASCADE, db_index=True, related_name='facts')
    # Upstream fact this one mirrors (shared-deck subscription).
    synchronized_with = models.ForeignKey(
        'self', models.CASCADE,
        null=True, blank=True, related_name='subscriber_facts')
    # Set once a subscriber edits a synced fact; stops further syncing.
    forked = models.BooleanField(default=False, blank=True)
    # Random sort key for new cards (assigned by roll_ordinal()).
    new_fact_ordinal = models.PositiveIntegerField(null=True, blank=True)
    active = models.BooleanField(default=True, blank=True)

    # TODO Derive expression from reading..?
    expression = models.CharField(max_length=500)
    reading = models.CharField(max_length=1500, blank=True)
    meaning = models.CharField(max_length=1000)
    # JMdict dictionary entry ID, when known.
    jmdict_id = models.PositiveIntegerField(null=True, blank=True)
    example_sentence = models.TextField(blank=True)
    # Where this fact was harvested from in Manabi Reader, if anywhere.
    reader_source = models.ForeignKey(
        'reader_sources.ReaderSource', models.CASCADE,
        null=True, blank=True, related_name='facts')

    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    # Maintained manually in save() rather than via auto_now.
    modified_at = models.DateTimeField(blank=True, null=True)

    suspended = models.BooleanField(default=False)

    class Meta:
        app_label = 'flashcards'
        unique_together = [('deck', 'synchronized_with')]
def roll_ordinal(self):
'''
Returns whether a new ordinal was given.
'''
if self.new_fact_ordinal:
return False
self.new_fact_ordinal = random.randrange(0, MAX_NEW_CARD_ORDINAL)
return True
    def save(self, update_fields=None, *args, **kwargs):
        '''
        Set a random sorting index for new cards.
        Propagates changes down to subscriber facts.
        '''
        self.modified_at = datetime.utcnow()

        # Fields changed here that must be persisted even when the caller
        # passed an explicit update_fields list.
        also_update_fields = {'modified_at'}
        if self.deck.randomize_card_order and self.roll_ordinal():
            also_update_fields.add('new_fact_ordinal')

        # Fork if a subscriber card is being edited.
        # (Only when content fields are in play AND at least one of them
        # actually differs from the upstream fact.)
        if (
            not self.forked and
            (
                update_fields is None or
                set(update_fields) & {
                    'expression', 'reading', 'meaning', 'example_sentence',
                    'jmdict_id',
                }
            ) and
            self.synchronized_with is not None and
            (
                self.synchronized_with.expression != self.expression
                or self.synchronized_with.reading != self.reading
                or self.synchronized_with.meaning != self.meaning
                or self.synchronized_with.example_sentence
                    != self.example_sentence
                or self.synchronized_with.jmdict_id != self.jmdict_id
            )
        ):
            self.forked = True
            also_update_fields.add('forked')

        if update_fields is not None:
            update_fields = list(set(update_fields) | also_update_fields)

        # Capture newness before the super save assigns a PK.
        is_new = self.pk is None

        super(Fact, self).save(update_fields=update_fields, *args, **kwargs)

        # Update subscriber facts as necessary (content propagation).
        if update_fields is None or (
            set(update_fields) & {
                'expression', 'reading', 'meaning', 'example_sentence',
                'jmdict_id',
            }
        ):
            self.syncing_subscriber_facts.update(
                expression=self.expression,
                reading=self.reading,
                meaning=self.meaning,
                example_sentence=self.example_sentence,
                jmdict_id=self.jmdict_id,
                modified_at=self.modified_at,
            )

        # Newly created facts in shared decks get copied to subscribers.
        if is_new and self.deck.shared:
            copy_facts_to_subscribers([self])

        if (
            update_fields is None
            or {'deck', 'deck_id', 'suspended', 'active'} & set(update_fields)
        ):
            # Update cards.
            card_attrs = {
                'deck_id': self.deck_id,
                'created_or_modified_at': datetime.utcnow(),
            }
            # Deactivating a fact deactivates its cards too.
            if not self.active:
                card_attrs['active'] = False
            self.card_set.update(**card_attrs)

            self.deck.refresh_card_count()

        # Keep cards' denormalized jmdict_id in sync.
        if update_fields is None or 'jmdict_id' in update_fields:
            self.card_set.exclude(
                jmdict_id=self.jmdict_id,
            ).update(
                jmdict_id=self.jmdict_id,
                created_or_modified_at=self.modified_at,
            )

        if is_new:
            harvest_tweets.delay(self)
@transaction.atomic
def delete(self, *args, **kwargs):
from manabi.apps.flashcards.models import Card
self.active = False
self.save(update_fields=['active'])
self.new_syncing_subscriber_cards.update(
active=False,
created_or_modified_at=datetime.utcnow(),
)
self.new_syncing_subscriber_facts.update(active=False)
self.subscriber_facts.clear()
    @property
    def syncing_subscriber_facts(self):
        # Subscriber facts still receiving updates (i.e. not forked).
        return self.subscriber_facts.exclude(forked=True)
    @property
    def new_syncing_subscriber_facts(self):
        '''
        "New" as in unreviewed.
        '''
        # Excludes any syncing subscriber fact with at least one reviewed card.
        return self.syncing_subscriber_facts.exclude(
            card__last_reviewed_at__isnull=False)
    @property
    def syncing_subscriber_cards(self):
        # Cards belonging to still-syncing (non-forked) subscriber facts.
        from manabi.apps.flashcards.models import Card
        return Card.objects.filter(
            fact__in=self.syncing_subscriber_facts)
    @property
    def new_syncing_subscriber_cards(self):
        # Unreviewed cards of still-syncing subscriber facts.
        return self.syncing_subscriber_cards.filter(
            last_reviewed_at__isnull=True)
    @property
    def owner(self):
        # A fact is owned by its deck's owner.
        return self.deck.owner
    @property
    def card_count(self):
        # Number of active cards generated from this fact.
        return self.card_set.filter(active=True).count()
@cached_property
def active_card_templates(self):
from manabi.apps.flashcards.models import PRODUCTION
try:
template_ids = [
card.template for card in self.available_cards
]
except AttributeError:
template_ids = (
self.card_set.available().values_list('template', flat=True)
)
return {
_card_template_id_to_string(id_) for id_ in template_ids
}
    def set_active_card_templates(self, card_templates):
        '''
        Creates or updates associated `Card`s.
        '''
        from manabi.apps.flashcards.models import Card

        # Translate template names (e.g. 'production') into ID constants.
        template_ids = {
            _card_template_string_to_id(template)
            for template in card_templates
        }

        # Activate cards whose template was requested...
        for activated_card in (
            self.card_set.filter(template__in=template_ids)
        ):
            activated_card.activate()
        # ...and deactivate the rest.
        for deactivated_card in (
            self.card_set.exclude(template__in=template_ids)
        ):
            deactivated_card.deactivate()

        # Create cards for requested templates that don't exist yet.
        existing_template_ids = set(self.card_set.values_list(
            'template', flat=True))
        for template_id in template_ids - existing_template_ids:
            Card.objects.create(
                owner=self.deck.owner,
                deck=self.deck,
                deck_suspended=self.deck.suspended,
                jmdict_id=self.jmdict_id,
                fact=self,
                template=template_id,
                new_card_ordinal=Card.random_card_ordinal(),
            )

        # No templates at all means the whole fact is suspended.
        if len(template_ids) == 0:
            self.suspend()
        elif self.suspended:
            self.unsuspend()

        # Invalidate the active_card_templates cached_property.
        try:
            del self.active_card_templates
        except AttributeError:
            pass

        self._set_active_card_templates_for_subscribers(template_ids)
@transaction.atomic
def _set_active_card_templates_for_subscribers(self, template_ids):
from manabi.apps.flashcards.models import Card
subscriber_cards = Card.objects.filter(
fact__in=self.syncing_subscriber_facts,
)
new_subscriber_cards = subscriber_cards.filter(
last_reviewed_at__isnull=True,
)
new_subscriber_cards.filter(
template__in=template_ids,
).update(active=True, suspended=False)
new_subscriber_cards.exclude(
template__in=template_ids,
).update(active=False)
for template_id in template_ids:
facts_without_template = self.syncing_subscriber_facts.exclude(
card__in=subscriber_cards.filter(template=template_id),
).select_related('deck')
missing_cards = [
Card(
owner_id=owner_id,
deck_id=deck_id,
deck_suspended=deck_suspended,
fact_id=fact_id,
template=template_id,
new_card_ordinal=Card.random_card_ordinal(),
)
for fact_id, deck_id, deck_suspended, owner_id in
facts_without_template.values_list(
'id', 'deck_id', 'deck__suspended', 'deck__owner_id',
).iterator()
]
Card.objects.bulk_create(missing_cards)
if len(template_ids) == 0:
self.syncing_subscriber_facts.filter(
suspended=False).update(suspended=True)
else:
self.syncing_subscriber_facts.filter(
suspended=True).update(suspended=False)
@transaction.atomic
def move_to_deck(self, deck):
self.new_syncing_subscriber_facts.update(active=False)
self.subscriber_facts.clear()
self.deck = deck
self.synchronized_with = None
self.save(update_fields=['deck', 'synchronized_with'])
if self.deck.shared:
copy_facts_to_subscribers([self])
@transaction.atomic
def suspend(self):
self.card_set.update(suspended=True)
self.suspended = True
self.save(update_fields=['suspended'])
@transaction.atomic
def unsuspend(self):
self.card_set.update(suspended=False)
self.suspended = False
self.save(update_fields=['suspended'])
#TODELETE?
def all_owner_decks(self):
'''
Returns a list of all the decks this object belongs to,
including subscriber decks.
'''
return ([self.deck]
+ [d for d in self.deck.subscriber_decks.filter(active=True)])
def __str__(self):
return str(self.id)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupTagKey'
db.create_table('sentry_grouptagkey', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True)),
('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=32)),
('values_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal('sentry', ['GroupTagKey'])
# Adding unique constraint on 'GroupTagKey', fields ['project', 'group', 'key']
db.create_unique('sentry_grouptagkey', ['project_id', 'group_id', 'key'])
# Adding field 'FilterValue.times_seen'
db.add_column('sentry_filtervalue', 'times_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'FilterValue.last_seen'
db.add_column('sentry_filtervalue', 'last_seen',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, db_index=True),
keep_default=False)
# Adding field 'FilterValue.first_seen'
db.add_column('sentry_filtervalue', 'first_seen',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, db_index=True),
keep_default=False)
# Adding field 'FilterKey.values_seen'
db.add_column('sentry_filterkey', 'values_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'GroupTagKey', fields ['project', 'group', 'key']
db.delete_unique('sentry_grouptagkey', ['project_id', 'group_id', 'key'])
# Deleting model 'GroupTagKey'
db.delete_table('sentry_grouptagkey')
# Deleting field 'FilterValue.times_seen'
db.delete_column('sentry_filtervalue', 'times_seen')
# Deleting field 'FilterValue.last_seen'
db.delete_column('sentry_filtervalue', 'last_seen')
# Deleting field 'FilterValue.first_seen'
db.delete_column('sentry_filtervalue', 'first_seen')
# Deleting field 'FilterKey.values_seen'
db.delete_column('sentry_filterkey', 'values_seen')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bundled copy of_predict_lib.
Includes (from the Cloud ML SDK):
- _predict_lib
- session_bundle
Important changes:
- Replace shutil.rmtree with core.util.files.RmTree.
- _file utilities have been inlined. We use tensorflow's file_io instead of
Apache Beam's. We use a more primitive version of globbing (using fnmatch)
instead of the Apache Beam Cloud Storage globbing (which file_io doesn't
support).
- Remove interfaces for DefaultModel (they don't change behavior).
- Set from_client(skip_preprocessing=True) and remove the pre-processing code.
"""
import base64
import collections
from contextlib import contextmanager
import fnmatch
import json
import logging
import os
import tempfile
import timeit
from googlecloudsdk.core.util import files
import numpy as np
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import file_io
def _import_tensorflow_contrib():
"""Import tf.contrib.
Otherwise Tensorflow won't load those operations, and imported graphs may need
them.
Silence logging messages, since there are many.
"""
old_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
import tensorflow.contrib # pylint: disable=unused-variable,g-import-not-at-top
logging.getLogger().setLevel(old_level)
_import_tensorflow_contrib()
# -------------------------
# session_bundle._constants
# -------------------------
VERSION_FORMAT_SPECIFIER = "%08d"
ASSETS_DIRECTORY = "assets"
META_GRAPH_DEF_FILENAME = "export.meta"
VARIABLES_FILENAME = "export"
VARIABLES_FILENAME_V2 = "export.data"
VARIABLES_FILENAME_PATTERN = "export-?????-of-?????"
VARIABLES_FILENAME_PATTERN_V2 = "export.data-?????-of-?????"
VARIABLES_INDEX_FILENAME_V2 = "export.index"
INIT_OP_KEY = "serving_init_op"
SIGNATURES_KEY = "serving_signatures"
ASSETS_KEY = "serving_assets"
GRAPH_KEY = "serving_graph"
INPUTS_KEY = "inputs"
OUTPUTS_KEY = "outputs"
KEYS_KEY = "keys"
def keys_used_for_serving():
"""Return a list of all keys used for predictions."""
return [
INIT_OP_KEY,
SIGNATURES_KEY,
ASSETS_KEY,
GRAPH_KEY,
INPUTS_KEY,
OUTPUTS_KEY,
KEYS_KEY,
]
# ------------------------------
# session_bundle._session_bundle
# ------------------------------
def load_session_bundle_from_path(export_dir, target="", config=None):
  """Load session bundle from the given path.

  The function reads input from the export_dir, constructs the graph data to the
  default graph and restores the parameters for the session created.

  Args:
    export_dir: the directory that contains files exported by exporter.
    target: The execution engine to connect to. See target in tf.Session()
    config: A ConfigProto proto with configuration options. See config in
      tf.Session()

  Returns:
    session: a tensorflow session created from the variable files.
    meta_graph: a meta graph proto saved in the exporter directory.

  Raises:
    RuntimeError: if the required files are missing or contain unrecognizable
      fields, i.e. the exported model is invalid.
  """
  # Log the TF build for debugging; very old builds lack GIT_VERSION.
  if hasattr(tf, "GIT_VERSION"):
    logging.info("tf.GIT_VERSION=%s", tf.GIT_VERSION)
  else:
    logging.info("tf.GIT_VERSION=unknown")

  meta_graph_filename = os.path.join(export_dir,
                                     META_GRAPH_DEF_FILENAME)
  if not file_io.file_exists(meta_graph_filename):
    raise RuntimeError("Expected meta graph file missing %s" %
                       meta_graph_filename)

  variables_filename = ""
  variables_filename_list = []
  # Non-data checkpoint files that must be copied along with the data files
  # when staging locally (currently only the v2 index file).
  additional_files_to_copy = []
  checkpoint_sharded = False
  # The presence of the v2 index file distinguishes v2 from v1 checkpoints.
  variables_index_filename = os.path.join(
      export_dir, VARIABLES_INDEX_FILENAME_V2)
  checkpoint_v2 = file_io.file_exists(variables_index_filename)
  if checkpoint_v2:
    # The checkpoint is in v2 format.
    variables_filename = os.path.join(export_dir,
                                      VARIABLES_FILENAME_V2)
    # Check to see if the file "export.data" exists or not.
    if file_io.file_exists(variables_filename):
      variables_filename_list = [variables_filename]
    else:
      # Check whether the sharded file "export.data-?????-of-?????" exists.
      variables_filename_list = fnmatch.filter(
          file_io.list_directory(export_dir),
          VARIABLES_FILENAME_PATTERN_V2)
      checkpoint_sharded = True
    # If the checkpoint is not local, we need to copy export.index locally too.
    additional_files_to_copy = [variables_index_filename]
  else:
    # v1 checkpoint: single "export" file or "export-?????-of-?????" shards.
    variables_filename = os.path.join(export_dir,
                                      VARIABLES_FILENAME)
    if file_io.file_exists(variables_filename):
      variables_filename_list = [variables_filename]
    else:
      variables_filename_list = fnmatch.filter(
          file_io.list_directory(export_dir),
          VARIABLES_FILENAME_PATTERN)
      checkpoint_sharded = True

  # Neither a single file nor sharded files were found: invalid export.
  if not variables_filename_list or not variables_filename:
    raise RuntimeError("No or bad checkpoint files found in %s" % export_dir)

  # Prepare the files to restore a session.
  restore_files = ""
  if checkpoint_v2 or not checkpoint_sharded:
    # For checkpoint v2 or v1 with non-sharded files, use "export" to restore
    # the session.
    restore_files = VARIABLES_FILENAME
  else:
    # v1 sharded checkpoint: hand the shard pattern to the saver.
    restore_files = VARIABLES_FILENAME_PATTERN

  # Reads meta graph file.
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  with file_io.FileIO(meta_graph_filename, "r") as f:
    logging.info("Reading metagraph from %s", meta_graph_filename)
    meta_graph_def.ParseFromString(f.read())

  collection_def = meta_graph_def.collection_def
  graph_def = tf.GraphDef()
  if GRAPH_KEY in collection_def:
    logging.info("Using value of collection %s for the graph.",
                 GRAPH_KEY)
    # Use serving graph_def in MetaGraphDef collection_def if exists
    graph_def_any = collection_def[GRAPH_KEY].any_list.value
    if len(graph_def_any) != 1:
      raise RuntimeError(
          "Expected exactly one serving GraphDef in : %s" % meta_graph_def)
    else:
      graph_def_any[0].Unpack(graph_def)
      # Replace the graph def in meta graph proto.
      meta_graph_def.graph_def.CopyFrom(graph_def)
    # TODO(user): If we don't clear the collections then import_meta_graph
    # fails.
    #
    # We can't delete all the collections because some of them are used
    # by prediction to get the names of the input/output tensors.
    keys_to_delete = (set(meta_graph_def.collection_def.keys()) -
                      set(keys_used_for_serving()))
    for k in keys_to_delete:
      del meta_graph_def.collection_def[k]
  else:
    logging.info("No %s found in metagraph. Using metagraph as serving graph",
                 GRAPH_KEY)

  tf.reset_default_graph()
  sess = tf.Session(target, graph=None, config=config)
  # Import the graph.
  saver = tf.train.import_meta_graph(meta_graph_def)
  # Restore the session.
  if variables_filename_list[0].startswith("gs://"):
    # Make copy from GCS files.
    # TODO(user): Retire this once tensorflow can access GCS.
    try:
      temp_dir_path = tempfile.mkdtemp("local_variable_files")
      for f in variables_filename_list + additional_files_to_copy:
        file_io.copy(f, os.path.join(temp_dir_path, os.path.basename(f)))
      saver.restore(sess, os.path.join(temp_dir_path, restore_files))
    finally:
      try:
        files.RmTree(temp_dir_path)
      except OSError as e:
        # NOTE(review): OSError.message is Python 2 only; under Python 3 this
        # attribute access would raise AttributeError — confirm target runtime.
        if e.message == "Cannot call rmtree on a symbolic link":
          # Interesting synthetic exception made up by shutil.rmtree.
          # Means we received a symlink from mkdtemp.
          # Also means must clean up the symlink instead.
          os.unlink(temp_dir_path)
        else:
          raise
  else:
    saver.restore(sess, os.path.join(export_dir, restore_files))

  init_op_tensor = None
  if INIT_OP_KEY in collection_def:
    init_ops = collection_def[INIT_OP_KEY].node_list.value
    if len(init_ops) != 1:
      raise RuntimeError(
          "Expected exactly one serving init op in : %s" % meta_graph_def)
    init_op_tensor = tf.get_collection(INIT_OP_KEY)[0]

  if init_op_tensor:
    # Run the init op.
    sess.run(fetches=[init_op_tensor])

  return sess, meta_graph_def
# --------------------------
# prediction._prediction_lib
# --------------------------
# Keys under which timing information is recorded in a Stats object during
# prediction. The *_TIME values are stored in microseconds (via Stats.time);
# ENGINE stores the engine name string.
ENGINE = "Prediction-Engine"
PREPROCESSING_TIME = "Prediction-Preprocessing-Time"
COLUMNARIZE_TIME = "Prediction-Columnarize-Time"
UNALIAS_TIME = "Prediction-Unalias-Time"
ENGINE_RUN_TIME = "Prediction-Engine-Run-Time"
SESSION_RUN_TIME = "Prediction-Session-Run-Time"
ALIAS_TIME = "Prediction-Alias-Time"
ROWIFY_TIME = "Prediction-Rowify-Time"
INPUT_PROCESSING_TIME = "Prediction-Input-Processing-Time"
SESSION_RUN_ENGINE_NAME = "TF_SESSION_RUN"
class PredictionError(Exception):
  """Custom exception for known prediction failures.

  Carries a numeric error code (one of the class constants below) plus a
  human-readable message; both are stored in Exception.args by __init__.
  """

  # The error code for prediction.
  # TODO(b/34686732) Use strings instead of ints for these errors.
  FAILED_TO_LOAD_MODEL = 0
  FAILED_TO_PREPROCESS_INPUTS = 1
  FAILED_TO_PARSE_INPUTS = 2
  FAILED_TO_HANDLE_BAD_INPUTS = 3
  FAILED_TO_RUN_GRAPH = 4
  FAILED_TO_GET_INPUT_TENSOR_ALIAS_MAP = 5
  FAILED_TO_GET_OUTPUT_TENSOR_ALIAS_MAP = 6
  FAILED_TO_RUN_GRAPH_BAD_OUTPUTS = 7
  FAILED_TO_GET_DEFAULT_SIGNATURE = 8

  def __init__(self, error_code, error_message, *args):
    # Store code and message in args so they survive pickling/str().
    super(PredictionError, self).__init__(error_code, error_message, *args)

  @property
  def error_code(self):
    # Stored as args[0] by __init__.
    return self.args[0]

  @property
  def error_message(self):
    # Stored as args[1] by __init__.
    return self.args[1]
# Names recognized as model metadata files (their use is not shown in this
# module excerpt).
METADATA_FILENAMES = {"metadata.yaml", "metadata.json"}
# Unit conversion factors: seconds -> microseconds / milliseconds.
MICRO = 1000000
MILLI = 1000
class Timer(object):
  """Context manager for timing code blocks.

  Intended to be used only as a context manager: the clock starts at
  __enter__ and stops at __exit__. After __exit__, the duration properties
  are frozen at the elapsed time; while the block is still running they
  report the "live" elapsed time so far.

  Re-using one Timer in several sequential with-statements reports the most
  recent run; do not nest with-blocks on the same Timer instance.

  Example usage:
    with Timer() as timer:
      foo()
    print(timer.seconds)
  """

  def __init__(self):
    self.start = None
    self.end = None

  def __enter__(self):
    # Clear any end time from a previous run so live reads see a running timer.
    self.end = None
    self.start = timeit.default_timer()
    return self

  def __exit__(self, exc_type, value, traceback):
    self.end = timeit.default_timer()
    # Never suppress exceptions raised inside the with-block.
    return False

  @property
  def seconds(self):
    """Elapsed time in fractional seconds; live while the timer runs."""
    reference = timeit.default_timer()
    return (self.end or reference) - (self.start or reference)

  @property
  def microseconds(self):
    """Elapsed time in whole microseconds."""
    return int(self.seconds * MICRO)

  @property
  def milliseconds(self):
    """Elapsed time in whole milliseconds."""
    return int(self.seconds * MILLI)
class Stats(dict):
  """A dict-like collector for prediction statistics.

  Stats are stored exactly like dict entries:

    stats = Stats()
    stats["count"] = 1
    stats["foo"] = "bar"

  The time() method returns a context manager that records how long its
  body took, in microseconds, under the given name:

    with stats.time("foo_time"):
      foo()
    print(stats["foo_time"])
  """

  @contextmanager
  def time(self, name):
    """Times the enclosed block and stores the duration under `name`."""
    timer = Timer()
    with timer:
      yield timer
    self[name] = timer.microseconds
def batch(instances):
  """Batch up the inputs.

  Each line in the input is a dictionary of input tensor names to the value
  for that input, for a single instance. For each input tensor, we add each of
  the input values to a list, i.e., batch them up.

  The result is a map from input tensor name to a batch
  of input data. This can be directly used as the feed dict during
  prediction.

  For example,

    instances = [{"a": [1.0, 2.0], "b": "a"},
                 {"a": [3.0, 4.0], "b": "c"},
                 {"a": [5.0, 6.0], "b": "e"},]
    batch = prediction_server_lib.batch(instances)
    assert batch == {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                     "b": ["a", "c", "e"]}

  Arguments:
    instances: (list of dict) where the dictionaries map tensor aliases
      to the values for those tensors.

  Returns:
    A dictionary mapping tensor names to values, as described above.
  """
  batched = collections.defaultdict(list)
  for instance in instances:
    # .items() (not Python 2's .iteritems()) works on Python 2 and 3 alike.
    for key, value in instance.items():
      batched[key].append(value)
  return batched
def unbatch(batched):
  """Unbatches input.

  Consider the following code:

    batched = {"prediction": np.array([1,   # 1st instance
                                       0,   # 2nd
                                       1]), # 3rd
               "scores": np.array([[0.1, 0.9],   # 1st instance
                                   [0.7, 0.3],   # 2nd
                                   [0.4, 0.6]])} # 3rd

  Then the following will return the equivalent of:

    [{"prediction": 1, "scores": [0.1, 0.9]},
     {"prediction": 0, "scores": [0.7, 0.3]},
     {"prediction": 1, "scores": [0.4, 0.6]}]

  (each row is yielded; no list is actually created).

  Arguments:
    batched: (dict) mapping names to numpy arrays, where the arrays
      contain a batch of data.

  Raises:
    PredictionError: if the input doesn't have identical batch dimensions for
      each of element.

  Yields:
    A map with a single instance, as described above. NB: instances
    is not a numpy array.
  """
  # Python-2-only .itervalues()/.iteritems()/xrange replaced with their
  # 2-and-3-compatible equivalents.
  sizes_set = {e.shape[0] for e in batched.values()}

  # All the elements in the length array should be identical. Otherwise,
  # raise an exception.
  if len(sizes_set) != 1:
    sizes_dict = {name: e.shape[0] for name, e in batched.items()}
    raise PredictionError(
        PredictionError.FAILED_TO_RUN_GRAPH_BAD_OUTPUTS,
        "Bad output from running tensorflow session: outputs had differing "
        "sizes in the batch (outer) dimension. See the outputs and their "
        "size: %s. Check your model for bugs that effect the size of the "
        "outputs." % sizes_dict)

  # Pick an arbitrary value in the map to get its size.
  num_instances = len(next(iter(batched.values())))
  for row in range(num_instances):
    yield {name: output[row, ...].tolist()
           for name, output in batched.items()}
def _build_signature(graph, input_map, output_map):
  """Return a SignatureDef using maps from alias to inputs/outputs.

  Args:
    graph: the TensorFlow graph containing the named tensors.
    input_map: dict mapping input alias to tensor name.
    output_map: dict mapping output alias to tensor name.

  Returns:
    A meta_graph_pb2.SignatureDef whose TensorInfo entries carry name,
    dtype and shape looked up from the graph.
  """

  # Function for creating TensorInfo structures from tensor names.
  def get_tensor_info(tensor_name):
    # Look the tensor up in the graph so dtype/shape are authoritative.
    tensor = graph.get_tensor_by_name(tensor_name)
    return meta_graph_pb2.TensorInfo(
        name=tensor_name,
        dtype=tensor.dtype.as_datatype_enum,
        tensor_shape=tensor.get_shape().as_proto(),)

  # .items() (not Python 2's .iteritems()) works on Python 2 and 3 alike.
  inputs = {alias: get_tensor_info(tensor_name)
            for alias, tensor_name in input_map.items()}
  outputs = {alias: get_tensor_info(tensor_name)
             for alias, tensor_name in output_map.items()}
  return meta_graph_pb2.SignatureDef(inputs=inputs, outputs=outputs)
def _get_interfaces(graph):
  """Returns maps from aliases to inputs and outputs of the graph."""

  def _parse_alias_map(collection_key, error_code):
    # Both interface collections hold a single JSON-encoded alias map;
    # any failure (missing collection, bad JSON) is surfaced uniformly.
    try:
      return json.loads(graph.get_collection(collection_key)[0])
    except Exception as e:
      logging.error(str(e))
      raise PredictionError(
          error_code,
          ("Invalid value for collection: {0}. Should be a tensor alias "
           "map.".format(collection_key)))

  inputs = _parse_alias_map(
      INPUTS_KEY, PredictionError.FAILED_TO_GET_INPUT_TENSOR_ALIAS_MAP)
  outputs = _parse_alias_map(
      OUTPUTS_KEY, PredictionError.FAILED_TO_GET_OUTPUT_TENSOR_ALIAS_MAP)
  return inputs, outputs
# TODO(b/34686738): when we no longer load the model to get the signature
# consider making this a named constructor on SessionClient.
def load_model(model_path):
  """Loads the model at the specified path.

  Args:
    model_path: the path to either session_bundle or SavedModel

  Returns:
    A pair of (Session, SignatureDef) objects.

  Raises:
    PredictionError: if the model could not be loaded.
  """
  # Ideally, we could just always use bundle_shim to load legacy and
  # regular graphs. However, bundle_shim and supporting functions are
  # only available on recent versions of TF (~0.12). It's not even
  # really possible to detect whether or not we're going to be able
  # to use these functions, so in true Python fashion, it's better
  # to ask forgiveness than permission...we try to import bundle_shim,
  # which may fail, then we try to use bundle_shim, which may also fail
  # for legacy graphs. In either failure case, we back off to our older
  # custom session_bundle implementation.
  try:
    from tensorflow.contrib.session_bundle import bundle_shim  # pylint: disable=g-import-not-at-top
    from tensorflow.python.saved_model import tag_constants  # pylint: disable=g-import-not-at-top
    # We expect that the customer will export saved model and use
    # tag_constants.SERVING for serving graph. This assumption also extends to
    # model server.
    session, meta_graph = (
        bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
            model_path, tags=[tag_constants.SERVING]))
  except Exception:  # pylint: disable=broad-except
    # Deliberate best-effort fallback to the local legacy loader.
    session, meta_graph = load_session_bundle_from_path(
        model_path)
  if session is None:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "Could not load model from %s" % model_path)

  # Before the SavedModel spec came into existence the inputs and outputs
  # of a model were specified using TensorFlow collections. Check if this model
  # uses that spec.
  graph = session.graph
  collection_keys = graph.get_all_collection_keys()
  if INPUTS_KEY in collection_keys and OUTPUTS_KEY in collection_keys:
    signature = _get_legacy_signature(graph)
  else:
    # Otherwise, use (possibly upgraded from session_bundle) SavedModel.
    signature = _get_signature_from_meta_graph(graph, meta_graph)

  return session, signature
def _get_legacy_signature(graph):
  """Builds a SignatureDef from the legacy collection-based interfaces."""
  # Get maps from alias to inputs/outputs, then turn them into a SignatureDef.
  input_map, output_map = _get_interfaces(graph)
  return _build_signature(graph, input_map, output_map)
def _get_signature_from_meta_graph(graph, meta_graph):
  """Returns the SignatureDef in meta_graph, updating dtypes using graph.

  Args:
    graph: the graph the signature's tensors live in.
    meta_graph: a MetaGraphDef with at least one entry in signature_def.

  Returns:
    The selected SignatureDef, with input/output dtypes filled in from the
    graph where the signature omitted them.

  Raises:
    Exception: if meta_graph has no signature_def entries.
  """
  if not meta_graph.signature_def:
    raise Exception("MetaGraph must have at least one signature_def.")

  # Preferred key when multiple signatures are present (see warning below).
  named_key = "serving_default_from_named"
  if len(meta_graph.signature_def) > 1:
    logging.warning("MetaGraph has multiple signatures %d. Support for "
                    "multiple signatures is limited. By default we select "
                    "named signatures.", len(meta_graph.signature_def))
  if named_key in meta_graph.signature_def:
    return meta_graph.signature_def[named_key]

  # TODO(b/34690042): document these and point to a public, canonical constant.
  signature = meta_graph.signature_def["serving_default"]

  # Signatures often omit the dtype and shape information. Look those up if
  # necessary.
  _update_dtypes(graph, signature.inputs)
  _update_dtypes(graph, signature.outputs)

  return signature
def _update_dtypes(graph, interface):
"""Adds dtype to TensorInfos in interface if necessary.
If already present, validates TensorInfo matches values in the graph.
TensorInfo is updated in place.
Args:
graph: the TensorFlow graph; used to lookup datatypes of tensors.
interface: map from alias to TensorInfo object.
Raises:
ValueError: if the data type in the TensorInfo does not match the type
found in graph.
"""
for alias, info in interface.iteritems():
# Postpone conversion to enum for better error messages.
dtype = graph.get_tensor_by_name(info.name).dtype
if not info.dtype:
info.dtype = dtype.as_datatype_enum
elif info.dtype != dtype.as_datatype_enum:
raise ValueError("Specified data types do not match for alias %s. "
"Graph has %d while TensorInfo reports %d." %
(alias, dtype, info.dtype))
class SessionClient(object):
  """A client for Prediction that uses Session.run."""

  def __init__(self, session, signature):
    """Creates a SessionClient.

    Args:
      session: the session used to run the graph.
      signature: a SignatureDef describing the model's inputs and outputs.
    """
    self._session = session
    self._signature = signature

    # TensorFlow requires a bona fide list for the fetches. To avoid
    # regenerating the list on every prediction, we cache the list of
    # output tensor names.
    self._output_tensors = [v.name for v in self._signature.outputs.values()]

  @property
  def signature(self):
    """The SignatureDef used by this client."""
    return self._signature

  def predict(self, inputs, stats):
    """Produces predictions for the given inputs.

    Args:
      inputs: a dict mapping input names to values
      stats: Stats object for recording timing information.

    Returns:
      A dict mapping output names to output values, similar to the input
      dict.

    Raises:
      PredictionError: if the inputs don't match the signature, or the
        session fails to run the graph.
    """
    stats[ENGINE] = "SessionRun"
    with stats.time(UNALIAS_TIME):
      try:
        # Map caller-facing aliases to the underlying tensor names.
        # .items()/.keys() (not iteritems/iterkeys) work on Python 2 and 3.
        unaliased = {self.signature.inputs[key].name: val
                     for key, val in inputs.items()}
      except Exception as e:
        raise PredictionError(PredictionError.FAILED_TO_HANDLE_BAD_INPUTS,
                              "Input mismatch: " + str(e))

    with stats.time(SESSION_RUN_TIME):
      try:
        # TODO(b/33849399): measure the actual session.run() time, even in the
        # case of ModelServer.
        outputs = self._session.run(fetches=self._output_tensors,
                                    feed_dict=unaliased)
      except Exception as e:
        logging.error("Exception during running the graph: " + str(e))
        raise PredictionError(PredictionError.FAILED_TO_RUN_GRAPH,
                              "Exception during running the graph: " + str(e))

    with stats.time(ALIAS_TIME):
      # Re-key the outputs by alias; keys() iterates in the same order used
      # to build self._output_tensors, so the zip pairs up correctly.
      return dict(zip(self._signature.outputs.keys(), outputs))
class DefaultModel(object):
  """The default implementation of the Model interface.

  This implementation optionally performs preprocessing and postprocessing
  using the provided functions. These functions accept a single instance
  as input and produce a corresponding output to send to the prediction
  client.
  """

  def __init__(self, client, preprocess_fn=None, postprocess_fn=None):
    """Constructs a DefaultModel.

    Args:
      client: An instance of ModelServerClient for performing prediction.
      preprocess_fn: a function to run on each instance before calling predict,
        if this parameter is not None. See class docstring.
      postprocess_fn: a function to run on each instance after calling predict,
        if this parameter is not None. See class docstring.
    """
    self._client = client
    self._preprocess_fn = preprocess_fn
    self._postprocess_fn = postprocess_fn

  def _get_batched_instance(self, instances):
    """Columnarize the batch, appending input_name, if necessary.

    Instances are the same instances passed to the predict() method. Since
    models with a single input can accept the raw input without the name,
    we create a dict here with that name.

    This list of instances is then converted into a column-oriented format:
    The result is a dictionary mapping input name to a list of values for just
    that input (one entry per row in the original instances list).

    Args:
      instances: the list of instances as provided to the predict() method.

    Returns:
      A dictionary mapping input names to their values.
    """
    if len(self._client.signature.inputs) == 1:
      # Single-input model: attach the (only) tensor name to the raw values.
      # list(...) keeps this working on both Python 2 and Python 3.
      input_name = list(self._client.signature.inputs.keys())[0]
      return {input_name: instances}
    return batch(instances)

  def _preprocess(self, instances):
    """Runs the preprocessing function on the instances.

    Args:
      instances: list of instances as provided to the predict() method.

    Returns:
      A new list of preprocessed instances. Each instance is as described
      in the predict() method.

    Raises:
      PredictionError: if the preprocessing function fails on any instance.
    """
    if not self._preprocess_fn:
      return instances
    try:
      return [self._preprocess_fn(i).SerializeToString() for i in instances]
    except Exception as e:
      logging.error("Exception during preprocessing: " + str(e))
      raise PredictionError(PredictionError.FAILED_TO_PREPROCESS_INPUTS,
                            "Exception during preprocessing: " + str(e))

  # TODO(b/34686738): can this be removed?
  def need_preprocess(self):
    """Returns True if preprocessing is needed."""
    return bool(self._preprocess_fn)

  # TODO(b/34686738): can this be removed?
  def is_single_input(self):
    """Returns True if the graph only has one input tensor."""
    return len(self._client.signature.inputs) == 1

  # TODO(b/34686738): can this be removed?
  def is_single_string_input(self):
    """Returns True if the graph only has one string input tensor."""
    if self.is_single_input():
      dtype = list(self._client.signature.inputs.values())[0].dtype
      return dtype == dtypes.string.as_datatype_enum
    return False

  def maybe_preprocess(self, instances):
    """Preprocess the instances if necessary."""
    # The instances should be already (b64-) decoded here.
    if not self.is_single_input():
      return instances

    # Input is a single tensor; the tensor name might or might not be given.
    # There are 3 cases (assuming the tensor name is "t", tensor = "abc"):
    # 1) {"t": "abc"}
    # 2) "abc"
    # 3) {"y": ...} --> wrong tensor name is given.
    tensor_name = list(self._client.signature.inputs.keys())[0]

    def parse_single_tensor(x, tensor_name):
      if not isinstance(x, dict):
        # case (2)
        return x
      elif len(x) == 1 and tensor_name == list(x.keys())[0]:
        # case (1)
        return list(x.values())[0]
      else:
        # case (3)
        raise PredictionError(
            PredictionError.FAILED_TO_PARSE_INPUTS,
            "Expected tensor name: %s, got tensor name: %s." %
            (tensor_name, list(x.keys())))

    if not isinstance(instances, list):
      instances = [instances]
    instances = [parse_single_tensor(x, tensor_name) for x in instances]
    preprocessed = self._preprocess(instances)
    result = list(preprocessed)
    return result

  def predict(self, instances, stats=None):
    """Returns predictions for the provided instances.

    Instances are the decoded values from the request.

    Args:
      instances: list of instances, as described in the API.
      stats: Stats object for recording timing information.

    Returns:
      A two-element tuple (inputs, outputs). Both inputs and outputs are
      lists. Each input/output is a dict mapping input/output alias to the
      value for that input/output.

    Raises:
      PredictionError: if an error occurs during prediction.
    """
    stats = stats or Stats()
    with stats.time(PREPROCESSING_TIME):
      preprocessed = self.maybe_preprocess(instances)
    with stats.time(COLUMNARIZE_TIME):
      batched = self._get_batched_instance(preprocessed)
      for k, v in batched.items():
        # Detect whether or not the user omits an input in one or more inputs.
        # TODO(b/34686738): check in batch?
        if isinstance(v, list) and len(v) != len(preprocessed):
          raise PredictionError(
              PredictionError.FAILED_TO_HANDLE_BAD_INPUTS,
              "Input %s was missing in at least one input instance." % k)
    with stats.time(ENGINE_RUN_TIME):
      outputs = self._client.predict(batched, stats)
    with stats.time(ROWIFY_TIME):
      # When returned element only contains one result (batch size == 1),
      # tensorflow's session.run() will return a scalar directly instead of
      # a list. So we need to listify that scalar.
      # TODO(b/34686738): verify this behavior is correct.
      def listify(value):
        if not hasattr(value, "shape"):
          # dtype=object (np.object was removed from modern numpy).
          return np.asarray([value], dtype=object)
        elif not value.shape:
          # TODO(b/34686738): pretty sure this is a bug that only exists
          # because samples like iris have a bug where they use tf.squeeze
          # which removes the batch dimension. The samples should be fixed.
          return np.expand_dims(value, axis=0)
        else:
          return value

      outputs = {alias: listify(val) for alias, val in outputs.items()}
      outputs = unbatch(outputs)
    # TODO(b/34686738): this should probably be taken care of directly
    # in batch_prediction.py, or at least a helper method. That would
    # allow us to avoid processing the inputs when not necessary.
    with stats.time(INPUT_PROCESSING_TIME):
      inputs = instances
      # BUG FIX: the original tested `if self.is_single_input:` — the bound
      # method itself, which is always truthy — so multi-input models also
      # had their inputs rewrapped under an arbitrary input name. Call it.
      if self.is_single_input():
        input_name = list(self._client.signature.inputs.keys())[0]
        inputs = [{input_name: i} for i in inputs]
    return inputs, outputs

  # TODO(b/34686738): use signatures instead; remove this method.
  def outputs_type_map(self):
    """Returns a map from tensor alias to tensor type."""
    return {alias: dtypes.DType(info.dtype)
            for alias, info in self._client.signature.outputs.items()}

  # TODO(b/34686738). Seems like this should be split into helper methods:
  # default_preprocess_fn(model_path, skip_preprocessing) and
  # default_model_and_preprocessor.
  @classmethod
  def from_client(cls, client, model_path, skip_preprocessing=False):
    """Creates a DefaultModel from a SessionClient and model data files."""
    del model_path  # Unused in from_client
    preprocess_fn = None
    if not skip_preprocessing:
      raise NotImplementedError("Preprocessing depends on features library, "
                                "which is not bundled.")
    return cls(client, preprocess_fn)
def decode_base64(data):
  """Recursively decodes {"b64": ...} wrappers in a JSON-like structure.

  Args:
    data: a JSON-decoded value: a list, a dict, or a scalar.

  Returns:
    The same structure with every dict of exactly the form {"b64": value}
    replaced by the base64-decoded value; all other values are unchanged.
  """
  if isinstance(data, list):
    return [decode_base64(val) for val in data]
  elif isinstance(data, dict):
    # set(data) works on Python 2 and 3; dict.viewkeys() (used previously)
    # only exists on Python 2.
    if set(data) == {"b64"}:
      return base64.b64decode(data["b64"])
    else:
      return {k: decode_base64(v) for k, v in data.items()}
  else:
    return data
def encode_base64(instances, type_map):
  """Encodes binary data in a JSON-friendly way.

  Args:
    instances: list of prediction outputs; either raw values (single-output
      models) or dicts mapping tensor alias to value.
    type_map: dict mapping tensor alias to its dtype.

  Returns:
    The instances with every string tensor whose alias ends in "_bytes"
    wrapped as {"b64": <encoded>}.

  Raises:
    ValueError: if instances is not a list, or if a non-dict instance is
      given for a model with more than one output tensor.
  """
  if not isinstance(instances, list):
    raise ValueError("only lists allowed in output; got %s" %
                     (type(instances),))
  if not instances:
    return instances
  first_value = instances[0]
  if not isinstance(first_value, dict):
    if len(type_map) != 1:
      # BUG FIX: the original *returned* the ValueError instead of raising it.
      raise ValueError("The first instance was a string, but there are "
                       "more than one output tensor, so dict expected.")
    # Only string tensors whose name ends in _bytes needs encoding.
    # list(...) keeps the .items() indexing working on Python 2 and 3.
    tensor_name, tensor_type = list(type_map.items())[0]
    if tensor_type == dtypes.string and tensor_name.endswith("_bytes"):
      instances = _encode_str_tensor(instances)
    return instances

  encoded_data = []
  for instance in instances:
    encoded_instance = {}
    for tensor_name, tensor_type in type_map.items():
      tensor_data = instance[tensor_name]
      if tensor_type == dtypes.string and tensor_name.endswith("_bytes"):
        tensor_data = _encode_str_tensor(tensor_data)
      encoded_instance[tensor_name] = tensor_data
    encoded_data.append(encoded_instance)
  return encoded_data
def _encode_str_tensor(data):
if isinstance(data, list):
return [_encode_str_tensor(val) for val in data]
return {"b64": base64.b64encode(data)}
def local_predict(model_dir=None, instances=None):
  """Runs prediction locally against the model exported in model_dir."""
  decoded = decode_base64(instances)
  session, signature = load_model(model_dir)
  client = SessionClient(session, signature)
  model = DefaultModel.from_client(client, model_dir, skip_preprocessing=True)
  _, outputs = model.predict(decoded)
  encoded = encode_base64(list(outputs), model.outputs_type_map())
  return {"predictions": encoded}
| |
"""Support for MQTT fans."""
import logging
import voluptuous as vol
from homeassistant.components import fan, mqtt
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF,
SUPPORT_OSCILLATE, SUPPORT_SET_SPEED, FanEntity)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF, CONF_PAYLOAD_ON,
CONF_STATE, STATE_OFF, STATE_ON)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to the MQTT fan platform.
CONF_STATE_VALUE_TEMPLATE = 'state_value_template'
CONF_SPEED_STATE_TOPIC = 'speed_state_topic'
CONF_SPEED_COMMAND_TOPIC = 'speed_command_topic'
CONF_SPEED_VALUE_TEMPLATE = 'speed_value_template'
CONF_OSCILLATION_STATE_TOPIC = 'oscillation_state_topic'
CONF_OSCILLATION_COMMAND_TOPIC = 'oscillation_command_topic'
CONF_OSCILLATION_VALUE_TEMPLATE = 'oscillation_value_template'
CONF_PAYLOAD_OSCILLATION_ON = 'payload_oscillation_on'
CONF_PAYLOAD_OSCILLATION_OFF = 'payload_oscillation_off'
CONF_PAYLOAD_LOW_SPEED = 'payload_low_speed'
CONF_PAYLOAD_MEDIUM_SPEED = 'payload_medium_speed'
CONF_PAYLOAD_HIGH_SPEED = 'payload_high_speed'
CONF_SPEED_LIST = 'speeds'

# Defaults used when the corresponding option is absent from the config.
DEFAULT_NAME = 'MQTT Fan'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False

# Internal keys used in the entity's payload/template lookup tables.
OSCILLATE_ON_PAYLOAD = 'oscillate_on'
OSCILLATE_OFF_PAYLOAD = 'oscillate_off'
OSCILLATION = 'oscillation'
# Voluptuous schema validating both YAML configuration and MQTT discovery
# payloads for this platform (async_discover applies it to discovery data).
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_OSCILLATION_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_PAYLOAD_HIGH_SPEED, default=SPEED_HIGH): cv.string,
    vol.Optional(CONF_PAYLOAD_LOW_SPEED, default=SPEED_LOW): cv.string,
    vol.Optional(CONF_PAYLOAD_MEDIUM_SPEED, default=SPEED_MEDIUM): cv.string,
    vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
    vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
    vol.Optional(CONF_PAYLOAD_OSCILLATION_OFF,
                 default=DEFAULT_PAYLOAD_OFF): cv.string,
    vol.Optional(CONF_PAYLOAD_OSCILLATION_ON,
                 default=DEFAULT_PAYLOAD_ON): cv.string,
    vol.Optional(CONF_SPEED_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_SPEED_LIST,
                 default=[SPEED_OFF, SPEED_LOW,
                          SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list,
    vol.Optional(CONF_SPEED_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_UNIQUE_ID): cv.string,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up MQTT fan through configuration.yaml."""
    # hass and discovery_info are unused; the entity is built from config only.
    await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT fan dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add a MQTT fan."""
        # BUG FIX: initialize before the try block. If pop() itself raises,
        # the original hit a NameError on `discovery_hash` in the except
        # handler instead of propagating the real error.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, config_entry,
                                      discovery_hash)
        except Exception:
            # Drop the pending discovery entry so the device can be
            # re-discovered, then surface the original failure.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, 'mqtt'),
        async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
                              discovery_hash=None):
    """Set up the MQTT fan."""
    # discovery_hash is non-None only when the entity comes from MQTT
    # discovery (see async_setup_entry); YAML setup leaves both extras None.
    async_add_entities([MqttFan(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttFan(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
              MqttEntityDeviceInfo, FanEntity):
    """A MQTT fan component supporting on/off, speed and oscillation."""
    def __init__(self, config, config_entry, discovery_hash):
        """Initialize the MQTT fan."""
        self._unique_id = config.get(CONF_UNIQUE_ID)
        self._state = False
        # Current speed/oscillation; None until a state message is received.
        self._speed = None
        self._oscillation = None
        self._supported_features = 0
        # Subscription bookkeeping for the topic helpers (presumably managed
        # by _subscribe_topics via the subscription module — verify there).
        self._sub_state = None
        # All of the following are populated by _setup_from_config below.
        self._topic = None
        self._payload = None
        self._templates = None
        self._optimistic = None
        self._optimistic_oscillation = None
        self._optimistic_speed = None

        # Load config
        self._setup_from_config(config)

        device_config = config.get(CONF_DEVICE)

        # Initialize mixins after our own attributes exist;
        # MqttDiscoveryUpdate gets the callback that re-applies config when
        # an updated discovery message arrives.
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash,
                                     self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        # Let the mixins run their setup first, then subscribe our topics.
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the payload, re-apply the entity config, refresh the
        # mixin subscriptions, then re-subscribe our own topics and push the
        # (possibly changed) state to Home Assistant.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topic = {
key: config.get(key) for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_SPEED_STATE_TOPIC,
CONF_SPEED_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE)
}
self._payload = {
STATE_ON: config[CONF_PAYLOAD_ON],
STATE_OFF: config[CONF_PAYLOAD_OFF],
OSCILLATE_ON_PAYLOAD: config[CONF_PAYLOAD_OSCILLATION_ON],
OSCILLATE_OFF_PAYLOAD: config[CONF_PAYLOAD_OSCILLATION_OFF],
SPEED_LOW: config[CONF_PAYLOAD_LOW_SPEED],
SPEED_MEDIUM: config[CONF_PAYLOAD_MEDIUM_SPEED],
SPEED_HIGH: config[CONF_PAYLOAD_HIGH_SPEED],
}
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None)
self._optimistic_speed = (
optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None)
self._supported_features = 0
self._supported_features |= (self._topic[CONF_OSCILLATION_STATE_TOPIC]
is not None and SUPPORT_OSCILLATE)
self._supported_features |= (self._topic[CONF_SPEED_STATE_TOPIC]
is not None and SUPPORT_SET_SPEED)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
templates = {}
for key, tpl in list(self._templates.items()):
if tpl is None:
templates[key] = lambda value: value
else:
tpl.hass = self.hass
templates[key] = tpl.async_render_with_possible_json_value
@callback
def state_received(msg):
"""Handle new received MQTT message."""
payload = templates[CONF_STATE](msg.payload)
if payload == self._payload[STATE_ON]:
self._state = True
elif payload == self._payload[STATE_OFF]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
'topic': self._topic[CONF_STATE_TOPIC],
'msg_callback': state_received,
'qos': self._config[CONF_QOS]}
@callback
def speed_received(msg):
"""Handle new received MQTT message for the speed."""
payload = templates[ATTR_SPEED](msg.payload)
if payload == self._payload[SPEED_LOW]:
self._speed = SPEED_LOW
elif payload == self._payload[SPEED_MEDIUM]:
self._speed = SPEED_MEDIUM
elif payload == self._payload[SPEED_HIGH]:
self._speed = SPEED_HIGH
self.async_write_ha_state()
if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
topics[CONF_SPEED_STATE_TOPIC] = {
'topic': self._topic[CONF_SPEED_STATE_TOPIC],
'msg_callback': speed_received,
'qos': self._config[CONF_QOS]}
self._speed = SPEED_OFF
@callback
def oscillation_received(msg):
"""Handle new received MQTT message for the oscillation."""
payload = templates[OSCILLATION](msg.payload)
if payload == self._payload[OSCILLATE_ON_PAYLOAD]:
self._oscillation = True
elif payload == self._payload[OSCILLATE_OFF_PAYLOAD]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
'topic': self._topic[CONF_OSCILLATION_STATE_TOPIC],
'msg_callback': oscillation_received,
'qos': self._config[CONF_QOS]}
self._oscillation = False
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
topics)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed for a MQTT fan."""
return False
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def name(self) -> str:
"""Get entity name."""
return self._config[CONF_NAME]
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._config[CONF_SPEED_LIST]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def speed(self):
"""Return the current speed."""
return self._speed
@property
def oscillating(self):
"""Return the oscillation state."""
return self._oscillation
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_ON], self._config[CONF_QOS],
self._config[CONF_RETAIN])
if speed:
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_OFF], self._config[CONF_QOS],
self._config[CONF_RETAIN])
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan.
This method is a coroutine.
"""
if self._topic[CONF_SPEED_COMMAND_TOPIC] is None:
return
if speed == SPEED_LOW:
mqtt_payload = self._payload[SPEED_LOW]
elif speed == SPEED_MEDIUM:
mqtt_payload = self._payload[SPEED_MEDIUM]
elif speed == SPEED_HIGH:
mqtt_payload = self._payload[SPEED_HIGH]
else:
mqtt_payload = speed
mqtt.async_publish(
self.hass, self._topic[CONF_SPEED_COMMAND_TOPIC],
mqtt_payload, self._config[CONF_QOS],
self._config[CONF_RETAIN])
if self._optimistic_speed:
self._speed = speed
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is None:
return
if oscillating is False:
payload = self._payload[OSCILLATE_OFF_PAYLOAD]
else:
payload = self._payload[OSCILLATE_ON_PAYLOAD]
mqtt.async_publish(
self.hass, self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
payload, self._config[CONF_QOS], self._config[CONF_RETAIN])
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
| |
'''
Created on Mar 30, 2016
certain sites do not like being scraped, to get around this python needs to pretend to be a browser.
You need to download the program chrome driver http://chromedriver.storage.googleapis.com/index.html
I use 2.9 win32
just place chromedriver.exe into the same directory as gormonsel.exe
Instructions:
You need to log in after clicking the Open Log Page button in order for it to work. After the first time you log in, you shouldn't have to log in again for a while.
You can open a new tab and use the c-cex website normally, but do not interact with the first tab the app opens.
any questions or comments you can contact me at:
noe@stakeco.in
@author: Noe
'''
import Tkinter
import ordmonsel
import tkMessageBox
import threading
import time
import pygame
# Initialize pygame so the mixer can play the alert sound.
pygame.init()
# NOTE(review): pygame.mixer.music.load() returns None, so `alertmusic` is
# always None; playback goes through pygame.mixer.music directly.
alertmusic = pygame.mixer.music.load('air horn.wav')

# Trade sites selectable from the option menus; menu labels map to page
# class names with '-' removed (e.g. 'C-cex' -> 'Ccex').
tsnames = ('C-cex','Bittrex')
# User-facing instruction texts shown on the start page and the site pages.
msgvar=('Select the site you want to monitor trades on.', "After logging in the first time you shouldn't have to login in again for awhile.\n\nIf you haven't used this app in a while you may have to login again.\n\nClick on the OPEN LOG PAGE , then START MONITORING button to start monitoring your trades.")
# NOTE(review): cyclemsg is defined but never referenced in this file.
cyclemsg=("You can still use the website you're if you open up a new tab and don't disturb the tab controlled by the app.","You can open up as many tabs as you want and use the broswer normally, as long as you don't distrub the app controlled tab,/n you will not effect the functioning of the Trade Monitor.")
# URL of the C-cex trade-history page that gets monitored.
url = 'https://c-cex.com/?id=h&fr=&offset=&f=3'
class Gormonsel(Tkinter.Tk):
    """Root window that stacks all app pages and raises one at a time."""

    def __init__(self, *args, **kwargs):
        Tkinter.Tk.__init__(self, *args, **kwargs)
        holder = Tkinter.Frame(self)
        holder.pack(side="top", fill="both", expand=True)
        holder.grid_rowconfigure(0, weight=1)
        holder.grid_columnconfigure(0, weight=1)
        # Build every page once; all pages share grid cell (0, 0) so that
        # raising one simply covers the others.
        self.frames = {}
        for page_cls in (StartPage, Ccex, Bittrex):
            page = page_cls(holder, self)
            page.grid(row=0, column=0, sticky='nsew')
            self.frames[page_cls.__name__] = page
        self.show_frame("StartPage")

    def show_frame(self, page_name):
        """Raise the page registered under *page_name* (a class-name string)."""
        self.frames[page_name].tkraise()
class Startlogmon(threading.Thread):
    """Background thread that polls the trade log and fires the alarm.

    Successive snapshots from the module-level ``logmon`` helper are
    compared; any difference between two reads is treated as a new trade.
    The widget references used by the alarm must be supplied via ``set()``
    before ``start()`` is called.
    """

    def __init__(self, browser, browserconf):
        # BUG FIX: the parameter was misspelled ``brwoser``, so the body's
        # ``self.browser = browser`` silently bound the module-level
        # ``browser`` global instead of the constructor argument.
        threading.Thread.__init__(self)
        self.browserconf = browserconf
        self.browser = browser

    def run(self):
        """Poll the log forever, refreshing the page between reads.

        NOTE(review): the original indentation was lost; the grouping of
        the alarm/refresh statements below was reconstructed — confirm
        against the original script.
        """
        orderstatustwo = None
        while True:
            orderstatusone = logmon.getlog()
            if orderstatustwo:
                if not orderstatusone == orderstatustwo:
                    self.startalarm()
            self.rfreshlabel['text'] = 'Refreshing in 60 seconds'
            time.sleep(60)
            self.browser.refresh()
            self.rfreshlabel['text'] = ''
            time.sleep(5)
            orderstatustwo = logmon.getlog()
            if not orderstatusone == orderstatustwo:
                self.startalarm()
            self.rfreshlabel['text'] = 'Refreshing in 60 seconds'
            time.sleep(60)
            self.browser.refresh()
            time.sleep(5)

    def set(self, rfreshlabel, stopalarmbutton, talable, stslabel):
        """Attach the GUI widgets this thread updates."""
        self.rfreshlabel = rfreshlabel
        self.stopalarmbutton = stopalarmbutton
        self.talable = talable
        self.stslabel = stslabel

    def startalarm(self):
        """Loop the alert sound and highlight the trade-alert widgets."""
        pygame.mixer.music.play(-1)
        self.talable['text'] = 'A trade has Occured'
        self.talable['bg'] = 'green'
        self.stopalarmbutton.config(state='normal', bg='yellow')

    def endalarm(self):
        """Silence the alarm and reset the trade-alert widgets."""
        self.stopalarmbutton.config(state='disable', bg='grey')
        self.talable.config(text = 'No trades currently detected', bg='red')
        pygame.mixer.music.stop()
class StartPage(Tkinter.Frame):
    """Landing page: explains the app and offers the site selector."""

    def __init__(self, parent, controller):
        Tkinter.Frame.__init__(self, parent)
        self.controller = controller

        header = Tkinter.Label(
            self, text='Gui Order Monitor using Selenium and Chrome Driver')
        header.grid(row=0, column=4, columnspan=2)

        choice = Tkinter.StringVar()
        choice.set('Trade Site')
        site_menu = Tkinter.OptionMenu(self, choice, ())
        site_menu.grid(row=0, column=0, columnspan=2, sticky='w')
        site_menu.config(width=20)
        # Rebuild the menu so each entry raises the matching page
        # (class name == site name without '-').
        entries = site_menu.children['menu']
        entries.delete(0, "end")
        for name in tsnames:
            entries.add_command(
                label=name,
                command=lambda v=name: controller.show_frame(v.replace('-', '')))

        info = Tkinter.Text(self, width=40, height=20, wrap='word')
        info.insert('1.0', "Don't forget to log in if you haven't already: Click open Log Page button.")
        info.config(state='disabled')
        info.grid(row=1, column=0, sticky='w')

        usage = Tkinter.Text(self, width=40, height=10, wrap='word')
        usage.insert('1.0', msgvar[0])
        usage.config(state='disabled')
        usage.grid(row=9, column=0, sticky='w')
class Ccex(Tkinter.Frame):
    """C-cex page: log-in, start-monitoring and alarm controls."""

    def __init__(self, parent, controller):
        Tkinter.Frame.__init__(self, parent)
        self.controller = controller
        label = Tkinter.Label(self, text = 'Gui Order Monitory using Selenium and Chrome Driver')
        label.grid(row=0, column=4, columnspan=2)
        # Site-selection drop-down (same widget pattern on every page).
        omvar = Tkinter.StringVar()
        nameMenu = Tkinter.OptionMenu(self, omvar, ())
        omvar.set('C-Cex')
        nameMenu.grid(row=0, column=0, columnspan=2, sticky='w')
        nameMenu.config(width=20)
        menu = nameMenu.children['menu']
        menu.delete(0, "end")
        for name in tsnames:
            menu.add_command(label=name, command=lambda v=name: controller.show_frame(v.replace('-','')))
        # Read-only instruction boxes.
        MText = Tkinter.Text( self, width=40, height=20, wrap='word' )
        MText.insert('1.0', "After logging into C-cex, click OPEN lOG PAGE, when you are on the Trade log page, Click start to start Monitoring trades")
        MText.config(state='disabled')
        MText.grid(row=1, column=0, sticky='w')
        UMText = Tkinter.Text( self, width=40, height=11, wrap='word' )
        UMText.insert('1.0', msgvar[1])
        UMText.config(state='disabled')
        UMText.grid(row=9, column=0, sticky='w')
        # Buttons wired to the module-level browser/monitor globals
        # (browserconf, browser, slogmon, url) created in __main__.
        olpbutton = Tkinter.Button(self, text='Open Log Page', command=lambda u=url: browserconf.openlogpage(browser, browserconf, strtbutton))
        olpbutton.grid(row=1, column=2, sticky='n')
        strtbutton = Tkinter.Button(self, text='START MONITORING', command=lambda: slogmon.start())
        strtbutton.grid(row=1, column=3, sticky='n')
        strtbutton.config(state='disable')
        # Status / alarm widgets updated from the Startlogmon thread.
        stslabel = Tkinter.Label(self, text = 'Not currently Monitoring', bg='red')
        stslabel.grid(row=3, column=3, columnspan=2, sticky='w')
        startlabelvar = Tkinter.StringVar()
        stopalarmbutton = Tkinter.Button(self, text='Stop Alarm', command=lambda: slogmon.endalarm())
        stopalarmbutton.grid(row=3, column=4, sticky='e')
        stopalarmbutton.config(state='disable', bg='grey')
        talable = Tkinter.Label(self, text = 'No trades currently detected', bg='red')
        talable.grid(row=3, column=5)
        rfreshlabel = Tkinter.Label(self)
        rfreshlabel.grid(row=1, column=4, columnspan=2, sticky='n')
        ltradelabel = Tkinter.Label(self, text = '')
        ltradelabel.grid(row=1, column=6, columnspan=2, sticky='n')
        # Hand the widgets to the monitor objects so the background thread
        # can update them while this page is displayed.
        slogmon.set(rfreshlabel, stopalarmbutton, talable, stslabel)
        logmon.set_strtbutton(strtbutton, rfreshlabel, ltradelabel, stslabel)

    # def amessage(self):
    #     tkMessageBox.showinfo("Trade has Just Occured" )
class Bittrex(Tkinter.Frame):
    """Placeholder page for Bittrex support ("Future Update")."""

    def __init__(self, parent, controller):
        Tkinter.Frame.__init__(self, parent)
        self.controller = controller

        banner = Tkinter.Label(self, text='Future Update')
        banner.grid(row=0, column=4, columnspan=2)

        selection = Tkinter.StringVar()
        site_menu = Tkinter.OptionMenu(self, selection, ())
        selection.set('Bittrex')
        site_menu.grid(row=0, column=0, columnspan=2)
        site_menu.config(width=20)
        # Same navigation drop-down as the other pages.
        entries = site_menu.children['menu']
        entries.delete(0, "end")
        for name in tsnames:
            entries.add_command(
                label=name,
                command=lambda v=name: controller.show_frame(v.replace('-', '')))
class AlertPage(Tkinter.Frame):
    """Page with one button that stops the alarm and returns to C-cex."""

    def __init__(self, parent, controller):
        Tkinter.Frame.__init__(self, parent)
        self.controller = controller
        Abutton = Tkinter.Button(self, text='A trade has occured',
                                 command=lambda: self.killalert())
        Abutton.pack()

    def killalert(self):
        """Silence the alarm and show the C-cex page again."""
        pygame.mixer.music.stop()
        # BUG FIX: Gormonsel.frames is keyed by class-NAME strings (see
        # Gormonsel.__init__), so the page name string must be passed,
        # not the class object, or show_frame raises KeyError.
        self.controller.show_frame("Ccex")
if __name__ == '__main__':
    # Wire up the selenium-driven browser, the log monitor and the polling
    # thread before entering the Tk main loop.  These names are module
    # globals deliberately: the page classes reference them directly.
    browserconf = ordmonsel.ordmonsel()
    browser = browserconf.setupbrowser()
    logmon = ordmonsel.LogMon(browser, browserconf)
    slogmon = Startlogmon(browser, browserconf)
    # Daemon thread: it dies automatically when the GUI exits.
    slogmon.daemon = True
    app = Gormonsel()
    app.mainloop()
    pass
| |
"""Support to manage a shopping list."""
import asyncio
import logging
import uuid
import voluptuous as vol
from homeassistant.components import http, websocket_api
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import HTTP_BAD_REQUEST, HTTP_NOT_FOUND
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
ATTR_NAME = "name"

DOMAIN = "shopping_list"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA)
# Event fired on the HA bus whenever the list changes.
EVENT = "shopping_list_updated"
# Schema for partial item updates (both keys optional).
ITEM_UPDATE_SCHEMA = vol.Schema({"complete": bool, ATTR_NAME: str})
# File (relative to the config dir) where items are persisted as JSON.
PERSISTENCE = ".shopping_list.json"

SERVICE_ADD_ITEM = "add_item"
SERVICE_COMPLETE_ITEM = "complete_item"

SERVICE_ITEM_SCHEMA = vol.Schema({vol.Required(ATTR_NAME): vol.Any(None, cv.string)})

# Websocket command types and their message schemas.
WS_TYPE_SHOPPING_LIST_ITEMS = "shopping_list/items"
WS_TYPE_SHOPPING_LIST_ADD_ITEM = "shopping_list/items/add"
WS_TYPE_SHOPPING_LIST_UPDATE_ITEM = "shopping_list/items/update"
WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS = "shopping_list/items/clear"

SCHEMA_WEBSOCKET_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_SHOPPING_LIST_ITEMS}
)

SCHEMA_WEBSOCKET_ADD_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_SHOPPING_LIST_ADD_ITEM, vol.Required("name"): str}
)

SCHEMA_WEBSOCKET_UPDATE_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {
        vol.Required("type"): WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
        vol.Required("item_id"): str,
        vol.Optional("name"): str,
        vol.Optional("complete"): bool,
    }
)

SCHEMA_WEBSOCKET_CLEAR_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS}
)
async def async_setup(hass, config):
    """Initialize the shopping list.

    Registers the add/complete services, the REST views, the frontend
    panel and the websocket commands, then loads the persisted items.

    NOTE: converted from the deprecated (and since removed) generator-based
    ``@asyncio.coroutine`` / ``yield from`` style to native coroutines,
    matching the async/await style already used elsewhere in this file.
    """

    async def add_item_service(call):
        """Add an item with `name`."""
        data = hass.data[DOMAIN]
        name = call.data.get(ATTR_NAME)
        if name is not None:
            data.async_add(name)

    async def complete_item_service(call):
        """Mark the item provided via `name` as completed."""
        data = hass.data[DOMAIN]
        name = call.data.get(ATTR_NAME)
        if name is None:
            return
        try:
            item = [item for item in data.items if item["name"] == name][0]
        except IndexError:
            _LOGGER.error("Removing of item failed: %s cannot be found", name)
        else:
            data.async_update(item["id"], {"name": name, "complete": True})

    data = hass.data[DOMAIN] = ShoppingData(hass)
    await data.async_load()

    hass.services.async_register(
        DOMAIN, SERVICE_ADD_ITEM, add_item_service, schema=SERVICE_ITEM_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SERVICE_COMPLETE_ITEM, complete_item_service, schema=SERVICE_ITEM_SCHEMA
    )

    hass.http.register_view(ShoppingListView)
    hass.http.register_view(CreateShoppingListItemView)
    hass.http.register_view(UpdateShoppingListItemView)
    hass.http.register_view(ClearCompletedItemsView)

    hass.components.frontend.async_register_built_in_panel(
        "shopping-list", "shopping_list", "mdi:cart"
    )
    hass.components.websocket_api.async_register_command(
        WS_TYPE_SHOPPING_LIST_ITEMS, websocket_handle_items, SCHEMA_WEBSOCKET_ITEMS
    )
    hass.components.websocket_api.async_register_command(
        WS_TYPE_SHOPPING_LIST_ADD_ITEM, websocket_handle_add, SCHEMA_WEBSOCKET_ADD_ITEM
    )
    hass.components.websocket_api.async_register_command(
        WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
        websocket_handle_update,
        SCHEMA_WEBSOCKET_UPDATE_ITEM,
    )
    hass.components.websocket_api.async_register_command(
        WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS,
        websocket_handle_clear,
        SCHEMA_WEBSOCKET_CLEAR_ITEMS,
    )

    return True
class ShoppingData:
    """Class to hold shopping list data."""

    def __init__(self, hass):
        """Initialize the shopping list."""
        self.hass = hass
        # In-memory list of item dicts: {"name", "id", "complete"}.
        self.items = []

    @callback
    def async_add(self, name):
        """Add a shopping list item and schedule persistence."""
        item = {"name": name, "id": uuid.uuid4().hex, "complete": False}
        self.items.append(item)
        # Persist off the event loop.
        self.hass.async_add_job(self.save)
        return item

    @callback
    def async_update(self, item_id, info):
        """Update a shopping list item.

        Raises KeyError when no item with `item_id` exists and
        vol.Invalid when `info` fails ITEM_UPDATE_SCHEMA validation.
        """
        item = next((itm for itm in self.items if itm["id"] == item_id), None)
        if item is None:
            raise KeyError
        info = ITEM_UPDATE_SCHEMA(info)
        item.update(info)
        self.hass.async_add_job(self.save)
        return item

    @callback
    def async_clear_completed(self):
        """Clear completed items."""
        self.items = [itm for itm in self.items if not itm["complete"]]
        self.hass.async_add_job(self.save)

    async def async_load(self):
        """Load items.

        NOTE: converted from the deprecated generator-based
        ``@asyncio.coroutine`` / ``yield from`` style to a native
        coroutine, matching the rest of this file.
        """

        def load():
            """Load the items synchronously."""
            return load_json(self.hass.config.path(PERSISTENCE), default=[])

        self.items = await self.hass.async_add_job(load)

    def save(self):
        """Save the items."""
        save_json(self.hass.config.path(PERSISTENCE), self.items)
class ShoppingListView(http.HomeAssistantView):
    """View to retrieve shopping list content."""

    url = "/api/shopping_list"
    name = "api:shopping_list"

    @callback
    def get(self, request):
        """Return all shopping list items as JSON."""
        hass = request.app["hass"]
        return self.json(hass.data[DOMAIN].items)
class UpdateShoppingListItemView(http.HomeAssistantView):
    """View to update an existing shopping list item."""

    url = "/api/shopping_list/item/{item_id}"
    name = "api:shopping_list:item:id"

    async def post(self, request, item_id):
        """Update a shopping list item."""
        body = await request.json()
        hass = request.app["hass"]
        try:
            item = hass.data[DOMAIN].async_update(item_id, body)
        except KeyError:
            return self.json_message("Item not found", HTTP_NOT_FOUND)
        except vol.Invalid:
            return self.json_message("Item not found", HTTP_BAD_REQUEST)
        # Only announce and respond when the update succeeded.
        hass.bus.async_fire(EVENT)
        return self.json(item)
class CreateShoppingListItemView(http.HomeAssistantView):
    """View to create a new shopping list item."""

    url = "/api/shopping_list/item"
    name = "api:shopping_list:item"

    @RequestDataValidator(vol.Schema({vol.Required("name"): str}))
    async def post(self, request, data):
        """Create a new shopping list item.

        NOTE: converted from the deprecated ``@asyncio.coroutine``
        decorator to a native coroutine, matching the async/await style
        already used by the other views in this file.
        """
        item = request.app["hass"].data[DOMAIN].async_add(data["name"])
        request.app["hass"].bus.async_fire(EVENT)
        return self.json(item)
class ClearCompletedItemsView(http.HomeAssistantView):
    """View to clear all completed shopping list items."""

    url = "/api/shopping_list/clear_completed"
    name = "api:shopping_list:clear_completed"

    @callback
    def post(self, request):
        """Drop completed items and broadcast the update event."""
        shopping_list = request.app["hass"].data[DOMAIN]
        shopping_list.async_clear_completed()
        request.app["hass"].bus.async_fire(EVENT)
        return self.json_message("Cleared completed items.")
@callback
def websocket_handle_items(hass, connection, msg):
    """Handle get shopping_list items."""
    items = hass.data[DOMAIN].items
    connection.send_message(websocket_api.result_message(msg["id"], items))
@callback
def websocket_handle_add(hass, connection, msg):
    """Handle add item to shopping_list."""
    new_item = hass.data[DOMAIN].async_add(msg["name"])
    hass.bus.async_fire(EVENT)
    connection.send_message(websocket_api.result_message(msg["id"], new_item))
@websocket_api.async_response
async def websocket_handle_update(hass, connection, msg):
    """Handle update shopping_list item."""
    msg_id = msg.pop("id")
    item_id = msg.pop("item_id")
    msg.pop("type")
    # Whatever remains in the message dict is the update payload.
    try:
        item = hass.data[DOMAIN].async_update(item_id, msg)
    except KeyError:
        connection.send_message(
            websocket_api.error_message(msg_id, "item_not_found", "Item not found")
        )
    else:
        hass.bus.async_fire(EVENT)
        connection.send_message(websocket_api.result_message(msg_id, item))
@callback
def websocket_handle_clear(hass, connection, msg):
    """Handle clearing shopping_list items."""
    shopping_list = hass.data[DOMAIN]
    shopping_list.async_clear_completed()
    hass.bus.async_fire(EVENT)
    connection.send_message(websocket_api.result_message(msg["id"]))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, len-as-condition
"""Backend compiler related feature registration"""
from tvm.te.hybrid import script
from tvm import topi
from tvm.runtime import convert
from .op import register_compute, register_shape_func
from .op import register_broadcast_schedule, register_injective_schedule
from .op import register_pattern, OpPattern
# Elementwise/broadcast ops: use the broadcast schedule.
register_broadcast_schedule("log")
register_broadcast_schedule("log2")
register_broadcast_schedule("log10")
register_broadcast_schedule("tan")
register_broadcast_schedule("cos")
register_broadcast_schedule("cosh")
register_broadcast_schedule("sin")
register_broadcast_schedule("sinh")
register_broadcast_schedule("acos")
register_broadcast_schedule("acosh")
register_broadcast_schedule("asin")
register_broadcast_schedule("asinh")
register_broadcast_schedule("atan")
register_broadcast_schedule("atanh")
register_broadcast_schedule("exp")
register_broadcast_schedule("erf")
register_broadcast_schedule("sqrt")
register_broadcast_schedule("rsqrt")
register_broadcast_schedule("sigmoid")
register_broadcast_schedule("floor")
register_broadcast_schedule("ceil")
register_broadcast_schedule("trunc")
register_broadcast_schedule("round")
register_broadcast_schedule("sign")
register_broadcast_schedule("abs")
register_broadcast_schedule("tanh")
register_broadcast_schedule("add")
register_broadcast_schedule("subtract")
register_broadcast_schedule("multiply")
register_broadcast_schedule("divide")
register_broadcast_schedule("floor_divide")
register_broadcast_schedule("power")
register_broadcast_schedule("copy")
register_broadcast_schedule("logical_not")
register_broadcast_schedule("logical_and")
register_broadcast_schedule("logical_or")
register_broadcast_schedule("logical_xor")
register_broadcast_schedule("bitwise_not")
register_broadcast_schedule("bitwise_and")
register_broadcast_schedule("bitwise_or")
register_broadcast_schedule("bitwise_xor")
register_broadcast_schedule("negative")
register_broadcast_schedule("mod")
register_broadcast_schedule("floor_mod")
register_broadcast_schedule("equal")
register_broadcast_schedule("not_equal")
register_broadcast_schedule("less")
register_broadcast_schedule("less_equal")
register_broadcast_schedule("greater")
register_broadcast_schedule("greater_equal")
register_broadcast_schedule("isnan")
register_broadcast_schedule("isfinite")
register_broadcast_schedule("isinf")

# Ops scheduled as plain injective computations.
register_injective_schedule("maximum")
register_injective_schedule("minimum")
register_injective_schedule("right_shift")
register_injective_schedule("left_shift")
register_injective_schedule("shape_of")
register_injective_schedule("ndarray_size")
register_injective_schedule("device_copy")

# Fast (approximate) math variants.
register_broadcast_schedule("fast_exp")
register_broadcast_schedule("fast_tanh")
register_broadcast_schedule("fast_erf")
# zeros
@register_compute("zeros")
def zeros_compute(attrs, inputs, output_type):
    """Compute for zeros: a tensor of the output shape/dtype filled with 0."""
    assert not inputs
    return [topi.full(output_type.shape, output_type.dtype, 0.0)]

register_broadcast_schedule("zeros")
register_pattern("zeros", OpPattern.ELEMWISE)

# zeros_like
@register_compute("zeros_like")
def zeros_like_compute(attrs, inputs, output_type):
    """Compute for zeros_like: zeros with the shape/dtype of the input."""
    assert len(inputs) == 1
    return [topi.full_like(inputs[0], 0.0)]

register_broadcast_schedule("zeros_like")

# ones
@register_compute("ones")
def ones_compute(attrs, inputs, output_type):
    """Compute for ones: a tensor of the output shape/dtype filled with 1."""
    assert not inputs
    return [topi.full(output_type.shape, output_type.dtype, 1.0)]

register_broadcast_schedule("ones")
register_pattern("ones", OpPattern.ELEMWISE)

# ones_like
@register_compute("ones_like")
def ones_like_compute(attrs, inputs, output_type):
    """Compute for ones_like: ones with the shape/dtype of the input."""
    assert len(inputs) == 1
    return [topi.full_like(inputs[0], 1.0)]

register_broadcast_schedule("ones_like")

# clip
@register_compute("clip")
def clip_compute(attrs, inputs, output_type):
    """Compute for clip: clamp the input into [attrs.a_min, attrs.a_max]."""
    assert len(inputs) == 1
    return [topi.clip(inputs[0], attrs.a_min, attrs.a_max)]

register_injective_schedule("clip")

# fixed point multiply
@register_compute("fixed_point_multiply")
def fixed_point_multiply_compute(attrs, inputs, output_type):
    """Compute for fixed_point_multiply using attrs.multiplier and attrs.shift."""
    assert len(inputs) == 1
    return [topi.fixed_point_multiply(inputs[0], attrs.multiplier, attrs.shift)]

register_injective_schedule("fixed_point_multiply")
# full
# Hybrid-script shape function: copy a 1-D shape tensor into a fresh int64
# tensor (hybrid script requires the explicit element-wise copy).  No
# docstring inside the body: it must stay parseable by the hybrid parser.
@script
def _full_shape_func(shape):
    out_ndim = shape.shape[0]
    out = output_tensor((out_ndim,), "int64")
    for i in const_range(out_ndim):
        out[i] = int64(shape[i])
    return out
# Hybrid-script helper: convert a static (Python-level) shape sequence into
# an int64 shape tensor of the same length.
@script
def _convert_shape(shape):
    out = output_tensor((len(shape),), "int64")
    for i in const_range(len(shape)):
        out[i] = int64(shape[i])
    return out
def full_shape_func(attrs, inputs, out_ndims):
    """Shape func for full.

    Prefers the dynamic shape tensor input when one is supplied, and
    otherwise falls back to the static `shape` attribute.
    """
    has_shape_input = len(inputs) > 1
    if has_shape_input:
        result = _full_shape_func(inputs[1])
    else:
        result = _convert_shape(convert(attrs.shape))
    return [result]
def no_data_full_shape_func(attrs, inputs, out_ndims):
    """Shape func for zeros and ones.

    With no inputs the static `shape` attribute is used; otherwise the
    first input is treated as the dynamic shape tensor.
    """
    if not inputs:
        return [_convert_shape(convert(attrs.shape))]
    return [_full_shape_func(inputs[0])]
# Hybrid-script numpy-style broadcast of two shape tensors into an
# `ndim`-long int64 result.  Comments only (no docstring) so the body stays
# parseable by the hybrid parser.
@script
def _broadcast_shape_func(x, y, ndim):
    out = output_tensor((ndim,), "int64")
    # A rank-0 shape tensor means one side contributes no dimensions; the
    # result is then the other side verbatim.
    if len(x.shape) == 0:
        for i in const_range(ndim):
            out[i] = y[i]
    elif len(y.shape) == 0:
        for i in const_range(ndim):
            out[i] = x[i]
    else:
        ndim1 = x.shape[0]
        ndim2 = y.shape[0]
        # Walk trailing dims first — broadcasting aligns from the right.
        for i in const_range(1, min(ndim1, ndim2) + 1):
            if x[ndim1 - i] == y[ndim2 - i]:
                out[ndim - i] = x[ndim1 - i]
            elif x[ndim1 - i] == 1:
                out[ndim - i] = y[ndim2 - i]
            else:
                assert y[ndim2 - i] == 1, "Incompatible broadcast type %s and %s" % (
                    x[ndim1 - i],
                    y[ndim2 - i],
                )
                out[ndim - i] = x[ndim1 - i]
        # Remaining leading dims come from the higher-rank input.
        for i in const_range(min(ndim1, ndim2) + 1, ndim + 1):
            if ndim1 >= ndim2:
                out[ndim - i] = x[ndim1 - i]
            else:
                out[ndim - i] = y[ndim2 - i]
    return out
def broadcast_shape_func(attrs, inputs, out_ndims):
    """Shape function for broadcast op.

    Broadcast ops take exactly two shape inputs; their numpy-style
    broadcast is computed by the hybrid-script helper.
    """
    lhs, rhs = inputs
    return [_broadcast_shape_func(lhs, rhs, out_ndims[0])]
def elemwise_shape_func(attrs, inputs, _):
    """Shape function for elemwise op: the output shape equals the first input's."""
    first_input = inputs[0]
    return [topi.math.identity(first_input)]
# Shape-function registrations.  The boolean flag marks whether the shape
# function is data-dependent (needs input *values*, not just shapes).

# Unary ops whose output shape equals the input shape.
register_shape_func("cast", False, elemwise_shape_func)
register_shape_func("cast_like", False, elemwise_shape_func)
register_shape_func("round", False, elemwise_shape_func)
register_shape_func("zeros", False, no_data_full_shape_func)
register_shape_func("zeros_like", False, elemwise_shape_func)
register_shape_func("ones", False, no_data_full_shape_func)
register_shape_func("ones_like", False, elemwise_shape_func)
register_shape_func("full", False, full_shape_func)
register_shape_func("full_like", False, elemwise_shape_func)
# broadcast_to is data-dependent: the target shape is an input value.
register_shape_func("broadcast_to", True, full_shape_func)

# Binary ops with numpy-style broadcasting.
register_shape_func("add", False, broadcast_shape_func)
register_shape_func("subtract", False, broadcast_shape_func)
register_shape_func("multiply", False, broadcast_shape_func)
register_shape_func("divide", False, broadcast_shape_func)
register_shape_func("floor_divide", False, broadcast_shape_func)
register_shape_func("power", False, broadcast_shape_func)
register_shape_func("mod", False, broadcast_shape_func)
register_shape_func("floor_mod", False, broadcast_shape_func)
register_shape_func("logical_and", False, broadcast_shape_func)
register_shape_func("logical_or", False, broadcast_shape_func)
register_shape_func("logical_xor", False, broadcast_shape_func)
# NOTE(review): bitwise_not is unary but is registered with the binary
# broadcast shape func, unlike logical_not below (elemwise) — confirm
# against upstream whether this is intentional.
register_shape_func("bitwise_not", False, broadcast_shape_func)
register_shape_func("bitwise_and", False, broadcast_shape_func)
register_shape_func("bitwise_or", False, broadcast_shape_func)
register_shape_func("bitwise_xor", False, broadcast_shape_func)
register_shape_func("equal", False, broadcast_shape_func)
register_shape_func("not_equal", False, broadcast_shape_func)
register_shape_func("less", False, broadcast_shape_func)
register_shape_func("less_equal", False, broadcast_shape_func)
register_shape_func("greater", False, broadcast_shape_func)
register_shape_func("greater_equal", False, broadcast_shape_func)
register_shape_func("maximum", False, broadcast_shape_func)
register_shape_func("minimum", False, broadcast_shape_func)

# More elementwise math ops.
register_shape_func("sqrt", False, elemwise_shape_func)
register_shape_func("negative", False, elemwise_shape_func)
register_shape_func("exp", False, elemwise_shape_func)
register_shape_func("tan", False, elemwise_shape_func)
register_shape_func("fast_exp", False, elemwise_shape_func)
register_shape_func("fast_tanh", False, elemwise_shape_func)
register_shape_func("fast_erf", False, elemwise_shape_func)
register_shape_func("floor", False, elemwise_shape_func)
register_shape_func("log", False, elemwise_shape_func)
register_shape_func("device_copy", False, elemwise_shape_func)
register_shape_func("clip", False, elemwise_shape_func)
register_shape_func("log2", False, elemwise_shape_func)
register_shape_func("sigmoid", False, elemwise_shape_func)
register_shape_func("tanh", False, elemwise_shape_func)
register_shape_func("logical_not", False, elemwise_shape_func)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A parameter dictionary class which supports the nest structure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import six
import tensorflow as tf
import yaml
# regex pattern that matches on key-value pairs in a comma-separated
# key-value pair string. It splits each k-v pair on the = sign, and
# matches on values that are within single quotes, double quotes, single
# values (e.g. floats, ints, etc.), and a lists within brackets.
_PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
\s*=\s*
((?P<val>\'(.*?)\' # single quote
|
\"(.*?)\" # double quote
|
[^,\[]* # single value
|
\[[^\]]*\])) # list of values
($|,\s*)""", re.VERBOSE)
_CONST_VALUE_RE = re.compile(r'(\d.*|-\d.*|None)')
class ParamsDict(object):
  """A hyperparameter container class.

  Parameters are stored as attributes; nested dicts become nested ParamsDict
  objects. Instances can be locked against modification and validated
  against a list of user-defined consistency restrictions.
  """

  # Bookkeeping attribute names: never treated as hyperparameters, cannot be
  # overridden or deleted.
  RESERVED_ATTR = ['_locked', '_restrictions']

  def __init__(self, default_params=None, restrictions=None):
    """Instantiate a ParamsDict.

    Instantiate a ParamsDict given a set of default parameters and a list of
    restrictions. Upon initialization, it validates itself by checking all
    the defined restrictions, and raises an error if it finds inconsistency.

    Args:
      default_params: a Python dict or another ParamsDict object including
        the default parameters to initialize.
      restrictions: a list of strings, which define a list of restrictions
        to ensure the consistency of different parameters internally. Each
        restriction string is defined as a binary relation with a set of
        operators, including {'==', '!=', '<', '<=', '>', '>='}.
    """
    self._locked = False
    self._restrictions = []
    if restrictions:
      self._restrictions = restrictions
    if default_params is None:
      default_params = {}
    self.override(default_params, is_strict=False)
    self.validate()

  def _set(self, k, v):
    # Nested dicts become nested ParamsDicts; everything else is deep-copied
    # so callers cannot mutate our state through shared references.
    if isinstance(v, dict):
      self.__dict__[k] = ParamsDict(v)
    else:
      self.__dict__[k] = copy.deepcopy(v)

  def __setattr__(self, k, v):
    """Sets the value of the existing key.

    Note that this does not allow directly defining a new key. Use the
    `override` method with `is_strict=False` instead.

    Args:
      k: the key string.
      v: the value to be used to set the key `k`.

    Raises:
      KeyError: if k is not defined in the ParamsDict.
      ValueError: if the ParamsDict instance has been locked.
    """
    if k not in ParamsDict.RESERVED_ATTR:
      if k not in self.__dict__.keys():
        # Fixed message: extending requires is_strict=False (the original
        # message said True, contradicting `override`'s documentation) and
        # had a stray '%' before the placeholder.
        raise KeyError('The key `{}` does not exist. '
                       'To extend the existing keys, use '
                       '`override` with `is_strict` = False.'.format(k))
      if self._locked:
        raise ValueError('The ParamsDict has been locked. '
                         'No change is allowed.')
    self._set(k, v)

  def __getattr__(self, k):
    """Gets the value of the existing key.

    Args:
      k: the key string.

    Returns:
      the value of the key.

    Raises:
      AttributeError: if k is not defined in the ParamsDict.
    """
    # __getattr__ is only invoked when normal attribute lookup fails, so
    # reaching this point normally means `k` was never defined.
    if k not in self.__dict__.keys():
      raise AttributeError('The key `{}` does not exist. '.format(k))
    return self.__dict__[k]

  def __contains__(self, key):
    """Implements the membership test operator."""
    return key in self.__dict__

  def get(self, key, value=None):
    """Accesses through built-in dictionary get method."""
    return self.__dict__.get(key, value)

  def __delattr__(self, k):
    """Deletes the key and removes its values.

    Args:
      k: the key string.

    Raises:
      AttributeError: if k is reserved or not defined in the ParamsDict.
      ValueError: if the ParamsDict instance has been locked.
    """
    if k in ParamsDict.RESERVED_ATTR:
      raise AttributeError('The key `{}` is reserved. No change is allowed. '
                           .format(k))
    if k not in self.__dict__.keys():
      raise AttributeError('The key `{}` does not exist. '.format(k))
    if self._locked:
      raise ValueError('The ParamsDict has been locked. No change is allowed.')
    del self.__dict__[k]

  def override(self, override_params, is_strict=True):
    """Override the ParamsDict with a set of given params.

    Args:
      override_params: a dict or a ParamsDict specifying the parameters to
        be overridden.
      is_strict: a boolean specifying whether override is strict or not. If
        True, keys in `override_params` must be present in the ParamsDict.
        If False, keys in `override_params` can be different from what is
        currently defined in the ParamsDict. In this case, the ParamsDict
        will be extended to include the new keys.
    """
    if self._locked:
      raise ValueError('The ParamsDict has been locked. No change is allowed.')
    if isinstance(override_params, ParamsDict):
      override_params = override_params.as_dict()
    self._override(override_params, is_strict)  # pylint: disable=protected-access

  def _override(self, override_dict, is_strict=True):
    """The implementation of `override`."""
    for k, v in override_dict.items():
      if k in ParamsDict.RESERVED_ATTR:
        # Fixed: the original message contained a literal '%{}' and never
        # applied .format(k), so the offending key was not reported.
        raise KeyError('The key `{}` is internally reserved. '
                       'Can not be overridden.'.format(k))
      if k not in self.__dict__.keys():
        if is_strict:
          raise KeyError('The key `{}` does not exist. '
                         'To extend the existing keys, use '
                         '`override` with `is_strict` = False.'.format(k))
        else:
          self._set(k, v)
      else:
        if isinstance(v, dict):
          self.__dict__[k]._override(v, is_strict)  # pylint: disable=protected-access
        elif isinstance(v, ParamsDict):
          self.__dict__[k]._override(v.as_dict(), is_strict)  # pylint: disable=protected-access
        else:
          self.__dict__[k] = copy.deepcopy(v)

  def lock(self):
    """Makes the ParamsDict immutable."""
    self._locked = True

  def as_dict(self):
    """Returns a dict representation of ParamsDict.

    For the nested ParamsDict, a nested dict will be returned.
    """
    params_dict = {}
    for k, v in self.__dict__.items():
      if k not in ParamsDict.RESERVED_ATTR:
        if isinstance(v, ParamsDict):
          params_dict[k] = v.as_dict()
        else:
          params_dict[k] = copy.deepcopy(v)
    return params_dict

  def validate(self):
    """Validate the parameters consistency based on the restrictions.

    This method validates the internal consistency using the pre-defined
    list of restrictions. A restriction is defined as a string which
    specifies a binary operation. The supported binary operations are
    {'==', '!=', '<', '<=', '>', '>='}. Note that the meaning of these
    operators are consistent with the underlying Python implementation.
    Users should make sure the restrictions defined on their types make
    sense. Each side of a restriction is either a dotted path into the
    ParamsDict (e.g. 'a.b.c') or a literal constant (a number or None).

    For example, given restrictions ['a.a1 == b.ccc.a1', 'a.a2 <= b.bb.bb2']
    validation enforces that the value at 'a.a1' equals the value at
    'b.ccc.a1', and the value at 'a.a2' is at most the value at 'b.bb.bb2'.

    Raises:
      KeyError: if any of the following happens
        (1) any of parameters in any of restrictions is not defined in
            ParamsDict,
        (2) any inconsistency violating the restriction is found.
      ValueError: if the restriction defined in the string is not supported.
    """

    def _get_kv(dotted_string, params_dict):
      """Resolve one side of a restriction to a (key, value) pair."""
      # Literal constants: numbers or 'None' (pattern inlined from the
      # module-level _CONST_VALUE_RE so this method is self-contained).
      if re.match(r'(\d.*|-\d.*|None)', dotted_string) is not None:
        const_str = dotted_string
        if const_str == 'None':
          constant = None
        else:
          constant = float(const_str)
        return None, constant
      # Otherwise: a dotted path into the (nested) parameter dict.
      tokenized_params = dotted_string.split('.')
      v = params_dict
      for t in tokenized_params:
        v = v[t]
      return tokenized_params[-1], v

    def _get_kvs(tokens, params_dict):
      if len(tokens) != 2:
        raise ValueError('Only support binary relation in restriction.')
      stripped_tokens = [t.strip() for t in tokens]
      left_k, left_v = _get_kv(stripped_tokens[0], params_dict)
      right_k, right_v = _get_kv(stripped_tokens[1], params_dict)
      return left_k, left_v, right_k, right_v

    params_dict = self.as_dict()
    # Ordered so two-character operators are tried before their one-character
    # prefixes ('<=' before '<', '>=' before '>'). The original checked '<'
    # and '>' first, which made '<=' and '>=' restrictions unparseable (they
    # were split on the single character, leaving a dangling '=').
    relations = [
        ('==', lambda a, b: a == b),
        ('!=', lambda a, b: a != b),
        ('<=', lambda a, b: a <= b),
        ('>=', lambda a, b: a >= b),
        ('<', lambda a, b: a < b),
        ('>', lambda a, b: a > b),
    ]
    for restriction in self._restrictions:
      for op, is_satisfied in relations:
        if op in restriction:
          tokens = restriction.split(op)
          _, left_v, _, right_v = _get_kvs(tokens, params_dict)
          if not is_satisfied(left_v, right_v):
            raise KeyError('Found inconsistency between key `{}` and key `{}`.'
                           .format(tokens[0], tokens[1]))
          break
      else:
        raise ValueError('Unsupported relation in restriction.')
def read_yaml_to_params_dict(file_path):
  """Reads a YAML file into a ParamsDict.

  Args:
    file_path: path to the YAML file to read.

  Returns:
    A ParamsDict populated from the file contents.
  """
  with tf.io.gfile.GFile(file_path, 'r') as f:
    # yaml.load without an explicit Loader is deprecated (PyYAML >= 5.1) and
    # unsafe on untrusted input; FullLoader keeps the previous behavior for
    # ordinary config files while refusing arbitrary object construction.
    params_dict = yaml.load(f, Loader=yaml.FullLoader)
  return ParamsDict(params_dict)
def save_params_dict_to_yaml(params, file_path):
  """Serializes `params` (a ParamsDict) as YAML to the file at `file_path`."""
  with tf.io.gfile.GFile(file_path, 'w') as f:

    def _flow_style_seq(dumper, data):
      # u'tag:yaml.org,2002:seq' is the YAML internal tag for sequence.
      return dumper.represent_sequence(
          u'tag:yaml.org,2002:seq', data, flow_style=True)

    # Emit lists inline ([a, b, c]) while keeping mappings in block style.
    yaml.add_representer(list, _flow_style_seq)
    yaml.dump(params.as_dict(), f, default_flow_style=False)
def nested_csv_str_to_json_str(csv_str):
  """Converts a nested (using '.') comma-separated k=v string to a JSON string.

  Converts a comma-separated string of key/value pairs that supports
  nesting of keys to a JSON string. Nesting is implemented using
  '.' between levels for a given key.

  Spacing between commas and = is supported (e.g. there is no difference
  between "a=1,b=2", "a = 1, b = 2", or "a=1, b=2") but there should be no
  spaces before keys or after values (e.g. " a=1,b=2" and "a=1,b=2 " are
  not supported).

  Note that this will only support values supported by CSV, meaning
  values such as nested lists (e.g. "a=[[1,2,3],[4,5,6]]") are not
  supported. Strings are supported as well, e.g. "a='hello'".

  An example conversion would be:
  "a=1, b=2, c.a=2, c.b=3, d.a.a=5"
  to
  "{ a: 1, b : 2, c: {a : 2, b : 3}, d: {a: {a : 5}}}"

  Args:
    csv_str: the comma separated string.

  Returns:
    the converted JSON string.

  Raises:
    ValueError: If csv_str is not in a comma separated string or
      if the string is formatted incorrectly.
  """
  if not csv_str:
    return ''

  # Pattern for one "key = value" entry (compiled locally so the function is
  # self-contained; the `re` module caches compiled patterns, so repeated
  # calls are still cheap).
  param_re = re.compile(r"""
    (?P<name>[a-zA-Z][\w\.]*)    # variable name: "var" or "x"
    \s*=\s*
    ((?P<val>\'(.*?)\'           # single quote
    |
    \"(.*?)\"                    # double quote
    |
    [^,\[]*                      # single value
    |
    \[[^\]]*\]))                 # list of values
    ($|,\s*)""", re.VERBOSE)

  formatted_entries = []
  nested_map = collections.defaultdict(list)
  pos = 0
  while pos < len(csv_str):
    m = param_re.match(csv_str, pos)
    if not m:
      raise ValueError('Malformed hyperparameter value while parsing '
                       'CSV string: %s' % csv_str[pos:])
    pos = m.end()

    # Parse the values.
    m_dict = m.groupdict()
    name = m_dict['name']
    v = m_dict['val']

    # If an unquoted GCS path (gs://...) is provided, wrap it in quotes as
    # yaml.load would otherwise throw an exception on the embedded "://".
    # BUG FIX: the original regex r'(?=[^\"\'])(?=[gs://])' used a character
    # class, so ANY unquoted value starting with g, s, ':' or '/' was quoted.
    if not v.startswith(("'", '"')) and v.startswith('gs://'):
      v = '\'{}\''.format(v)

    name_nested = name.split('.')
    if len(name_nested) > 1:
      # Dotted key: queue "rest=value" under the top-level grouping and
      # convert recursively below.
      grouping = name_nested[0]
      value = '.'.join(name_nested[1:]) + '=' + v
      nested_map[grouping].append(value)
    else:
      formatted_entries.append('%s : %s' % (name, v))

  for grouping, value in nested_map.items():
    value = ','.join(value)
    value = nested_csv_str_to_json_str(value)
    formatted_entries.append('%s : %s' % (grouping, value))
  return '{' + ', '.join(formatted_entries) + '}'
def override_params_dict(params, dict_or_string_or_yaml_file, is_strict):
"""Override a given ParamsDict using a dict, JSON/YAML/CSV string or YAML file.
The logic of the function is outlined below:
1. Test that the input is a dict. If not, proceed to 2.
2. Tests that the input is a string. If not, raise unknown ValueError
2.1. Test if the string is in a CSV format. If so, parse.
If not, proceed to 2.2.
2.2. Try loading the string as a YAML/JSON. If successful, parse to
dict and use it to override. If not, proceed to 2.3.
2.3. Try using the string as a file path and load the YAML file.
Args:
params: a ParamsDict object to be overridden.
dict_or_string_or_yaml_file: a Python dict, JSON/YAML/CSV string or
path to a YAML file specifying the parameters to be overridden.
is_strict: a boolean specifying whether override is strict or not.
Returns:
params: the overridden ParamsDict object.
Raises:
ValueError: if failed to override the parameters.
"""
if not dict_or_string_or_yaml_file:
return params
if isinstance(dict_or_string_or_yaml_file, dict):
params.override(dict_or_string_or_yaml_file, is_strict)
elif isinstance(dict_or_string_or_yaml_file, six.string_types):
try:
dict_or_string_or_yaml_file = (
nested_csv_str_to_json_str(dict_or_string_or_yaml_file))
except ValueError:
pass
params_dict = yaml.load(dict_or_string_or_yaml_file)
if isinstance(params_dict, dict):
params.override(params_dict, is_strict)
else:
with tf.io.gfile.GFile(dict_or_string_or_yaml_file) as f:
params.override(yaml.load(f), is_strict)
else:
raise ValueError('Unknown input type to parse.')
return params
| |
# Copyright (C) 2014 Sereina Riniker
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Torsion Fingerprints (Deviation) (TFD)
According to a paper from Schulz-Gasch et al., JCIM, 52, 1499-1512 (2012).
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import Geometry
from rdkit import Chem
from rdkit.Chem import rdchem
from rdkit.Chem import rdMolDescriptors
import math, os
def _doMatch(inv, atoms):
""" Helper function to check if all atoms in the list are the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] != inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doNotMatch(inv, atoms):
""" Helper function to check if all atoms in the list are NOT the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] == inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doMatchExcept1(inv, atoms):
""" Helper function to check if two atoms in the list are the same,
and one not
Note: Works only for three atoms
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: atom that is different
"""
if len(atoms) != 3:
raise ValueError("Number of atoms must be three")
a1 = atoms[0].GetIdx()
a2 = atoms[1].GetIdx()
a3 = atoms[2].GetIdx()
if (inv[a1] == inv[a2] and inv[a1] != inv[a3] and inv[a2] != inv[a3]):
return atoms[2]
elif (inv[a1] != inv[a2] and inv[a1] == inv[a3] and inv[a2] != inv[a3]):
return atoms[1]
elif (inv[a1] != inv[a2] and inv[a1] != inv[a3] and inv[a2] == inv[a3]):
return atoms[0]
return None
def _getAtomInvariantsWithRadius(mol, radius):
  """ Helper function to calculate the atom invariants for each atom
      with a given radius

      Arguments:
      - mol:    the molecule of interest
      - radius: the radius for the Morgan fingerprint

      Return: list of atom invariants
  """
  inv = []
  for i in range(mol.GetNumAtoms()):
    info = {}
    # Per-atom Morgan fingerprint; `info` maps each bit to (atom, radius)
    # tuples describing the environment that set it.
    fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info)
    for k in info.keys():
      # keep only the bit whose environment has exactly the requested
      # radius; that bit id serves as the invariant for atom i
      if info[k][0][1] == radius:
        inv.append(k)
  return inv
def _getHeavyAtomNeighbors(atom1, aid2=-1):
""" Helper function to calculate the number of heavy atom neighbors.
Arguments:
- atom1: the atom of interest
- aid2: atom index that should be excluded from neighbors (default: none)
Return: a list of heavy atom neighbors of the given atom
"""
if aid2 < 0:
return [n for n in atom1.GetNeighbors() if n.GetSymbol()!='H']
else:
return [n for n in atom1.GetNeighbors() if (n.GetSymbol()!='H' and n.GetIdx()!=aid2)]
def _getIndexforTorsion(neighbors, inv):
  """ Helper function to select the reference atom(s) for a torsion
      around a given atom.

      Arguments:
      - neighbors: list of the neighbors of the atom
      - inv:       atom invariants

      Return: list of atoms used as reference for the torsion
  """
  if len(neighbors) == 1:
    # only one neighbor: no choice to make
    return [neighbors[0]]
  if _doMatch(inv, neighbors):
    # all neighbors are symmetry-equivalent: keep all of them
    return neighbors
  if _doNotMatch(inv, neighbors):
    # all neighbors different: the first one is as good as any
    return [neighbors[0]]
  # two neighbors equivalent and one different: use the odd one out
  ref = _doMatchExcept1(inv, neighbors)
  if ref is None:
    raise ValueError("Atom neighbors are either all the same or all different")
  return [ref]
def _getBondsForTorsions(mol, ignoreColinearBonds):
  """ Determine the bonds (or pair of atoms treated like a bond) for which
      torsions should be calculated.

      Arguments:
      - mol: the molecule of interest
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of tuples (begin atom idx, end atom idx, heavy-atom
              neighbors of the begin atom, heavy-atom neighbors of the
              end atom)
  """
  # flag the atoms that cannot be part of the centre atoms of a torsion
  # patterns: triple bonds and allenes
  patts = [Chem.MolFromSmarts(x) for x in ['*#*', '[$([C](=*)=*)]']]
  atomFlags = [0]*mol.GetNumAtoms()
  for p in patts:
    if mol.HasSubstructMatch(p):
      matches = mol.GetSubstructMatches(p)
      for match in matches:
        for a in match:
          atomFlags[a] = 1
  bonds = []
  doneBonds = [0]*mol.GetNumBonds()
  for b in mol.GetBonds():
    # ring bonds are treated separately (ring torsions)
    if b.IsInRing(): continue
    a1 = b.GetBeginAtomIdx()
    a2 = b.GetEndAtomIdx()
    nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2)
    nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1)
    if not doneBonds[b.GetIdx()] and (nb1 and nb2):  # no terminal bonds
      doneBonds[b.GetIdx()] = 1;
      # check if atoms cannot be middle atoms
      if atomFlags[a1] or atomFlags[a2]:
        if not ignoreColinearBonds:  # search for alternative not-covalently bound atoms
          # walk outwards from a1 along the colinear (flagged) chain until a
          # branching or non-colinear atom is found; every traversed bond is
          # marked done so it is not considered again
          while len(nb1)==1 and atomFlags[a1]:
            a1old = a1
            a1 = nb1[0].GetIdx()
            b = mol.GetBondBetweenAtoms(a1old, a1)
            # pick the neighbor list of the *far* end of the traversed bond
            if b.GetEndAtom().GetIdx() == a1old:
              nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a1old)
            else:
              nb1 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1old)
            doneBonds[b.GetIdx()] = 1;
          # same outward walk for the other end of the bond (a2)
          while len(nb2)==1 and atomFlags[a2]:
            doneBonds[b.GetIdx()] = 1;
            a2old = a2
            a2 = nb2[0].GetIdx()
            b = mol.GetBondBetweenAtoms(a2old, a2)
            if b.GetBeginAtom().GetIdx() == a2old:
              nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a2old)
            else:
              nb2 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2old)
            doneBonds[b.GetIdx()] = 1;
          if nb1 and nb2:
            bonds.append((a1, a2, nb1, nb2))
        # with ignoreColinearBonds=True the bond is skipped entirely
      else:
        bonds.append((a1, a2, nb1, nb2))
  return bonds
def CalculateTorsionLists(mol, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Calculate a list of torsions for a given molecule. For each torsion
      the four atom indices are determined and stored in a set.

      Arguments:
      - mol:     the molecule of interest
      - maxDev:  maximal deviation used for normalization
                 'equal': all torsions are normalized using 180.0 (default)
                 'spec':  each torsion is normalized using its specific
                          maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: two lists of torsions: non-ring and ring torsions; each entry
              is a tuple (list of 4-tuples of atom indices, max. deviation)
  """
  if maxDev not in ['equal', 'spec']:
    raise ValueError("maxDev must be either equal or spec")
  # get non-terminal, non-cyclic bonds
  bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
  # get atom invariants (used to detect symmetry-equivalent neighbors)
  if symmRadius > 0:
    inv = _getAtomInvariantsWithRadius(mol, symmRadius)
  else:
    inv = rdMolDescriptors.GetConnectivityInvariants(mol)
  # get the torsions; the "case" numbers refer to the classification in the
  # Schulz-Gasch et al. paper, the second tuple element is the maximal
  # possible deviation for that torsion type
  tors_list = []  # to store the atom indices of the torsions
  for a1, a2, nb1, nb2 in bonds:
    d1 = _getIndexforTorsion(nb1, inv)
    d2 = _getIndexforTorsion(nb2, inv)
    if len(d1) == 1 and len(d2) == 1:  # case 1, 2, 4, 5, 7, 10, 16, 12, 17, 19
      tors_list.append(([(d1[0].GetIdx(), a1, a2, d2[0].GetIdx())], 180.0))
    elif len(d1) == 1:  # case 3, 6, 8, 13, 20
      if len(nb2) == 2:  # two neighbors
        tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 90.0))
      else:  # three neighbors
        tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 60.0))
    elif len(d2) == 1:  # case 3, 6, 8, 13, 20
      if len(nb1) == 2:  # two neighbors
        tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 90.0))
      else:  # three neighbors
        tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 60.0))
    else:  # both ends have symmetric neighbors: all combinations
      tmp = []
      for n1 in d1:
        for n2 in d2:
          tmp.append((n1.GetIdx(), a1, a2, n2.GetIdx()))
      if len(nb1) == 2 and len(nb2) == 2:  # case 9
        tors_list.append((tmp, 90.0))
      elif len(nb1) == 3 and len(nb2) == 3:  # case 21
        tors_list.append((tmp, 60.0))
      else:  # case 15
        tors_list.append((tmp, 30.0))
  # maximal possible deviation for non-cyclic bonds
  if maxDev == 'equal':
    tors_list = [(t, 180.0) for t, d in tors_list]
  # rings: one torsion per set of four consecutive ring atoms
  rings = Chem.GetSymmSSSR(mol)
  tors_list_rings = []
  for r in rings:
    # get the torsions
    tmp = []
    num = len(r)
    # ring-size-dependent maximal deviation (formula from the paper)
    maxdev = 180.0 * math.exp(-0.025*(num-14)*(num-14))
    for i in range(len(r)):
      tmp.append((r[i], r[(i+1)%num], r[(i+2)%num], r[(i+3)%num]))
    tors_list_rings.append((tmp, maxdev))
  return tors_list, tors_list_rings
def _getTorsionAtomPositions(atoms, conf):
""" Helper function to retrieve the coordinates of the four atoms
in a torsion
Arguments:
- atoms: list with the four atoms
- conf: conformation of the molecule
Return: Point3D objects of the four atoms
"""
if len(atoms) != 4:
raise ValueError("List must contain exactly four atoms")
p1 = conf.GetAtomPosition(atoms[0])
p2 = conf.GetAtomPosition(atoms[1])
p3 = conf.GetAtomPosition(atoms[2])
p4 = conf.GetAtomPosition(atoms[3])
return p1, p2, p3, p4
def CalculateTorsionAngles(mol, tors_list, tors_list_rings, confId=-1):
  """ Calculate the torsion angles for a list of non-ring and
      a list of ring torsions.

      Arguments:
      - mol:             the molecule of interest
      - tors_list:       list of non-ring torsions
      - tors_list_rings: list of ring torsions
      - confId:          index of the conformation (default: first conformer)

      Return: list of (torsion angle in degrees, max. deviation) tuples
  """
  torsions = []
  conf = mol.GetConformer(confId)
  for t, maxdev in tors_list:
    if len(t) == 1:
      # single torsion defined for this bond
      t = t[0]
      p1, p2, p3, p4 = _getTorsionAtomPositions(t, conf)
      # radians -> degrees
      tors = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
      if tors < 0: tors += 360.0  # angle between 0 and 360
    else:
      # symmetric neighbors: loop over torsions and take minimum
      tors = 360.0
      for t2 in t:
        p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
        tmp = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
        if tmp < 0: tmp += 360.0  # angle between 0 and 360
        if tmp < tors: tors = tmp
    torsions.append((tors, maxdev))
  # rings: average of the absolute torsion angles around the ring
  for t, maxdev in tors_list_rings:
    num = len(t)
    # loop over torsions and sum them up
    tors = 0
    for t2 in t:
      p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
      tmp = abs((Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0)
      tors += tmp
    tors /= num
    torsions.append((tors, maxdev))
  return torsions
def _findCentralBond(mol, distmat):
  """ Helper function to identify the atoms of the most central bond.

      Arguments:
      - mol:     the molecule of interest
      - distmat: distance matrix of the molecule

      Return: atom indices of the two most central atoms (in order)
  """
  from numpy import std
  # get the most central atom = atom with the least STD of shortest distances
  stds = []
  for i in range(mol.GetNumAtoms()):
    # only consider non-terminal atoms
    if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2: continue
    tmp = [d for d in distmat[i]]
    tmp.pop(i)  # drop the (zero) distance of the atom to itself
    stds.append((std(tmp), i))
  # sorting (std, index) tuples puts the most central atom first; ties are
  # broken by the lower atom index
  stds.sort()
  aid1 = stds[0][1]
  # find the most central remaining atom that is actually bonded to aid1
  i = 1
  while 1:
    if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
      i += 1
    else:
      aid2 = stds[i][1]
      break
  return aid1, aid2  # most central atom comes first
def _calculateBeta(mol, distmat, aid1):
  """ Helper function to calculate the beta for the torsion weights
      according to the formula in the paper:
      w(dmax/2) = 0.1

      Arguments:
      - mol:     the molecule of interest
      - distmat: distance matrix of the molecule
      - aid1:    atom index of the most central atom

      Return: value of beta (float)
  """
  # get all non-terminal bonds (both ends need more than one heavy neighbor)
  bonds = []
  for b in mol.GetBonds():
    nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
    nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
    # BUG FIX: the original tested len(nb2) twice, so nb1 was never checked
    # and bonds with a terminal begin atom were wrongly kept.
    if len(nb1) > 1 and len(nb2) > 1:
      bonds.append(b)
  # largest topological distance from aid1 to any non-terminal bond
  dmax = 0
  for b in bonds:
    bid1 = b.GetBeginAtom().GetIdx()
    bid2 = b.GetEndAtom().GetIdx()
    d = max([distmat[aid1][bid1], distmat[aid1][bid2]])
    if (d > dmax): dmax = d
  # solve w(dmax/2) = exp(-beta*(dmax/2)^2) = 0.1 for beta
  dmax2 = dmax/2.0
  beta = -math.log(0.1)/(dmax2*dmax2)
  return beta
def CalculateTorsionWeights(mol, aid1=-1, aid2=-1, ignoreColinearBonds=True):
  """ Calculate the weights for the torsions in a molecule.
      By default, the highest weight is given to the bond
      connecting the two most central atoms.
      If desired, two alternate atoms can be specified (must
      be connected by a bond).

      Arguments:
      - mol:  the molecule of interest
      - aid1: index of the first atom (default: most central)
      - aid2: index of the second atom (default: second most central)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of torsion weights (both non-ring and ring)
  """
  # get distance matrix
  distmat = Chem.GetDistanceMatrix(mol)
  if aid1 < 0 and aid2 < 0:
    aid1, aid2 = _findCentralBond(mol, distmat)
  else:
    b = mol.GetBondBetweenAtoms(aid1, aid2)
    if b is None:
      raise ValueError("Specified atoms must be connected by a bond.")
  # calculate beta according to the formula in the paper
  beta = _calculateBeta(mol, distmat, aid1)
  # get non-terminal, non-cyclic bonds
  bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
  # weight each torsion bond by its distance to the central bond:
  # w = exp(-beta * d^2)
  weights = []
  for bid1, bid2, nb1, nb2 in bonds:
    if ((bid1, bid2) == (aid1, aid2)
        or (bid2, bid1) == (aid1, aid2)):  # if it's the most central bond itself
      d = 0
    else:
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
    w = math.exp(-beta*(d*d))
    weights.append(w)
  ## RINGS
  rings = mol.GetRingInfo()
  for r in rings.BondRings():
    # get shortest distances
    tmp = []
    num = len(r)
    for bidx in r:
      b = mol.GetBondWithIdx(bidx)
      bid1 = b.GetBeginAtomIdx()
      bid2 = b.GetEndAtomIdx()
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
      tmp.append(d)
    # calculate weights and append to list
    # Note: the description in the paper is not very clear, the following
    # formula was found to give the same weights as shown in Fig. 1
    # For a ring of size N: w = N/2 * exp(-beta*(sum(d of each bond in ring)/N)^2)
    w = sum(tmp)/float(num)
    w = math.exp(-beta*(w*w))
    weights.append(w*(num/2.0))
  return weights
def CalculateTFD(torsions1, torsions2, weights=None):
  """ Calculate the torsion deviation fingerprint (TFD) given two lists of
      torsion angles.

      Arguments:
      - torsions1: torsion angles of conformation 1
      - torsions2: torsion angles of conformation 2
      - weights:   list of torsion weights (default: None)

      Return: TFD value (float)
  """
  if len(torsions1) != len(torsions2):
    raise ValueError("List of torsions angles must have the same size.")
  # per-torsion absolute deviation, direction-independent (at most 180
  # degrees), normalized by the torsion's maximal possible deviation
  deviations = []
  for (angle1, maxdev), (angle2, _) in zip(torsions1, torsions2):
    delta = abs(angle1 - angle2)
    delta = min(delta, 360.0 - delta)  # we do not care about direction
    deviations.append(delta / maxdev)
  # do we use weights?
  if weights is not None:
    if len(weights) != len(torsions1):
      raise ValueError("List of torsions angles and weights must have the same size.")
    deviations = [dev * w for dev, w in zip(deviations, weights)]
    norm = sum(weights)
  else:
    norm = len(deviations)
  tfd = sum(deviations)
  if norm != 0:  # avoid division by zero
    tfd /= norm
  return tfd
def _getSameAtomOrder(mol1, mol2):
  """ Generate a new molecule with the atom order of mol1 and coordinates
      from mol2.

      Arguments:
      - mol1: first instance of the molecule of interest
      - mol2: second instance the molecule of interest

      Return: RDKit molecule
  """
  # map mol1's atom order onto mol2 via a substructure match
  match = mol2.GetSubstructMatch(mol1)
  atomNums = tuple(range(mol1.GetNumAtoms()))
  if match != atomNums:  # atom orders are not the same!
    # copy mol1's topology and transfer every conformer of mol2, permuting
    # the coordinates according to the match
    mol3 = Chem.Mol(mol1)
    mol3.RemoveAllConformers()
    for conf2 in mol2.GetConformers():
      confId = conf2.GetId()
      conf = rdchem.Conformer(mol1.GetNumAtoms())
      conf.SetId(confId)
      for i in range(mol1.GetNumAtoms()):
        conf.SetAtomPosition(i, mol2.GetConformer(confId).GetAtomPosition(match[i]))
      cid = mol3.AddConformer(conf)
    return mol3
  else:
    # atom orders already agree: a plain copy of mol2 is enough
    return Chem.Mol(mol2)
# some wrapper functions
def GetTFDBetweenConformers(mol, confIds1, confIds2, useWeights=True, maxDev='equal',
                            symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two list of conformers
      of a molecule

      Arguments:
      - mol:      the molecule of interest
      - confIds1: first list of conformer indices
      - confIds2: second list of conformer indices
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev:   maximal deviation used for normalization
                  'equal': all torsions are normalized using 180.0 (default)
                  'spec':  each torsion is normalized using its specific
                           maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of TFD values
  """
  tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles1 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds1]
  angles2 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds2]
  # weights=None makes CalculateTFD behave exactly like the unweighted call
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
  return [CalculateTFD(a1, a2, weights=weights)
          for a1 in angles1 for a2 in angles2]
def GetTFDBetweenMolecules(mol1, mol2, confId1=-1, confId2=-1, useWeights=True, maxDev='equal',
                           symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two molecules.
      Important: The two molecules must be instances of the same molecule

      Arguments:
      - mol1:    first instance of the molecule of interest
      - mol2:    second instance the molecule of interest
      - confId1: conformer index for mol1 (default: first conformer)
      - confId2: conformer index for mol2 (default: first conformer)
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev:  maximal deviation used for normalization
                 'equal': all torsions are normalized using 180.0 (default)
                 'spec':  each torsion is normalized using its specific
                          maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: TFD value
  """
  # identity check via canonical SMILES
  if (Chem.MolToSmiles(mol1) != Chem.MolToSmiles(mol2)):
    raise ValueError("The two molecules must be instances of the same molecule!")
  # make mol2's atom order (and conformer coordinates) match mol1's
  mol2 = _getSameAtomOrder(mol1, mol2)
  tl, tlr = CalculateTorsionLists(mol1, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles1 = CalculateTorsionAngles(mol1, tl, tlr, confId=confId1)
  angles2 = CalculateTorsionAngles(mol2, tl, tlr, confId=confId2)
  # weights=None makes CalculateTFD behave exactly like the unweighted call
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol1, ignoreColinearBonds=ignoreColinearBonds)
  return CalculateTFD(angles1, angles2, weights=weights)
def GetTFDMatrix(mol, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
    """ Wrapper to calculate the matrix of TFD values for the
    conformers of a molecule.

    Arguments:
    - mol: the molecule of interest
    - useWeights: flag for using torsion weights in the TFD calculation
    - maxDev: maximal deviation used for normalization
        'equal': all torsions are normalized using 180.0 (default)
        'spec': each torsion is normalized using its specific
        maximal deviation as given in the paper
    - symmRadius: radius used for calculating the atom invariants (default: 2)
    - ignoreColinearBonds: if True (default), single bonds adjacent to
        triple bonds are ignored; if False, alternative not-covalently
        bound atoms are used to define the torsion

    Return: matrix of TFD values
    Note that the returned matrix is symmetrical, i.e. it is the
    lower half of the matrix, e.g. for 5 conformers:
    matrix = [ a,
               b, c,
               d, e, f,
               g, h, i, j]
    """
    torsionList, ringTorsionList = CalculateTorsionLists(
        mol, maxDev=maxDev, symmRadius=symmRadius, ignoreColinearBonds=ignoreColinearBonds)
    # Pre-compute the torsion angles once per conformer.
    angles = [CalculateTorsionAngles(mol, torsionList, ringTorsionList, confId=conf.GetId())
              for conf in mol.GetConformers()]
    # Optional weights are passed through as a keyword argument.
    extraArgs = {}
    if useWeights:
        extraArgs['weights'] = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
    numConformers = mol.GetNumConformers()
    # Lower-triangular (strict) pairwise TFD values, row by row.
    return [CalculateTFD(angles[i], angles[j], **extraArgs)
            for i in range(numConformers) for j in range(i)]
##
#
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.record import fromTable, Record
from twext.enterprise.dal.syntax import Select, Insert, Delete, Parameter
from twext.enterprise.locking import NamedLock
from twext.enterprise.jobqueue import WorkItem, WORK_PRIORITY_MEDIUM, JobItem, \
WORK_WEIGHT_5, JobTemporaryError
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, \
succeed
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserUID
from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
from txdav.caldav.icalendarstore import ComponentUpdateState
from txdav.common.datastore.sql_tables import schema, \
scheduleActionToSQL, scheduleActionFromSQL
import collections
import datetime
import hashlib
import traceback
# Public API of this module: the scheduling work item classes that other
# modules enqueue. (ScheduleOrganizerSendWork is intentionally internal -
# it is created only by ScheduleOrganizerWork.)
__all__ = [
    "ScheduleOrganizerWork",
    "ScheduleReplyWork",
    "ScheduleRefreshWork",
    "ScheduleAutoReplyWork",
]

# Module-level logger shared by all work item classes below.
log = Logger()
class ScheduleWorkMixin(WorkItem):
    """
    Base class for common schedule work item behavior. Sub-classes have their own class specific data
    stored in per-class tables. This class manages a SCHEDULE_WORK table that contains the work id, job id
    and iCalendar UID. That table is used for locking all scheduling items with the same UID, as well as
    allow smart re-scheduling/ordering etc of items with the same UID.
    """

    # Track when all work is complete (needed for unit tests)
    _allDoneCallback = None
    _queued = 0

    # Schedule work is grouped based on calendar object UID
    default_priority = WORK_PRIORITY_MEDIUM
    default_weight = WORK_WEIGHT_5

    @classmethod
    @inlineCallbacks
    def create(cls, transaction, **kwargs):
        """
        A new work item needs to be created. First we create a SCHEDULE_WORK record, then
        we create the actual work item.

        @param transaction: the transaction to use
        @type transaction: L{IAsyncTransaction}
        """
        # The shared base record carries the job id, iCalendar UID and work
        # type; the sub-class specific record is linked to it via workID.
        baseargs = {
            "jobID": kwargs.pop("jobID"),
            "icalendarUID": kwargs.pop("icalendarUID"),
            "workType": cls.workType()
        }
        baseWork = yield ScheduleWork.create(transaction, **baseargs)
        kwargs["workID"] = baseWork.workID
        work = yield super(ScheduleWorkMixin, cls).create(transaction, **kwargs)
        work.addBaseWork(baseWork)
        returnValue(work)

    @classmethod
    @inlineCallbacks
    def loadForJob(cls, txn, jobID):
        """
        Load all work items of this class associated with the given job id,
        removing any orphaned L{ScheduleWork} rows found along the way.

        @param txn: the transaction to use
        @param jobID: id of the job whose work items are wanted
        @return: a L{Deferred} firing with a L{list} of work items
        """
        baseItems = yield ScheduleWork.query(txn, (ScheduleWork.jobID == jobID))
        workItems = []
        for baseItem in baseItems:
            workItem = yield cls.query(txn, (cls.workID == baseItem.workID))
            if len(workItem) == 0:
                # This can happen if a cascade delete is done on the actual work item - that will not
                # remove the corresponding L{JobItem} or L{ScheduleWork}
                yield baseItem.delete()
                continue
            workItem[0].addBaseWork(baseItem)
            workItems.append(workItem[0])
        returnValue(workItems)

    @inlineCallbacks
    def runlock(self):
        """
        Lock the "group" which is all the base items with the same UID. Also make sure
        to lock this item after.

        @return: an L{Deferred} that fires with L{True} if the L{WorkItem} was locked,
            L{False} if not.
        @rtype: L{Deferred}
        """
        # Do the group lock first since this can impact multiple rows and thus could
        # cause deadlocks if done in the wrong order

        # Row level lock on this item
        locked = yield self.baseWork.trylock(ScheduleWork.icalendarUID == self.icalendarUID)
        if locked:
            yield self.trylock()
        returnValue(locked)

    def addBaseWork(self, baseWork):
        """
        Add the base work fields into the sub-classes as non-record attributes.

        @param baseWork: the base work item to add
        @type baseWork: L{ScheduleWork}
        """
        # Assigned through __dict__ so the values bypass the Record column
        # machinery - they are not columns of the sub-class table.
        self.__dict__["baseWork"] = baseWork
        self.__dict__["jobID"] = baseWork.jobID
        self.__dict__["icalendarUID"] = baseWork.icalendarUID

    def delete(self):
        """
        Delete the base work item which will delete this one via cascade.

        @return: a L{Deferred} which fires with C{None} when the underlying row
            has been deleted, or fails with L{NoSuchRecord} if the underlying
            row was already deleted.
        """
        return self.baseWork.delete()

    @classmethod
    @inlineCallbacks
    def hasWork(cls, txn):
        """
        Return a L{Deferred} firing with L{True} if any rows exist in this
        class's work table, L{False} otherwise.
        """
        sch = cls.table
        rows = (yield Select(
            (sch.WORK_ID,),
            From=sch,
        ).on(txn))
        returnValue(len(rows) > 0)

    @inlineCallbacks
    def afterWork(self):
        """
        A hook that gets called after the L{WorkItem} does its real work. This can be used
        for common clean-up behaviors. The base implementation does nothing.
        """
        yield super(ScheduleWorkMixin, self).afterWork()

        # Find the next item and schedule to run immediately after this.
        # We only coalesce ScheduleOrganizerSendWork.
        if self.workType() == ScheduleOrganizerSendWork.workType():
            all = yield self.baseWork.query(
                self.transaction,
                (ScheduleWork.icalendarUID == self.icalendarUID).And(ScheduleWork.workID != self.workID),
                order=ScheduleWork.workID,
                limit=1,
            )
            if all:
                work = all[0]
                if work.workType == self.workType():
                    # Promote the next send work item to run right away by
                    # resetting its job's notBefore to "now".
                    job = yield JobItem.load(self.transaction, work.jobID)
                    yield job.update(notBefore=datetime.datetime.utcnow())
                    log.debug("ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'", id=work.workID, uid=self.icalendarUID)

    @classmethod
    def allDone(cls):
        """
        Return a L{Deferred} that fires when the enqueued-item counter next
        drains to zero (used by unit tests to wait for all schedule work).
        Resets the counter as a side effect.
        """
        d = Deferred()
        cls._allDoneCallback = d.callback
        cls._queued = 0
        return d

    @classmethod
    def _enqueued(cls):
        """
        Called when a new item is enqueued - using for tracking purposes.
        """
        ScheduleWorkMixin._queued += 1

    def _dequeued(self):
        """
        Called when an item is dequeued - using for tracking purposes. We call
        the callback when the last item is dequeued.
        """
        ScheduleWorkMixin._queued -= 1
        if ScheduleWorkMixin._queued == 0:
            if ScheduleWorkMixin._allDoneCallback:
                # Fire the callback only after the transaction commits, so
                # test waiters observe fully persisted state.
                def _post():
                    ScheduleWorkMixin._allDoneCallback(None)
                    ScheduleWorkMixin._allDoneCallback = None
                self.transaction.postCommit(_post)

    def serializeWithAncillaryData(self):
        """
        Include the ancillary data in the serialized result.

        @return: mapping of attribute to string values
        @rtype: L{Deferred} returning an L{dict} of L{str}:L{str}
        """
        # Base implementation has no ancillary data beyond serialize().
        return succeed(self.serialize())

    def extractSchedulingResponse(self, queuedResponses):
        """
        Extract a list of (recipient, status) pairs from a scheduling response, returning that list
        and an indicator of whether any have a schedule status other than delivered.

        @param queuedResponses: the scheduling response object
        @type queuedResponses: L{list} of L{caldavxml.ScheduleResponse}

        @return: a L{tuple} of the list and the status state
        @rtype: L{tuple} of (L{list}, L{bool})
        """
        # Map each recipient in the response to a status code
        results = []
        all_delivered = True
        for response in queuedResponses:
            for item in response.responses:
                recipient = str(item.recipient.children[0])
                status = str(item.reqstatus)
                # Only the numeric code before the first ";" matters here
                statusCode = status.split(";")[0]
                results.append((recipient, statusCode,))

                # Anything other than the "delivered" code means at least one
                # recipient needs follow-up handling
                if statusCode != iTIPRequestStatus.MESSAGE_DELIVERED_CODE:
                    all_delivered = False

        return results, all_delivered

    def handleSchedulingResponse(self, response, calendar, is_organizer):
        """
        Update a user's calendar object resource based on the results of a queued scheduling
        message response. Note we only need to update in the case where there is an error response
        as we will already have updated the calendar object resource to make it look like scheduling
        worked prior to the work queue item being enqueued.

        @param response: the scheduling response summary data
        @type response: L{list} of L{tuple} of (L{str} - recipient, L{str} - status)
        @param calendar: original calendar component
        @type calendar: L{Component}
        @param is_organizer: whether or not iTIP message was sent by the organizer
        @type is_organizer: C{bool}

        @return: C{True} if the calendar component was modified
        @rtype: C{bool}
        """
        # Map each recipient in the response to a status code
        changed = False
        recipients = collections.defaultdict(list)
        # Organizer messages target ATTENDEE properties; attendee messages
        # target the ORGANIZER property.
        for p in calendar.getAllAttendeeProperties() if is_organizer else calendar.getOrganizerProperties():
            recipients[p.value()].append(p)

        for recipient, statusCode in response:
            # Now apply to each ATTENDEE/ORGANIZER in the original data only if not 1.2 (delivered)
            if statusCode != iTIPRequestStatus.MESSAGE_DELIVERED_CODE:
                for p in recipients[recipient]:
                    p.setParameter("SCHEDULE-STATUS", statusCode)
                    changed = True

        return changed

    @inlineCallbacks
    def checkTemporaryFailure(self, results):
        """
        Check to see whether whether a temporary failure should be raised as opposed to continuing on with a permanent failure.

        @param results: set of results gathered in L{extractSchedulingResponse}
        @type results: L{list}

        @raise JobTemporaryError: when all recipients are pending and the
            retry limit has not yet been reached.
        """
        # Only treat as temporary when *every* recipient is still pending
        if all([result[1] == iTIPRequestStatus.MESSAGE_PENDING_CODE for result in results]):
            job = yield JobItem.load(self.transaction, self.jobID)
            if job.failed >= config.Scheduling.Options.WorkQueues.MaxTemporaryFailures:
                # Retries exhausted - rewrite results to SERVICE_UNAVAILABLE
                # so the caller records a permanent error status
                for ctr, result in enumerate(results):
                    results[ctr] = (result[0], iTIPRequestStatus.SERVICE_UNAVAILABLE_CODE,)
                returnValue(None)
            else:
                raise JobTemporaryError(config.Scheduling.Options.WorkQueues.TemporaryFailureDelay)
class ScheduleWork(Record, fromTable(schema.SCHEDULE_WORK)):
    """
    @DynamicAttrs
    A L{Record} based table whose rows are used for locking scheduling work by iCalendar UID value.
    as well as helping to determine the next work for a particular UID.
    """

    # Maps each work-type string to its work item class. NOTE(review): no
    # code in this module visibly populates the map - presumably sub-classes
    # register themselves elsewhere; confirm before relying on it.
    _classForWorkType = {}

    @classmethod
    def jobIDsQueryJoin(cls, homeID, other):
        """
        Build a query that selects the job ids of all schedule work rows whose
        sub-class record (joined on work id) belongs to the given home.

        @param homeID: home resource id to match against
        @param other: the sub-class Record whose table carries HOME_RESOURCE_ID
        """
        return Select(
            [cls.jobID, ],
            From=cls.table.join(other.table, on=(cls.workID == other.workID)),
            Where=other.homeResourceID == homeID,
        )

    @classmethod
    def classForWorkType(cls, workType):
        # Returns None for unknown work types.
        return cls._classForWorkType.get(workType)

    def migrate(self, mapIDsCallback):
        """
        Abstract API that must be implemented by each sub-class. This method will take a record, and replace
        the references to the home and any object resource id with those determined from the callback, and then
        will create new job/work items for the record. This is used for cross-pod migration of work items.

        @param mapIDsCallback: a callback that returns a tuple of the new home id and new resource id
        """
        raise NotImplementedError
class ScheduleOrganizerWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_ORGANIZER_WORK)):
    """
    @DynamicAttrs
    The associated work item table is SCHEDULE_ORGANIZER_WORK.

    This work item is used to generate a set of L{ScheduleOrganizerSendWork} work items for
    each set of iTIP messages that need to be sent as the result of an organizer changing
    their copy of the event.
    """

    @classmethod
    @inlineCallbacks
    def schedule(cls, txn, uid, action, home, resource, calendar_old, calendar_new, organizer, attendee_count, smart_merge, pause=0):
        """
        The actual arguments depend on the action:

        1) If action is "create", resource is None, calendar_old is None, calendar_new is the new data
        2) If action is "modify", resource is existing resource, calendar_old is the old calendar_old data, and
           calendar_new is the new data
        3) If action is "remove", resource is the existing resource, calendar_old is the old calendar_old data,
           and calendar_new is None

        Right now we will also create the iTIP message based on the diff of calendar_old and calendar_new rather than
        looking at the current state of the orgnaizer's resource (which may have changed since this work item was
        filed). That means that we are basically NOT doing any coalescing of changes - instead every change results
        in its own iTIP message (pretty much as it would without the queue). Ultimately we need to support coalescing
        for performance benefit, but the logic involved in doing that is tricky (e.g., certain properties like
        SCHEDULE-FORCE-SEND are not preserved in the saved data, yet need to be accounted for because they change the
        nature of the iTIP processing).
        """
        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.RequestDelaySeconds)

        # Callers may pass either Component objects or already-serialized
        # text (migrate() passes text); normalize to text for storage.
        if isinstance(calendar_old, Component):
            calendar_old = calendar_old.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference)
        if isinstance(calendar_new, Component):
            calendar_new = calendar_new.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference)
        proposal = (yield txn.enqueue(
            cls,
            notBefore=notBefore,
            icalendarUID=uid,
            scheduleAction=scheduleActionToSQL[action],
            homeResourceID=home.id(),
            resourceID=resource.id() if resource else None,
            icalendarTextOld=calendar_old,
            icalendarTextNew=calendar_new,
            attendeeCount=attendee_count,
            smartMerge=smart_merge,
            pause=pause,
        ))
        cls._enqueued()
        log.debug("ScheduleOrganizerWork - enqueued for ID: {id}, UID: {uid}, organizer: {org}", id=proposal.workItem.workID, uid=uid, org=organizer)

    @inlineCallbacks
    def migrate(self, txn, mapIDsCallback):
        """
        See L{ScheduleWork.migrate}
        """
        # Try to find a mapping
        new_home, new_resource = yield mapIDsCallback(self.resourceID)

        # If we previously had a resource ID and now don't, then don't create work
        if self.resourceID is not None and new_resource is None:
            returnValue(False)

        # Recover the iCalendar UID from whichever serialized component exists
        if self.icalendarTextOld:
            calendar_old = Component.fromString(self.icalendarTextOld)
            uid = calendar_old.resourceUID()
        else:
            calendar_new = Component.fromString(self.icalendarTextNew)
            uid = calendar_new.resourceUID()

        # Insert new work - in paused state
        yield ScheduleOrganizerWork.schedule(
            txn, uid, scheduleActionFromSQL[self.scheduleAction],
            new_home, new_resource, self.icalendarTextOld, self.icalendarTextNew,
            new_home.uid(), self.attendeeCount, self.smartMerge,
            pause=1
        )
        returnValue(True)

    @inlineCallbacks
    def doWork(self):
        """
        Execute the queued organizer change: acquire the per-UID lock, then
        hand the old/new calendar data to the implicit scheduler which will
        generate the iTIP messages (as L{ScheduleOrganizerSendWork} items).
        """
        try:
            home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
            resource = (yield home.objectResourceWithID(self.resourceID))
            organizerAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
            organizer = organizerAddress.record.canonicalCalendarUserAddress()
            calendar_old = Component.fromString(self.icalendarTextOld) if self.icalendarTextOld else None
            calendar_new = Component.fromString(self.icalendarTextNew) if self.icalendarTextNew else None

            log.debug("ScheduleOrganizerWork - running for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUID, org=organizer)

            # We need to get the UID lock for implicit processing.
            yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUID).hexdigest(),))

            # Local import - presumably to avoid a circular module dependency
            from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
            scheduler = ImplicitScheduler()
            yield scheduler.queuedOrganizerProcessing(
                self.transaction,
                scheduleActionFromSQL[self.scheduleAction],
                home,
                resource,
                self.icalendarUID,
                calendar_old,
                calendar_new,
                self.smartMerge
            )

            self._dequeued()

        except Exception, e:
            log.debug("ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUID, err=str(e))
            log.debug(traceback.format_exc())
            raise
        except:
            # Bare except catches non-Exception raises (Python 2); log then re-raise
            log.debug("ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUID)
            log.debug(traceback.format_exc())
            raise

        log.debug("ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUID, org=organizer)
class ScheduleOrganizerSendWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_ORGANIZER_SEND_WORK)):
    """
    @DynamicAttrs
    The associated work item table is SCHEDULE_ORGANIZER_SEND_WORK.

    This work item is used to send iTIP request and cancel messages when an organizer changes
    their calendar object resource. One of these will be created for each iTIP message that
    L{ScheduleOrganizerWork} needs to have sent.
    """

    @classmethod
    @inlineCallbacks
    def schedule(cls, txn, action, home, resource, organizer, attendee, itipmsg, no_refresh, stagger, pause=0):
        """
        Create the work item. Because there may be lots of these dumped onto the server in one go, we will
        stagger them via notBefore. However, we are using a "chained" work item so when one completes, it
        will reschedule the next one to run immediately after it, so if work is being done quickly, the
        stagger interval is effectively ignored.

        @param txn: the transaction to use
        @type txn: L{CommonStoreTransaction}
        @param organizer: the calendar user address of the organizer
        @type organizer: L{str}
        @param attendee: the calendar user address of the attendee to send the message to
        @type attendee: L{str}
        @param itipmsg: the iTIP message to send
        @type itipmsg: L{Component}
        @param no_refresh: whether or not refreshes are allowed
        @type no_refresh: L{bool}
        @param stagger: number of seconds into the future for notBefore
        @type stagger: L{int}
        """
        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.RequestDelaySeconds + stagger)
        uid = itipmsg.resourceUID()
        proposal = (yield txn.enqueue(
            cls,
            notBefore=notBefore,
            icalendarUID=uid,
            scheduleAction=scheduleActionToSQL[action],
            homeResourceID=home.id(),
            resourceID=resource.id() if resource else None,
            attendee=attendee,
            itipMsg=itipmsg.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference),
            noRefresh=no_refresh,
            pause=pause,
        ))
        cls._enqueued()
        log.debug(
            "ScheduleOrganizerSendWork - enqueued for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}",
            id=proposal.workItem.workID,
            uid=uid,
            org=organizer,
            att=attendee
        )

    @inlineCallbacks
    def migrate(self, txn, mapIDsCallback):
        """
        See L{ScheduleWork.migrate}
        """
        # Try to find a mapping
        new_home, new_resource = yield mapIDsCallback(self.resourceID)

        # If we previously had a resource ID and now don't, then don't create work
        if self.resourceID is not None and new_resource is None:
            returnValue(False)

        # NOTE(review): if self.itipMsg is empty, itipmsg is never bound and
        # the schedule() call below would raise NameError - presumably the
        # column is always populated; confirm.
        if self.itipMsg:
            itipmsg = Component.fromString(self.itipMsg)

        # Insert new work - in paused state
        yield ScheduleOrganizerSendWork.schedule(
            txn, scheduleActionFromSQL[self.scheduleAction],
            new_home, new_resource, new_home.uid(), self.attendee,
            itipmsg, self.noRefresh, 0,
            pause=1
        )
        returnValue(True)

    @inlineCallbacks
    def doWork(self):
        """
        Send the stored iTIP message to the attendee via the implicit
        scheduler, then apply any error statuses back onto the organizer's
        calendar object resource.
        """
        try:
            home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
            resource = (yield home.objectResourceWithID(self.resourceID))
            itipmsg = Component.fromString(self.itipMsg)
            organizerAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
            organizer = organizerAddress.record.canonicalCalendarUserAddress()

            log.debug(
                "ScheduleOrganizerSendWork - running for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}",
                id=self.workID,
                uid=self.icalendarUID,
                org=organizer,
                att=self.attendee
            )

            # We need to get the UID lock for implicit processing.
            yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUID).hexdigest(),))

            # Local import - presumably to avoid a circular module dependency
            from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
            scheduler = ImplicitScheduler()
            yield scheduler.queuedOrganizerSending(
                self.transaction,
                scheduleActionFromSQL[self.scheduleAction],
                home,
                resource,
                self.icalendarUID,
                organizer,
                self.attendee,
                itipmsg,
                self.noRefresh
            )

            # Handle responses - update the actual resource in the store. Note that for a create the resource did not previously
            # exist and is stored as None for the work item, but the scheduler will attempt to find the new resources and use
            # that. We need to grab the scheduler's resource for further processing.
            resource = scheduler.resource
            if resource is not None:
                responses, all_delivered = self.extractSchedulingResponse(scheduler.queuedResponses)
                if not all_delivered:
                    # Check for all connection failed
                    yield self.checkTemporaryFailure(responses)

                    # Update calendar data to reflect error status
                    calendar = (yield resource.componentForUser())
                    changed = self.handleSchedulingResponse(responses, calendar, True)
                    if changed:
                        yield resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.ORGANIZER_ITIP_UPDATE)

            self._dequeued()

        except Exception, e:
            log.debug("ScheduleOrganizerSendWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUID, err=str(e))
            log.debug(traceback.format_exc())
            raise
        except:
            # Bare except catches non-Exception raises (Python 2); log then re-raise
            log.debug("ScheduleOrganizerSendWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUID)
            log.debug(traceback.format_exc())
            raise

        log.debug(
            "ScheduleOrganizerSendWork - done for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}",
            id=self.workID,
            uid=self.icalendarUID,
            org=organizer,
            att=self.attendee
        )
class ScheduleReplyWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_REPLY_WORK)):
    """
    @DynamicAttrs
    The associated work item table is SCHEDULE_REPLY_WORK.

    This work item is used to send an iTIP reply message when an attendee changes
    their partstat in the calendar object resource.
    """

    @classmethod
    @inlineCallbacks
    def reply(cls, txn, home, resource, itipmsg, attendee, pause=0):
        """
        Enqueue a reply work item for the given attendee's iTIP message.

        @param txn: the transaction to use
        @param home: the attendee's calendar home
        @param resource: the attendee's calendar object resource (may be None)
        @param itipmsg: the iTIP REPLY message to send
        @type itipmsg: L{Component}
        @param attendee: calendar user address of the attendee (used for logging only)
        @param pause: non-zero to create the work item in a paused state
        """
        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.ReplyDelaySeconds)
        uid = itipmsg.resourceUID()
        proposal = (yield txn.enqueue(
            cls,
            notBefore=notBefore,
            icalendarUID=uid,
            homeResourceID=home.id(),
            resourceID=resource.id() if resource else None,
            itipMsg=itipmsg.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference),
            pause=pause,
        ))
        cls._enqueued()
        log.debug("ScheduleReplyWork - enqueued for ID: {id}, UID: {uid}, attendee: {att}", id=proposal.workItem.workID, uid=uid, att=attendee)

    @inlineCallbacks
    def migrate(self, txn, mapIDsCallback):
        """
        See L{ScheduleWork.migrate}
        """
        # Try to find a mapping
        new_home, new_resource = yield mapIDsCallback(self.resourceID)

        # If we previously had a resource ID and now don't, then don't create work
        if self.resourceID is not None and new_resource is None:
            returnValue(False)

        # NOTE(review): if self.itipMsg is empty, itipmsg is never bound and
        # the reply() call below would raise NameError - presumably the
        # column is always populated; confirm.
        if self.itipMsg:
            itipmsg = Component.fromString(self.itipMsg)

        # Insert new work - in paused state
        yield ScheduleReplyWork.reply(
            txn,
            new_home, new_resource, itipmsg, new_home.uid(),
            pause=1
        )
        returnValue(True)

    @inlineCallbacks
    def sendToOrganizer(self, home, itipmsg, originator, recipient):
        """
        Deliver the iTIP REPLY to the organizer via a local CalDAV scheduling
        PUT and return the scheduling response.
        """
        # Send scheduling message

        # This is a local CALDAV scheduling operation.
        from txdav.caldav.datastore.scheduling.caldav.scheduler import CalDAVScheduler
        scheduler = CalDAVScheduler(self.transaction, home.uid())

        # Do the PUT processing
        log.info("Implicit REPLY - attendee: '%s' to organizer: '%s', UID: '%s'" % (originator, recipient, itipmsg.resourceUID(),))
        response = (yield scheduler.doSchedulingViaPUT(originator, (recipient,), itipmsg, internal_request=True))
        returnValue(response)

    @inlineCallbacks
    def doWork(self):
        """
        Send the stored iTIP REPLY to the organizer, then apply any error
        statuses back onto the attendee's calendar object resource.
        """
        try:
            home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
            resource = (yield home.objectResourceWithID(self.resourceID))
            itipmsg = Component.fromString(self.itipMsg)
            attendeeAddress = yield calendarUserFromCalendarUserUID(home.uid(), self.transaction)
            attendee = attendeeAddress.record.canonicalCalendarUserAddress()
            organizer = itipmsg.validOrganizerForScheduling()

            log.debug("ScheduleReplyWork - running for ID: {id}, UID: {uid}, attendee: {att}", id=self.workID, uid=itipmsg.resourceUID(), att=attendee)

            # We need to get the UID lock for implicit processing.
            yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(itipmsg.resourceUID()).hexdigest(),))

            # Send scheduling message and process response
            response = (yield self.sendToOrganizer(home, itipmsg, attendee, organizer))

            if resource is not None:
                responses, all_delivered = self.extractSchedulingResponse((response,))
                if not all_delivered:
                    # Check for all connection failed
                    yield self.checkTemporaryFailure(responses)

                    # Update calendar data to reflect error status
                    calendar = (yield resource.componentForUser())
                    # NOTE(review): handleSchedulingResponse is a plain method
                    # (not a Deferred) - the yield here works because
                    # inlineCallbacks passes non-Deferred values through, but
                    # it is inconsistent with the organizer-side call; confirm.
                    changed = yield self.handleSchedulingResponse(responses, calendar, False)
                    if changed:
                        yield resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.ATTENDEE_ITIP_UPDATE)

            self._dequeued()

        except Exception, e:
            # FIXME: calendar may not be set here!
            # NOTE(review): if Component.fromString above raised, itipmsg is
            # unbound and itipmsg.resourceUID() here raises NameError,
            # masking the original error - confirm and fix.
            log.debug("ScheduleReplyWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=itipmsg.resourceUID(), err=str(e))
            raise
        except:
            log.debug("ScheduleReplyWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=itipmsg.resourceUID())
            raise

        log.debug("ScheduleReplyWork - done for ID: {id}, UID: {uid}, attendee: {att}", id=self.workID, uid=itipmsg.resourceUID(), att=attendee)
class ScheduleRefreshWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_REFRESH_WORK)):
"""
@DynamicAttrs
The associated work item table is SCHEDULE_REFRESH_WORK.
This work item is used to trigger an iTIP refresh of attendees. This happens when one attendee
replies to an invite, and we want to have the others attendees see that change - eventually. We
are going to use the SCHEDULE_REFRESH_ATTENDEES table to track the list of attendees needing
a refresh for each calendar object resource (identified by the organizer's resource-id for that
calendar object). We want to do refreshes in batches with a configurable time between each batch.
The tricky part here is handling race conditions, where two or more attendee replies happen at the
same time, or happen whilst a previously queued refresh has started batch processing. Here is how
we will handle that:
1) Each time a refresh is needed we will add all attendees to the SCHEDULE_REFRESH_ATTENDEES table.
This will happen even if those attendees are currently listed in that table. We ensure the table is
not unique wrt to attendees - this means that two simultaneous refreshes can happily insert the
same set of attendees without running into unique constraints and thus without having to use
savepoints to cope with that. This will mean duplicate attendees listed in the table, but we take
care of that when executing the work item, as per the next point. We also always schedule a new work
item for the refresh - even if others are present. The work items are coalesced when executed, with
the actual refresh only running at the time of the latest enqueued item. That ensures there is always
a pause between a change that causes a refresh and then next actual refresh batch being done, giving
some breathing space in case rapid changes are happening to the iCalendar data.
2) When a work item is triggered we get the set of unique attendees needing a refresh from the
SCHEDULE_REFRESH_ATTENDEES table. We split out a batch of those to actually refresh - with the
others being left in the table as-is. We then remove the batch of attendees from the
SCHEDULE_REFRESH_ATTENDEES table - this will remove duplicates. The refresh is then done and a
new work item scheduled to do the next batch. We only stop rescheduling work items when nothing
is found during the initial query. Note that if any refresh is done we will always reschedule work
even if we know none remain. That should handle the case where a new refresh occurs whilst
processing the last batch from a previous refresh.
Hopefully the above methodology will deal with concurrency issues, preventing any excessive locking
or failed inserts etc.
"""
@classmethod
@inlineCallbacks
def refreshAttendees(cls, txn, organizer_resource, organizer_calendar, attendees, pause=0):
    """
    Mark the given attendees as pending a refresh for the organizer's
    calendar object resource and enqueue a new refresh work item.

    @param txn: the transaction to use
    @param organizer_resource: the organizer's calendar object resource
    @param organizer_calendar: organizer's calendar data - NOTE(review):
        accepted but not used by this implementation; confirm callers.
    @param attendees: calendar user addresses of attendees to refresh
    @param pause: non-zero to create the work item in a paused state
    """
    # See if there is already a pending refresh and merge current attendees into that list,
    # otherwise just mark all attendees as pending
    sra = schema.SCHEDULE_REFRESH_ATTENDEES
    pendingAttendees = (yield Select(
        [sra.ATTENDEE, ],
        From=sra,
        Where=sra.RESOURCE_ID == organizer_resource.id(),
    ).on(txn))
    pendingAttendees = [row[0] for row in pendingAttendees]
    # Only insert attendees not already marked pending
    attendeesToRefresh = set(attendees) - set(pendingAttendees)
    for attendee in attendeesToRefresh:
        yield Insert(
            {
                sra.RESOURCE_ID: organizer_resource.id(),
                sra.ATTENDEE: attendee,
            }
        ).on(txn)

    # Always queue up new work - coalescing happens when work is executed
    notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchDelaySeconds)
    proposal = (yield txn.enqueue(
        cls,
        icalendarUID=organizer_resource.uid(),
        homeResourceID=organizer_resource._home.id(),
        resourceID=organizer_resource.id(),
        attendeeCount=len(attendees),
        notBefore=notBefore,
        pause=pause,
    ))
    cls._enqueued()
    log.debug("ScheduleRefreshWork - enqueued for ID: {id}, UID: {uid}, attendees: {att}", id=proposal.workItem.workID, uid=organizer_resource.uid(), att=",".join(attendeesToRefresh))
@inlineCallbacks
def migrate(self, txn, mapIDsCallback):
    """
    See L{ScheduleWork.migrate}
    """
    # Try to find a mapping
    _ignore_new_home, new_resource = yield mapIDsCallback(self.resourceID)

    # If we previously had a resource ID and now don't, then don't create work
    if new_resource is None:
        returnValue(False)

    # Insert new work - in paused state.
    # NOTE(review): self._refreshAttendees is not defined anywhere in this
    # class's visible code - presumably it is attached as ancillary data
    # during serialization/deserialization; confirm before relying on it.
    yield ScheduleRefreshWork.refreshAttendees(
        txn,
        new_resource, None, self._refreshAttendees,
        pause=1
    )
    returnValue(True)
@inlineCallbacks
def doWork(self):
    """
    Process one batch of pending attendee refreshes for this resource,
    rescheduling another work item if more attendees remain. See the class
    docstring for the full coalescing/race-handling strategy.
    """
    # Look for other work items for this resource and ignore this one if other later ones exist.
    # NOTE(review): this assumes this item's own row is no longer returned by
    # the query when doWork runs (i.e. it was already removed by the job
    # machinery) - confirm against the WorkItem execution model.
    srw = schema.SCHEDULE_REFRESH_WORK
    rows = (yield Select(
        (srw.WORK_ID,),
        From=srw,
        Where=(
            srw.HOME_RESOURCE_ID == self.homeResourceID).And(
            srw.RESOURCE_ID == self.resourceID
        ),
    ).on(self.transaction))
    if rows:
        log.debug("Schedule refresh for resource-id: {rid} - ignored", rid=self.resourceID)
        returnValue(None)

    log.debug("ScheduleRefreshWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)

    # Get the unique list of pending attendees and split into batch to process
    # TODO: do a DELETE ... and rownum <= N returning attendee - but have to fix Oracle to
    # handle multi-row returning. Would be better than entire select + delete of each one,
    # but need to make sure to use UNIQUE as there may be duplicate attendees.
    sra = schema.SCHEDULE_REFRESH_ATTENDEES
    pendingAttendees = (yield Select(
        [sra.ATTENDEE, ],
        From=sra,
        Where=sra.RESOURCE_ID == self.resourceID,
    ).on(self.transaction))
    # De-duplicate - the table deliberately allows duplicate attendee rows
    pendingAttendees = list(set([row[0] for row in pendingAttendees]))

    # Nothing left so done
    if len(pendingAttendees) == 0:
        returnValue(None)

    attendeesToProcess = pendingAttendees[:config.Scheduling.Options.AttendeeRefreshBatch]
    pendingAttendees = pendingAttendees[config.Scheduling.Options.AttendeeRefreshBatch:]

    # Removing the batch also removes any duplicate rows for those attendees
    yield Delete(
        From=sra,
        Where=(sra.RESOURCE_ID == self.resourceID).And(sra.ATTENDEE.In(Parameter("attendeesToProcess", len(attendeesToProcess))))
    ).on(self.transaction, attendeesToProcess=attendeesToProcess)

    # Reschedule work item if pending attendees remain.
    if len(pendingAttendees) != 0:
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchIntervalSeconds)
        yield self.transaction.enqueue(
            self.__class__,
            icalendarUID=self.icalendarUID,
            homeResourceID=self.homeResourceID,
            resourceID=self.resourceID,
            attendeeCount=len(pendingAttendees),
            notBefore=notBefore
        )
        self._enqueued()

    # Do refresh
    yield self._doDelayedRefresh(attendeesToProcess)

    self._dequeued()

    log.debug("ScheduleRefreshWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)
@inlineCallbacks
def _doDelayedRefresh(self, attendeesToProcess):
    """
    Do an attendee refresh that has been delayed until after processing of the request that called it. That
    requires that we create a new transaction to work with.

    @param attendeesToProcess: list of attendees to refresh.
    @type attendeesToProcess: C{list}
    """
    organizer_home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
    organizer_resource = (yield organizer_home.objectResourceWithID(self.resourceID))
    if organizer_resource is not None:
        try:
            # We need to get the UID lock for implicit processing whilst we send the auto-reply
            # as the Organizer processing will attempt to write out data to other attendees to
            # refresh them. To prevent a race we need a lock.
            yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(organizer_resource.uid()).hexdigest(),))
            yield self._doRefresh(organizer_resource, attendeesToProcess)
        except Exception as e:
            # FIX: was Python-2-only "except Exception, e" syntax; "as" works on
            # Python 2.6+ and Python 3.
            log.debug("ImplicitProcessing - refresh exception UID: '{uid}', {exc}", uid=organizer_resource.uid(), exc=str(e))
            raise
        except:
            # Catch non-Exception raises too: log for diagnosis, then re-raise.
            log.debug("ImplicitProcessing - refresh bare exception UID: '{uid}'", uid=organizer_resource.uid())
            raise
    else:
        # The resource was deleted before this queued work ran - nothing to refresh.
        log.debug("ImplicitProcessing - skipping refresh of missing ID: '{rid}'", rid=self.resourceID)
@inlineCallbacks
def _doRefresh(self, organizer_resource, only_attendees):
    """
    Refresh the data of a set of attendees of the organizer's event.

    @param organizer_resource: the resource for the organizer's calendar data
    @type organizer_resource: L{DAVResource}
    @param only_attendees: list of attendees to refresh (C{None} - refresh all)
    @type only_attendees: C{tuple}
    """
    attendee_desc = ", ".join(only_attendees) if only_attendees else "all"
    log.debug("ImplicitProcessing - refreshing UID: '{uid}', Attendees: {att}", uid=organizer_resource.uid(), att=attendee_desc)
    # Imported here to avoid a module-level circular import.
    from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
    yield ImplicitScheduler().refreshAllAttendeesExceptSome(
        self.transaction,
        organizer_resource,
        only_attendees=only_attendees,
    )
@inlineCallbacks
def serializeWithAncillaryData(self):
    """
    Serialize this work item, including the ancillary pending-attendee list.

    @return: mapping of attribute to string values
    @rtype: L{dict} of L{str}:L{str}
    """
    serialized = self.serialize()

    # Attach the pending attendee rows stored for this resource.
    sra = schema.SCHEDULE_REFRESH_ATTENDEES
    attendee_rows = (yield Select(
        [sra.ATTENDEE, ],
        From=sra,
        Where=sra.RESOURCE_ID == self.resourceID,
    ).on(self.transaction))
    serialized["_refreshAttendees"] = [attendee for (attendee,) in attendee_rows]
    returnValue(serialized)
@classmethod
def deserialize(cls, attrmap):
    """
    Reconstruct a work item from its serialized attributes, restoring the
    special attendee-list attribute added by L{serializeWithAncillaryData}.
    """
    refresh_attendees = attrmap.pop("_refreshAttendees")
    work_item = super(ScheduleRefreshWork, cls).deserialize(attrmap)
    work_item._refreshAttendees = refresh_attendees
    return work_item
class ScheduleAutoReplyWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_AUTO_REPLY_WORK)):
    """
    @DynamicAttrs
    The associated work item table is SCHEDULE_AUTO_REPLY_WORK.

    This work item is used to send auto-reply iTIP messages after the calendar data for the
    auto-accept user has been written to the user calendar.
    """

    @classmethod
    @inlineCallbacks
    def autoReply(cls, txn, resource, partstat, pause=0):
        """
        Enqueue an auto-reply work item for the given calendar object resource.

        @param txn: transaction to enqueue in
        @param resource: calendar object resource to reply for
        @param partstat: new PARTSTAT value to reply with
        @type partstat: C{str}
        @param pause: non-zero to create the work item in a paused state
        @type pause: C{int}
        """
        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AutoReplyDelaySeconds)
        proposal = (yield txn.enqueue(
            cls,
            icalendarUID=resource.uid(),
            homeResourceID=resource._home.id(),
            resourceID=resource.id(),
            partstat=partstat,
            notBefore=notBefore,
            pause=pause,
        ))
        cls._enqueued()
        log.debug("ScheduleAutoReplyWork - enqueued for ID: {id}, UID: {uid}", id=proposal.workItem.workID, uid=resource.uid())

    @inlineCallbacks
    def migrate(self, txn, mapIDsCallback):
        """
        See L{ScheduleWork.migrate}
        """
        # Try to find a mapping
        _ignore_new_home, new_resource = yield mapIDsCallback(self.resourceID)

        # If we previously had a resource ID and now don't, then don't create work
        if new_resource is None:
            returnValue(False)

        # Insert new work - in paused state
        yield ScheduleAutoReplyWork.autoReply(
            txn,
            new_resource, self.partstat,
            pause=1
        )

        returnValue(True)

    @inlineCallbacks
    def doWork(self):
        """
        Coalesce outstanding auto-reply work for this resource, then send one reply.
        """
        log.debug("ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)

        # Delete all other work items with the same pushID
        yield Delete(
            From=self.table,
            Where=self.table.RESOURCE_ID == self.resourceID
        ).on(self.transaction)

        # Do reply
        yield self._sendAttendeeAutoReply()

        self._dequeued()
        log.debug("ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)

    @inlineCallbacks
    def _sendAttendeeAutoReply(self):
        """
        Auto-process the calendar option to generate automatic accept/decline status and
        send a reply if needed.

        We used to have logic to suppress attendee refreshes until after all auto-replies have
        been processed. We can't do that with the work queue (easily) so we are going to ignore
        that for now. It may not be a big deal given that the refreshes are themselves done in the
        queue and we only do the refresh when the last queued work item is processed.

        The resource and partstat come from this work item's own attributes
        (C{self.resourceID} / C{self.partstat}); this method takes no parameters.
        (FIX: the old docstring documented C{resource}/C{partstat} parameters that
        do not exist.)
        """
        home = (yield self.transaction.calendarHomeWithResourceID(self.homeResourceID))
        resource = (yield home.objectResourceWithID(self.resourceID))
        if resource is not None:
            try:
                # We need to get the UID lock for implicit processing whilst we send the auto-reply
                # as the Organizer processing will attempt to write out data to other attendees to
                # refresh them. To prevent a race we need a lock.
                yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(resource.uid()).hexdigest(),))

                # Send out a reply
                log.debug("ImplicitProcessing - recipient '%s' processing UID: '%s' - auto-reply: %s" % (home.uid(), resource.uid(), self.partstat))
                from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
                scheduler = ImplicitScheduler()
                yield scheduler.sendAttendeeReply(self.transaction, resource)
            except Exception as e:
                # FIX: was Python-2-only "except Exception, e" syntax.
                log.debug("ImplicitProcessing - auto-reply exception UID: '%s', %s" % (resource.uid(), str(e)))
                raise
            except:
                log.debug("ImplicitProcessing - auto-reply bare exception UID: '%s'" % (resource.uid(),))
                raise
        else:
            log.debug("ImplicitProcessing - skipping auto-reply of missing ID: '{rid}'", rid=self.resourceID)
# All concrete schedule work classes, registered under their class names so
# serialized work items can be mapped back to the implementing class.
allScheduleWork = (
    ScheduleOrganizerWork,
    ScheduleOrganizerSendWork,
    ScheduleReplyWork,
    ScheduleRefreshWork,
    ScheduleAutoReplyWork,
)
ScheduleWork._classForWorkType.update(
    (workClass.__name__, workClass) for workClass in allScheduleWork
)
| |
# -*- coding: utf-8 -*-
#
# login.py # # # # # # # # # #
# #
# Copyright 2016 Giorgio Ladu <giorgio.ladu >at< gmail.com> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # #
# 08.09.2016 V. 1.08
##
# Standard library
import cgi
import os
import socket
import sys
import time

# Third-party: pyrad RADIUS client
import pyrad.packet
from pyrad.client import Client
from pyrad.dictionary import Dictionary

# Local modules
import config
import html_page
import store

import cgitb #debug
cgitb.enable()  # render CGI tracebacks in the browser (debug aid)

# Parameters forwarded by the WiFiDog gateway in the login redirect; fall back
# to the node's configured values when a field is absent.
form = cgi.FieldStorage()
mac = form.getvalue("mac", "00:00:00:00")  # client MAC (NOTE(review): default has only 4 octet groups - confirm)
ip = form.getvalue("ip", "0.0.0.0")  # client IP address
gw_address = form.getvalue("gw_address", config.node_ip)  # gateway address
gw_port = form.getvalue("gw_port", config.node_port)  # gateway HTTP port
gw_id = form.getvalue("gw_id", config.node_name)  # gateway identifier
url = form.getvalue("url", config.custom_url)  # originally requested URL
token = form.getfirst("token", None)  # None
user = form.getfirst("username", None)
passwd = form.getfirst("password", None)
# NOTE(review): raises KeyError if HTTP_USER_AGENT is unset - confirm the web
# server always provides it.
user_agent = cgi.escape( os.environ[ "HTTP_USER_AGENT" ] )

# Account status codes used by the portal protocol.
ACCOUNT_STATUS_ERROR = -1
ACCOUNT_STATUS_DENIED = 0
ACCOUNT_STATUS_ALLOWED = 1
ACCOUNT_STATUS_VALIDATION = 5
ACCOUNT_STATUS_VALIDATION_FAILED = 6
ACCOUNT_STATUS_LOCKED = 254

# RADIUS client configuration and the shared client instance used below.
radius_config = {
    'server': config.radius_server, # radius server
    'secret': config.radius_secret, # radius secret key
    'dict': Dictionary(config.radius_dictionary),
}
srv = Client(**radius_config)
def SendPacket(srv, req):
    """
    Send a RADIUS request and return the server's reply.

    On a RADIUS timeout or a network error, render an error page to the
    client and terminate the CGI script with exit status 1.

    @param srv: pyrad Client to send through
    @param req: prepared RADIUS request packet
    @return: the RADIUS reply packet
    """
    try:
        return srv.SendPacket(req)
    except pyrad.client.Timeout:
        html_page.error_page("RADIUS server does not reply")
        sys.exit(1)
    except socket.error as error:
        # FIX: "socket" was never imported, so any network error raised a
        # NameError here (now imported at module level); also error[1] raised
        # IndexError for single-argument exceptions - use str(error) instead.
        html_page.error_page("Network error: " + str(error))
        sys.exit(1)
def auth(username, password):
    """
    Run a RADIUS Access-Request for the given credentials and persist the
    outcome in a freshly created session store.

    Returns the store object: store["auth"] is True only on Access-Accept,
    and store["auth_message"] / store["auth_response"] describe the result.
    Uses the module-level RADIUS client ``srv`` and the gateway globals.
    """
    store_data = store.store()
    store_data.create()
    store_data["auth"] = False
    token = store_data.session_key  # the session key doubles as the WiFiDog token

    # Build the Access-Request with the gateway/accounting attributes.
    req = srv.CreateAuthPacket(
        code=pyrad.packet.AccessRequest,
        User_Name=username)
    req["Acct-Status-Type"] = "Start"
    req["User-Password"] = req.PwCrypt(password)
    req["NAS-IP-Address"] = gw_address
    req["NAS-Port"] = config.custom_nas_port
    req["NAS-Port-Type"] = config.custom_nas_port_type
    # MAC OF WIFIDOG "00-10-A4-23-19-C0"
    req["NAS-Identifier"] = config.node_mac
    req["Acct-Session-Id"] = token
    # MAC OF WIFIDOG "00-10-A4-23-19-C0"
    req["Called-Station-Id"] = config.node_mac
    # MAC OF USER OR IP "00-00-B4-23-19-C0"
    req["Calling-Station-Id"] = mac
    req["Framed-IP-Address"] = ip
    req["Service-Type"] = pyrad.packet.AccessRequest
    req["Acct-Delay-Time"] = 0
    req["Acct-Input-Octets"] = 0
    req["Acct-Output-Octets"] = 0
    # WISPr-Location-ID = "isocc=,cc=,ac=,network=Coova,Wicoin_Test"
    req["WISPr-Location-ID"] = str(config.custom_wispr_location_id)
    # WISPr-Location-Name = "Wicoin_Test"
    req["WISPr-Location-Name"] = str(config.custom_wispr_location_name)
    # http://7.0.0.1:2060/wifidog/auth?logout=1&token=4f473ae3ddc5c1c2165f7a0973c57a98
    req["WISPr-Logoff-URL"] = "http://" + str(gw_address) + ':' + str(
        gw_port) + "/wifidog/auth?logout=1&token=" + str(token)

    reply = SendPacket(srv=srv, req=req)
    auth_message = reply.code  # NOTE(review): assigned but never used

    if reply.code == pyrad.packet.AccessAccept:
        # Accepted: record credentials, session start and all reply attributes.
        store_data["auth"] = True
        store_data["username"] = username
        store_data["password"] = password
        store_data["session_start"] = time.time()
        store_data["auth_message"] = " User Access Accept "
        store_data["auth_response"] = reply.code
        for i in reply.keys():
            store_data[i] = reply[i][0]
    elif reply.code == pyrad.packet.AccessReject:
        # Rejected: include the server's Reply-Message when present.
        if "Reply-Message" in reply:
            store_data["auth_message"] = " User Access Reject -" + \
                str(reply["Reply-Message"][0])
        else:
            store_data["auth_message"] = " User Access Reject "
        store_data["auth_response"] = reply.code
    else:
        # Any other reply code is treated as a validation error.
        store_data[
            "auth_message"] = " An error occurred during the validation process "
        store_data["auth_response"] = reply.code
    store_data.save()
    return store_data
def login_page():
    """Emit the HTML login form, embedding the gateway parameters as hidden fields."""
    html_page.header_page()
    # Static part of the form.
    # NOTE(review): the form posts to "login.pyo" - confirm this matches the
    # deployed script name.
    print """
    <section id="container">
    <div class="login">
    <h1>Login</h1>
    <form method="post" action="login.pyo">
    <div class="form-group" >
    <div class="input-group margin-bottom-sm">
    <span class="input-group-addon"><i class="fa fa-user fa-fw" aria-hidden="true"></i></span>
    <input class="form-control" type="text" id="username" name="username" placeholder="Username" required="required">
    </div>
    <div class="input-group">
    <span class="input-group-addon"><i class="fa fa-key fa-fw" aria-hidden="true"></i></span>
    <input class="form-control" type="password" id="password" name="password" placeholder="Password" required="required">
    </div>
    """
    # Hidden fields carry the WiFiDog gateway parameters through the POST.
    print '<input type="hidden" id="gw_address" name="gw_address" value="' + str(gw_address) + '"/>'
    print '<input type="hidden" id="gw_port" name="gw_port" value="' + str(gw_port) + '"/>'
    print '<input type="hidden" id="gw_id" name="gw_id" value="' + str(gw_id) + '"/>'
    print '<input type="hidden" id="mac" name="mac" value="' + str(mac) + '"/>'
    print '<input type="hidden" id="url" name="url" value="' + str(url) + '"/>'
    print """<br>
    <button class="btn btn-success" type="submit"> <i class="fa fa-unlock fa-fw"></i> Let me in. </button>
    </div>
    </form>
    </div>
    </section>
    """
    html_page.footer_page()
# Captive-portal detection probes: answer the OS connectivity checks so the
# device raises its sign-in UI instead of the requested page.
#if "Android 4" in user_agent:
#print 'HTTP/1.1 204 No Content'
#print ''
#sys.exit(0)

# NOTE(review): success_apple() is not defined in this file; presumably it is
# provided by a shared include - confirm it is in scope at runtime.
if "wispr" in user_agent:
    success_apple()
    sys.exit(0)
if "success.html" in url:
    success_apple()
    sys.exit(0)
if "hotspot-detect.html" in url:
    success_apple()
    sys.exit(0)
if url.find("ncsi.txt") != -1:
    # Windows NCSI probe expects exactly this body.
    print 'HTTP/1.1 200 OK'
    print 'Microsoft NCSI'
    print ''
    sys.exit(0)

# Main flow: with valid gateway parameters, either authenticate submitted
# credentials against RADIUS or show the login form.
if(gw_address and gw_id and mac):
    if (user and passwd):
        user = cgi.escape(user)
        passwd = cgi.escape(passwd)
        store_data = auth(user, passwd)
        if store_data["auth"]:
            # Accepted: persist the session details, then redirect the client
            # back to the gateway with the one-time token.
            store_data["username"] = user
            store_data["password"] = passwd
            token = store_data.session_key
            store_data["ip"] = ip
            store_data["mac"] = mac
            store_data["gw_address"] = gw_address
            store_data["gw_port"] = gw_port
            store_data["gw_id"] = gw_id
            store_data["token"] = token
            store_data["url"] = url
            store_data.save()
            tokenurl = "http://" + str(gw_address) + ':' + str(
                gw_port) + "/wifidog/auth?token=" + str(token)
            print 'HTTP/1.1 302 Found'
            print 'Location: ' + tokenurl
            print ''
        else:
            # Rejected or error: show the message recorded by auth().
            html_page.error_page(store_data["auth_message"])
    else:
        login_page()
else:
    html_page.error_page("BUMP!!")
| |
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
    """Agent that picks uniformly at random among the vacuum-world actions."""
    actions = ['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp']
    return ag.Agent(ag.RandomAgentProgram(actions))
def TableDrivenVacuumAgent():
    """[Figure 2.3] Table-driven agent: maps full percept sequences to actions."""
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             }
    p = ag.TableDrivenAgentProgram(table)
    # FIX: the table-driven program was built but then discarded - the agent
    # was constructed with no program (ag.Agent()). Pass the program, matching
    # every other *VacuumAgent constructor in this module.
    return ag.Agent(p)
def ReflexVacuumAgent():
    """A reflex agent for the two-state vacuum environment. [Figure 2.8]"""
    def program(percept):
        # Suck wherever there is dirt; otherwise shuttle toward the other square.
        location, status = percept
        if status == 'Dirty':
            return 'Suck'
        return {loc_A: 'Right', loc_B: 'Left'}.get(location)
    return ag.Agent(program)
# FIX: dropped the incorrect "-> object" return annotation - the function
# returns an ag.Agent like its unannotated siblings above.
def ModelBasedVacuumAgent():
    """An agent that keeps track of what locations are clean or dirty."""
    model = {loc_A: None, loc_B: None}  # last observed status of each square

    def program(percept):
        "Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
        location, status = percept
        model[location] = status  # Update the model here
        if model[loc_A] == model[loc_B] == 'Clean':
            return 'NoOp'
        elif status == 'Dirty':
            return 'Suck'
        elif location == loc_A:
            return 'Right'
        elif location == loc_B:
            return 'Left'
    return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
    """Marker Thing whose presence at a location means the square is dirty."""
    pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
    """The environment of [Ex. 2.12]. Agent perceives dirty or clean,
    and bump (into obstacle) or not; 2D discrete world of unknown size;
    performance measure is 100 for each dirt cleaned, and -1 for
    each turn taken."""

    def __init__(self, width=4, height=3):
        super(VacuumEnvironment, self).__init__(width, height)
        self.add_walls()

    def thing_classes(self):
        return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
                TableDrivenVacuumAgent, ModelBasedVacuumAgent]

    def percept(self, agent):
        """The percept is a tuple of ('Bump' or 'None', 'Dirty' or 'Clean').
        Unlike the TrivialVacuumEnvironment, location is NOT perceived.

        FIX: the docstring previously described (status, bump) while the code
        returns (bump, status); the docstring now matches the code.
        NOTE(review): if callers expect the classic (status, bump) order, the
        return statement - not the docstring - is what needs swapping.
        """
        status = ('Dirty' if self.some_things_at(
            agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
        return (bump, status)

    def execute_action(self, agent, action):
        """Apply *action*: +100 per dirt sucked, -1 for every non-NoOp turn."""
        if action == 'Suck':
            dirt_list = self.list_things_at(agent.location, Dirt)
            if dirt_list:  # idiomatic truthiness (was: dirt_list != [])
                dirt = dirt_list[0]
                agent.performance += 100
                self.delete_thing(dirt)
        else:
            super(VacuumEnvironment, self).execute_action(agent, action)
        if action != 'NoOp':
            agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
    """This environment has two locations, A and B. Each can be Dirty
    or Clean. The agent perceives its location and the location's
    status. This serves as an example of how to implement a simple
    Environment."""

    def __init__(self):
        # FIX: use the same zero-argument super() form as add_agent below;
        # the class previously mixed Python-2 and Python-3 super styles.
        super().__init__()
        # Randomly dirty the squares: bit 0 of choice -> A, bit 1 -> B.
        choice = random.randint(0, 3)
        if choice % 2:  # 1 or 3
            self.add_thing(Dirt(), loc_A)
        if choice > 1:  # 2 or 3
            self.add_thing(Dirt(), loc_B)

    def percept(self, agent):
        "Returns the agent's location, and the location status (Dirty/Clean)."
        status = ('Dirty' if self.some_things_at(
            agent.location, Dirt) else 'Clean')
        return (agent.location, status)

    #
    # def execute_action(self, agent, action):
    #     """Change agent's location and/or location's status; track performance.
    #     Score 10 for each dirt cleaned; -1 for each move."""
    #     if action == 'Right':
    #         agent.location = loc_B
    #         agent.performance -= 1
    #     elif action == 'Left':
    #         agent.location = loc_A
    #         agent.performance -= 1
    #     elif action == 'Suck':
    #         if self.status[agent.location] == 'Dirty':
    #             agent.performance += 10
    #         self.status[agent.location] = 'Clean'
    #

    def add_agent(self, a):
        "Agents start in either location at random."
        super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vaccuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
# Build a 5x4 vacuum world, drop in a tracing random agent and scattered dirt,
# then run the Tk GUI until the window is closed.
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)  # wrapper that logs each percept/action
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')  # NOTE(review): window title typo ("Vaccuum")
c = g.getCanvas()
# Map thing classes to the sprite images the canvas draws for them.
c.mapImageNames({
    ag.Wall: 'submissions/Becker/wall.jpg',
    # Floor: 'images/floor.png',
    Dirt: 'images/dirt.png',
    ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()  # blocks until the GUI exits
| |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OAuth Server Settings Blueprint."""
from __future__ import absolute_import
from functools import wraps
from flask import Blueprint, abort, redirect, render_template, request, \
session, url_for
from flask_babelex import lazy_gettext as _
from flask_breadcrumbs import register_breadcrumb
from flask_login import current_user, login_required
from flask_menu import register_menu
from invenio_db import db
from invenio_theme.proxies import current_theme_icons
from speaklater import make_lazy_string
from ..forms import ClientForm, TokenForm
from ..models import Client, Token
from ..proxies import current_oauth2server
# Blueprint for the user-facing OAuth application/token settings pages,
# mounted under the account-settings URL prefix.
blueprint = Blueprint(
    'invenio_oauth2server_settings',
    __name__,
    url_prefix='/account/settings/applications',
    static_folder='../static',
    template_folder='../templates',
)
#
# Decorator
#
def client_getter():
    """Decorator factory: fetch the current user's Client and guard access.

    Aborts with 500 when the route did not supply ``client_id`` and with 404
    when no matching client belongs to the logged-in user; otherwise the
    wrapped view receives the Client as its first argument.
    """
    def wrapper(view):
        @wraps(view)
        def decorated(*args, **kwargs):
            if 'client_id' not in kwargs:
                abort(500)
            lookup = {
                'client_id': kwargs.pop('client_id'),
                'user_id': current_user.get_id(),
            }
            client = Client.query.filter_by(**lookup).first()
            if client is None:
                abort(404)
            return view(client, *args, **kwargs)
        return decorated
    return wrapper
def token_getter(is_personal=True, is_internal=False):
    """Decorator factory: fetch a Token owned by the current user.

    :param is_personal: Search for a personal token. (Default: ``True``)
    :param is_internal: Search for a internal token. (Default: ``False``)

    Aborts with 500 when the route did not supply ``token_id`` and with 404
    when no matching token exists; otherwise the wrapped view receives the
    Token as its first argument.
    """
    def wrapper(view):
        @wraps(view)
        def decorated(*args, **kwargs):
            if 'token_id' not in kwargs:
                abort(500)
            lookup = dict(
                id=kwargs.pop('token_id'),
                user_id=current_user.get_id(),
                is_personal=is_personal,
                is_internal=is_internal,
            )
            token = Token.query.filter_by(**lookup).first()
            if token is None:
                abort(404)
            return view(token, *args, **kwargs)
        return decorated
    return wrapper
#
# Views
#
@blueprint.route("/", methods=['GET', 'POST'])
@login_required
@register_menu(
    blueprint, 'settings.applications',
    _('%(icon)s Applications', icon=make_lazy_string(
        lambda: f'<i class="{current_theme_icons.codepen}"></i>')),
    order=5,
    active_when=lambda: request.endpoint.startswith(
        "invenio_oauth2server_settings.")
)
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.applications', _('Applications')
)
def index():
    """List user tokens."""
    # Developer applications registered by this user (internal ones hidden).
    clients = Client.query.filter_by(
        user_id=current_user.get_id(),
        is_internal=False,
    ).all()
    # Personal access tokens.
    # NOTE(review): filtering on Client.is_internal without an explicit join
    # relies on SQLAlchemy relating Token to Client implicitly - confirm the
    # generated SQL joins the two tables as intended.
    tokens = Token.query.options(db.joinedload('client')).filter(
        Token.user_id == current_user.get_id(),
        Token.is_personal == True,  # noqa
        Token.is_internal == False,
        Client.is_internal == True,
    ).all()
    # Third-party applications this user has authorized.
    authorized_apps = Token.query.options(db.joinedload('client')).filter(
        Token.user_id == current_user.get_id(),
        Token.is_personal == False,  # noqa
        Token.is_internal == False,
        Client.is_internal == False,
    ).all()
    return render_template(
        'invenio_oauth2server/settings/index.html',
        clients=clients,
        tokens=tokens,
        authorized_apps=authorized_apps,
    )
@blueprint.route("/clients/new/", methods=['GET', 'POST'])
@login_required
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.applications.client_new', _('New')
)
def client_new():
    """Render the client-creation form; create the client on valid submit."""
    form = ClientForm(request.form)
    if not form.validate_on_submit():
        # First visit or validation failure: (re-)render the form.
        return render_template(
            'invenio_oauth2server/settings/client_new.html',
            form=form,
        )
    client = Client(user_id=current_user.get_id())
    client.gen_salt()
    form.populate_obj(client)
    db.session.add(client)
    db.session.commit()
    return redirect(url_for('.client_view', client_id=client.client_id))
@blueprint.route("/clients/<string:client_id>/", methods=['GET', 'POST'])
@login_required
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.applications.client_edit', _('Edit')
)
@client_getter()
def client_view(client):
    """Show the client's detail page; handle edit and delete submissions."""
    is_delete = request.method == 'POST' and 'delete' in request.form
    if is_delete:
        db.session.delete(client)
        db.session.commit()
        return redirect(url_for('.index'))

    client_form = ClientForm(request.form, obj=client)
    if client_form.validate_on_submit():
        # Valid edit: copy the form values onto the client and persist.
        client_form.populate_obj(client)
        db.session.commit()

    return render_template(
        'invenio_oauth2server/settings/client_view.html',
        client=client,
        form=client_form,
    )
@blueprint.route('/clients/<string:client_id>/reset/', methods=['POST'])
@login_required
@client_getter()
def client_reset(client):
    """Regenerate the client secret when the confirmation flag is set."""
    confirmed = request.form.get('reset') == 'yes'
    if confirmed:
        client.reset_client_secret()
        db.session.commit()
    return redirect(url_for('.client_view', client_id=client.client_id))
#
# Token views
#
@blueprint.route("/tokens/new/", methods=['GET', 'POST'])
@login_required
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.applications.token_new', _('New')
)
def token_new():
    """Render the token-creation form; mint a personal token on valid submit."""
    form = TokenForm(request.form)
    scope_choices = current_oauth2server.scope_choices()
    form.scopes.choices = scope_choices

    if form.validate_on_submit():
        token = Token.create_personal(
            form.data['name'], current_user.get_id(), scopes=form.scopes.data
        )
        db.session.commit()
        # One-shot flag: the next token_view displays the access token once.
        session['show_personal_access_token'] = True
        return redirect(url_for(".token_view", token_id=token.id))

    if not scope_choices:
        # No scopes registered: hide the (empty) scopes field entirely.
        del form.scopes

    return render_template(
        "invenio_oauth2server/settings/token_new.html",
        form=form,
    )
@blueprint.route("/tokens/<string:token_id>/", methods=['GET', 'POST'])
@login_required
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.applications.token_edit', _('Edit')
)
@token_getter()
def token_view(token):
    """Show token details."""
    # Delete requests short-circuit before any form processing.
    if request.method == "POST" and 'delete' in request.form:
        db.session.delete(token)
        db.session.commit()
        return redirect(url_for('.index'))

    # One-shot flag set by token_new so the access token is displayed once.
    show_token = session.pop('show_personal_access_token', False)
    form = TokenForm(request.form, name=token.client.name, scopes=token.scopes)
    form.scopes.choices = current_oauth2server.scope_choices()
    # Pre-select the token's current scopes in the form widget.
    form.scopes.data = token.scopes
    if form.validate_on_submit():
        token.client.name = form.data['name']
        token.scopes = form.data['scopes']
        db.session.commit()
    if len(current_oauth2server.scope_choices()) == 0:
        # No scopes registered: hide the empty scopes field.
        del(form.scopes)
    return render_template(
        "invenio_oauth2server/settings/token_view.html",
        token=token,
        form=form,
        show_token=show_token,
    )
@blueprint.route("/tokens/<string:token_id>/revoke/", methods=['GET', ])
@login_required
@token_getter(is_personal=False, is_internal=False)
def token_revoke(token):
    """Revoke Authorized Application token.

    NOTE(review): revocation happens on a GET request, which makes it
    reachable via a crafted link or prefetch (CSRF risk); consider POST.
    """
    db.session.delete(token)
    db.session.commit()
    return redirect(url_for('.index'))
@blueprint.route("/tokens/<string:token_id>/view/", methods=['GET', ])
@login_required
@token_getter(is_personal=False, is_internal=False)
def token_permission_view(token):
    """Show permissions granted to an authorized application token."""
    # Resolve each scope id on the token to its registered scope object.
    scopes = [current_oauth2server.scopes[x] for x in token.scopes]
    return render_template(
        "invenio_oauth2server/settings/token_permission_view.html",
        token=token,
        scopes=scopes,
    )
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.exceptions import PolicyValidationError
from .common import BaseTest, event_data
import logging
import time
LambdaFindingId = "us-east-2/644160558196/81cc9d38b8f8ebfd260ecc81585b4bc9/9f5932aa97900b5164502f41ae393d23" # NOQA
class SecurityHubMode(BaseTest):
    """Tests for the Security Hub execution modes (hub-finding / hub-action)."""

    def test_resolve_import_finding(self):
        """An imported finding event resolves to the underlying IAM user."""
        factory = self.replay_flight_data('test_security_hub_mode_resolve')
        policy = self.load_policy({
            'name': 'trail-fixer',
            'resource': 'aws.iam-user',
            'mode': {
                'type': 'hub-finding',
                'role': 'foo'}},
            session_factory=factory)
        event = event_data("event-securityhub-iamkey-finding-action.json")
        hub = policy.get_execution_mode()
        resources = hub.resolve_import_finding(event)
        self.assertEqual(
            sorted(resources),
            sorted(['arn:aws:iam::644160558196:user/kapil']))
        # resolve_resources should fetch the concrete resource description.
        resources = hub.resolve_resources(event)
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['UserName'], 'kapil')

    def test_resolve_action_finding(self):
        """A custom-action finding event resolves to the trail ARN."""
        policy = self.load_policy({
            'name': 'trail-fixer',
            'resource': 'aws.cloudtrail',
            'mode': {
                'type': 'hub-finding',
                'role': 'foo'}})
        event = event_data("event-securityhub-cloudtrail-finding-action.json")
        hub = policy.get_execution_mode()
        resources = hub.resolve_action_finding(event)
        self.assertEqual(
            sorted(resources),
            sorted([
                'arn:aws:cloudtrail:us-east-1:644160558196:trail/skunk-trails']))

    def test_resolve_action_insight(self):
        """An insight action event resolves to every member IAM user ARN."""
        factory = self.replay_flight_data(
            "test_security_hub_mode_action_insight")
        policy = self.load_policy({
            'name': 'iam-key',
            'resource': 'aws.iam-user',
            'mode': {
                'type': 'hub-action',
                'role': 'foo'}},
            session_factory=factory)
        hub = policy.get_execution_mode()
        event = event_data("event-securityhub-insight-2.json")
        resources = hub.resolve_action_insight(event)
        self.assertEqual(
            sorted(resources),
            sorted([
                'arn:aws:iam::644160558196:user/brent.clements',
                'arn:aws:iam::644160558196:user/david.shepherd2',
                'arn:aws:iam::644160558196:user/david.yun',
                'arn:aws:iam::644160558196:user/kapil']))

    def test_resolve_multi_account_resource_sets(self):
        """Cross-account findings are partitioned by (account, region) and
        executed under the configured member role."""
        factory = self.replay_flight_data(
            'test_security_hub_multi_account_mode')
        policy = self.load_policy({
            'name': 'lambda-remediate',
            'resource': 'aws.lambda',
            'mode': {
                'type': 'hub-action',
                'role': 'CustodianPolicyExecution',
                'member-role': 'arn:aws:iam::{account_id}:role/CustodianGuardDuty'
            }},
            config={'region': 'us-east-2',
                    'account_id': '519413311747'},
            session_factory=factory)
        hub = policy.get_execution_mode()
        event = event_data('event-securityhub-lambda-cross.json')
        partition_resources = hub.get_resource_sets(event)
        self.assertEqual(
            {p: list(map(repr, v)) for p, v in partition_resources.items()},
            {('644160558196', 'us-east-1'): [
                ("<arn:aws:lambda:us-east-1:644160558196:function:"
                 "custodian-enterprise-ec2-instances-no-elastic-ip-isolate>")
            ]})
        # Running the mode should assume the member role and return results
        # keyed by the same (account, region) partitions.
        output = self.capture_logging(policy.log.name, level=logging.INFO)
        results = hub.run(event, {})
        self.assertIn('Assuming member role:arn:aws:iam::644160558196', output.getvalue())
        self.assertEqual(
            results[('644160558196', 'us-east-1')][0]['FunctionName'],
            'custodian-enterprise-ec2-instances-no-elastic-ip-isolate')
class SecurityHubTest(BaseTest):
def test_custom_classifier(self):
    """post-finding 'types' entries must fit the Security Hub taxonomy."""
    templ = {
        'name': 's3',
        'resource': 's3',
        'actions': [{'type': 'post-finding',
                     'types': ['Effects/CustomB/CustomA']}]}
    # A custom classifier under a known namespace validates.
    self.load_policy(templ)
    # Unknown top-level namespace is rejected.
    templ['actions'][0]['types'] = ['CustomA/CustomB/CustomC']
    self.assertRaises(PolicyValidationError, self.load_policy, templ)
    # More than three path segments is rejected.
    templ['actions'][0]['types'] = ['Effects/CustomB/CustomA/CustomD']
    self.assertRaises(PolicyValidationError, self.load_policy, templ)
    # An empty types list fails schema validation.
    templ['actions'][0]['types'] = []
    self.assertRaises(
        PolicyValidationError, self.load_policy, templ, validate=True)
def test_s3_bucket_arn(self):
    """format_resource builds the canonical S3 bucket ARN from the name."""
    policy = self.load_policy({
        'name': 's3',
        'resource': 's3',
        'actions': [
            {'type': 'post-finding',
             'types': [
                 "Software and Configuration Checks/AWS Security Best Practices/Network Reachability"  # NOQA
             ]}]})
    post_finding = policy.resource_manager.actions[0]
    resource = post_finding.format_resource(
        {'Name': 'xyz', 'CreationDate': 'xtf'})
    self.assertEqual(resource['Id'], "arn:aws:s3:::xyz")
def test_bucket(self):
    """post-finding on an S3 bucket creates a finding keyed by bucket ARN."""
    factory = self.replay_flight_data("test_security_hub_bucket")
    policy = self.load_policy(
        {
            "name": "s3-finding",
            "resource": "s3",
            "filters": [],
            "actions": [
                {
                    "type": "post-finding",
                    'description': 'This one is important',
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices/Network Reachability"  # NOQA
                    ],
                }
            ],
        },
        config={"account_id": "644160558196"},
        session_factory=factory,
    )

    # Stub resource discovery with a fixed bucket record.
    def resources():
        return [
            {
                "Name": "c7n-test-public-bucket",
                "CreationDate": "2018-11-26T23:04:52.000Z",
            }
        ]

    self.patch(policy.resource_manager, "resources", resources)
    resources = policy.run()
    self.assertEqual(len(resources), 1)

    # Verify the finding landed in Security Hub with the expected resource.
    client = factory().client("securityhub")
    findings = client.get_findings(
        Filters={
            "ResourceAwsS3BucketOwnerId": [
                {"Value": "Unknown", "Comparison": "EQUALS"}
            ],
            "ResourceId": [
                {
                    "Value": "arn:aws:::c7n-test-public-bucket",
                    "Comparison": "EQUALS",
                }
            ],
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Details": {"AwsS3Bucket": {"OwnerId": "Unknown"}},
            "Id": "arn:aws:::c7n-test-public-bucket",
            "Region": "us-east-1",
            "Type": "AwsS3Bucket",
        },
    )
def test_lambda(self):
    # test lambda function via post finding gets tagged with finding id
    factory = self.replay_flight_data('test_security_hub_lambda')
    client = factory().client('lambda')
    func = client.get_function(FunctionName='check')['Configuration']

    # Stub resource discovery with the single fetched function.
    def resources():
        return [func]

    policy = self.load_policy({
        'name': 'sec-hub-lambda',
        'resource': 'lambda',
        'actions': [
            {
                "type": "post-finding",
                "severity": 10,
                "severity_normalized": 10,
                "severity_label": "INFORMATIONAL",
                "types": [
                    "Software and Configuration Checks/AWS Security Best Practices"
                ],
            }]},
        config={"account_id": "644160558196", 'region': 'us-east-2'},
        session_factory=factory)
    self.patch(policy.resource_manager, "resources", resources)
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    # The c7n:FindingId:<policy> tag holds the finding id before the first ':'.
    func_post_exec = client.get_function(FunctionName='check')
    self.assertEqual(
        func_post_exec['Tags']['c7n:FindingId:sec-hub-lambda'].split(":", 1)[0],
        LambdaFindingId)
def test_lambda_update(self):
    # test lambda function via post finding, uses tag to update finding.
    factory = self.replay_flight_data('test_security_hub_lambda_update')
    client = factory().client("securityhub", region_name='us-east-2')
    # Snapshot the finding before the policy runs so we can later prove
    # it was updated in place rather than duplicated.
    finding_v1 = client.get_findings(
        Filters={
            "Id": [{
                "Value": LambdaFindingId,
                "Comparison": "EQUALS",
            }]}).get("Findings")[0]
    lambda_client = factory().client('lambda')
    func = lambda_client.get_function(FunctionName='check')['Configuration']

    def resources():
        # Serve the recorded function config instead of a live describe call.
        return [func]

    policy = self.load_policy({
        'name': 'sec-hub-lambda',
        'resource': 'lambda',
        'actions': [{
            "type": "post-finding",
            "severity": 10,
            "severity_normalized": 10,
            "types": [
                "Software and Configuration Checks/AWS Security Best Practices"
            ],
        }]},
        config={"account_id": "644160558196", 'region': 'us-east-2'},
        session_factory=factory)
    self.patch(policy.resource_manager, "resources", resources)
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    if self.recording:
        # Security Hub is eventually consistent; when recording fresh flight
        # data give the update time to land (a no-op during replay).
        time.sleep(16)
    finding_v2 = client.get_findings(
        Filters={
            "Id": [{
                "Value": LambdaFindingId,
                "Comparison": "EQUALS",
            }]}).get("Findings")[0]
    # Same finding id, newer timestamp -> the tagged finding was updated.
    self.assertNotEqual(finding_v1['UpdatedAt'], finding_v2['UpdatedAt'])
def test_instance(self):
    """post-finding on an ec2 instance produces an AwsEc2Instance finding."""
    factory = self.replay_flight_data("test_security_hub_instance")
    policy = self.load_policy(
        {
            "name": "ec2-finding",
            "resource": "ec2",
            "filters": [],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        config={"account_id": "644160558196"},
        session_factory=factory,
    )
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    client = factory().client("securityhub")
    # Fetch back the finding just posted, keyed by the instance arn.
    findings = client.get_findings(
        Filters={
            "ResourceId": [
                {
                    "Value": "arn:aws:us-east-1:644160558196:instance/i-0fdc9cff318add68f",
                    "Comparison": "EQUALS",
                }
            ]
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    # Instances get the typed AwsEc2Instance detail schema (not "Other").
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Details": {
                "AwsEc2Instance": {
                    "IamInstanceProfileArn": "arn:aws:iam::644160558196:instance-profile/ecsInstanceRole",  # NOQA
                    "ImageId": "ami-0ac019f4fcb7cb7e6",
                    "IpV4Addresses": ["10.205.2.134"],
                    "LaunchedAt": "2018-11-28T22:53:09+00:00",
                    "SubnetId": "subnet-07c118e47bb84cee7",
                    "Type": "t2.micro",
                    "VpcId": "vpc-03005fb9b8740263d",
                }
            },
            "Id": "arn:aws:us-east-1:644160558196:instance/i-0fdc9cff318add68f",
            "Region": "us-east-1",
            "Tags": {"CreatorName": "kapil", "Name": "bar-run"},
            "Type": "AwsEc2Instance",
        },
    )
def test_instance_findings_filter(self):
    """EC2 instances can be selected by their Security Hub findings."""
    session_factory = self.replay_flight_data(
        "test_security_hub_instance_findings_filter")
    finding_query = {
        "Type": [{
            "Value": "Software and Configuration Checks/AWS Security Best Practices",  # NOQA
            "Comparison": "EQUALS",
        }]
    }
    policy = self.load_policy(
        {
            "name": "ec2-findings-filter",
            "resource": "ec2",
            "filters": [{"type": "finding", "query": finding_query}],
        },
        config={"account_id": "101010101111"},
        session_factory=session_factory,
    )
    self.assertEqual(len(policy.run()), 1)
def test_alb_findings_filter(self):
    """App ELBs can be selected by their Security Hub findings."""
    session_factory = self.replay_flight_data("test_security_hub_alb_findings_filter")
    finding_query = {
        "Type": [{
            "Value": "Software and Configuration Checks/AWS Security Best Practices",  # NOQA
            "Comparison": "EQUALS",
        }]
    }
    policy = self.load_policy(
        {
            "name": "alb-findings-filter",
            "resource": "app-elb",
            "filters": [{"type": "finding", "query": finding_query}],
        },
        config={"account_id": "101010101111"},
        session_factory=session_factory,
    )
    self.assertEqual(len(policy.run()), 1)
def test_finding_ec2_arn(self):
    """format_resource renders a bare ec2 instance id as a full arn."""
    # Reuse another test's recorded data to get an ec2 instance.
    # Not a best practice; avoid if practical.
    session_factory = self.replay_flight_data("test_security_hub_instance")
    ec2 = session_factory().client('ec2')
    reservations = ec2.describe_instances().get('Reservations')
    instance = reservations[0]['Instances'][0]
    policy = self.load_policy({
        'name': 'ec2',
        'resource': 'ec2',
        'actions': [{
            'type': 'post-finding',
            'severity': 10,
            'types': ["Software and Configuration Checks/AWS Security Best Practices"]}]},
        config={'region': 'us-east-1', 'account_id': '644160558196'})
    action = policy.resource_manager.actions.pop()
    formatted = action.format_resource(instance)
    self.assertEqual(
        formatted['Id'],
        'arn:aws:ec2:us-east-1:644160558196:instance/i-0fdc9cff318add68f')
def test_iam_user(self):
    """post-finding on an iam-user falls back to the generic Other schema."""
    factory = self.replay_flight_data("test_security_hub_iam_user")
    policy = self.load_policy(
        {
            "name": "iam-user-finding",
            "resource": "iam-user",
            "filters": [],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        config={"account_id": "101010101111"},
        session_factory=factory,
    )
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    client = factory().client("securityhub")
    # Fetch back the finding just posted, keyed by the user arn.
    findings = client.get_findings(
        Filters={
            "ResourceId": [
                {
                    "Value": "arn:aws:iam::101010101111:user/developer",
                    "Comparison": "EQUALS",
                }
            ]
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    # iam-user has no typed detail schema, so details land under "Other".
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Region": "us-east-1",
            "Type": "Other",
            "Id": "arn:aws:iam::101010101111:user/developer",
            "Details": {
                "Other": {
                    "CreateDate": "2016-09-10T15:45:42+00:00",
                    "UserId": "AIDAJYFPV7WUG3EV7MIIO"
                }
            }
        }
    )
def test_iam_profile(self):
    """post-finding on an iam instance profile uses the Other schema."""
    factory = self.replay_flight_data("test_security_hub_iam_profile")
    policy = self.load_policy(
        {
            "name": "iam-profile-finding",
            "resource": "iam-profile",
            "filters": [{
                "type": "value",
                "key": "InstanceProfileName",
                "value": "CloudCustodian"
            }],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        config={"account_id": "101010101111"},
        session_factory=factory,
    )
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    client = factory().client("securityhub")
    # Fetch back the finding just posted, keyed by the profile arn.
    findings = client.get_findings(
        Filters={
            "ResourceId": [
                {
                    "Value": "arn:aws:iam::101010101111:instance-profile/CloudCustodian",
                    "Comparison": "EQUALS",
                }
            ]
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    # Note the c7n:MatchedFilters annotation added by the value filter is
    # serialized into the finding details as well.
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Region": "us-east-1",
            "Type": "Other",
            "Id": "arn:aws:iam::101010101111:instance-profile/CloudCustodian",
            "Details": {
                "Other": {
                    "InstanceProfileId": "AIPAJO63EBUVI2SO6IJFI",
                    "CreateDate": "2018-08-19T22:32:30+00:00",
                    "InstanceProfileName": "CloudCustodian",
                    "c7n:MatchedFilters": "[\"InstanceProfileName\"]"
                }
            }
        }
    )
def test_account(self):
    """post-finding on the account pseudo-resource emits an Other finding."""
    factory = self.replay_flight_data("test_security_hub_account")
    policy = self.load_policy(
        {
            "name": "account-finding",
            "resource": "account",
            "filters": [],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        config={"account_id": "101010101111"},
        session_factory=factory,
    )
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    client = factory().client("securityhub")
    # The account resource has no service arn; its id is a bare account arn.
    findings = client.get_findings(
        Filters={
            "ResourceId": [
                {
                    "Value": "arn:aws:::101010101111:",
                    "Comparison": "EQUALS"
                }
            ]
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Region": "us-east-1",
            "Type": "Other",
            "Id": "arn:aws:::101010101111:",
            "Details": {
                "Other": {
                    "account_name": "filiatra-primary"
                }
            }
        }
    )
def test_rds(self):
    """post-finding on an rds instance serializes details under Other."""
    factory = self.replay_flight_data("test_security_hub_rds")
    policy = self.load_policy(
        {
            "name": "rds-finding",
            "resource": "rds",
            "filters": [
            ],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        config={"account_id": "101010101111"},
        session_factory=factory,
    )
    resources = policy.run()
    self.assertEqual(len(resources), 1)
    client = factory().client("securityhub")
    # Fetch back the finding just posted, keyed by the db instance arn.
    findings = client.get_findings(
        Filters={
            "ResourceId": [
                {
                    "Value": "arn:aws:rds:us-east-1:101010101111:db:testme",
                    "Comparison": "EQUALS",
                }
            ]
        }
    ).get("Findings")
    self.assertEqual(len(findings), 1)
    # Non-string attributes (booleans, ints, datetimes) are stringified
    # when rendered into the Other detail map; tags are carried separately.
    self.assertEqual(
        findings[0]["Resources"][0],
        {
            "Details": {
                "Other": {
                    "Engine": "mariadb",
                    "VpcId": "vpc-d6fe6cb1",
                    "PubliclyAccessible": "False",
                    "DBName": "testme",
                    "AvailabilityZone": "us-east-1a",
                    "InstanceCreateTime": "2018-11-05T03:25:12.384000+00:00",
                    "StorageEncrypted": "False",
                    "AllocatedStorage": "20",
                    "EngineVersion": "10.3.8",
                    "DBInstanceClass": "db.t2.micro",
                    "DBSubnetGroupName": "default"
                }
            },
            "Region": "us-east-1",
            "Type": "Other",
            "Id": "arn:aws:rds:us-east-1:101010101111:db:testme",
            "Tags": {
                "workload-type": "other"}
        })
def test_larger_batch_s3(self):
    """post-finding honors batch_size, importing findings in chunks."""
    session_factory = self.replay_flight_data("test_larger_batch")
    policy = self.load_policy(
        {
            "name": "ebs-finding",
            "resource": "ebs-snapshot",
            "filters": [],
            "actions": [
                {
                    "type": "post-finding",
                    "severity": 10,
                    "severity_normalized": 10,
                    "batch_size": 2,
                    "title": "EBS Testing",
                    "types": [
                        "Software and Configuration Checks/AWS Security Best Practices"
                    ],
                }
            ],
        },
        session_factory=session_factory,
    )
    matched = policy.run()
    self.assertEqual(len(matched), 2)

    # Both snapshots should have produced findings under the custom title.
    hub = session_factory().client("securityhub")
    title_filter = {
        "Title": [{"Value": "EBS Testing", "Comparison": "EQUALS"}]
    }
    findings = hub.get_findings(Filters=title_filter).get("Findings")
    self.assertEqual(len(findings), 2)
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``requirements`` and
    ``requirements_html`` text fields to ``CharacterClassVariant``.

    The ``models`` attribute below is South's frozen snapshot of every
    model in the ``dnd`` app at the time this migration was generated;
    it is consumed by South, not meant to be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create both new columns."""
        # Adding field 'CharacterClassVariant.requirements'
        db.add_column('dnd_characterclassvariant', 'requirements', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)

        # Adding field 'CharacterClassVariant.requirements_html'
        db.add_column('dnd_characterclassvariant', 'requirements_html', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop both columns (data is lost)."""
        # Deleting field 'CharacterClassVariant.requirements'
        db.delete_column('dnd_characterclassvariant', 'requirements')

        # Deleting field 'CharacterClassVariant.requirements_html'
        db.delete_column('dnd_characterclassvariant', 'requirements_html')

    # Frozen ORM definitions (auto-generated by South; do not hand-edit).
    models = {
        'dnd.characterclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.characterclassvariant': {
            'Meta': {'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
            'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'alignment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'class_features_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False'}),
            'hit_die': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'requirements_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'dnd.dndedition': {
            'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
            'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
        },
        'dnd.domain': {
            'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.feat': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
            'benefit': ('django.db.models.fields.TextField', [], {}),
            'benefit_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'normal_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'}),
            'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'dnd.featcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.featrequiresfeat': {
            'Meta': {'object_name': 'FeatRequiresFeat'},
            'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
            'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
        },
        'dnd.featrequiresskill': {
            'Meta': {'object_name': 'FeatRequiresSkill'},
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
        },
        'dnd.featspecialfeatprerequisite': {
            'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
            'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
        },
        'dnd.newsentry': {
            'Meta': {'ordering': "['published']", 'object_name': 'NewsEntry'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'body_html': ('django.db.models.fields.TextField', [], {}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'published': ('django.db.models.fields.DateField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'dnd.race': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
            'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'racial_traits': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
            'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        'dnd.racefavoredcharacterclass': {
            'Meta': {'object_name': 'RaceFavoredCharacterClass'},
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
        },
        'dnd.racesize': {
            'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        },
        'dnd.racespeed': {
            'Meta': {'object_name': 'RaceSpeed'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
            'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
        },
        'dnd.racespeedtype': {
            'Meta': {'ordering': "['name', 'extra']", 'object_name': 'RaceSpeedType'},
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
        },
        'dnd.rulebook': {
            'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
        },
        'dnd.skill': {
            'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
            'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
            'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'dnd.skillvariant': {
            'Meta': {'unique_together': "(('skill', 'rulebook'),)", 'object_name': 'SkillVariant'},
            'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'action_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'check_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
            'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'synergy_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'try_again_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'untrained_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'dnd.specialfeatprerequisite': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'dnd.spell': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
            'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
            'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'dnd.spellclasslevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spelldescriptor': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.spelldomainlevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spellschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.spellsubschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.staticpage': {
            'Meta': {'object_name': 'StaticPage'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'body_html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
        },
        'dnd.textfeatprerequisite': {
            'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }

    complete_apps = ['dnd']
| |
from __future__ import absolute_import
import urwid
import urwid.util
import os
from netlib.http import CONTENT_MISSING
import netlib.utils
from .. import utils
from ..models import decoded
from . import signals
try:
import pyperclip
except:
pyperclip = False
# Indexes identifying which half of a flow a view is showing.
VIEW_FLOW_REQUEST = 0
VIEW_FLOW_RESPONSE = 1

# (label, shortcut key) pairs offered when prompting for an HTTP method.
METHOD_OPTIONS = [
    ("get", "g"),
    ("post", "p"),
    ("put", "u"),
    ("head", "h"),
    ("trace", "t"),
    ("delete", "d"),
    ("options", "o"),
    ("edit raw", "e"),
]
def is_keypress(k):
    """
    Is this input event a keypress?

    urwid delivers keypresses as strings; other input events (e.g. mouse
    events) arrive as tuples. Returns an explicit boolean instead of
    falling off the end and implicitly returning None.
    """
    return isinstance(k, basestring)
def highlight_key(str, key, textattr="text", keyattr="key"):
    """
    Split the string around the first occurrence of key and return urwid
    markup - a list of (attribute, text) tuples - with key rendered in
    keyattr and the rest in textattr.

    NOTE: the first parameter shadows the builtin ``str``; the name is
    kept to preserve the public signature for keyword callers.
    """
    parts = str.split(key, 1)
    if len(parts) == 1:
        # key does not occur in the string; previously this raised
        # IndexError on parts[1]. Return the whole string un-highlighted.
        return [(textattr, str)]
    l = []
    if parts[0]:
        l.append((textattr, parts[0]))
    l.append((keyattr, key))
    if parts[1]:
        l.append((textattr, parts[1]))
    return l
# Upper bound on the key-column width used by format_keyvals.
KEY_MAX = 30
def format_keyvals(lst, key="key", val="text", indent=0):
    """
    Format a list of (key, value) tuples.

    If key is None, it's treated specially:
      - We assume a sub-value, and add an extra indent.
      - The value is treated as a pre-formatted list of directives.
    """
    if not lst:
        return []
    # Key column width: the longest key present, capped at KEY_MAX.
    widest = max(len(entry[0]) for entry in lst if entry and entry[0])
    keycol = min(widest, KEY_MAX)
    ret = []
    for kv in lst:
        if kv is None:
            # A None entry renders as a blank separator row.
            ret.append(urwid.Text(""))
            continue
        value = kv[1]
        if isinstance(value, urwid.Widget):
            rendered = value
        elif value is None:
            rendered = urwid.Text("")
        else:
            rendered = urwid.Text([(val, value)])
        ret.append(
            urwid.Columns(
                [
                    ("fixed", indent, urwid.Text("")),
                    ("fixed", keycol, urwid.Text([(key, kv[0] or "")])),
                    rendered,
                ],
                dividechars = 2
            )
        )
    return ret
def shortcuts(k):
    """Translate vi-style navigation keys to their urwid equivalents."""
    return {
        " ": "page down",
        "j": "down",
        "k": "up",
    }.get(k, k)
def fcol(s, attr):
    """Render s as a fixed-width urwid column entry styled with attr."""
    text = unicode(s)
    markup = urwid.Text([(attr, text)])
    return ("fixed", len(text), markup)
# Pick pretty glyphs when the detected terminal encoding supports them,
# with plain ASCII fallbacks otherwise.
if urwid.util.detected_encoding:
    SYMBOL_REPLAY = u"\u21ba"
    SYMBOL_RETURN = u"\u2190"
    SYMBOL_MARK = u"\u25cf"
else:
    SYMBOL_REPLAY = u"[r]"
    SYMBOL_RETURN = u"<-"
    SYMBOL_MARK = u"[m]"  # unicode literal, consistent with its siblings
def raw_format_flow(f, focus, extended, padding):
    """
    Build the two-row urwid widget (request line + response line) for one
    flow, from the plain dict produced by format_flow().

    :param f: iterable of (key, value) pairs describing the flow (see
        format_flow); converted back to a dict here because the cache key
        must be hashable.
    :param focus: whether this flow is the focused row.
    :param extended: show the request timestamp instead of the focus marker.
    :param padding: currently unused here — presumably reserved for layout;
        kept for cache-key compatibility (TODO confirm).
    """
    f = dict(f)
    pile = []
    req = []
    if extended:
        req.append(
            fcol(
                utils.format_timestamp(f["req_timestamp"]),
                "highlight"
            )
        )
    else:
        req.append(fcol(">>" if focus else " ", "focus"))
    if f["marked"]:
        req.append(fcol(SYMBOL_MARK, "mark"))
    if f["req_is_replay"]:
        req.append(fcol(SYMBOL_REPLAY, "replay"))
    req.append(fcol(f["req_method"], "method"))
    # Width of everything before the URL, used to indent the response row.
    preamble = sum(i[1] for i in req) + len(req) - 1
    if f["intercepted"] and not f["acked"]:
        uc = "intercept"
    elif f["resp_code"] or f["err_msg"]:
        uc = "text"
    else:
        uc = "title"
    req.append(
        urwid.Text([(uc, f["req_url"])])
    )
    pile.append(urwid.Columns(req, dividechars=1))
    resp = []
    resp.append(
        ("fixed", preamble, urwid.Text(""))
    )
    if f["resp_code"]:
        # Color the status symbol by status-code class (2xx/3xx/4xx/5xx).
        codes = {
            2: "code_200",
            3: "code_300",
            4: "code_400",
            5: "code_500",
        }
        # NOTE(review): relies on Python 2 integer division; under
        # Python 3 this would be a float key and always miss — confirm
        # before porting.
        ccol = codes.get(f["resp_code"] / 100, "code_other")
        resp.append(fcol(SYMBOL_RETURN, ccol))
        if f["resp_is_replay"]:
            resp.append(fcol(SYMBOL_REPLAY, "replay"))
        resp.append(fcol(f["resp_code"], ccol))
        if f["intercepted"] and f["resp_code"] and not f["acked"]:
            rc = "intercept"
        else:
            rc = "text"
        if f["resp_ctype"]:
            resp.append(fcol(f["resp_ctype"], rc))
        resp.append(fcol(f["resp_clen"], rc))
        resp.append(fcol(f["roundtrip"], rc))
    elif f["err_msg"]:
        resp.append(fcol(SYMBOL_RETURN, "error"))
        resp.append(
            urwid.Text([
                (
                    "error",
                    f["err_msg"]
                )
            ])
        )
    pile.append(urwid.Columns(resp, dividechars=1))
    return urwid.Pile(pile)
# Save file to disk
def save_data(path, data, master, state):
    """
    Write *data* to *path*, reporting I/O failures via the status bar.

    :param path: destination file path; a falsy path is silently ignored.
    :param data: bytes to write.
    :param master: unused here; kept for callback-signature compatibility.
    :param state: unused here; kept for callback-signature compatibility.
    """
    if not path:
        return
    try:
        # open() instead of the Python-2-only file() builtin; the context
        # manager guarantees the handle is closed even if write() fails.
        with open(path, "wb") as f:
            f.write(data)
    except IOError as v:
        signals.status_message.send(message=v.strerror)
def ask_save_overwite(path, data, master, state):
    """
    Save *data* to *path*, prompting for confirmation first if the file
    already exists.

    (Function name typo preserved: callers reference it by this name.)
    """
    if not path:
        return
    path = os.path.expanduser(path)
    if os.path.exists(path):
        def confirm(k):
            # Only an explicit "yes" overwrites; any other key aborts.
            if k == "y":
                save_data(path, data, master, state)
        signals.status_prompt_onekey.send(
            # Fixed typo in the user-facing prompt ("Overwite" -> "Overwrite").
            prompt = "'" + path + "' already exists. Overwrite?",
            keys = (
                ("yes", "y"),
                ("no", "n"),
            ),
            callback = confirm
        )
    else:
        save_data(path, data, master, state)
def ask_save_path(prompt, data, master, state):
    """
    Prompt the user for a destination path, then hand off to
    ask_save_overwite (which confirms before clobbering existing files).
    """
    signals.status_prompt_path.send(
        prompt = prompt,
        callback = ask_save_overwite,
        # Extra args forwarded to the callback after the entered path.
        args = (data, master, state)
    )
def copy_flow_format_data(part, scope, flow):
    """
    Assemble the text to be copied for a flow.

    :param part: "u" (url), "h" (headers+content) or "c" (content only).
    :param scope: "q" (request), "s" (response) or "a" (both).
    :return: (data, False) on success, or (None, error_message) when the
        requested content is missing.
    :raises ValueError: if *part* is not one of the known selectors.
    """
    if part == "u":
        data = flow.request.url
    else:
        data = ""
        if scope in ("q", "a"):
            if flow.request.content is None or flow.request.content == CONTENT_MISSING:
                return None, "Request content is missing"
            # decoded() temporarily removes content-encoding for assembly.
            with decoded(flow.request):
                if part == "h":
                    data += flow.client_conn.protocol.assemble(flow.request)
                elif part == "c":
                    data += flow.request.content
                else:
                    raise ValueError("Unknown part: {}".format(part))
        if scope == "a" and flow.request.content and flow.response:
            # Add padding between request and response
            data += "\r\n" * 2
        if scope in ("s", "a") and flow.response:
            if flow.response.content is None or flow.response.content == CONTENT_MISSING:
                return None, "Response content is missing"
            with decoded(flow.response):
                if part == "h":
                    data += flow.client_conn.protocol.assemble(flow.response)
                elif part == "c":
                    data += flow.response.content
                else:
                    raise ValueError("Unknown part: {}".format(part))
    return data, False
def copy_flow(part, scope, flow, master, state):
    """
    Copy part of a flow to the system clipboard, falling back to an
    offer to save to file when the clipboard is unavailable.

    part: _c_ontent, _h_eaders+content, _u_rl
    scope: _a_ll, re_q_uest, re_s_ponse
    """
    data, err = copy_flow_format_data(part, scope, flow)
    if err:
        signals.status_message.send(message=err)
        return
    if not data:
        if scope == "q":
            signals.status_message.send(message="No request content to copy.")
        elif scope == "s":
            signals.status_message.send(message="No response content to copy.")
        else:
            signals.status_message.send(message="No contents to copy.")
        return
    # pyperclip calls encode('utf-8') on data to be copied without checking.
    # if data are already encoded that way UnicodeDecodeError is thrown.
    toclip = ""
    try:
        toclip = data.decode('utf-8')
    except (UnicodeDecodeError):
        toclip = data
    try:
        # pyperclip may be False (import failed) -> AttributeError below.
        pyperclip.copy(toclip)
    except (RuntimeError, UnicodeDecodeError, AttributeError):
        # Clipboard unavailable: offer to save to a file instead.
        def save(k):
            if k == "y":
                ask_save_path("Save data", data, master, state)
        signals.status_prompt_onekey.send(
            prompt = "Cannot copy data to clipboard. Save as file?",
            keys = (
                ("yes", "y"),
                ("no", "n"),
            ),
            callback = save
        )
def ask_copy_part(scope, flow, master, state):
    """
    Ask the user which part of the flow to copy (content, headers+content,
    or — except for response scope — the URL), then dispatch to copy_flow.
    """
    choices = [
        ("content", "c"),
        ("headers+content", "h")
    ]
    # A response has no URL of its own.
    if scope != "s":
        choices.append(("url", "u"))
    signals.status_prompt_onekey.send(
        prompt = "Copy",
        keys = choices,
        callback = copy_flow,
        args = (scope, flow, master, state)
    )
def ask_save_body(part, master, state, flow):
    """
    Save either the request or the response body to disk. part can either be
    "q" (request), "s" (response) or None (ask user if necessary).
    """
    request_has_content = flow.request and flow.request.content
    response_has_content = flow.response and flow.response.content
    if part is None:
        # We first need to determine whether we want to save the request or the
        # response content.
        if request_has_content and response_has_content:
            signals.status_prompt_onekey.send(
                prompt = "Save",
                keys = (
                    ("request", "q"),
                    ("response", "s"),
                ),
                # Re-enters this function with part set to the chosen key.
                callback = ask_save_body,
                args = (master, state, flow)
            )
        elif response_has_content:
            ask_save_body("s", master, state, flow)
        else:
            ask_save_body("q", master, state, flow)
    elif part == "q" and request_has_content:
        ask_save_path(
            "Save request content",
            flow.request.get_decoded_content(),
            master,
            state
        )
    elif part == "s" and response_has_content:
        ask_save_path(
            "Save response content",
            flow.response.get_decoded_content(),
            master,
            state
        )
    else:
        signals.status_message.send(message="No content to save.")
# Rendered flow widgets are cached: building urwid piles is relatively
# expensive and the same flow is redrawn many times.
flowcache = utils.LRUCache(800)
def format_flow(f, focus, extended=False, hostheader=False, padding=2,
                marked=False):
    """
    Render a flow as an urwid widget, via a cache keyed on the flow's
    displayable attributes.

    :param f: the flow object.
    :param focus: whether this row currently has focus.
    :param extended: show request timestamp instead of the focus marker.
    :param hostheader: use the Host-header-based ("pretty") URL.
    :param padding: forwarded to raw_format_flow (part of the cache key).
    :param marked: whether the flow is marked in the UI.
    """
    # Flatten the flow into plain values so the tuple of sorted items can
    # serve as a hashable cache key.
    d = dict(
        intercepted = f.intercepted,
        acked = f.reply.acked,
        req_timestamp = f.request.timestamp_start,
        req_is_replay = f.request.is_replay,
        req_method = f.request.method,
        req_url = f.request.pretty_url if hostheader else f.request.url,
        err_msg = f.error.msg if f.error else None,
        resp_code = f.response.status_code if f.response else None,
        marked = marked,
    )
    if f.response:
        if f.response.content:
            contentdesc = netlib.utils.pretty_size(len(f.response.content))
        elif f.response.content == CONTENT_MISSING:
            contentdesc = "[content missing]"
        else:
            contentdesc = "[no content]"
        duration = 0
        if f.response.timestamp_end and f.request.timestamp_start:
            duration = f.response.timestamp_end - f.request.timestamp_start
        roundtrip = utils.pretty_duration(duration)
        d.update(dict(
            resp_code = f.response.status_code,
            resp_is_replay = f.response.is_replay,
            resp_clen = contentdesc,
            roundtrip = roundtrip,
        ))
        # Strip charset etc.: only the media type is displayed.
        t = f.response.headers.get("content-type")
        if t:
            d["resp_ctype"] = t.split(";")[0]
        else:
            d["resp_ctype"] = ""
    return flowcache.get(
        raw_format_flow,
        tuple(sorted(d.items())), focus, extended, padding
    )
| |
"""
(c) RIKEN 2017. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import iotbx.phil
from cctbx import sgtbx
from dxtbx.model.experiment_list import ExperimentListFactory
from dxtbx.model.experiment_list import ExperimentListDumper
from dxtbx.model import Crystal
from yamtbx.dataproc.xds import integrate_hkl_as_flex
from yamtbx.dataproc.xds.idxreflp import SpotXds
from yamtbx.dataproc.xds.xparm import prep_xparm_objects_from_integrate_lp
from dials.array_family import flex
from dials.algorithms.centroid import centroid_px_to_mm_panel
import os
import copy
master_params_str="""\
xds_inp = None
.type = path
xparm = None
.type = path
integrate_lp = None
.type = path
integrate_hkl = None
.type = path
spot_xds = None
.type = path
space_group = None
.type = str
reindex_op = None
.type = str
out_prefix = None
.type = str
out_dir = "."
.type = path
"""
def import_integrated(integrate_hkl, min_ios=3):
    """
    Read an XDS INTEGRATE.HKL file into a DIALS reflection table.

    :param integrate_hkl: path to INTEGRATE.HKL.
    :param min_ios: minimum I/sigma(I) for a reflection to be kept.
    :return: dials.array_family.flex.reflection_table with indexed+strong
        flags set and dummy variance/s1 columns.
    """
    reader = integrate_hkl_as_flex.reader(integrate_hkl, "IOBS,SIGMA,XCAL,YCAL,ZCAL,XOBS,YOBS,ZOBS,ISEG".split(","))
    # reference: dials/command_line/import_xds.py
    table = flex.reflection_table()
    table["id"] = flex.int(len(reader.hkl), 0)
    table["panel"] = flex.size_t(len(reader.hkl), 0) # only assuming single panel
    table["miller_index"] = reader.hkl
    table["xyzcal.px.value"] = flex.vec3_double(reader.data["XCAL"], reader.data["YCAL"], flex.double(len(reader.hkl), 0))
    table["xyzcal.px"] = table["xyzcal.px.value"]
    table["xyzobs.px.value"] = flex.vec3_double(reader.data["XOBS"], reader.data["YOBS"], flex.double(len(reader.hkl), 0))
    table["intensity.sum.value"] = reader.data["IOBS"]
    table["intensity.sum.sigma"] = reader.data["SIGMA"] # not valid name, just for making selection
    #table["intensity.sum.variance"] = table["intensity.sum.sigma"]**2
    table["flags"] = flex.size_t(len(table), table.flags.indexed | table.flags.strong)
    # Filter: positive sigma, I/sigma cutoff, and valid observed centroids.
    table = table.select(table["intensity.sum.sigma"] > 0)
    table = table.select(table["intensity.sum.value"]/table["intensity.sum.sigma"] >= min_ios)
    table = table.select(table["xyzobs.px.value"].norms() > 0) # remove invalid xyzobs
    # dummy
    table["xyzobs.px.variance"] = flex.vec3_double(len(table), (1,1,1)) # TODO appropriate variance value
    table["s1"] = flex.vec3_double(len(table), (0,0,0)) # will be updated by set_obs_s1()
    # Intensity columns were only needed for the selection above.
    del table["intensity.sum.value"]
    del table["intensity.sum.sigma"]
    return table
# import_integrated()
def import_spot_xds(spot_xds):
    """
    Read an XDS SPOT.XDS file into a DIALS reflection table, keeping only
    spots with a non-null, non-(0,0,0) Miller index (i.e. indexed spots).
    """
    sx = SpotXds(spot_xds)
    spots = filter(lambda x: x[-1][0] is not None and not (x[-1][0]==x[-1][1]==x[-1][2]==0), sx.items)
    # reference: dials/command_line/import_xds.py
    table = flex.reflection_table()
    table["id"] = flex.int(len(spots), 0)
    table["panel"] = flex.size_t(len(spots), 0) # only assuming single panel
    table["miller_index"] = flex.miller_index(map(lambda x: x[-1], spots))
    table["xyzobs.px.value"] = flex.vec3_double(map(lambda x: (x[0][0], x[0][1], 0.), spots))
    table["flags"] = flex.size_t(len(table), table.flags.indexed | table.flags.strong)
    # dummy
    table["xyzobs.px.variance"] = flex.vec3_double(len(table), (1,1,1)) # TODO appropriate variance value
    table["s1"] = flex.vec3_double(len(table), (0,0,0)) # will be updated by set_obs_s1()
    return table
# import_spot_xds()
def px_to_mm(experiment, table):
    """
    Add mm-space centroid columns (xyzobs.mm.* and, when present,
    xyzcal.mm) to *table* in place, converting from pixel coordinates
    using the experiment's detector/scan models.
    """
    # reference: dials/algorithms/indexing/indexer.py map_spots_pixel_to_mm_rad()
    centroid_position, centroid_variance, _ = centroid_px_to_mm_panel(experiment.detector[0], experiment.scan,
                                                                      table['xyzobs.px.value'],
                                                                      table['xyzobs.px.variance'],
                                                                      flex.vec3_double(len(table), (1,1,1)))
    # Removed stray `print centroid_position` debug statement left over
    # from development (it also breaks parsing under Python 3).
    table['xyzobs.mm.value'] = centroid_position
    table['xyzobs.mm.variance'] = centroid_variance
    if "xyzcal.px.value" not in table:
        return
    centroid_position, centroid_variance, _ = centroid_px_to_mm_panel(experiment.detector[0], experiment.scan,
                                                                      table['xyzcal.px.value'],
                                                                      flex.vec3_double(len(table), (1,1,1)),
                                                                      flex.vec3_double(len(table), (1,1,1)))
    table['xyzcal.mm'] = centroid_position
# px_to_mm()
def import_xds_as_still(xdsinp, xparm_in):
    """
    Build a "still" ExperimentList (no goniometer, no scan) from XDS
    input files.

    :param xdsinp: path to XDS.INP (supplies image template / data range /
        detector name / sensor parameters).
    :param xparm_in: path to an XPARM.XDS-style file readable by dxtbx
        (supplies beam, detector and crystal models).
    :return: ExperimentList with exactly one experiment.
    """
    #from dxtbx.serialize import xds
    from dxtbx.datablock import DataBlockFactory
    # Get the sweep from the XDS files
    #sweep = xds.to_imageset(xds_inp, xds_other)
    from iotbx.xds import xds_inp
    from dxtbx.imageset import ImageSetFactory
    import dxtbx
    # Read the input filename
    handle = xds_inp.reader()
    handle.read_file(xdsinp)
    # Get the template
    template = handle.name_template_of_data_frames[0]
    image_range = handle.data_range
    detector_name = handle.detector
    #assert image_range[0] == image_range[1]
    # Pick the middle frame of the data range to represent the still.
    im_nr = int((image_range[1]-image_range[0]+1)/2)
    from yamtbx.dataproc.dataset import template_to_filenames
    # Create the imageset
    #imageset = ImageSetFactory.from_template(template, image_range=image_range, check_format=False)[0]
    imageset = ImageSetFactory.make_imageset([os.path.realpath(template_to_filenames(template, im_nr, im_nr)[0])])
    models = dxtbx.load(xparm_in)
    detector = models.get_detector()
    # Apply parallax correction for thick silicon sensors (PILATUS/EIGER,
    # or whenever a silicon attenuation value is given in XDS.INP).
    if detector_name.strip() in ('PILATUS', 'EIGER') or handle.silicon is not None:
        from dxtbx.model import ParallaxCorrectedPxMmStrategy
        from cctbx.eltbx import attenuation_coefficient
        if handle.silicon is None:
            table = attenuation_coefficient.get_table("Si")
            wavelength = models.get_beam().get_wavelength()
            mu = table.mu_at_angstrom(wavelength) / 10.0
        else:
            mu = handle.silicon
        t0 = handle.sensor_thickness
        for panel in detector:
            panel.set_px_mm_strategy(ParallaxCorrectedPxMmStrategy(mu, t0))
            panel.set_trusted_range((handle.minimum_valid_pixel_value, handle.overload))
    imageset.set_beam(models.get_beam())
    imageset.set_detector(detector)
    # Stills carry no rotation information.
    imageset.set_goniometer(None)
    imageset.set_scan(None)
    #imageset.set_goniometer(models.get_goniometer())
    # take the image range from XDS.INP
    #scan = models.get_scan()
    #scan.set_image_range(image_range)
    #imageset.set_scan(scan)
    from dxtbx.serialize import xds
    # Get the crystal from the XDS files
    crystal = xds.to_crystal(xparm_in)
    # Create the experiment list
    experiments = ExperimentListFactory.from_imageset_and_crystal(imageset, crystal)
    # Set the crystal in the experiment list
    assert(len(experiments) == 1)
    # Return the experiment list
    return experiments
"""
def derive_reindex_matrix(experiment, integrate_hkl):
# dials/command_line/import_xds.py
'''Derive a reindexing matrix to go from the orientation matrix used
for XDS integration to the one used for DIALS integration.'''
from scitbx import matrix
reader = integrate_hkl_as_flex.reader(integrate_hkl, [], read_data=False)
dA = matrix.sqr(experiment.crystal.get_A())
dbeam = matrix.col(experiment.beam.get_direction())
daxis = matrix.col((-1,0,0))#experiment.goniometer.get_rotation_axis())
xbeam = matrix.col(reader.beam_direction).normalize()
xaxis = matrix.col(reader.rotation_axis).normalize()
# want to align XDS -s0 vector...
from rstbx.cftbx.coordinate_frame_helpers import align_reference_frame
R = align_reference_frame(- xbeam, dbeam, xaxis, daxis)
xA = matrix.sqr(
reader.a_axis +
reader.b_axis +
reader.c_axis).inverse()
xA = R * xA
# assert that this should just be a simple integer rotation matrix
# i.e. reassignment of a, b, c so...
return matrix.sqr(map(int, map(round, (dA.inverse() * xA).elems)))
# """
def run(xds_inp, xparm=None, integrate_lp=None, integrate_hkl=None, spot_xds=None,
        space_group=None, reindex_op=None, out_prefix=None, out_dir="."):
    """
    Convert XDS processing results into DIALS-format files.

    Writes <out_prefix>experiments.json and, when the corresponding inputs
    are given, <out_prefix>integrate_hkl.pickle / <out_prefix>spot_xds.pickle
    into *out_dir*.

    :param xds_inp: path to XDS.INP.
    :param xparm: path to XPARM.XDS (may be regenerated from INTEGRATE.LP).
    :param integrate_lp: optional INTEGRATE.LP to recover per-range XPARM.
    :param integrate_hkl: optional INTEGRATE.HKL to convert.
    :param spot_xds: optional SPOT.XDS to convert.
    :param space_group: optional cctbx space group used when reindexing.
    :param reindex_op: optional sgtbx change-of-basis operator.
    :param out_prefix: filename prefix for outputs ("" when None).
    :param out_dir: output directory.
    """
    out_prefix = out_prefix+"_" if out_prefix else ""
    if integrate_lp is not None:
        xparm_objs = prep_xparm_objects_from_integrate_lp(integrate_lp, xparm_ref=xparm)
        rr, xp = xparm_objs[0]
        xparm = os.path.join(os.path.dirname(xds_inp), "XPARM.XDS_%.6d-%.6d"%rr)
        # `with` closes the handle promptly; the original left the file
        # object to be collected with the write possibly unflushed.
        with open(xparm, "w") as f:
            f.write(xp.xparm_str())
    # FIXME template of experiment.imageset could be wrong when relative path
    # and ######.h5 may need to be replaced with master.h5
    #experiments = ExperimentListFactory.from_xds(xds_inp, xparm) # XDS.INP needed for sweep info
    experiments = import_xds_as_still(xds_inp, xparm)
    assert len(experiments) == 1
    experiment = experiments[0]
    # I don't know what invalid X/Y/ZOBS values should be when multi-panel detector
    assert len(experiment.detector) == 1
    # Optionally reindex the crystal model into the requested setting.
    if None not in (space_group, reindex_op):
        cryst_orig = copy.deepcopy(experiment.crystal)
        cryst_reindexed = cryst_orig.change_basis(reindex_op)
        a, b, c = cryst_reindexed.get_real_space_vectors()
        cryst_reindexed = Crystal(a, b, c, space_group=space_group)
        experiment.crystal.update(cryst_reindexed)
    # Very dirty fix.. but no way to change template after object creation??
    json_str = ExperimentListDumper(experiments).as_json().replace("_######.h5", "_master.h5")
    with open(os.path.join(out_dir, out_prefix+"experiments.json"), "w") as f:
        f.write(json_str)
    if integrate_hkl is not None:
        table = import_integrated(integrate_hkl)
        px_to_mm(experiment, table)
        if None not in (space_group, reindex_op): table["miller_index"] = reindex_op.apply(table["miller_index"])
        table.as_pickle(os.path.join(out_dir, out_prefix+"integrate_hkl.pickle"))
    if spot_xds is not None:
        table = import_spot_xds(spot_xds)
        px_to_mm(experiment, table)
        if None not in (space_group, reindex_op): table["miller_index"] = reindex_op.apply(table["miller_index"])
        table.as_pickle(os.path.join(out_dir, out_prefix+"spot_xds.pickle"))
# run()
if __name__ == "__main__":
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
if len(args) ==1 and os.path.isdir(args[0]):
if not params.xds_inp: params.xds_inp = os.path.join(args[0], "XDS.INP")
if not params.xparm: params.xparm = os.path.join(args[0], "XPARM.XDS")
if not params.integrate_lp: params.integrate_lp = os.path.join(args[0], "INTEGRATE.LP")
if not params.integrate_hkl: params.integrate_hkl = os.path.join(args[0], "INTEGRATE.HKL")
run(xds_inp=params.xds_inp, xparm=params.xparm, integrate_lp=params.integrate_lp,
integrate_hkl=params.integrate_hkl, spot_xds=params.spot_xds,
space_group=sgtbx.space_group_info(params.space_group).group() if params.space_group else None,
reindex_op=sgtbx.change_of_basis_op(params.reindex_op) if params.reindex_op else None,
out_prefix=params.out_prefix, out_dir=params.out_dir)
| |
"""ACME Identifier Validation Challenges."""
import functools
import hashlib
import logging
import socket
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
from acme import other
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge.

    Base class for all challenge types; concrete subclasses register
    themselves into :attr:`TYPES` via ``Challenge.register``.
    """
    TYPES = {}
class ContinuityChallenge(Challenge):  # pylint: disable=abstract-method
    """Client validation challenges."""
class DVChallenge(Challenge):  # pylint: disable=abstract-method
    """Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge response.

    Base class for responses; concrete subclasses register themselves
    into :attr:`TYPES` via ``ChallengeResponse.register``.
    """
    TYPES = {}
    resource_type = 'challenge'
    resource = fields.Resource(resource_type)
@Challenge.register
class SimpleHTTP(DVChallenge):
    """ACME "simpleHttp" challenge.

    :ivar unicode token:
    """
    typ = "simpleHttp"

    # Floor division keeps TOKEN_SIZE an int on both Python 2 and 3;
    # the true-division original would yield the float 16.0 under
    # Python 3 and break the decoder's size check.
    TOKEN_SIZE = 128 // 8  # Based on the entropy value from the spec
    """Minimum size of the :attr:`token` in bytes."""

    # TODO: acme-spec doesn't specify token as base64-encoded value
    token = jose.Field(
        "token", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))

    @property
    def good_token(self):  # XXX: @token.decoder
        """Is `token` good?

        .. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
           characters", but it should also warrant that it doesn't
           contain ".." or "/"...
        """
        # TODO: check that path combined with uri does not go above
        # URI_ROOT_PATH!
        return b'..' not in self.token and b'/' not in self.token
@ChallengeResponse.register
class SimpleHTTPResponse(ChallengeResponse):
    """ACME "simpleHttp" challenge response.

    :ivar bool tls: Whether the provisioned resource is served over TLS.
    """
    typ = "simpleHttp"
    tls = jose.Field("tls", default=True, omitempty=True)

    URI_ROOT_PATH = ".well-known/acme-challenge"
    """URI root path for the server provisioned resource."""

    _URI_TEMPLATE = "{scheme}://{domain}/" + URI_ROOT_PATH + "/{token}"

    CONTENT_TYPE = "application/jose+json"

    @property
    def scheme(self):
        """URL scheme for the provisioned resource."""
        return "https" if self.tls else "http"

    @property
    def port(self):
        """Port that the ACME client should be listening for validation."""
        return 443 if self.tls else 80

    def uri(self, domain, chall):
        """Create an URI to the provisioned resource.

        Forms an URI to the HTTPS server provisioned resource
        (containing :attr:`~SimpleHTTP.token`).

        :param unicode domain: Domain name being verified.
        :param challenges.SimpleHTTP chall:
        """
        return self._URI_TEMPLATE.format(
            scheme=self.scheme, domain=domain, token=chall.encode("token"))

    def gen_resource(self, chall):
        """Generate provisioned resource.

        :param .SimpleHTTP chall:
        :rtype: SimpleHTTPProvisionedResource
        """
        return SimpleHTTPProvisionedResource(token=chall.token, tls=self.tls)

    def gen_validation(self, chall, account_key, alg=jose.RS256, **kwargs):
        """Generate validation.

        :param .SimpleHTTP chall:
        :param .JWK account_key: Private account key.
        :param .JWA alg:

        :returns: `.SimpleHTTPProvisionedResource` signed in `.JWS`
        :rtype: .JWS
        """
        return jose.JWS.sign(
            payload=self.gen_resource(chall).json_dumps(
                sort_keys=True).encode('utf-8'),
            key=account_key, alg=alg, **kwargs)

    def check_validation(self, validation, chall, account_public_key):
        """Check validation.

        :param .JWS validation:
        :param .SimpleHTTP chall:
        :type account_public_key:
            `~cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey`
            wrapped in `.ComparableKey`

        :rtype: bool
        """
        if not validation.verify(key=account_public_key):
            return False
        try:
            resource = SimpleHTTPProvisionedResource.json_loads(
                validation.payload.decode('utf-8'))
        except jose.DeserializationError as error:
            logger.debug(error)
            return False
        return resource.token == chall.token and resource.tls == self.tls

    def simple_verify(self, chall, domain, account_public_key, port=None):
        """Simple verify.

        According to the ACME specification, "the ACME server MUST
        ignore the certificate provided by the HTTPS server", so
        ``requests.get`` is called with ``verify=False``.

        :param .SimpleHTTP chall: Corresponding challenge.
        :param unicode domain: Domain name being verified.
        :param account_public_key: Public key for the key pair
            being authorized. If ``None`` key verification is not
            performed!
        :type account_public_key:
            `~cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey`
            wrapped in `.ComparableKey`
        :param int port: Port used in the validation.

        :returns: ``True`` iff validation is successful, ``False``
            otherwise.
        :rtype: bool
        """
        # TODO: ACME specification defines URI template that doesn't
        # allow to use a custom port... Make sure port is not in the
        # request URI, if it's standard.
        if port is not None and port != self.port:
            # Logger.warn is a deprecated alias; use warning().
            logger.warning(
                "Using non-standard port for SimpleHTTP verification: %s", port)
            domain += ":{0}".format(port)
        uri = self.uri(domain, chall)
        logger.debug("Verifying %s at %s...", chall.typ, uri)
        try:
            http_response = requests.get(uri, verify=False)
        except requests.exceptions.RequestException as error:
            logger.error("Unable to reach %s: %s", uri, error)
            return False
        logger.debug("Received %s: %s. Headers: %s", http_response,
                     http_response.text, http_response.headers)
        if self.CONTENT_TYPE != http_response.headers.get(
                "Content-Type", self.CONTENT_TYPE):
            return False
        try:
            validation = jose.JWS.json_loads(http_response.text)
        except jose.DeserializationError as error:
            logger.debug(error)
            return False
        return self.check_validation(validation, chall, account_public_key)
class SimpleHTTPProvisionedResource(jose.JSONObjectWithFields):
    """SimpleHTTP provisioned resource.

    The JSON document the client provisions at the well-known URI; its
    signed form is what the server fetches and verifies.
    """
    typ = fields.Fixed("type", SimpleHTTP.typ)
    # Reuse the challenge's token field definition so encoding/decoding
    # rules stay in sync.
    token = SimpleHTTP._fields["token"]
    # If the "tls" field is not included in the response, then
    # validation object MUST have its "tls" field set to "true".
    tls = jose.Field("tls", omitempty=False)
@Challenge.register
class DVSNI(DVChallenge):
    """ACME "dvsni" challenge.

    :ivar bytes token: Random data, **not** base64-encoded.
    """
    typ = "dvsni"

    PORT = 443
    """Port to perform DVSNI challenge."""

    # Floor division keeps TOKEN_SIZE an int on both Python 2 and 3;
    # the true-division original would yield the float 16.0 under
    # Python 3 and break the decoder's size check.
    TOKEN_SIZE = 128 // 8  # Based on the entropy value from the spec
    """Minimum size of the :attr:`token` in bytes."""

    token = jose.Field(
        "token", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))

    def gen_response(self, account_key, alg=jose.RS256, **kwargs):
        """Generate response.

        :param .JWK account_key: Private account key.

        :rtype: .DVSNIResponse
        """
        return DVSNIResponse(validation=jose.JWS.sign(
            payload=self.json_dumps(sort_keys=True).encode('utf-8'),
            key=account_key, alg=alg, **kwargs))
@ChallengeResponse.register
class DVSNIResponse(ChallengeResponse):
    """ACME "dvsni" challenge response.

    :ivar .JWS validation: The signed serialized :class:`DVSNI`
        challenge.  (The docstring previously described a stale ``s``
        parameter that no longer exists.)
    """
    typ = "dvsni"

    DOMAIN_SUFFIX = b".acme.invalid"
    """Domain name suffix."""

    PORT = DVSNI.PORT
    """Port to perform DVSNI challenge."""

    validation = jose.Field("validation", decoder=jose.JWS.from_json)

    @property
    def z(self):  # pylint: disable=invalid-name
        """The ``z`` parameter.

        SHA-256 hex digest of the validation signature, as bytes.

        :rtype: bytes
        """
        # Instance of 'Field' has no 'signature' member
        # pylint: disable=no-member
        return hashlib.sha256(self.validation.signature.encode(
            "signature").encode("utf-8")).hexdigest().encode()

    @property
    def z_domain(self):
        """Domain name for certificate subjectAltName.

        :rtype: bytes
        """
        z = self.z  # pylint: disable=invalid-name
        return z[:32] + b'.' + z[32:] + self.DOMAIN_SUFFIX

    @property
    def chall(self):
        """Get challenge encoded in the `validation` payload.

        :rtype: DVSNI
        """
        # pylint: disable=no-member
        return DVSNI.json_loads(self.validation.payload.decode('utf-8'))

    def gen_cert(self, key=None, bits=2048):
        """Generate DVSNI certificate.

        :param OpenSSL.crypto.PKey key: Optional private key used in
            certificate generation. If not provided (``None``), then
            fresh key will be generated.
        :param int bits: Number of bits for newly generated key.

        :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
        """
        if key is None:
            key = OpenSSL.crypto.PKey()
            key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
        return crypto_util.gen_ss_cert(key, [
            # z_domain is too big to fit into CN, hence first dummy domain
            'dummy', self.z_domain.decode()], force_san=True), key

    def probe_cert(self, domain, **kwargs):
        """Probe DVSNI challenge certificate.

        :param unicode domain:
        """
        host = socket.gethostbyname(domain)
        # Use the module logger instead of the root logger (logging.debug)
        # for consistency with the rest of this module.
        logger.debug('%s resolved to %s', domain, host)
        kwargs.setdefault("host", host)
        kwargs.setdefault("port", self.PORT)
        kwargs["name"] = self.z_domain
        # TODO: try different methods?
        # pylint: disable=protected-access
        return crypto_util._probe_sni(**kwargs)

    def verify_cert(self, cert):
        """Verify DVSNI challenge certificate."""
        # pylint: disable=protected-access
        sans = crypto_util._pyopenssl_cert_or_req_san(cert)
        logger.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
        return self.z_domain.decode() in sans

    def simple_verify(self, chall, domain, account_public_key,
                      cert=None, **kwargs):
        """Simple verify.

        Verify ``validation`` using ``account_public_key``, optionally
        probe DVSNI certificate and check using `verify_cert`.

        :param .challenges.DVSNI chall: Corresponding challenge.
        :param str domain: Domain name being validated.
        :type account_public_key:
            `~cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey`
            or
            `~cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey`
            wrapped in `.ComparableKey`
        :param OpenSSL.crypto.X509 cert: Optional certificate. If not
            provided (``None``) certificate will be retrieved using
            `probe_cert`.

        :returns: ``True`` iff client's control of the domain has been
            verified, ``False`` otherwise.
        :rtype: bool
        """
        # pylint: disable=no-member
        if not self.validation.verify(key=account_public_key):
            return False

        # TODO: it's not checked that payload has exactly 2 fields!
        try:
            decoded_chall = self.chall
        except jose.DeserializationError as error:
            logger.debug(error, exc_info=True)
            return False

        if decoded_chall.token != chall.token:
            logger.debug("Wrong token: expected %r, found %r",
                         chall.token, decoded_chall.token)
            return False

        if cert is None:
            try:
                cert = self.probe_cert(domain=domain, **kwargs)
            except errors.Error as error:
                logger.debug(error, exc_info=True)
                return False

        return self.verify_cert(cert)
@Challenge.register
class RecoveryContact(ContinuityChallenge):
    """ACME "recoveryContact" challenge.

    :ivar unicode activation_url:
    :ivar unicode success_url:
    :ivar unicode contact:
    """
    typ = "recoveryContact"

    activation_url = jose.Field("activationURL", omitempty=True)
    success_url = jose.Field("successURL", omitempty=True)
    contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
    """ACME "recoveryContact" challenge response.

    :ivar unicode token:
    """
    typ = "recoveryContact"
    token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
    """ACME "proofOfPossession" challenge.

    :ivar .JWAAlgorithm alg:
    :ivar bytes nonce: Random data, **not** base64-encoded.
    :ivar hints: Various clues for the client (:class:`Hints`).
    """
    typ = "proofOfPossession"

    NONCE_SIZE = 16  # exact nonce size in bytes enforced by the decoder

    class Hints(jose.JSONObjectWithFields):
        """Hints for "proofOfPossession" challenge.

        :ivar jwk: JSON Web Key (:class:`acme.jose.JWK`)
        :ivar tuple cert_fingerprints: `tuple` of `unicode`
        :ivar tuple certs: Sequence of :class:`acme.jose.ComparableX509`
            certificates.
        :ivar tuple subject_key_identifiers: `tuple` of `unicode`
        :ivar tuple issuers: `tuple` of `unicode`
        :ivar tuple authorized_for: `tuple` of `unicode`
        """
        jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
        cert_fingerprints = jose.Field(
            "certFingerprints", omitempty=True, default=())
        certs = jose.Field("certs", omitempty=True, default=())
        subject_key_identifiers = jose.Field(
            "subjectKeyIdentifiers", omitempty=True, default=())
        serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
        issuers = jose.Field("issuers", omitempty=True, default=())
        authorized_for = jose.Field("authorizedFor", omitempty=True, default=())

        @certs.encoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.encode_cert(cert) for cert in value)

        @certs.decoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.decode_cert(cert) for cert in value)

    alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
    nonce = jose.Field(
        "nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
    """ACME "proofOfPossession" challenge response.

    :ivar bytes nonce: Random data, **not** base64-encoded.
    :ivar acme.other.Signature signature: Signature of this message.
    """
    typ = "proofOfPossession"

    NONCE_SIZE = ProofOfPossession.NONCE_SIZE

    nonce = jose.Field(
        "nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    signature = jose.Field("signature", decoder=other.Signature.from_json)

    def verify(self):
        """Verify the challenge.

        :returns: whether the stored signature validates the nonce.
        """
        # self.signature is not Field | pylint: disable=no-member
        return self.signature.verify(self.nonce)
@Challenge.register
class DNS(DVChallenge):
    """ACME "dns" challenge.

    :ivar unicode token:
    """
    typ = "dns"
    token = jose.Field("token")
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
    """ACME "dns" challenge response."""
    typ = "dns"
| |
"""
This module contains the machinery handling assumptions.
All symbolic objects have assumption attributes that can be accessed via
.is_<assumption name> attribute.
Assumptions determine certain properties of symbolic objects and can
have 3 possible values: True, False, None. True is returned if the
object has the property and False is returned if it doesn't or can't
(i.e. doesn't make sense):
>>> from sympy import I
>>> I.is_algebraic
True
>>> I.is_real
False
>>> I.is_prime
False
When the property cannot be determined (or when a method is not
implemented) None will be returned, e.g. a generic symbol, x, may or
may not be positive so a value of None is returned for x.is_positive.
By default, all symbolic values are in the largest set in the given context
without specifying the property. For example, a symbol that has a property
being integer, is also real, complex, etc.
Here follows a list of possible assumption names:
.. glossary::
commutative
object commutes with any other object with
respect to multiplication operation.
complex
object can have only values from the set
of complex numbers.
imaginary
object value is a number that can be written as a real
number multiplied by the imaginary unit ``I``. See
[3]_. Please note, that ``0`` is not considered to be an
imaginary number, see
`issue #7649 <https://github.com/sympy/sympy/issues/7649>`_.
real
object can have only values from the set
of real numbers.
integer
object can have only values from the set
of integers.
odd
even
object can have only values from the set of
odd (even) integers [2]_.
prime
object is a natural number greater than ``1`` that has
no positive divisors other than ``1`` and itself. See [6]_.
composite
object is a positive integer that has at least one positive
divisor other than ``1`` or the number itself. See [4]_.
zero
nonzero
object is zero (not zero).
rational
object can have only values from the set
of rationals.
algebraic
object can have only values from the set
of algebraic numbers [11]_.
transcendental
object can have only values from the set
of transcendental numbers [10]_.
irrational
object value cannot be represented exactly by Rational, see [5]_.
finite
infinite
object absolute value is bounded (is arbitrarily
large). See [7]_, [8]_, [9]_.
negative
nonnegative
object can have only negative (only
nonnegative) values [1]_.
positive
nonpositive
object can have only positive (only
nonpositive) values.
hermitian
antihermitian
object belongs to the field of hermitian
(antihermitian) operators.
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x', real=True); x
x
>>> x.is_real
True
>>> x.is_complex
True
See Also
========
.. seealso::
:py:class:`sympy.core.numbers.ImaginaryUnit`
:py:class:`sympy.core.numbers.Zero`
:py:class:`sympy.core.numbers.One`
Notes
=====
Assumption values are stored in obj._assumptions dictionary or
are returned by getter methods (with property decorators) or are
attributes of objects/classes.
References
==========
.. [1] http://en.wikipedia.org/wiki/Negative_number
.. [2] http://en.wikipedia.org/wiki/Parity_%28mathematics%29
.. [3] http://en.wikipedia.org/wiki/Imaginary_number
.. [4] http://en.wikipedia.org/wiki/Composite_number
.. [5] http://en.wikipedia.org/wiki/Irrational_number
.. [6] http://en.wikipedia.org/wiki/Prime_number
.. [7] http://en.wikipedia.org/wiki/Finite
.. [8] https://docs.python.org/3/library/math.html#math.isfinite
.. [9] http://docs.scipy.org/doc/numpy/reference/generated/numpy.isfinite.html
.. [10] http://en.wikipedia.org/wiki/Transcendental_number
.. [11] http://en.wikipedia.org/wiki/Algebraic_number
"""
from __future__ import print_function, division
from sympy.core.facts import FactRules, FactKB
from sympy.core.core import BasicMeta
from sympy.core.compatibility import integer_types, with_metaclass
from random import shuffle
# Closure of implication rules between the assumption predicates.
# FactRules compiles these textual rules ('a -> b' one-way implication,
# 'a == b & c' equivalence) into a form usable for forward-chaining
# deduction inside a FactKB.
_assume_rules = FactRules([
    'integer -> rational',
    'rational -> real',
    'rational -> algebraic',
    'algebraic -> complex',
    'real -> complex',
    'real -> hermitian',
    'imaginary -> complex',
    'imaginary -> antihermitian',
    'complex -> commutative',
    'odd == integer & !even',
    'even == integer & !odd',
    'real == negative | zero | positive',
    'transcendental == complex & !algebraic',
    'negative == nonpositive & nonzero',
    'positive == nonnegative & nonzero',
    'zero == nonnegative & nonpositive',
    'nonpositive == real & !positive',
    'nonnegative == real & !negative',
    'zero -> even & finite',
    'prime -> integer & positive',
    'composite -> integer & positive & !prime',
    'irrational == real & !rational',
    'imaginary -> !real',
    'infinite -> !finite',
    'noninteger == real & !integer',
    'nonzero == !zero',
])

# All fact names known to the rule system, plus 'polar', which has no
# deduction rules of its own but is still a recognized assumption name.
_assume_defined = _assume_rules.defined_facts.copy()
_assume_defined.add('polar')
_assume_defined = frozenset(_assume_defined)
class StdFactKB(FactKB):
    """A FactKB specialised for the built-in rules

    This is the only kind of FactKB that Basic objects should use.
    """
    rules = _assume_rules

    def __init__(self, facts=None):
        """Seed the knowledge base from *facts*.

        *facts* may be ``None``/empty, a plain mapping of fact names to
        values, or another ``FactKB``; in every case a copy of the
        originating facts is remembered so it can be retrieved via the
        ``generator`` property.
        """
        # save a copy of the facts dict
        if not facts:
            self._generator = {}  # removed stray trailing semicolon (un-Pythonic)
        elif not isinstance(facts, FactKB):
            self._generator = facts.copy()
        else:
            self._generator = facts.generator
        if facts:
            self.deduce_all_facts(facts)

    def copy(self):
        """Return an independent copy of this knowledge base."""
        return self.__class__(self)

    @property
    def generator(self):
        """Return a copy of the facts this KB was originally seeded with."""
        return self._generator.copy()
def as_property(fact):
    """Convert a fact name to the name of the corresponding property.

    >>> as_property('even')
    'is_even'
    """
    return 'is_{0}'.format(fact)
def make_property(fact):
    """Create the automagic property corresponding to a fact."""

    def getit(self):
        # Fast path: the fact is already cached (or set as a class default).
        try:
            return self._assumptions[fact]
        except KeyError:
            # Copy-on-write: never mutate the shared class-level default
            # assumptions; give this instance its own private copy first.
            if self._assumptions is self.default_assumptions:
                self._assumptions = self.default_assumptions.copy()
            # Fall back to the full deduction machinery (result is cached).
            return _ask(fact, self)

    getit.func_name = as_property(fact)
    return property(getit)
def _ask(fact, obj):
    """
    Find the truth value for a property of an object.

    This function is called when a request is made to see what a fact
    value is.

    For this we use several techniques:

    First, the fact-evaluation function is tried, if it exists (for
    example _eval_is_integer). Then we try related facts. For example

        rational --> integer

    another example is joined rule:

        integer & !odd --> even

    so in the latter case if we are looking at what 'even' value is,
    'integer' and 'odd' facts will be asked.

    In all cases, when we settle on some fact value, its implications are
    deduced, and the result is cached in ._assumptions.
    """
    assumptions = obj._assumptions
    handler_map = obj._prop_handler

    # Store None into the assumptions so that recursive attempts at
    # evaluating the same fact don't trigger infinite recursion.
    assumptions._tell(fact, None)

    # First try the assumption evaluation function if it exists
    try:
        evaluate = handler_map[fact]
    except KeyError:
        pass
    else:
        a = evaluate(obj)
        if a is not None:
            # Cache the answer together with everything it implies.
            assumptions.deduce_all_facts(((fact, a),))
            return a

    # Try assumption's prerequisites
    prereq = list(_assume_rules.prereq[fact])
    # Randomized order: any resolution order must yield a consistent
    # answer, so none is favoured.
    shuffle(prereq)
    for pk in prereq:
        if pk in assumptions:
            continue
        if pk in handler_map:
            # Evaluating the prerequisite deduces (and caches) related
            # facts, possibly including `fact` itself.
            _ask(pk, obj)

            # we might have found the value of fact
            ret_val = assumptions.get(fact)
            if ret_val is not None:
                return ret_val

    # Note: the result has already been cached
    return None
class ManagedProperties(BasicMeta):
    """Metaclass for classes with old-style assumptions"""

    def __init__(cls, *args, **kws):
        BasicMeta.__init__(cls, *args, **kws)

        # Collect assumptions stated explicitly in this class body
        # (e.g. ``is_integer = True``); normalize int-ish values to bool.
        local_defs = {}
        for k in _assume_defined:
            attrname = as_property(k)
            v = cls.__dict__.get(attrname, '')
            if isinstance(v, (bool, integer_types, type(None))):
                if v is not None:
                    v = bool(v)
                local_defs[k] = v

        # Merge explicit assumptions inherited from base classes; bases
        # are walked in reverse so more-derived definitions win.
        defs = {}
        for base in reversed(cls.__bases__):
            try:
                defs.update(base._explicit_class_assumptions)
            except AttributeError:
                pass
        defs.update(local_defs)

        cls._explicit_class_assumptions = defs
        cls.default_assumptions = StdFactKB(defs)

        # Map each fact name to the class's _eval_is_<fact> handler, if any.
        cls._prop_handler = {}
        for k in _assume_defined:
            try:
                cls._prop_handler[k] = getattr(cls, '_eval_is_%s' % k)
            except AttributeError:
                pass

        # Put definite results directly into the class dict, for speed
        for k, v in cls.default_assumptions.items():
            setattr(cls, as_property(k), v)

        # protection e.g. for Integer.is_even=F <- (Rational.is_integer=F)
        derived_from_bases = set()
        for base in cls.__bases__:
            try:
                derived_from_bases |= set(base.default_assumptions)
            except AttributeError:
                continue  # not an assumption-aware class
        for fact in derived_from_bases - set(cls.default_assumptions):
            pname = as_property(fact)
            if pname not in cls.__dict__:
                setattr(cls, pname, make_property(fact))

        # Finally, add any missing automagic property (e.g. for Basic)
        for fact in _assume_defined:
            pname = as_property(fact)
            if not hasattr(cls, pname):
                setattr(cls, pname, make_property(fact))
| |
"""
Client related classes (Proxy, mostly)
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import time
import logging
import serpent
import contextlib
from . import config, core, serializers, protocol, errors, socketutil
from .callcontext import current_context
try:
from greenlet import getcurrent as get_ident
except ImportError:
from threading import get_ident
log = logging.getLogger("Pyro5.client")
__all__ = ["Proxy", "BatchProxy", "SerializedBlob"]
class Proxy(object):
    """
    Pyro proxy for a remote object. Intercepts method calls and dispatches them to the remote object.

    .. automethod:: _pyroBind
    .. automethod:: _pyroRelease
    .. automethod:: _pyroReconnect
    .. automethod:: _pyroValidateHandshake
    .. autoattribute:: _pyroTimeout
    .. attribute:: _pyroMaxRetries

        Number of retries to perform on communication calls by this proxy, allows you to override the default setting.

    .. attribute:: _pyroSerializer

        Name of the serializer to use by this proxy, allows you to override the default setting.

    .. attribute:: _pyroHandshake

        The data object that should be sent in the initial connection handshake message. Can be any serializable object.
    """

    # Names always handled locally by the proxy itself; never forwarded
    # to the remote object (also keeps pickling safe, see __getattr__).
    __pyroAttributes = frozenset(
        ["__getnewargs__", "__getnewargs_ex__", "__getinitargs__", "_pyroConnection", "_pyroUri",
         "_pyroOneway", "_pyroMethods", "_pyroAttrs", "_pyroTimeout", "_pyroSeq",
         "_pyroRawWireResponse", "_pyroHandshake", "_pyroMaxRetries", "_pyroSerializer",
         "_Proxy__pyroTimeout", "_Proxy__pyroOwnerThread"])

    def __init__(self, uri, connected_socket=None):
        """Create a proxy for the given uri (string or core.URI).

        If *connected_socket* is given, *uri* is just the object name and
        the proxy reuses that already-connected socket.
        """
        if connected_socket:
            uri = core.URI("PYRO:" + uri + "@<<connected-socket>>:0")
        if isinstance(uri, str):
            uri = core.URI(uri)
        elif not isinstance(uri, core.URI):
            raise TypeError("expected Pyro URI")
        self._pyroUri = uri
        self._pyroConnection = None
        self._pyroSerializer = None  # can be set to the name of a serializer to override the global one per-proxy
        self._pyroMethods = set()  # all methods of the remote object, gotten from meta-data
        self._pyroAttrs = set()  # attributes of the remote object, gotten from meta-data
        self._pyroOneway = set()  # oneway-methods of the remote object, gotten from meta-data
        self._pyroSeq = 0  # message sequence number
        self._pyroRawWireResponse = False  # internal switch to enable wire level responses
        self._pyroHandshake = "hello"  # the data object that should be sent in the initial connection handshake message
        self._pyroMaxRetries = config.MAX_RETRIES
        self.__pyroTimeout = config.COMMTIMEOUT
        self.__pyroOwnerThread = get_ident()  # the thread that owns this proxy
        if config.SERIALIZER not in serializers.serializers:
            raise ValueError("unknown serializer configured")
        # note: we're not clearing the client annotations dict here.
        # that is because otherwise it will be wiped if a new proxy is needed to connect PYRONAME uris.
        # clearing the response annotations is okay.
        current_context.response_annotations = {}
        if connected_socket:
            self.__pyroCreateConnection(False, connected_socket)

    def __del__(self):
        # best-effort release; hasattr guard for partially-constructed proxies
        if hasattr(self, "_pyroConnection"):
            try:
                self._pyroRelease()
            except Exception:
                pass

    def __getattr__(self, name):
        """Intercept attribute access and turn it into a remote call/lookup."""
        if name in Proxy.__pyroAttributes:
            # allows it to be safely pickled
            raise AttributeError(name)
        # get metadata if it's not there yet
        if not self._pyroMethods and not self._pyroAttrs:
            self._pyroGetMetadata()
        if name in self._pyroAttrs:
            return self._pyroInvoke("__getattr__", (name,), None)
        if name not in self._pyroMethods:
            # client side check if the requested attr actually exists
            raise AttributeError("remote object '%s' has no exposed attribute or method '%s'" % (self._pyroUri, name))
        return _RemoteMethod(self._pyroInvoke, name, self._pyroMaxRetries)

    def __setattr__(self, name, value):
        """Intercept attribute assignment; forwards to the remote object when applicable."""
        if name in Proxy.__pyroAttributes:
            return super(Proxy, self).__setattr__(name, value)  # one of the special pyro attributes
        # get metadata if it's not there yet
        if not self._pyroMethods and not self._pyroAttrs:
            self._pyroGetMetadata()
        if name in self._pyroAttrs:
            return self._pyroInvoke("__setattr__", (name, value), None)  # remote attribute
        # client side validation if the requested attr actually exists
        raise AttributeError("remote object '%s' has no exposed attribute '%s'" % (self._pyroUri, name))

    def __repr__(self):
        if self._pyroConnection:
            connected = "connected " + self._pyroConnection.family()
        else:
            connected = "not connected"
        return "<%s.%s at 0x%x; %s; for %s; owner %s>" % (self.__class__.__module__, self.__class__.__name__,
                                                          id(self), connected, self._pyroUri, self.__pyroOwnerThread)

    def __getstate__(self):
        # make sure a tuple of just primitive types are used to allow for proper serialization
        return str(self._pyroUri), tuple(self._pyroOneway), tuple(self._pyroMethods), \
            tuple(self._pyroAttrs), self._pyroHandshake, self._pyroSerializer

    def __setstate__(self, state):
        # note: connection state is NOT restored; the unpickled proxy is disconnected
        self._pyroUri = core.URI(state[0])
        self._pyroOneway = set(state[1])
        self._pyroMethods = set(state[2])
        self._pyroAttrs = set(state[3])
        self._pyroHandshake = state[4]
        self._pyroSerializer = state[5]
        self.__pyroTimeout = config.COMMTIMEOUT
        self._pyroMaxRetries = config.MAX_RETRIES
        self._pyroConnection = None
        self._pyroSeq = 0
        self._pyroRawWireResponse = False
        self.__pyroOwnerThread = get_ident()

    def __copy__(self):
        # copies share no connection; a few per-proxy settings are carried over
        p = object.__new__(type(self))
        p.__setstate__(self.__getstate__())
        p._pyroTimeout = self._pyroTimeout
        p._pyroRawWireResponse = self._pyroRawWireResponse
        p._pyroMaxRetries = self._pyroMaxRetries
        return p

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._pyroRelease()

    def __eq__(self, other):
        # proxies are equal when they point at the same uri
        if other is self:
            return True
        return isinstance(other, Proxy) and other._pyroUri == self._pyroUri

    def __ne__(self, other):
        if other and isinstance(other, Proxy):
            return other._pyroUri != self._pyroUri
        return True

    def __hash__(self):
        return hash(self._pyroUri)

    def __dir__(self):
        # include the remote methods/attributes for nicer introspection
        result = dir(self.__class__) + list(self.__dict__.keys())
        return sorted(set(result) | self._pyroMethods | self._pyroAttrs)

    def _pyroRelease(self):
        """release the connection to the pyro daemon"""
        self.__check_owner()
        if self._pyroConnection is not None:
            self._pyroConnection.close()
            self._pyroConnection = None

    def _pyroBind(self):
        """
        Bind this proxy to the exact object from the uri. That means that the proxy's uri
        will be updated with a direct PYRO uri, if it isn't one yet.
        If the proxy is already bound, it will not bind again.
        """
        return self.__pyroCreateConnection(True)

    def __pyroGetTimeout(self):
        return self.__pyroTimeout

    def __pyroSetTimeout(self, timeout):
        self.__pyroTimeout = timeout
        if self._pyroConnection is not None:
            self._pyroConnection.timeout = timeout

    _pyroTimeout = property(__pyroGetTimeout, __pyroSetTimeout, doc="""
        The timeout in seconds for calls on this proxy. Defaults to ``None``.
        If the timeout expires before the remote method call returns,
        Pyro will raise a :exc:`Pyro5.errors.TimeoutError`""")

    def _pyroInvoke(self, methodname, vargs, kwargs, flags=0, objectId=None):
        """perform the remote method call communication"""
        self.__check_owner()
        current_context.response_annotations = {}
        if self._pyroConnection is None:
            self.__pyroCreateConnection()
        serializer = serializers.serializers[self._pyroSerializer or config.SERIALIZER]
        objectId = objectId or self._pyroConnection.objectId
        annotations = current_context.annotations
        if vargs and isinstance(vargs[0], SerializedBlob):
            # special serialization of a 'blob' that stays serialized
            data, flags = self.__serializeBlobArgs(vargs, kwargs, annotations, flags, objectId, methodname, serializer)
        else:
            # normal serialization of the remote call
            data = serializer.dumpsCall(objectId, methodname, vargs, kwargs)
        if methodname in self._pyroOneway:
            flags |= protocol.FLAGS_ONEWAY
        # sequence number wraps around at 16 bits (wire format limit)
        self._pyroSeq = (self._pyroSeq + 1) & 0xffff
        msg = protocol.SendingMessage(protocol.MSG_INVOKE, flags, self._pyroSeq, serializer.serializer_id, data, annotations=annotations)
        if config.LOGWIRE:
            protocol.log_wiredata(log, "proxy wiredata sending", msg)
        try:
            self._pyroConnection.send(msg.data)
            del msg  # invite GC to collect the object, don't wait for out-of-scope
            if flags & protocol.FLAGS_ONEWAY:
                return None  # oneway call, no response data
            else:
                msg = protocol.recv_stub(self._pyroConnection, [protocol.MSG_RESULT])
                if config.LOGWIRE:
                    protocol.log_wiredata(log, "proxy wiredata received", msg)
                self.__pyroCheckSequence(msg.seq)
                if msg.serializer_id != serializer.serializer_id:
                    error = "invalid serializer in response: %d" % msg.serializer_id
                    log.error(error)
                    raise errors.SerializeError(error)
                if msg.annotations:
                    current_context.response_annotations = msg.annotations
                if self._pyroRawWireResponse:
                    return msg
                data = serializer.loads(msg.data)
                if msg.flags & protocol.FLAGS_ITEMSTREAMRESULT:
                    streamId = bytes(msg.annotations.get("STRM", b"")).decode()
                    if not streamId:
                        raise errors.ProtocolError("result of call is an iterator, but the server is not configured to allow streaming")
                    return _StreamResultIterator(streamId, self)
                if msg.flags & protocol.FLAGS_EXCEPTION:
                    raise data  # if you see this in your traceback, you should probably inspect the remote traceback as well
                else:
                    return data
        except (errors.CommunicationError, KeyboardInterrupt):
            # Communication error during read. To avoid corrupt transfers, we close the connection.
            # Otherwise we might receive the previous reply as a result of a new method call!
            # Special case for keyboardinterrupt: people pressing ^C to abort the client
            # may be catching the keyboardinterrupt in their code. We should probably be on the
            # safe side and release the proxy connection in this case too, because they might
            # be reusing the proxy object after catching the exception...
            self._pyroRelease()
            raise

    def __pyroCheckSequence(self, seq):
        # guards against mixed-up request/response pairs on the connection
        if seq != self._pyroSeq:
            err = "invoke: reply sequence out of sync, got %d expected %d" % (seq, self._pyroSeq)
            log.error(err)
            raise errors.ProtocolError(err)

    def __pyroCreateConnection(self, replaceUri=False, connected_socket=None):
        """
        Connects this proxy to the remote Pyro daemon. Does connection handshake.
        Returns true if a new connection was made, false if an existing one was already present.
        """
        # nested helper: performs the actual socket connect + Pyro handshake
        def connect_and_handshake(conn):
            try:
                if self._pyroConnection is not None:
                    return False  # already connected
                if config.SSL:
                    sslContext = socketutil.get_ssl_context(clientcert=config.SSL_CLIENTCERT,
                                                            clientkey=config.SSL_CLIENTKEY,
                                                            keypassword=config.SSL_CLIENTKEYPASSWD,
                                                            cacerts=config.SSL_CACERTS)
                else:
                    sslContext = None
                sock = socketutil.create_socket(connect=connect_location,
                                                reuseaddr=config.SOCK_REUSE,
                                                timeout=self.__pyroTimeout,
                                                nodelay=config.SOCK_NODELAY,
                                                sslContext=sslContext)
                conn = socketutil.SocketConnection(sock, uri.object)
                # Do handshake.
                serializer = serializers.serializers[self._pyroSerializer or config.SERIALIZER]
                data = {"handshake": self._pyroHandshake, "object": uri.object}
                data = serializer.dumps(data)
                msg = protocol.SendingMessage(protocol.MSG_CONNECT, 0, self._pyroSeq, serializer.serializer_id,
                                              data, annotations=current_context.annotations)
                if config.LOGWIRE:
                    protocol.log_wiredata(log, "proxy connect sending", msg)
                conn.send(msg.data)
                msg = protocol.recv_stub(conn, [protocol.MSG_CONNECTOK, protocol.MSG_CONNECTFAIL])
                if config.LOGWIRE:
                    protocol.log_wiredata(log, "proxy connect response received", msg)
            except Exception as x:
                if conn:
                    conn.close()
                err = "cannot connect to %s: %s" % (connect_location, x)
                log.error(err)
                if isinstance(x, errors.CommunicationError):
                    raise
                else:
                    raise errors.CommunicationError(err) from x
            else:
                # no exception during handshake exchange; interpret the reply
                handshake_response = "?"
                if msg.data:
                    serializer = serializers.serializers_by_id[msg.serializer_id]
                    handshake_response = serializer.loads(msg.data)
                if msg.type == protocol.MSG_CONNECTFAIL:
                    error = "connection to %s rejected: %s" % (connect_location, handshake_response)
                    conn.close()
                    log.error(error)
                    raise errors.CommunicationError(error)
                elif msg.type == protocol.MSG_CONNECTOK:
                    self.__processMetadata(handshake_response["meta"])
                    handshake_response = handshake_response["handshake"]
                    self._pyroConnection = conn
                    if replaceUri:
                        self._pyroUri = uri
                    self._pyroValidateHandshake(handshake_response)
                    log.debug("connected to %s - %s - %s", self._pyroUri, conn.family(), "SSL" if sslContext else "unencrypted")
                    if msg.annotations:
                        current_context.response_annotations = msg.annotations
                else:
                    conn.close()
                    err = "cannot connect to %s: invalid msg type %d received" % (connect_location, msg.type)
                    log.error(err)
                    raise errors.ProtocolError(err)

        self.__check_owner()
        if self._pyroConnection is not None:
            return False  # already connected
        uri = core.resolve(self._pyroUri)
        # socket connection (normal or Unix domain socket)
        conn = None
        log.debug("connecting to %s", uri)
        connect_location = uri.sockname or (uri.host, uri.port)
        if connected_socket:
            self._pyroConnection = socketutil.SocketConnection(connected_socket, uri.object, True)
        else:
            connect_and_handshake(conn)
        # obtain metadata if this feature is enabled, and the metadata is not known yet
        if not self._pyroMethods and not self._pyroAttrs:
            self._pyroGetMetadata(uri.object)
        return True

    def _pyroGetMetadata(self, objectId=None, known_metadata=None):
        """
        Get metadata from server (methods, attrs, oneway, ...) and remember them in some attributes of the proxy.
        Usually this will already be known due to the default behavior of the connect handshake, where the
        connect response also includes the metadata.
        """
        objectId = objectId or self._pyroUri.object
        log.debug("getting metadata for object %s", objectId)
        if self._pyroConnection is None and not known_metadata:
            try:
                self.__pyroCreateConnection()
            except errors.PyroError:
                log.error("problem getting metadata: cannot connect")
                raise
            if self._pyroMethods or self._pyroAttrs:
                return  # metadata has already been retrieved as part of creating the connection
        try:
            # invoke the get_metadata method on the daemon
            result = known_metadata or self._pyroInvoke("get_metadata", [objectId], {}, objectId=core.DAEMON_NAME)
            self.__processMetadata(result)
        except errors.PyroError:
            log.exception("problem getting metadata")
            raise

    def __processMetadata(self, metadata):
        # store the server-provided method/attribute sets on the proxy
        if not metadata:
            return
        self._pyroOneway = set(metadata["oneway"])
        self._pyroMethods = set(metadata["methods"])
        self._pyroAttrs = set(metadata["attrs"])
        if log.isEnabledFor(logging.DEBUG):
            log.debug("from meta: methods=%s, oneway methods=%s, attributes=%s",
                      sorted(self._pyroMethods), sorted(self._pyroOneway), sorted(self._pyroAttrs))
        if not self._pyroMethods and not self._pyroAttrs:
            raise errors.PyroError("remote object doesn't expose any methods or attributes. Did you forget setting @expose on them?")

    def _pyroReconnect(self, tries=100000000):
        """
        (Re)connect the proxy to the daemon containing the pyro object which the proxy is for.
        In contrast to the _pyroBind method, this one first releases the connection (if the proxy is still connected)
        and retries making a new connection until it succeeds or the given amount of tries ran out.
        """
        self._pyroRelease()
        while tries:
            try:
                self.__pyroCreateConnection()
                return
            except errors.CommunicationError:
                tries -= 1
                if tries:
                    time.sleep(2)
        msg = "failed to reconnect"
        log.error(msg)
        raise errors.ConnectionClosedError(msg)

    def _pyroInvokeBatch(self, calls, oneway=False):
        # send all queued calls in one message (see BatchProxy)
        flags = protocol.FLAGS_BATCH
        if oneway:
            flags |= protocol.FLAGS_ONEWAY
        return self._pyroInvoke("<batch>", calls, None, flags)

    def _pyroValidateHandshake(self, response):
        """
        Process and validate the initial connection handshake response data received from the daemon.
        Simply return without error if everything is ok.
        Raise an exception if something is wrong and the connection should not be made.
        """
        return

    def _pyroClaimOwnership(self):
        """
        The current thread claims the ownership of this proxy from another thread.
        Any existing connection will remain active!
        """
        if get_ident() != self.__pyroOwnerThread:
            # if self._pyroConnection is not None:
            #     self._pyroConnection.close()
            #     self._pyroConnection = None
            self.__pyroOwnerThread = get_ident()

    def __serializeBlobArgs(self, vargs, kwargs, annotations, flags, objectId, methodname, serializer):
        """
        Special handling of a "blob" argument that has to stay serialized until explicitly deserialized in client code.
        This makes efficient, transparent gateways or dispatchers and such possible:
        they don't have to de/reserialize the message and are independent from the serialized class definitions.
        Annotations are passed in because some blob metadata is added. They're not part of the blob itself.
        """
        if len(vargs) > 1 or kwargs:
            raise errors.SerializeError("if SerializedBlob is used, it must be the only argument")
        blob = vargs[0]
        flags |= protocol.FLAGS_KEEPSERIALIZED
        # Pass the objectId and methodname separately in an annotation because currently,
        # they are embedded inside the serialized message data. And we're not deserializing that,
        # so we have to have another means of knowing the object and method it is meant for...
        # A better solution is perhaps to split the actual remote method arguments from the
        # control data (object + methodname) but that requires a major protocol change.
        # The code below is not as nice but it works without any protocol change and doesn't
        # require a hack either - so it's actually not bad like this.
        import marshal
        annotations["BLBI"] = marshal.dumps((blob.info, objectId, methodname))
        if blob._contains_blob:
            # directly pass through the already serialized msg data from within the blob
            protocol_msg = blob._data
            return protocol_msg.data, flags
        else:
            # replaces SerializedBlob argument with the data to be serialized
            return serializer.dumpsCall(objectId, methodname, blob._data, kwargs), flags

    def __check_owner(self):
        # a proxy may only be used by the thread that owns it
        if get_ident() != self.__pyroOwnerThread:
            raise errors.PyroError("the calling thread is not the owner of this proxy, "
                                   "create a new proxy in this thread or transfer ownership.")
class _RemoteMethod(object):
"""method call abstraction"""
def __init__(self, send, name, max_retries):
self.__send = send
self.__name = name
self.__max_retries = max_retries
def __getattr__(self, name):
return _RemoteMethod(self.__send, "%s.%s" % (self.__name, name), self.__max_retries)
def __call__(self, *args, **kwargs):
for attempt in range(self.__max_retries + 1):
try:
return self.__send(self.__name, args, kwargs)
except (errors.ConnectionClosedError, errors.TimeoutError):
# only retry for recoverable network errors
if attempt >= self.__max_retries:
# last attempt, raise the exception
raise
class _StreamResultIterator(object):
    """
    Pyro returns this as a result of a remote call which returns an iterator or generator.
    It is a normal iterable and produces elements on demand from the remote iterator.
    You can simply use it in for loops, list comprehensions etc.
    """

    def __init__(self, streamId, proxy):
        self.streamId = streamId
        self.proxy = proxy
        # remember the proxy's sequence number at creation time so close()
        # can later detect whether the proxy was used for other calls since
        self.pyroseq = proxy._pyroSeq

    def __iter__(self):
        return self

    def __next__(self):
        # self.proxy is None once the stream is exhausted or closed
        if self.proxy is None:
            raise StopIteration
        if self.proxy._pyroConnection is None:
            raise errors.ConnectionClosedError("the proxy for this stream result has been closed")
        self.pyroseq += 1
        try:
            return self.proxy._pyroInvoke("get_next_stream_item", [self.streamId], {}, objectId=core.DAEMON_NAME)
        except (StopIteration, GeneratorExit):
            # when the iterator is exhausted, the proxy is removed to avoid unneeded close_stream calls later
            # (the server has closed its part of the stream by itself already)
            self.proxy = None
            raise

    def __del__(self):
        # best-effort close from the garbage collector
        try:
            self.close()
        except Exception:
            pass

    def close(self):
        """Close the remote item stream (best effort)."""
        if self.proxy and self.proxy._pyroConnection is not None:
            if self.pyroseq == self.proxy._pyroSeq:
                # we're still in sync, it's okay to use the same proxy to close this stream
                self.proxy._pyroInvoke("close_stream", [self.streamId], {},
                                       flags=protocol.FLAGS_ONEWAY, objectId=core.DAEMON_NAME)
            else:
                # The proxy's sequence number has diverged.
                # One of the reasons this can happen is because this call is being done from python's GC where
                # it decides to gc old iterator objects *during a new call on the proxy*.
                # If we use the same proxy and do a call in between, the other call on the proxy will get an out of sync seq and crash!
                # We create a temporary second proxy to call close_stream on. This is inefficient, but avoids the problem.
                with contextlib.suppress(errors.CommunicationError):
                    with self.proxy.__copy__() as closingProxy:
                        closingProxy._pyroInvoke("close_stream", [self.streamId], {},
                                                 flags=protocol.FLAGS_ONEWAY, objectId=core.DAEMON_NAME)
        self.proxy = None
class _BatchedRemoteMethod(object):
"""method call abstraction that is used with batched calls"""
def __init__(self, calls, name):
self.__calls = calls
self.__name = name
def __getattr__(self, name):
return _BatchedRemoteMethod(self.__calls, "%s.%s" % (self.__name, name))
def __call__(self, *args, **kwargs):
self.__calls.append((self.__name, args, kwargs))
class BatchProxy(object):
    """Proxy that lets you batch multiple method calls into one.
    It is constructed with a reference to the normal proxy that will
    carry out the batched calls. Call methods on this object that you want to batch,
    and finally call the batch proxy itself. That call will return a generator
    for the results of every method call in the batch (in sequence)."""

    def __init__(self, proxy):
        self.__proxy = proxy
        self.__calls = []

    def __getattr__(self, name):
        # hand out a recorder that appends to our call queue instead of executing
        return _BatchedRemoteMethod(self.__calls, name)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

    def __copy__(self):
        # the duplicate shares the proxy but owns an independent call queue
        duplicate = type(self)(self.__proxy)
        duplicate.__calls = list(self.__calls)
        return duplicate

    def __resultsgenerator(self, results):
        for outcome in results:
            if isinstance(outcome, core._ExceptionWrapper):
                outcome.raiseIt()  # re-raise the remote exception locally.
            else:
                yield outcome  # it is a regular result object, yield that and continue.

    def __call__(self, oneway=False):
        self.__proxy._pyroClaimOwnership()
        batch_results = self.__proxy._pyroInvokeBatch(self.__calls, oneway)
        self.__calls = []  # clear for re-use
        if not oneway:
            return self.__resultsgenerator(batch_results)

    def _pyroInvoke(self, name, args, kwargs):
        # ignore all parameters, we just need to execute the batch
        batch_results = self.__proxy._pyroInvokeBatch(self.__calls)
        self.__calls = []  # clear for re-use
        return self.__resultsgenerator(batch_results)
class SerializedBlob(object):
    """
    Used to wrap some data to make Pyro pass this object transparently (it keeps the serialized payload as-is)
    Only when you need to access the actual client data you can deserialize on demand.
    This makes efficient, transparent gateways or dispatchers and such possible:
    they don't have to de/reserialize the message and are independent from the serialized class definitions.
    You have to pass this as the only parameter to a remote method call for Pyro to understand it.

    Init arguments:
    ``info`` = some (small) descriptive data about the blob. Can be a simple id or name or guid. Must be marshallable.
    ``data`` = the actual client data payload that you want to transfer in the blob. Can be anything that you would
    otherwise have used as regular remote call arguments.
    """

    def __init__(self, info, data, is_blob=False):
        self.info = info
        self._data = data
        self._contains_blob = is_blob
        # when flagged as a blob, the payload must already be a wire-level protocol message
        if is_blob and not isinstance(data, (protocol.SendingMessage, protocol.ReceivingMessage)):
            raise TypeError("data should be a protocol message object if is_blob is true")

    def deserialized(self):
        """Retrieves the client data stored in this blob. Deserializes the data automatically if required."""
        if not self._contains_blob:
            # plain client data, nothing to deserialize
            return self._data
        protocol_msg = self._data
        serializer = serializers.serializers_by_id[protocol_msg.serializer_id]
        if isinstance(protocol_msg, protocol.ReceivingMessage):
            _, _, data, _ = serializer.loads(protocol_msg.data)
            return data
        # strip off header bytes from SendingMessage
        payload_data = memoryview(protocol_msg.data)[protocol._header_size:]
        _, _, data, _ = serializer.loads(payload_data)
        return data
# register the special serializers for the pyro objects
# (serpent gets its dedicated hook; the generic dict-based hook covers the
# other serializers — serpent_too=False presumably avoids registering the
# Proxy conversion twice for serpent; verify against SerializerBase docs)
serpent.register_class(Proxy, serializers.pyro_class_serpent_serializer)
serializers.SerializerBase.register_class_to_dict(Proxy, serializers.serialize_pyro_object_to_dict, serpent_too=False)
| |
# ---------------------------------------------------------------
# OVERVIEW
# ---------------------------------------------------------------
# The following script implements a binomial heap in Python. A binomial heap is a collection of binomial trees.
# A Binomial Tree of order k has the following properties.
# a) It has exactly 2^k nodes.
# b) It has depth k.
# c) There are exactly C(k, i) nodes at depth i for i = 0, 1, . . . , k.
# d) The root has degree k and children of root are Binomial Trees with order k-1, k-2,.. 0 from left to right
#
# eg. A Binomial Heap with 13 nodes:
#
# 12------------10--------------------20
# / \ / | \
# 15 50 70 50 40
# | / | |
# 30 80 85 65
# |
# 100
#
# It is a collection of 3 Binomial Trees of orders 0, 2 and 3 from left to right.
# ---------------------------------------------------------------
# CLASSES
# ---------------------------------------------------------------
# CLASS: Node
# DESCRIPTION: This class stores the object representation of each node. Each node gets initialized with the following
# fields along with their getters and setters:
# ---------------------------------------------------------
# [Element Name] -> [Initialization value] >> [Description]
# ---------------------------------------------------------
# parent -> None >> Parent of this node
# key -> None >> The value stored at this node
# degree -> 0 >> Order of the subtree rooted here (i.e. its number of children)
# sibling -> None >> Points to the adjacent sibling on the right of this element
# child -> None >> Stores a pointer to the left-most child of this node
class Node:
    """A single node of a binomial tree.

    Attributes:
        key:     the value stored at this node
        parent:  parent node, or None for a root
        degree:  order of the subtree rooted here (number of children)
        sibling: next node to the right in this node's sibling/root list
        child:   left-most child, or None
    """

    def __init__(self, key):
        # Constructor moved to the top of the class for readability.
        self.key = key
        self.parent = None
        self.degree = 0
        self.sibling = None
        self.child = None

    # -- trivial accessors, kept for interface compatibility ----------------
    def get_parent(self):
        return self.parent

    def set_parent(self, parent):
        self.parent = parent

    def get_key(self):
        return self.key

    def set_key(self, key):
        self.key = key

    def get_degree(self):
        return self.degree

    def set_degree(self, degree):
        self.degree = degree

    def get_sibling(self):
        return self.sibling

    def set_sibling(self, sibling):
        self.sibling = sibling

    def get_child(self):
        return self.child

    def set_child(self, child):
        self.child = child

    # -- traversal helpers --------------------------------------------------
    def get_minimum(self):
        """Return the node with the smallest key in this sibling list.

        Only siblings are scanned (not children) because in a min-ordered
        binomial heap the minimum always sits in the root list.
        """
        best = self
        cursor = self
        smallest = cursor.get_key()  # renamed from 'min' (shadowed builtin)
        while cursor is not None:
            if cursor.get_key() < smallest:
                smallest = cursor.get_key()
                best = cursor
            cursor = cursor.get_sibling()
        return best

    def get_node(self, key):
        """Depth-first search for *key* among this node, its siblings and
        all of their descendants; return the matching Node or None."""
        temp = self
        node = None
        while temp is not None:
            if temp.get_key() == key:
                node = temp
                break
            if temp.get_child() is None:
                temp = temp.get_sibling()
            else:
                # Recurse into the subtree first; fall back to siblings.
                node = temp.get_child().get_node(key)
                if node is None:
                    temp = temp.get_sibling()
                else:
                    break
        return node

    def search_key(self, key):
        """Alias for get_node(), kept for interface compatibility.

        The original duplicated get_node() verbatim; delegating removes the
        duplication without changing behavior.
        """
        return self.get_node(key)

    def reverse(self, s):
        """Recursively reverse this sibling list, relinking each node's
        sibling pointer to *s*; return the new first node."""
        if self.get_sibling() is not None:
            rev = self.get_sibling().reverse(self)
        else:
            rev = self
        self.set_sibling(s)
        return rev

    def walk(self, depth):
        """Render this subtree as text, one line per node, indented by the
        node's degree. *depth* is only threaded through the recursion."""
        result = ""
        for i in range(0, self.get_degree()):
            result += " "
        if self.get_parent():
            result += ("key = " + str(self.get_key()) + ", parent = " + str(self.get_parent().get_key()) + "\n")
        else:
            result += ("key = " + str(self.get_key()) + ", root\n")
        x = self.get_child()
        while x is not None:
            result += x.walk(depth + 1)
            x = x.get_sibling()  # use the accessor consistently
        return result
# CLASS: BinomialHeap
# DESCRIPTION: BinomialHeap stores the skeleton of the binomial heap structure. It is initialized with an
# empty root list (head_node is None); insert/union operations update the head pointer as binomial trees
# are added or consolidated.
class BinomialHeap(object):
    """Min-ordered binomial heap: a degree-sorted linked list of binomial
    tree roots, linked via Node.sibling pointers."""

    def __init__(self):
        self.head_node = None  # head of the root list; None for an empty heap

    def get_head(self):
        """Return the head Node of the root list (None when empty)."""
        return self.head_node

    def set_head(self, newHead):
        self.head_node = newHead

    def binomial_heap_minimum(self):
        """Return the Node holding the minimum key, or None if empty.

        The heap is min-ordered, so the minimum must be one of the at most
        lg(n) + 1 roots; only the root list is scanned.
        """
        minNode = self.get_head()
        if minNode is not None:
            # BUG FIX: seed the running minimum with the head's own key.
            # The original seeded it with the magic constant 9999, which made
            # the head lose against any sibling key below 9999.
            minNodeKey = minNode.get_key()
            currNode = minNode.get_sibling()
            while currNode is not None:
                nodeKey = currNode.get_key()
                if nodeKey < minNodeKey:
                    minNode = currNode
                    minNodeKey = nodeKey
                currNode = currNode.get_sibling()
        return minNode

    def binomial_link(self, y, z):
        """Make Node *y* the left-most child of Node *z* (both trees must
        have equal degree). NOTE: union() below inlines this operation
        rather than calling it."""
        y.set_parent(z)
        y.set_sibling(z.get_child())
        z.set_child(y)
        z.set_degree(z.get_degree() + 1)
        return y, z

    def binomial_heap_merge(self, node2):
        """Splice the root list headed by *node2* into this heap's root
        list, keeping the combined list sorted by degree. Duplicate degrees
        are resolved afterwards by binomial_heap_union()."""
        node1 = self.get_head()
        while node1 is not None and node2 is not None:
            if node1.get_degree() == node2.get_degree():
                # Equal degrees: insert node2's root right after node1.
                tempNode = node2
                node2 = node2.get_sibling()
                tempNode.set_sibling(node1.get_sibling())
                node1.set_sibling(tempNode)
                node1 = tempNode.get_sibling()
            else:
                if node1.get_degree() < node2.get_degree():
                    if node1.get_sibling() is None or node1.get_sibling().get_degree() > node2.get_degree():
                        # node2's root slots in between node1 and its sibling.
                        tempNode = node2
                        node2 = node2.get_sibling()
                        tempNode.set_sibling(node1.get_sibling())
                        node1.set_sibling(tempNode)
                        node1 = tempNode.get_sibling()
                    else:
                        node1 = node1.get_sibling()
                else:
                    # node2's root precedes node1; it may become the new head.
                    tempNode = node1
                    node1 = node2
                    node2 = node2.get_sibling()
                    node1.set_sibling(tempNode)
                    if tempNode == self.get_head():
                        self.set_head(node1)
        if node1 is None:
            # Remaining roots of node2 are appended at the tail.
            node1 = self.get_head()
            while node1.get_sibling() is not None:
                node1 = node1.get_sibling()
            node1.set_sibling(node2)

    def binomial_heap_union(self, node2):
        """Unite the root list headed by *node2* with this heap: merge the
        root lists, then link equal-degree trees pairwise so at most one
        tree of each degree remains."""
        self.binomial_heap_merge(node2)
        prevTemp = None
        temp = self.get_head()
        nextTemp = self.get_head().get_sibling()
        while nextTemp is not None:
            # Skip when degrees differ, or when three equal-degree trees are
            # adjacent (the latter pair gets linked on a later iteration).
            if (temp.get_degree() != nextTemp.get_degree()) or (nextTemp.get_sibling() is not None and nextTemp.get_sibling().get_degree() == temp.get_degree()):
                prevTemp = temp
                temp = nextTemp
            else:
                if temp.get_key() <= nextTemp.get_key():
                    # temp wins: nextTemp becomes temp's left-most child.
                    temp.set_sibling(nextTemp.get_sibling())
                    nextTemp.set_parent(temp)
                    nextTemp.set_sibling(temp.get_child())
                    temp.set_child(nextTemp)
                    temp.set_degree(temp.get_degree() + 1)
                else:
                    # nextTemp wins: temp becomes nextTemp's left-most child.
                    if prevTemp is None:
                        self.set_head(nextTemp)
                    else:
                        prevTemp.set_sibling(nextTemp)
                    temp.set_parent(nextTemp)
                    temp.set_sibling(nextTemp.get_child())
                    nextTemp.set_child(temp)
                    nextTemp.set_degree(nextTemp.get_degree() + 1)
                    temp = nextTemp
            nextTemp = temp.get_sibling()

    def binomial_heap_extract_min(self):
        """Remove the Node with the minimum key and return that key.

        The minimum root is spliced out of the root list; its children are
        detached, reversed (so they are degree-sorted) and united back in.
        """
        temp = self.get_head()
        prevTemp = None
        minNode = self.get_head().get_minimum()
        # Locate the minimum root and splice it out of the root list.
        while temp.get_key() != minNode.get_key():
            prevTemp = temp
            temp = temp.get_sibling()
        if prevTemp is None:
            self.set_head(temp.get_sibling())
        else:
            prevTemp.set_sibling(temp.get_sibling())
        temp = temp.get_child()
        fakeNode = temp
        # Orphan the removed root's children so they can become roots.
        while temp is not None:
            temp.set_parent(None)
            temp = temp.get_sibling()
        if not (self.get_head() is None and fakeNode is None):
            if self.get_head() is None and fakeNode is not None:
                # The heap is now exactly the reversed child list.
                self.set_head(fakeNode.reverse(None))
            else:
                # BUG FIX: the original tested ``Node is not None`` — the
                # class object, which is always true — instead of the head.
                if not (self.get_head() is not None and fakeNode is None):
                    self.binomial_heap_union(fakeNode.reverse(None))
        return minNode.get_key()

    def binomial_heap_insert(self, x):
        """Insert Node *x* by treating it as a one-node heap and uniting it
        with this heap."""
        if self.get_head() is None:
            self.set_head(x)
            return self
        else:
            return self.binomial_heap_union(x)

    def binomial_heap_decrease_key(self, val, k):
        """Decrease the key of the node currently holding *val* to *k*.

        Returns False when *val* is not found or *k* would increase the key
        (per the documented contract), True on success.
        """
        temp = self.get_head().get_node(val)
        if temp is None:
            return False
        # BUG FIX: enforce the documented contract — refuse to increase.
        if k > temp.get_key():
            return False
        temp.set_key(k)
        tempParent = temp.get_parent()
        # Bubble the decreased key up by swapping key values with parents.
        while tempParent is not None and temp.get_key() < temp.get_parent().get_key():
            z = temp.get_key()
            temp.set_key(tempParent.get_key())
            tempParent.set_key(z)
            temp = tempParent
            tempParent = tempParent.get_parent()
        return True

    def binomial_heap_delete(self, x):
        """Delete the node holding key *x*: decrease it to the -9999
        sentinel (assumed smaller than any real key) so it bubbles to a
        root, then extract the minimum."""
        self.binomial_heap_decrease_key(x, -9999)
        self.binomial_heap_extract_min()
        return True

    def binomial_heap_walk(self):
        """Print the current structure of the heap, one line per node."""
        result = ""
        x = self.get_head()
        while x is not None:
            result += x.walk(0)
            x = x.get_sibling()
        print(result)  # function-call form works on both Python 2 and 3

    def binomial_node_search(self, key):
        """Return the Node holding *key*, searching the whole heap."""
        currNode = self.get_head()
        if currNode.get_key() == key:
            return currNode
        else:
            return currNode.search_key(key)
# FUNCTION SIGNATURE: make_binomial_heap()
# DESCRIPTION: This function creates a new, empty BinomialHeap object and returns it to the caller.
# ARGUMENTS: -NA-
# RETURNS: An empty BinomialHeap object
def make_binomial_heap():
    """Create and return a fresh, empty BinomialHeap."""
    return BinomialHeap()
# FUNCTION SIGNATURE: main()
# DESCRIPTION: This function acts as the starting point for this program. All the initial declarations and program
# logic go in here
# ARGUMENTS: -NA-
# RETURNS: -NA-
def main():
    """Demo driver: build two heaps, unite them, then exercise extract-min,
    decrease-key and delete, printing the heap after each step."""
    # Build the first heap.
    heap1 = make_binomial_heap()
    heap1.binomial_heap_insert(Node(37))
    heap1.binomial_heap_insert(Node(41))
    heap1.binomial_heap_insert(Node(12))
    heap1.binomial_heap_insert(Node(55))
    print("Initializing Heap 1:")
    heap1.binomial_heap_walk()
    # Build the second heap.
    heap2 = make_binomial_heap()
    for value in (12, 36, 1, 10, 22):
        heap2.binomial_heap_insert(Node(value))
    print("Initializing Heap 2:")
    heap2.binomial_heap_walk()
    # Unite heap2's root list into heap1.
    heap1.binomial_heap_union(heap2.get_head())
    # BUG FIX: the original message said "heap 3"; there is no heap 3.
    print("Union of heap 1 and heap 2:")
    heap1.binomial_heap_walk()
    print("Extracting min: " + str(heap1.binomial_heap_extract_min()))
    print("Heap after extraction:")
    # BUG FIX: walk() prints and returns None, so the original
    # ``print heap1.binomial_heap_walk()`` emitted a spurious "None" line.
    heap1.binomial_heap_walk()
    print("Decreasing 55 to 7")
    heap1.binomial_heap_decrease_key(55, 7)
    heap1.binomial_heap_walk()
    print("Deleting 36")
    heap1.binomial_heap_delete(36)
    heap1.binomial_heap_walk()
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)


# Object used by _background_consumer to signal the source is exhausted
# to the main thread. Compared by identity (``is``), so any unique object works.
_sentinel = object()
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap
        start (int): starting iteration count. Note that this doesn't
            actually advance the iterator.
        total (int): override the iterator length returned by ``__len__``.
            This can be used to truncate *iterable*.

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, start=None, total=None):
        self._itr = iter(iterable)
        # Inherit the wrapped iterator's count when no explicit start given.
        self.n = start or getattr(iterable, "n", 0)
        self.total = total if total is not None else self.n + len(iterable)

    def __len__(self):
        return self.total

    def __iter__(self):
        return self

    def __next__(self):
        if not self.has_next():
            raise StopIteration
        try:
            x = next(self._itr)
        except StopIteration:
            # BUG FIX: the second line was a plain string, so "{self.n}"
            # appeared literally in the message; both parts are f-strings now.
            raise IndexError(
                f"Iterator expected to have length {self.total}, "
                f"but exhausted at position {self.n}."
            )
        self.n += 1
        return x

    def has_next(self):
        """Whether the iterator has been exhausted."""
        return self.n < self.total

    def skip(self, n):
        """Fast-forward the iterator by skipping n elements."""
        for _ in range(n):
            next(self)
        return self

    def take(self, n):
        """Truncate the iterator to n elements at most."""
        self.total = min(self.total, n)
        # Propagate this change to the underlying iterator
        if hasattr(self._itr, "take"):
            self._itr.take(max(n - self.n, 0))
        return self
class EpochBatchIterating(object):
    """Abstract interface for batch iterators that are re-used across epochs
    and can be checkpointed via state_dict()/load_state_dict()."""

    def __len__(self) -> int:
        # Number of batches in one epoch (per shard).
        raise NotImplementedError

    @property
    def next_epoch_idx(self):
        # Index of the epoch that the next call to next_epoch_itr() will run.
        raise NotImplementedError

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus (bool, optional): ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
            set_dataset_epoch (bool, optional): update the wrapped Dataset with
                the new epoch number (default: True).
        """
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        raise NotImplementedError

    @property
    def iterations_in_epoch(self) -> int:
        """The number of consumed batches in the current epoch."""
        raise NotImplementedError

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        raise NotImplementedError

    @property
    def first_batch(self):
        # Sentinel value — presumably recognized by callers as "no real first
        # batch available"; verify before changing. Subclasses may override
        # to return an actual collated batch (see EpochBatchIterator).
        return "DUMMY"
class StreamingEpochBatchIterator(EpochBatchIterating):
    """A streaming-style iterator over a :class:`torch.utils.data.IterableDataset`.

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        max_sentences: batch size
        collate_fn (callable): merges a list of samples to form a mini-batch
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means the data will be loaded in the main process
            (default: 0).
        epoch (int, optional): the epoch to start the iterator from
            (default: 1).
        buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speeding up dataloading. When buffer_size is zero, the
            default torch.utils.data.DataLoader preloading is used.
        timeout (int, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative (default: ``0``).
    """

    def __init__(
        self,
        dataset,
        max_sentences=1,
        collate_fn=None,
        epoch=1,
        num_workers=0,
        buffer_size=0,
        timeout=0,
    ):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.max_sentences = max_sentences
        self.collate_fn = collate_fn
        self.epoch = max(epoch, 1)  # we use 1-based indexing for epochs
        self.num_workers = num_workers
        # This upper limit here is to prevent people from abusing this feature
        # in a shared computing environment.
        self.buffer_size = min(buffer_size, 20)
        self.timeout = timeout

        self._current_epoch_iterator = None

    @property
    def next_epoch_idx(self):
        """Return the epoch index after *next_epoch_itr* is called."""
        # Only advance once the current epoch's iterator has been exhausted.
        if self._current_epoch_iterator is not None and self.end_of_epoch():
            return self.epoch + 1
        else:
            return self.epoch

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        # Advance the epoch counter, tell the dataset about the new epoch
        # (if it supports set_epoch), then build a fresh iterator.
        self.epoch = self.next_epoch_idx
        if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(self.epoch)
        self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        return not self._current_epoch_iterator.has_next()

    @property
    def iterations_in_epoch(self) -> int:
        """The number of consumed batches in the current epoch."""
        if self._current_epoch_iterator is not None:
            return self._current_epoch_iterator.n
        return 0

    def state_dict(self):
        # Only the epoch number is persisted; within-epoch progress of a
        # streaming dataset is not restored by this iterator.
        return {
            "epoch": self.epoch,
        }

    def load_state_dict(self, state_dict):
        self.epoch = state_dict["epoch"]

    def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):
        # NOTE(review): *shuffle* is accepted but unused here — streaming
        # datasets presumably control their own ordering; confirm.
        if self.num_workers > 0:
            # Silence noisy semaphore_tracker warnings from worker processes.
            os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

        # Create data loader
        worker_init_fn = getattr(self.dataset, "worker_init_fn", None)
        itr = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.max_sentences,
            collate_fn=self.collate_fn,
            num_workers=self.num_workers,
            timeout=self.timeout,
            worker_init_fn=worker_init_fn,
            pin_memory=True,
        )

        # Wrap with a BufferedIterator if needed
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)

        # Wrap with CountingIterator
        itr = CountingIterator(itr, start=offset)
        return itr
class EpochBatchIterator(EpochBatchIterating):
    """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.

    Compared to :class:`torch.utils.data.DataLoader`, this iterator:

    - can be reused across multiple epochs with the :func:`next_epoch_itr`
      method (optionally shuffled between epochs)
    - can be serialized/deserialized with the :func:`state_dict` and
      :func:`load_state_dict` methods
    - supports sharding with the *num_shards* and *shard_id* arguments

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        collate_fn (callable): merges a list of samples to form a mini-batch
        batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
            indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
            A callable batch_sampler will be called for each epoch to enable per epoch dynamic
            batch iterators defined by this callable batch_sampler.
        seed (int, optional): seed for random number generator for
            reproducibility (default: 1).
        num_shards (int, optional): shard the data iterator into N
            shards (default: 1).
        shard_id (int, optional): which shard of the data iterator to
            return (default: 0).
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means the data will be loaded in the main process
            (default: 0).
        epoch (int, optional): the epoch to start the iterator from
            (default: 1).
        buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speeding up dataloading. When buffer_size is zero, the
            default torch.utils.data.DataLoader preloading is used.
        timeout (int, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative (default: ``0``).
        disable_shuffling (bool, optional): force disable shuffling
            (default: ``False``).
        skip_remainder_batch (bool, optional): if set, discard the last batch in an epoch
            for the sake of training stability, as the last batch is usually smaller than
            local_batch_size * distributed_word_size (default: ``False``).
        grouped_shuffling (bool, optional): enable shuffling batches in groups
            of num_shards. Ensures that each GPU receives similar length sequences when
            batches are sorted by length.
    """

    def __init__(
        self,
        dataset,
        collate_fn,
        batch_sampler,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=1,
        buffer_size=0,
        timeout=0,
        disable_shuffling=False,
        skip_remainder_batch=False,
        grouped_shuffling=False,
    ):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        self.batch_sampler = batch_sampler
        # Static samplers are materialized once; callable samplers are
        # re-evaluated lazily per epoch (see frozen_batches).
        self._frozen_batches = (
            tuple(batch_sampler) if not callable(batch_sampler) else None
        )
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id
        self.num_workers = num_workers
        # This upper limit here is to prevent people from abusing this feature
        # in a shared computing environment.
        self.buffer_size = min(buffer_size, 20)
        self.timeout = timeout
        self.disable_shuffling = disable_shuffling
        self.skip_remainder_batch = skip_remainder_batch
        self.grouped_shuffling = grouped_shuffling

        self.epoch = max(epoch, 1)  # we use 1-based indexing for epochs
        self.shuffle = not disable_shuffling
        self._cur_epoch_itr = None
        self._next_epoch_itr = None
        self._supports_prefetch = getattr(dataset, "supports_prefetch", False)

    @property
    def frozen_batches(self):
        # Lazily materialize (and cache) the batches for the current epoch.
        if self._frozen_batches is None:
            self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
        return self._frozen_batches

    @property
    def first_batch(self):
        if len(self.frozen_batches) == 0:
            raise Exception(
                "The dataset is empty. This could indicate "
                "that all elements in the dataset have been skipped. "
                "Try increasing the max number of allowed tokens or using "
                "a larger dataset."
            )

        if getattr(self.dataset, "supports_fetch_outside_dataloader", True):
            # Collate the first batch eagerly, outside any DataLoader worker.
            return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])
        else:
            # Sentinel from the base class; presumably recognized by callers.
            return "DUMMY"

    def __len__(self):
        # Number of batches per shard (ceil so every shard has equal length).
        return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))

    @property
    def n(self):
        return self.iterations_in_epoch

    @property
    def next_epoch_idx(self):
        """Return the epoch index after *next_epoch_itr* is called."""
        if self._next_epoch_itr is not None:
            # A restored (load_state_dict) iterator still belongs to this epoch.
            return self.epoch
        elif self._cur_epoch_itr is not None and self.end_of_epoch():
            return self.epoch + 1
        else:
            return self.epoch

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus (bool, optional): ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
            set_dataset_epoch (bool, optional): update the wrapped Dataset with
                the new epoch number (default: True).
        """
        if self.disable_shuffling:
            shuffle = False
        prev_epoch = self.epoch
        self.epoch = self.next_epoch_idx
        if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(self.epoch)
        if self._next_epoch_itr is not None:
            # Reuse the iterator prepared by load_state_dict().
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            if callable(self.batch_sampler) and prev_epoch != self.epoch:
                # reset _frozen_batches to refresh the next epoch
                self._frozen_batches = None
            self._cur_epoch_itr = self._get_iterator_for_epoch(
                self.epoch,
                shuffle,
                fix_batches_to_gpus=fix_batches_to_gpus,
            )
        self.shuffle = shuffle
        return self._cur_epoch_itr

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        return not self._cur_epoch_itr.has_next()

    @property
    def iterations_in_epoch(self):
        """The number of consumed batches in the current epoch."""
        if self._cur_epoch_itr is not None:
            return self._cur_epoch_itr.n
        elif self._next_epoch_itr is not None:
            return self._next_epoch_itr.n
        return 0

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        if self.end_of_epoch():
            # A finished epoch is saved as the *start* of the next epoch.
            epoch = self.epoch + 1
            iter_in_epoch = 0
        else:
            epoch = self.epoch
            iter_in_epoch = self.iterations_in_epoch
        return {
            "version": 2,
            "epoch": epoch,
            "iterations_in_epoch": iter_in_epoch,
            "shuffle": self.shuffle,
        }

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.epoch = state_dict["epoch"]
        itr_pos = state_dict.get("iterations_in_epoch", 0)
        version = state_dict.get("version", 1)
        if itr_pos > 0:
            # fast-forward epoch iterator
            self._next_epoch_itr = self._get_iterator_for_epoch(
                self.epoch,
                shuffle=state_dict.get("shuffle", True),
                offset=itr_pos,
            )
            if self._next_epoch_itr is None:
                # The saved offset lies beyond this epoch's batch count.
                if version == 1:
                    # legacy behavior: we finished the epoch, increment epoch counter
                    self.epoch += 1
                else:
                    raise RuntimeError(
                        "Cannot resume training due to dataloader mismatch, please "
                        "report this to the fairseq developers. You can relaunch "
                        "training with `--reset-dataloader` and it should work."
                    )
        else:
            self._next_epoch_itr = None

    def _get_iterator_for_epoch(
        self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
    ):
        def shuffle_batches(batches, seed):
            # Deterministic per-epoch shuffle, seeded so all shards agree.
            with data_utils.numpy_seed(seed):
                if self.grouped_shuffling:
                    # Shuffle groups of num_shards consecutive batches so each
                    # shard keeps receiving similar-length sequences.
                    grouped_batches = [
                        batches[(i * self.num_shards) : ((i + 1) * self.num_shards)]
                        for i in range((len(batches) // self.num_shards))
                    ]
                    np.random.shuffle(grouped_batches)
                    batches = list(itertools.chain(*grouped_batches))
                else:
                    np.random.shuffle(batches)

            return batches

        if self._supports_prefetch:
            batches = self.frozen_batches

            if shuffle and not fix_batches_to_gpus:
                # Shuffle before sharding: batch-to-shard mapping changes
                # every epoch.
                batches = shuffle_batches(list(batches), self.seed + epoch)

            batches = list(
                ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
            )
            self.dataset.prefetch([i for s in batches for i in s])

            if shuffle and fix_batches_to_gpus:
                # Shuffle after sharding (with a shard-specific seed): each
                # shard keeps the same batches across epochs.
                batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
        else:
            if shuffle:
                batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
            else:
                batches = self.frozen_batches
            batches = list(
                ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
            )

        if offset > 0 and offset >= len(batches):
            # Nothing left in this epoch; caller treats None as "exhausted".
            return None

        if self.num_workers > 0:
            # Silence noisy semaphore_tracker warnings from worker processes.
            os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

        # Create data loader
        itr = torch.utils.data.DataLoader(
            self.dataset,
            collate_fn=self.collate_fn,
            batch_sampler=batches[offset:],
            num_workers=self.num_workers,
            timeout=self.timeout,
            pin_memory=True,
        )

        # Wrap with a BufferedIterator if needed
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)

        # Wrap with CountingIterator
        itr = CountingIterator(itr, start=offset)

        if self.skip_remainder_batch:
            # TODO: Below is a lazy implementation which discard the final batch regardless
            # of whether it is a full batch or not.
            total_num_itrs = len(batches) - 1
            itr.take(total_num_itrs)
            logger.info(f"skip final residual batch, total_num_itrs = {total_num_itrs}")

        return itr
class GroupedIterator(CountingIterator):
    """Wrapper around an iterable that returns groups (chunks) of items.

    Args:
        iterable (iterable): iterable to wrap
        chunk_size (int): size of each chunk
        skip_remainder_batch (bool, optional): if set, discard the last grouped
            batch in each training epoch, as the last grouped batch is usually
            smaller than local_batch_size * distributed_word_size * chunk_size
            (default: ``False``).

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, chunk_size, skip_remainder_batch=False):
        base_len = len(iterable)
        if skip_remainder_batch:
            # Only full chunks count when the remainder is discarded.
            num_groups = int(math.floor(base_len / float(chunk_size)))
            logger.info(
                f"skip final residual batch, grouped total_num_itrs = {num_groups}"
            )
        else:
            num_groups = int(math.ceil(base_len / float(chunk_size)))
            logger.info(f"grouped total_num_itrs = {num_groups}")

        # Already-consumed elements translate into already-consumed groups.
        consumed = getattr(iterable, "n", 0)
        super().__init__(
            _chunk_iterator(iterable, chunk_size, skip_remainder_batch),
            start=int(math.ceil(consumed / float(chunk_size))),
            total=num_groups,
        )
        self.chunk_size = chunk_size

        if skip_remainder_batch:
            self.take(num_groups)
            # HACK: also shrink the wrapped iterator so training can move on
            # to the next epoch once this grouped iterator is exhausted.
            # Double-check this in case of unexpected behavior.
            iterable.take(num_groups * chunk_size)
def _chunk_iterator(itr, chunk_size, skip_remainder_batch=False):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if not skip_remainder_batch and len(chunk) > 0:
yield chunk
class ShardedIterator(CountingIterator):
    """A sharded wrapper around an iterable, padded to length.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterator over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards* (default: None).

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(
        self, iterable, num_shards, shard_id, fill_value=None, skip_remainder_batch=None
    ):
        """
        Args:
            skip_remainder_batch: ignored"""
        if not (0 <= shard_id < num_shards):
            raise ValueError("shard_id must be between 0 and num_shards")
        sharded_len = int(math.ceil(len(iterable) / float(num_shards)))

        # This shard owns every num_shards-th element, starting at shard_id.
        own_elements = itertools.islice(iterable, shard_id, len(iterable), num_shards)
        # zip_longest pads the tail so every shard yields exactly
        # sharded_len items; drop the index half of each pair lazily.
        padded_pairs = itertools.zip_longest(
            range(sharded_len), own_elements, fillvalue=fill_value
        )
        shard_itr = (element for _, element in padded_pairs)

        super().__init__(
            shard_itr,
            start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))),
            total=sharded_len,
        )
class BackgroundConsumer(Thread):
    """Worker thread that pumps items from *source* into *queue*.

    Stops after *max_len* items (when given), then enqueues the module-level
    ``_sentinel`` so the consumer knows the stream ended. Any exception is
    forwarded through the queue instead of being raised in this thread.
    """

    def __init__(self, queue, source, max_len, cuda_device):
        super().__init__()

        self._queue = queue
        self._source = source
        self._max_len = max_len
        self.count = 0
        self.cuda_device = cuda_device

    def run(self):
        # Pin the CUDA device first so pin_memory work done on this thread
        # does not implicitly create a context on GPU 0.
        if self.cuda_device is not None:
            torch.cuda.set_device(self.cuda_device)

        try:
            for element in self._source:
                self._queue.put(element)
                self.count += 1
                # Stop early once the maximum length has been reached.
                if self._max_len is not None and self.count >= self._max_len:
                    break

            # Signal the consumer we are done.
            self._queue.put(_sentinel)
        except Exception as e:
            # Ship the exception to the consumer side, which re-raises it.
            self._queue.put(e)
class BufferedIterator(object):
    """Iterator that prefetches from *iterable* on a background thread.

    A bounded queue of *size* items decouples production (the
    BackgroundConsumer thread) from consumption, and a periodic debug
    message warns when the buffer runs dry (a data-loading bottleneck).
    """

    def __init__(self, size, iterable):
        self._queue = queue.Queue(size)
        self._iterable = iterable
        self._consumer = None

        self.start_time = time.time()
        self.warning_time = None

        self.total = len(iterable)

    def _create_consumer(self):
        # Lazily started on first __next__ so construction stays cheap.
        self._consumer = BackgroundConsumer(
            self._queue,
            self._iterable,
            self.total,
            torch.cuda.current_device() if torch.cuda.is_available() else None,
        )
        self._consumer.daemon = True
        self._consumer.start()

    def __iter__(self):
        return self

    def __len__(self):
        return self.total

    def take(self, n):
        """Cap this iterator at *n* items and propagate the cap downstream."""
        self.total = min(self.total, n)
        inner_take = getattr(self._iterable, "take", None)
        if inner_take is not None:
            inner_take(n)
        return self

    def __next__(self):
        # Create consumer if not created yet
        if self._consumer is None:
            self._create_consumer()

        # Notify the user if there is a data loading bottleneck: buffer is
        # (nearly) empty 5+ minutes in, warned about at most every 15 min.
        low_water = min(2, max(1, self._queue.maxsize // 2))
        if self._queue.qsize() < low_water:
            now = time.time()
            if now - self.start_time > 5 * 60 and (
                self.warning_time is None or now - self.warning_time > 15 * 60
            ):
                logger.debug(
                    "Data loading buffer is empty or nearly empty. This may "
                    "indicate a data loading bottleneck, and increasing the "
                    "number of workers (--num-workers) may help."
                )
                self.warning_time = now

        # Get next example
        item = self._queue.get(True)
        if isinstance(item, Exception):
            raise item
        if item is _sentinel:
            raise StopIteration()
        return item
class GroupedEpochBatchIterator(EpochBatchIterator):
    """Grouped version of EpochBatchIterator.

    It takes several batch samplers, one per dataset. Each epoch, every
    dataset-wise sampler is shuffled individually (same seed for all, so
    the grouping stays aligned), then the sub-samplers are combined into
    one big sampler with a deterministic permutation that mixes batches
    from the different datasets. It acts like EpochBatchIterator but
    guarantees that
    1) each mini-batch contains data from only one dataset, and
    2) different workers fetch batches in the same order, so at any step
       they all draw from the same dataset.
    *mult_rate* handles the update_freq > 1 case, where update_freq
    consecutive mini-batches must come from the same source.
    """

    def __init__(
        self,
        dataset,
        collate_fn,
        batch_samplers,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=0,
        mult_rate=1,
        buffer_size=0,
        skip_remainder_batch=False,
    ):
        super().__init__(
            dataset,
            collate_fn,
            batch_samplers,
            seed,
            num_shards,
            shard_id,
            num_workers,
            epoch,
            buffer_size,
            skip_remainder_batch=skip_remainder_batch,
        )
        # level 0: sub-samplers 1: batch_idx 2: batches
        self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers])
        # Batches are regrouped in chunks of step_size so that mult_rate
        # consecutive updates (times num_shards workers) use one dataset.
        self.step_size = mult_rate * num_shards
        # Per-dataset batch counts, truncated to a multiple of step_size.
        self.lengths = [
            (len(x) // self.step_size) * self.step_size for x in self.frozen_batches
        ]

    def __len__(self):
        # Total number of batches actually served (post-truncation).
        return sum(self.lengths)

    @property
    def first_batch(self):
        if len(self.frozen_batches) == 0:
            raise Exception(
                "The dataset is empty. This could indicate "
                "that all elements in the dataset have been skipped. "
                "Try increasing the max number of allowed tokens or using "
                "a larger dataset."
            )

        if self.dataset.supports_fetch_outside_dataloader:
            return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]])
        else:
            # Placeholder when the dataset cannot be indexed outside a
            # DataLoader worker.
            return "DUMMY"

    def _get_iterator_for_epoch(
        self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
    ):
        def shuffle_batches(batches, seed):
            # Deterministic in-place shuffle of one sub-sampler's batches.
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches

        def return_full_batches(batch_sets, seed, shuffle):
            if shuffle:
                batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets]

            # Truncate each sub-sampler to its step_size-aligned length.
            batch_sets = [
                batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets))
            ]
            batches = list(itertools.chain.from_iterable(batch_sets))

            if shuffle:
                # Permute whole step_size-sized chunks (not individual
                # batches), so consecutive mini-batches within a chunk
                # still come from a single dataset.
                with data_utils.numpy_seed(seed):
                    idx = np.random.permutation(len(batches) // self.step_size)
                    if len(idx) * self.step_size != len(batches):
                        raise ValueError(
                            "ERROR: %d %d %d %d"
                            % (len(idx), self.step_size, len(batches), self.shard_id),
                            ":".join(["%d" % x for x in self.lengths]),
                        )
                    mini_shards = [
                        batches[i * self.step_size : (i + 1) * self.step_size]
                        for i in idx
                    ]
                    batches = list(itertools.chain.from_iterable(mini_shards))

            return batches

        if self._supports_prefetch:
            raise NotImplementedError("To be implemented")
        else:
            batches = return_full_batches(
                self.frozen_batches, self.seed + epoch, shuffle
            )
            batches = list(
                ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
            )

        if offset > 0 and offset >= len(batches):
            return None

        if self.num_workers > 0:
            # Silence a noisy multiprocessing semaphore_tracker warning.
            os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

        itr = torch.utils.data.DataLoader(
            self.dataset,
            collate_fn=self.collate_fn,
            batch_sampler=batches[offset:],
            num_workers=self.num_workers,
        )
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)

        return CountingIterator(itr, start=offset)
| |
#!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""The stdlib zone of the codeintel database.
See the database/database.py module docstring for an overview.
"""
import sys
import os
from os.path import (join, dirname, exists, expanduser, splitext, basename,
split, abspath, isabs, isdir, isfile)
import pickle as pickle
import threading
import time
import bisect
import fnmatch
from glob import glob
from pprint import pprint, pformat
import logging
from io import StringIO
import codecs
import copy
import weakref
import queue
import ciElementTree as ET
from codeintel2.common import *
from codeintel2.buffer import Buffer
from codeintel2.util import dedent, safe_lang_from_lang, banner
from codeintel2.tree import tree_from_cix_path
from codeintel2.database.resource import AreaResource
from codeintel2.database.util import (rmdir, filter_blobnames_for_prefix)
#---- globals
log = logging.getLogger("codeintel.db")
# log.setLevel(logging.DEBUG)
#---- Database zone and lib implementations
class StdLib(object):
    """Singleton lib managing a particular db/stdlibs/<stdlib-name>
    area of the db.

    These are dished out via Database.get_stdlib(), which indirectly
    then is dished out by the StdLibsZone.get_lib().

    Because (1) any updating of the stdlib db area for this language has
    already been done (by StdLibsZone.get_lib()) and (2) this is a
    singleton: we shouldn't have to worry about locking.
    """
    # Lazily-loaded pickled indices; see the corresponding properties.
    _blob_index = None
    _toplevelname_index = None
    _toplevelprefix_index = None

    def __init__(self, db, base_dir, lang, name):
        """Create a stdlib view.

        "db" is the owning Database instance.
        "base_dir" is the db/stdlibs/<name> directory for this stdlib.
        "lang" is the language name (e.g. "Perl").
        "name" is the stdlib name (e.g. "perl-5.8").
        """
        self.db = db
        self.lang = lang
        self.name = name
        self.base_dir = base_dir
        self._import_handler = None
        self._blob_imports_from_prefix_cache = {}
        self._blob_from_blobname = {}

    def __repr__(self):
        return "<%s stdlib>" % self.name

    @property
    def import_handler(self):
        # Lazily fetch the per-language import handler from the manager.
        if self._import_handler is None:
            self._import_handler \
                = self.db.mgr.citadel.import_handler_from_lang(self.lang)
        return self._import_handler

    @property
    def blob_index(self):
        # {blobname -> dbfile}, loaded on first access.
        if self._blob_index is None:
            idxpath = join(self.base_dir, "blob_index")
            self._blob_index = self.db.load_pickle(idxpath)
        return self._blob_index

    @property
    def toplevelname_index(self):
        # {ilk -> toplevelname -> blobnames}, loaded on first access.
        if self._toplevelname_index is None:
            idxpath = join(self.base_dir, "toplevelname_index")
            self._toplevelname_index = self.db.load_pickle(idxpath)
        return self._toplevelname_index

    @property
    def toplevelprefix_index(self):
        # {ilk -> prefix -> toplevelnames}, loaded on first access.
        if self._toplevelprefix_index is None:
            idxpath = join(self.base_dir, "toplevelprefix_index")
            self._toplevelprefix_index = self.db.load_pickle(idxpath)
        return self._toplevelprefix_index

    def has_blob(self, blobname):
        """Return True iff this stdlib has a blob of the given name."""
        return blobname in self.blob_index

    def get_blob(self, blobname):
        """Return the blob of the given name, or None if unknown.

        Cache the blob once. Don't need to worry about invalidating the
        stdlib blobs as stdlibs should not change during a Komodo session,
        bug 65502.
        """
        blob = self._blob_from_blobname.get(blobname)
        if blob is None:
            try:
                dbfile = self.blob_index[blobname]
            except KeyError:
                return None
            blob = self.db.load_blob(join(self.base_dir, dbfile))
            self._blob_from_blobname[blobname] = blob
        return blob

    def get_blob_imports(self, prefix):
        """Return the set of imports under the given prefix.

        "prefix" is a tuple of import name parts. E.g. ("xml", "sax")
        for "import xml.sax." in Python. Or ("XML", "Parser") for
        "use XML::Parser::" in Perl.

        See description in database.py docstring for details.
        """
        # Memoized per-prefix: the blob index never changes in-session.
        if prefix not in self._blob_imports_from_prefix_cache:
            matches = filter_blobnames_for_prefix(self.blob_index,
                                                 prefix, self.import_handler.sep)
            self._blob_imports_from_prefix_cache[prefix] = matches
        return self._blob_imports_from_prefix_cache[prefix]

    def hits_from_lpath(self, lpath, ctlr=None, curr_buf=None):
        """Return all hits of the given lookup path.

        I.e. a symbol table lookup across all files in the dirs of this
        lib.

            "lpath" is a lookup name list, e.g. ['Casper', 'Logging']
                or ['dojo', 'animation'].
            "ctlr" (optional) is an EvalController instance. If
                specified it should be used in the normal way (logging,
                checking .is_aborted()).
            "curr_buf" (optional) is not relevant for StdLib. Used for
                other *Lib classes.

        A "hit" is (<CIX node>, <scope-ref>). Each one represent a
        scope-tag or variable-tag hit in all of the blobs for the
        execution set buffers.

        Returns the empty list if no hits.
        """
        assert isinstance(lpath, tuple)  # common mistake to pass in a string

        hits = []
        # toplevelname_index: {ilk -> toplevelname -> blobnames}
        for blobnames_from_toplevelname in self.toplevelname_index.values():
            for blobname in blobnames_from_toplevelname.get(lpath[0], ()):
                blob = self.get_blob(blobname)
                try:
                    elem = blob
                    for p in lpath:
                        # LIMITATION: *Imported* names at each scope are
                        # not being included here. This is fine while we
                        # just care about JavaScript.
                        elem = elem.names[p]
                except KeyError:
                    continue
                hits.append((elem, (blob, list(lpath[:-1]))))
        return hits

    def toplevel_cplns(self, prefix=None, ilk=None):
        """Return completion info for all top-level names matching the
        given prefix and ilk in all blobs in this lib.

            "prefix" is a 3-character prefix with which to filter top-level
                names. If None (or not specified), results are not filtered
                based on the prefix.
            "ilk" is a symbol type (e.g. "class", "variable", "function")
                with which to filter results. If None (or not specified),
                results of any ilk are returned.

        Returns a list of 2-tuples: (<ilk>, <name>).

        Note: the list is not sorted, because often some special sorting
        is required for the different completion evaluators that might use
        this API.
        """
        cplns = []
        if prefix is None:
            # Use 'toplevelname_index': {ilk -> toplevelname -> blobnames}
            for i, bft in self.toplevelname_index.items():
                if ilk is not None and i != ilk:
                    continue
                cplns += [(i, toplevelname) for toplevelname in bft]
        else:
            # Use 'toplevelprefix_index':
            #   {ilk -> prefix -> toplevelnames}
            if ilk is not None:
                try:
                    toplevelnames = self.toplevelprefix_index[ilk][prefix]
                except KeyError:
                    pass
                else:
                    cplns += [(ilk, t) for t in toplevelnames]
            else:
                for i, tfp in self.toplevelprefix_index.items():
                    if prefix not in tfp:
                        continue
                    cplns += [(i, t) for t in tfp[prefix]]
        return cplns

    def reportMemory(self, reporter, closure=None):
        """
        Report on memory usage from this StdLib.
        @returns {dict} memory usage; keys are the paths, values are a dict of
                "amount" -> number
                "units" -> "bytes" | "count"
                "desc" -> str description
        """
        log.debug("%s StdLib %s: reporting memory", self.lang, self.name)
        import memutils
        # BUGFIX: dropped an unreachable `return total_mem_usage` that
        # followed this return and referenced an undefined name.
        return {
            "explicit/python/codeintel/%s/stdlib/%s" % (self.lang, self.name): {
                "amount": memutils.memusage(self._blob_from_blobname) +
                memutils.memusage(
                    self._blob_imports_from_prefix_cache),
                "units": "bytes",
                "desc": "The number of bytes of %s codeintel stdlib %s blobs." % (self.lang, self.name),
            }
        }
class StdLibsZone(object):
    """Singleton zone managing the db/stdlibs/... area.

    Because this is a singleton we shouldn't have to worry about locking
    to prevent corruption.
    """
    _res_index = None  # cix-path -> last-updated

    def __init__(self, db):
        self.db = db
        # Source .cix files shipped with codeintel.
        self.stdlibs_dir = join(dirname(dirname(__file__)), "stdlibs")
        # Imported/processed stdlib databases.
        self.base_dir = join(self.db.base_dir, "db", "stdlibs")
        self._stdlib_from_stdlib_ver_and_name = {
        }  # cache of StdLib singletons
        self._vers_and_names_from_lang = {
        }  # lang -> ordered list of (ver, name)

    def vers_and_names_from_lang(self, lang):
        """Return an ordered list of (ver, name) for the given lang.

        E.g.:
            _vers_and_names_from_lang = {
                "php": [
                    ((4,3), "php-4.3"),
                    ((5,0), "php-5.0"),
                    ((5,1), "php-5.1"),
                    ((5,2), "php-5.2"),
                    ((5,3), "php-5.3")
                ],
                "ruby": [
                    (None, "ruby"),
                ],
                ...
            }
        """
        vers_and_names = self._vers_and_names_from_lang.get(lang)
        if vers_and_names is None:
            # Find the available stdlibs for this language.
            cix_glob = join(
                self.stdlibs_dir, safe_lang_from_lang(lang)+"*.cix")
            cix_paths = glob(cix_glob)
            vers_and_names = []
            for cix_path in cix_paths:
                name = splitext(basename(cix_path))[0]
                if '-' in name:
                    base, ver_str = name.split('-', 1)
                    ver = _ver_from_ver_str(ver_str)
                else:
                    base = name
                    ver = None
                if base.lower() != lang.lower():
                    # Only process when the base name matches the language.
                    # I.e. skip if base is "python3" and lang is "python".
                    continue
                vers_and_names.append((ver, name))
            vers_and_names.sort()
            self._vers_and_names_from_lang[lang] = vers_and_names
        return vers_and_names

    @property
    def res_index(self):
        "cix-path -> last-updated"
        if self._res_index is None:
            idxpath = join(self.base_dir, "res_index")
            self._res_index = self.db.load_pickle(idxpath, {})
        return self._res_index

    def save(self):
        # Persist the resource index (only if it was ever loaded/modified).
        if self._res_index is not None:
            self.db.save_pickle(join(self.base_dir, "res_index"),
                                self._res_index)

    def cull_mem(self):
        """Cull memory usage as appropriate.
        This is a no-op for StdLibsZone because its memory use is bounded and
        doesn't really need culling.
        """
        pass

    def reportMemory(self):
        """
        Report on memory usage from this StdLibZone.
        @returns {dict} memory usage; keys are the paths, values are a dict of
                "amount" -> number
                "units" -> "bytes" | "count"
                "desc" -> str description
        """
        log.debug("StdLibZone: reporting memory")
        result = {}
        for stdlib in list(self._stdlib_from_stdlib_ver_and_name.values()):
            result.update(stdlib.reportMemory())
        return result

    def get_lib(self, lang, ver_str=None):
        """Return a view into the stdlibs zone for a particular language
        and version's stdlib.

            "lang" is the language, e.g. "Perl", for which to get a
                stdlib.
            "ver_str" (optional) is a specific version of the language,
                e.g. "5.8".

        On first get of a stdlib for a particular language, all
        available stdlibs for that lang are updated, if necessary.

        Returns None if there is not stdlib for this language.
        """
        vers_and_names = self.vers_and_names_from_lang(lang)
        if not vers_and_names:
            return None
        if ver_str is None:
            # Default to the latest version.
            ver = vers_and_names[-1][0]
        else:
            ver = _ver_from_ver_str(ver_str)

        # Here is something like what we have for PHP:
        #   vers_and_names = [
        #        (None, "php"),
        #        ((4,0), "php-4.0"),
        #        ((4,1), "php-4.1"),
        #        ((4,2), "php-4.2"),
        #        ((4,3), "php-4.3"),
        #        ((5,0), "php-5.0"),
        #        ((5,1), "php-5.1"),
        #   ]
        # We want to (quickly) pick the best fit stdlib for the given
        # PHP version:
        #   PHP (ver=None):  php
        #   PHP 3.0:         php
        #   PHP 4.0:         php-4.0  (exact match)
        #   PHP 4.0.2:       php-4.0  (higher sub-version)
        #   PHP 4.4:         php-4.3
        #   PHP 6.0:         php-5.1
        key = (ver, "zzz")  # 'zzz' > any stdlib name (e.g., 'zzz' > 'php-4.2')
        idx = max(0, bisect.bisect_right(vers_and_names, key)-1)
        log.debug("best stdlib fit for %s ver=%s in %s is %s",
                  lang, ver, vers_and_names, vers_and_names[idx])
        stdlib_match = vers_and_names[idx]
        stdlib_ver, stdlib_name = stdlib_match

        if stdlib_match not in self._stdlib_from_stdlib_ver_and_name:
            # TODO: This _update_lang_with_ver method should really moved into
            #       the StdLib class.
            self._update_lang_with_ver(lang, ver=stdlib_ver)
            stdlib = StdLib(self.db,
                            join(self.base_dir, stdlib_name),
                            lang, stdlib_name)
            self._stdlib_from_stdlib_ver_and_name[stdlib_match] = stdlib

        return self._stdlib_from_stdlib_ver_and_name[stdlib_match]

    def _get_preload_zip(self):
        return join(self.stdlibs_dir, "stdlibs.zip")

    def can_preload(self):
        """Return True iff can preload."""
        if exists(self.base_dir):
            log.info("can't preload stdlibs: `%s' exists", self.base_dir)
            return False
        try:
            import process
            import which
        except ImportError as ex:
            log.info("can't preload stdlibs: %s", ex)
            return False
        try:
            which.which("unzip")
        except which.WhichError as ex:
            log.info("can't preload stdlibs: %s", ex)
            return False
        preload_zip = self._get_preload_zip()
        if not exists(preload_zip):
            log.info("can't preload stdlibs: `%s' does not exist", preload_zip)
            return False
        return True

    def preload(self, progress_cb=None):
        """Pre-load the stdlibs zone, if able.

            "progress_cb" (optional) is a callable that is called as
                follows to show the progress of the update:
                    progress_cb(<desc>, <value>)
                where <desc> is a short string describing the current step
                and <value> is an integer between 0 and 100 indicating the
                level of completeness.

        Use `.can_preload()' to determine if able to pre-load.
        """
        import which
        import process
        log.debug("preloading stdlibs zone")
        if progress_cb:
            try:
                progress_cb("Preloading stdlibs...", None)
            except:
                log.exception("error in progress_cb (ignoring)")
        preload_zip = self._get_preload_zip()
        unzip_exe = which.which("unzip")
        cmd = '"%s" -q -d "%s" "%s"'\
              % (unzip_exe, dirname(self.base_dir), preload_zip)
        p = process.ProcessOpen(cmd, stdin=None)
        stdout, stderr = p.communicate()
        retval = p.wait()
        if retval:
            raise OSError("error running '%s'" % cmd)

    # TODO: Add ver_str option (as per get_lib above) and only update
    #      the relevant stdlib.
    def remove_lang(self, lang):
        """Remove the given language from the stdlib zone."""
        # BUGFIX: message said "update" while this method removes.
        log.debug("remove '%s' stdlibs", lang)

        # Figure out what removals need to be done...
        cix_glob = join(self.stdlibs_dir, safe_lang_from_lang(lang)+"*.cix")
        todo = []
        # NOTE(review): assumes res_index keys unpack as (area, subpath)
        # pairs -- confirm against AreaResource.area_path.
        for area, subpath in self.res_index:
            res = AreaResource(subpath, area)
            if fnmatch.fnmatch(res.path, cix_glob):
                todo.append(("remove", AreaResource(subpath, area)))

        # ... and then do them.
        self._handle_res_todos(lang, todo)
        self.save()

    def _update_lang_with_ver(self, lang, ver=None, progress_cb=None):
        """Import stdlib data for this lang, if necessary.

            "lang" is the language to update.
            "ver" (optional) is a specific version of the language,
                e.g. (5, 8).
            "progress_cb" (optional) is a callable that is called as
                follows to show the progress of the update:
                    progress_cb(<desc>, <value>)
                where <desc> is a short string describing the current step
                and <value> is an integer between 0 and 100 indicating the
                level of completeness.
        """
        log.debug("update '%s' stdlibs", lang)

        # Figure out what updates need to be done...
        if progress_cb:
            try:
                progress_cb("Determining necessary updates...", 5)
            except:
                log.exception("error in progress_cb (ignoring)")
        if ver is not None:
            ver_str = ".".join(map(str, ver))
            cix_path = join(self.stdlibs_dir,
                            "%s-%s.cix" % (safe_lang_from_lang(lang), ver_str))
        else:
            cix_path = join(self.stdlibs_dir,
                            "%s.cix" % (safe_lang_from_lang(lang), ))

        # Need to acquire db lock, as the indexer and main thread may both be
        # calling into _update_lang_with_ver at the same time.
        self.db.acquire_lock()
        try:
            todo = []
            res = AreaResource(cix_path, "ci-pkg-dir")
            try:
                last_updated = self.res_index[res.area_path]
            except KeyError:
                todo.append(("add", res))
            else:
                mtime = os.stat(cix_path).st_mtime
                if last_updated != mtime:  # epsilon? '>=' instead of '!='?
                    todo.append(("update", res))

            # ... and then do them.
            self._handle_res_todos(lang, todo, progress_cb)
            self.save()
        finally:
            self.db.release_lock()

    def update_lang(self, lang, progress_cb=None, ver=None):
        """Update either all stdlibs for *lang*, or just the best-fit
        stdlib when a specific *ver* string is given."""
        vers_and_names = self.vers_and_names_from_lang(lang)
        if ver is not None:
            ver = _ver_from_ver_str(ver)
            key = (
                ver, "zzz")  # 'zzz' > any stdlib name (e.g., 'zzz' > 'php-4.2')
            idx = max(0, bisect.bisect_right(vers_and_names, key)-1)
            log.debug("update_lang: best stdlib fit for %s ver=%s in %s is %s",
                      lang, ver, vers_and_names, vers_and_names[idx])
            # Just update the one version for this language.
            vers_and_names = [vers_and_names[idx]]
        for ver, name in vers_and_names:
            self._update_lang_with_ver(lang, ver, progress_cb)

    def _handle_res_todos(self, lang, todo, progress_cb=None):
        # Apply a list of ("add"|"remove"|"update", AreaResource) actions.
        if not todo:
            return

        for i, (action, res) in enumerate(todo):
            cix_path = res.path
            name = splitext(basename(cix_path))[0]
            if '-' in name:
                base, ver_str = name.split('-', 1)
                ver = _ver_from_ver_str(ver_str)
            else:
                base = name
                ver = None
            assert base == safe_lang_from_lang(lang)

            log.debug("%s %s stdlib: `%s'", action, name, cix_path)
            verb = {"add": "Adding", "remove": "Removing",
                    "update": "Updating"}[action]
            desc = "%s %s stdlib" % (verb, name)
            if progress_cb:
                try:
                    progress_cb(desc, (5 + 95/len(todo)*i))
                except:
                    log.exception("error in progress_cb (ignoring)")
            else:
                self.db.report_event(desc)
            if action == "add":
                self._add_res(res, lang, name, ver)
            elif action == "remove":
                self._remove_res(res, lang, name, ver)
            elif action == "update":
                # XXX Bad for filesystem. Change this to do it
                #    more intelligently if possible.
                self._remove_res(res, lang, name, ver)
                self._add_res(res, lang, name, ver)

    def _remove_res(self, res, lang, name, ver):
        log.debug("%s stdlibs: remove %s", lang, res)
        del self.res_index[res.area_path]
        dbdir = join(self.base_dir, name)
        try:
            rmdir(dbdir)
        except OSError as ex:
            # Fall back to renaming out of the way (e.g. files in use on
            # Windows) so the zone stays usable.
            try:
                os.rename(dbdir, dbdir+".zombie")
            except OSError as ex2:
                log.error("could not remove %s stdlib database dir `%s' (%s): "
                          "couldn't even rename it to `%s.zombie' (%s): "
                          "giving up", name, dbdir, ex, name, ex2)
            else:
                log.warn("could not remove %s stdlib database dir `%s' (%s): "
                         "moved it to `%s.zombie'", name, dbdir, ex)

    def _add_res(self, res, lang, name, ver):
        log.debug("%s stdlibs: add %s", lang, res)
        cix_path = res.path
        try:
            tree = tree_from_cix_path(cix_path)
        except ET.XMLParserError as ex:
            log.warn("could not load %s stdlib from `%s' (%s): skipping",
                     name, cix_path, ex)
            return

        dbdir = join(self.base_dir, name)
        if exists(dbdir):
            log.warn("`db/stdlibs/%s' already exists and should not: "
                     "removing it", name)
            try:
                rmdir(dbdir)
            except OSError as ex:
                # BUGFIX: the format string has three %s placeholders but
                # only two args were passed (missing `ex`), which made
                # logging this error itself fail.
                log.error("could not remove `%s' to create %s stdlib in "
                          "database (%s): skipping", dbdir, name, ex)
        if not exists(dbdir):
            os.makedirs(dbdir)

        # Create 'blob_index' and 'toplevel*_index' and write out
        # '.blob' file.
        LEN_PREFIX = self.db.LEN_PREFIX
        is_hits_from_lpath_lang = lang in self.db.import_everything_langs
        blob_index = {}  # {blobname -> dbfile}
        toplevelname_index = {}  # {ilk -> toplevelname -> blobnames}
        toplevelprefix_index = {}  # {ilk -> prefix -> toplevelnames}
        for blob in tree.findall("file/scope"):
            assert lang == blob.get("lang"), \
                "Adding %s resource %s to %s blob" % (
                    lang, res, blob.get("lang"))
            blobname = blob.get("name")
            dbfile = self.db.bhash_from_blob_info(cix_path, lang, blobname)
            blob_index[blobname] = dbfile
            ET.ElementTree(blob).write(join(dbdir, dbfile+".blob"))
            for toplevelname, elem in blob.names.items():
                if "__local__" in elem.get("attributes", "").split():
                    # this is internal to the stdlib
                    continue
                ilk = elem.get("ilk") or elem.tag
                bft = toplevelname_index.setdefault(ilk, {})
                if toplevelname not in bft:
                    bft[toplevelname] = set([blobname])
                else:
                    bft[toplevelname].add(blobname)
                prefix = toplevelname[:LEN_PREFIX]
                tfp = toplevelprefix_index.setdefault(ilk, {})
                if prefix not in tfp:
                    tfp[prefix] = set([toplevelname])
                else:
                    tfp[prefix].add(toplevelname)

        self.db.save_pickle(join(dbdir, "blob_index"), blob_index)
        self.db.save_pickle(join(dbdir, "toplevelname_index"),
                            toplevelname_index)
        self.db.save_pickle(join(dbdir, "toplevelprefix_index"),
                            toplevelprefix_index)

        mtime = os.stat(cix_path).st_mtime
        self.res_index[res.area_path] = mtime
#---- internal support stuff
def _ver_from_ver_str(ver_str):
"""Convert a version string to a version object as used internally
for the "stdlibs" area of the database.
>>> _ver_from_ver_str("5.8")
(5, 8)
>>> _ver_from_ver_str("1.8.2")
(1, 8, 2)
>>> _ver_from_ver_str("ecma")
'ecma'
>>> _ver_from_ver_str("ie")
'ie'
"""
ver = []
for s in ver_str.split('.'):
try:
ver.append(int(s))
except ValueError:
ver.append(s)
return tuple(ver)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUWebDomainNamesFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUWebCategory(NURESTObject):
""" Represents a WebCategory in the VSD
Notes:
This entity provides the definition of Web Category. It will be used in ACL definition to filter web traffic.
"""
__rest_name__ = "webcategory"
__resource_name__ = "webcategories"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_TYPE_WEB_DOMAIN_NAME = "WEB_DOMAIN_NAME"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    def __init__(self, **kwargs):
        """ Initializes a WebCategory instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> webcategory = NUWebCategory(id=u'xxxx-xxx-xxx-xxx', name=u'WebCategory')
                >>> webcategory = NUWebCategory(data=my_dict)
        """

        super(NUWebCategory, self).__init__()

        # Read/Write Attributes
        # Backing fields for the REST attributes exposed below; all start
        # as None and are populated by _compute_args from kwargs/`data`.
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._web_category_identifier = None
        self._default_category = None
        self._description = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._creation_date = None
        self._owner = None
        self._external_id = None
        self._type = None

        # Map local snake_case names to the VSD API's camelCase REST names,
        # with per-attribute type/required/unique metadata.
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=True)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="web_category_identifier", remote_name="webCategoryIdentifier", attribute_type=int, is_required=False, is_unique=True)
        self.expose_attribute(local_name="default_category", remote_name="defaultCategory", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=True, is_unique=False, choices=[u'WEB_DOMAIN_NAME'])

        # Fetchers
        # Child/member object fetchers for related REST resources.
        self.web_domain_names = NUWebDomainNamesFetcher.fetcher_with_object(parent_object=self, relationship="member")
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply kwargs / the special `data` dict to the exposed attributes.
        self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A customer friendly name for this web category
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A customer friendly name for this web category
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def web_category_identifier(self):
""" Get web_category_identifier value.
Notes:
The unique identifier of a web category to be used by NSG
This attribute is named `webCategoryIdentifier` in VSD API.
"""
return self._web_category_identifier
@web_category_identifier.setter
def web_category_identifier(self, value):
""" Set web_category_identifier value.
Notes:
The unique identifier of a web category to be used by NSG
This attribute is named `webCategoryIdentifier` in VSD API.
"""
self._web_category_identifier = value
@property
def default_category(self):
""" Get default_category value.
Notes:
Indicates if this is a system-defined web category
This attribute is named `defaultCategory` in VSD API.
"""
return self._default_category
@default_category.setter
def default_category(self, value):
""" Set default_category value.
Notes:
Indicates if this is a system-defined web category
This attribute is named `defaultCategory` in VSD API.
"""
self._default_category = value
@property
def description(self):
""" Get description value.
Notes:
A customer friendly description for this web category
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A customer friendly description for this web category
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def type(self):
""" Get type value.
Notes:
Type of the Web Category
"""
return self._type
@type.setter
def type(self, value):
""" Set type value.
Notes:
Type of the Web Category
"""
self._type = value
| |
#!/usr/bin/env python
import sys
import os
import guessit
import locale
import glob
import argparse
import struct
import logging
from extensions import valid_tagging_extensions
from readSettings import ReadSettings
from tvdb_mp4 import Tvdb_mp4
from tmdb_mp4 import tmdb_mp4
from mkvtomp4 import MkvtoMp4
from post_processor import PostProcessor
from tvdb_api import tvdb_api
from tmdb_api import tmdb
from extensions import tmdb_api_key
from logging.config import fileConfig
# Python 3 removed raw_input; alias it so the interactive prompts below work
# on both major versions.  Use version_info rather than indexing the version
# string (string indexing would misreport a hypothetical Python 10+).
if sys.version_info[0] == 3:
    raw_input = input

logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
    # Windows: log next to the script instead of a POSIX path.
    logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
    try:
        os.mkdir(logpath)
    except:
        # No permission to create /var/log/... -- fall back to script dir.
        logpath = os.path.dirname(sys.argv[0])
# Double the backslashes so the paths survive configparser interpolation
# inside logging.ini on Windows.
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("MANUAL")
# Quiet down chatty third-party loggers.
logging.getLogger("subliminal").setLevel(logging.CRITICAL)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("enzyme").setLevel(logging.WARNING)
logging.getLogger("qtfaststart").setLevel(logging.WARNING)

log.info("Manual processor started.")

settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini", logger=log)
def mediatype():
    """Prompt for the media type and return an int in [1, 5].

    Re-prompts on invalid input.  Previously a non-numeric entry crashed with
    ValueError because the response was passed straight to int().
    """
    print("Select media type:")
    print("1. Movie (via IMDB ID)")
    print("2. Movie (via TMDB ID)")
    print("3. TV")
    print("4. Convert without tagging")
    print("5. Skip file")
    result = raw_input("#: ")
    try:
        selection = int(result)
    except ValueError:
        selection = None
    if selection is not None and 0 < selection < 6:
        return selection
    print("Invalid selection")
    return mediatype()
def getValue(prompt, num=False):
    """Prompt for a value; when num is True, re-prompt until it is numeric."""
    print(prompt + ":")
    value = raw_input("#: ").strip(' \"')
    if os.name != 'nt':
        # Remove escape characters in non-windows environments
        value = value.replace('\\', '')
    try:
        # Python 2 bytes -> unicode; Python 3 str has no decode() and simply
        # falls through.
        value = value.decode(sys.stdout.encoding)
    except:
        pass
    if num is True and not value.isdigit():
        print("Must be a numerical value")
        return getValue(prompt, num)
    return value
def getYesNo():
    """Prompt until the user answers yes or no; return True or False."""
    affirmative = {'y', 'yes', 'true', '1'}
    negative = {'n', 'no', 'false', '0'}
    while True:
        answer = raw_input("# [y/n]: ").lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        print("Invalid selection")
def getinfo(fileName=None, silent=False, tag=True, tvdbid=None):
    """Determine tagging identification for a file.

    Returns (3, tvdbid, season, episode) for TV, (1, imdbid) or (2, tmdbid)
    for movies, None to convert without tagging, or False to skip the file.
    """
    tagdata = None
    # Try to guess identification from the filename first.
    if fileName is not None:
        tagdata = guessInfo(fileName, tvdbid)

    if silent is False:
        if tagdata:
            print("Proceed using guessed identification from filename?")
            if getYesNo():
                return tagdata
        else:
            print("Unable to determine identity based on filename, must enter manually")
        m_type = mediatype()
        # Compare ints with == -- identity (`is`) only worked by accident of
        # CPython's small-integer caching.
        if m_type == 3:
            tvdbid = getValue("Enter TVDB Series ID", True)
            season = getValue("Enter Season Number", True)
            episode = getValue("Enter Episode Number", True)
            return m_type, tvdbid, season, episode
        elif m_type == 1:
            imdbid = getValue("Enter IMDB ID")
            return m_type, imdbid
        elif m_type == 2:
            tmdbid = getValue("Enter TMDB ID", True)
            return m_type, tmdbid
        elif m_type == 4:
            return None
        elif m_type == 5:
            return False
    else:
        if tagdata and tag:
            return tagdata
        else:
            return None
def guessInfo(fileName, tvdbid=None):
    """Guess tag data from a filename via guessit; return tagdata or None."""
    if tvdbid:
        # The series is already known -- only season/episode need guessing.
        return tvdbInfo(guessit.guess_episode_info(fileName), tvdbid)
    if not settings.fullpathguess:
        fileName = os.path.basename(fileName)
    guess = guessit.guess_file_info(fileName)
    try:
        kind = guess['type']
        if kind == 'movie':
            return tmdbInfo(guess)
        if kind == 'episode':
            return tvdbInfo(guess, tvdbid)
        return None
    except Exception as e:
        print(e)
        return None
def tmdbInfo(guessData):
    """Look up a guessed movie title on TMDB.

    Returns (2, tmdbid) for the first result whose alphanumeric-only title
    matches the guess case-insensitively, or None if nothing matches.
    """
    tmdb.configure(tmdb_api_key)
    movies = tmdb.Movies(guessData["title"].encode('ascii', errors='ignore'), limit=4)
    for movie in movies.iter_results():
        # Identify the first movie in the collection that matches exactly the movie title
        foundname = ''.join(e for e in movie["title"] if e.isalnum())
        origname = ''.join(e for e in guessData["title"] if e.isalnum())
        # origname = origname.replace('&', 'and')
        if foundname.lower() == origname.lower():
            print("Matched movie title as: %s %s" % (movie["title"].encode(sys.stdout.encoding, errors='ignore'), movie["release_date"].encode(sys.stdout.encoding, errors='ignore')))
            # NOTE: rebinds the loop variable to the full Movie record.
            movie = tmdb.Movie(movie["id"])
            # Some tmdb library versions return a dict, others an object --
            # presumably why both shapes are handled here; confirm per version.
            if isinstance(movie, dict):
                tmdbid = movie["id"]
            else:
                tmdbid = movie.get_id()
            return 2, tmdbid
    return None
def tvdbInfo(guessData, tvdbid=None):
    """Resolve a guessit episode guess to (3, tvdbid, season, episode).

    Fix: `fullseries` was previously only bound when a year was guessed, so
    the no-year path reached the fallback lookup via a NameError.  Default it
    to the bare series name instead.
    """
    series = guessData["series"]
    fullseries = series
    if 'year' in guessData:
        fullseries = series + " (" + str(guessData["year"]) + ")"
    season = guessData["season"]
    episode = guessData["episodeNumber"]
    t = tvdb_api.Tvdb(interactive=False, cache=False, banners=False, actors=False, forceConnect=True, language='en')
    try:
        tvdbid = str(tvdbid) if tvdbid else t[fullseries]['id']
        series = t[int(tvdbid)]['seriesname']
    except:
        # Fall back to a lookup by bare series name.
        tvdbid = t[series]['id']
    try:
        print("Matched TV episode as %s (TVDB ID:%d) S%02dE%02d" % (series.encode(sys.stdout.encoding, errors='ignore'), int(tvdbid), int(season), int(episode)))
    except:
        print("Matched TV episode")
    return 3, tvdbid, season, episode
def processFile(inputfile, tagdata, relativePath=None):
    """Convert a single file, then optionally tag and post-process it.

    tagdata values:
      False                        -- user elected to skip the file
      None                         -- convert without tagging
      (1, imdbid) / (2, tmdbid)    -- movie
      (3, tvdbid, season, episode) -- TV episode
    """
    # Gather tagdata.  Default tagmp4 so an unrecognized tagdata shape falls
    # back to converting untagged instead of raising NameError later.
    tagmp4 = None
    if tagdata is False:
        return # This means the user has elected to skip the file
    elif tagdata is None:
        tagmp4 = None # No tag data specified but convert the file anyway
    elif tagdata[0] == 1:  # == not `is`: int identity is a CPython accident
        imdbid = tagdata[1]
        tagmp4 = tmdb_mp4(imdbid, language=settings.taglanguage, logger=log)
        try:
            print("Processing %s" % (tagmp4.title.encode(sys.stdout.encoding, errors='ignore')))
        except:
            print("Processing movie")
    elif tagdata[0] == 2:
        tmdbid = tagdata[1]
        tagmp4 = tmdb_mp4(tmdbid, True, language=settings.taglanguage, logger=log)
        try:
            print("Processing %s" % (tagmp4.title.encode(sys.stdout.encoding, errors='ignore')))
        except:
            print("Processing movie")
    elif tagdata[0] == 3:
        tvdbid = int(tagdata[1])
        season = int(tagdata[2])
        episode = int(tagdata[3])
        tagmp4 = Tvdb_mp4(tvdbid, season, episode, language=settings.taglanguage, logger=log)
        try:
            print("Processing %s Season %02d Episode %02d - %s" % (tagmp4.show.encode(sys.stdout.encoding, errors='ignore'), int(tagmp4.season), int(tagmp4.episode), tagmp4.title.encode(sys.stdout.encoding, errors='ignore')))
        except:
            print("Processing TV episode")

    # Process
    if MkvtoMp4(settings, logger=log).validSource(inputfile):
        converter = MkvtoMp4(settings, logger=log)
        output = converter.process(inputfile, True)
        if output:
            if tagmp4 is not None and output['output_extension'] in valid_tagging_extensions:
                try:
                    tagmp4.setHD(output['x'], output['y'])
                    tagmp4.writeTags(output['output'], settings.artwork, settings.thumbnail)
                except Exception as e:
                    print("There was an error tagging the file")
                    print(e)
            if settings.relocate_moov and output['output_extension'] in valid_tagging_extensions:
                converter.QTFS(output['output'])
            output_files = converter.replicate(output['output'], relativePath=relativePath)
            if settings.postprocess:
                post_processor = PostProcessor(output_files)
                if tagdata:
                    if tagdata[0] == 1:
                        post_processor.setMovie(tagdata[1])
                    elif tagdata[0] == 2:
                        post_processor.setMovie(tagdata[1])
                    elif tagdata[0] == 3:
                        post_processor.setTV(tagdata[1], tagdata[2], tagdata[3])
                post_processor.run_scripts()
def walkDir(dir, silent=False, preserveRelative=False, tvdbid=None, tag=True):
    """Recursively convert/tag every valid media file under *dir*.

    Fix: the MkvtoMp4 converter is constructed once instead of once per file
    (its construction does not depend on the file being inspected).
    """
    converter = MkvtoMp4(settings, logger=log)
    for r, d, f in os.walk(dir):
        for file in f:
            filepath = os.path.join(r, file)
            # Preserve the sub-directory layout for copy-to/move-to if asked.
            relative = os.path.split(os.path.relpath(filepath, dir))[0] if preserveRelative else None
            try:
                if converter.validSource(filepath):
                    try:
                        print("Processing file %s" % (filepath.encode(sys.stdout.encoding, errors='ignore')))
                    except:
                        try:
                            print("Processing file %s" % (filepath.encode('utf-8', errors='ignore')))
                        except:
                            print("Processing file")
                    if tag:
                        tagdata = getinfo(filepath, silent, tvdbid=tvdbid)
                    else:
                        tagdata = None
                    processFile(filepath, tagdata, relativePath=relative)
            except Exception as e:
                # Best-effort batch processing: report and continue.
                print("An unexpected error occurred, processing of this file has failed")
                print(str(e))
def main():
    """Parse command-line arguments, apply settings overrides, and process
    the given file or directory."""
    global settings

    parser = argparse.ArgumentParser(description="Manual conversion and tagging script for sickbeard_mp4_automator")
    parser.add_argument('-i', '--input', help='The source that will be converted. May be a file or a directory')
    parser.add_argument('-c', '--config', help='Specify an alternate configuration file location')
    parser.add_argument('-a', '--auto', action="store_true", help="Enable auto mode, the script will not prompt you for any further input, good for batch files. It will guess the metadata using guessit")
    parser.add_argument('-tv', '--tvdbid', help="Set the TVDB ID for a tv show")
    parser.add_argument('-s', '--season', help="Specifiy the season number")
    parser.add_argument('-e', '--episode', help="Specify the episode number")
    parser.add_argument('-imdb', '--imdbid', help="Specify the IMDB ID for a movie")
    parser.add_argument('-tmdb', '--tmdbid', help="Specify theMovieDB ID for a movie")
    parser.add_argument('-nm', '--nomove', action='store_true', help="Overrides and disables the custom moving of file options that come from output_dir and move-to")
    parser.add_argument('-nc', '--nocopy', action='store_true', help="Overrides and disables the custom copying of file options that come from output_dir and move-to")
    parser.add_argument('-nd', '--nodelete', action='store_true', help="Overrides and disables deleting of original files")
    parser.add_argument('-nt', '--notag', action="store_true", help="Overrides and disables tagging when using the automated option")
    parser.add_argument('-np', '--nopost', action="store_true", help="Overrides and disables the execution of additional post processing scripts")
    parser.add_argument('-pr', '--preserveRelative', action='store_true', help="Preserves relative directories when processing multiple files using the copy-to or move-to functionality")
    parser.add_argument('-cmp4', '--convertmp4', action='store_true', help="Overrides convert-mp4 setting in autoProcess.ini enabling the reprocessing of mp4 files")
    parser.add_argument('-m', '--moveto', help="Override move-to value setting in autoProcess.ini changing the final destination of the file")
    args = vars(parser.parse_args())

    # Setup the silent mode
    silent = args['auto']
    tag = True

    print("%sbit Python." % (struct.calcsize("P") * 8))

    # Settings overrides: an alternate config file (absolute path or relative
    # to the script) replaces the module-level `settings`.
    if(args['config']):
        if os.path.exists(args['config']):
            print('Using configuration file "%s"' % (args['config']))
            settings = ReadSettings(os.path.split(args['config'])[0], os.path.split(args['config'])[1], logger=log)
        elif os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), args['config'])):
            print('Using configuration file "%s"' % (args['config']))
            settings = ReadSettings(os.path.dirname(sys.argv[0]), args['config'], logger=log)
        else:
            print('Configuration file "%s" not present, using default autoProcess.ini' % (args['config']))
    # Per-flag overrides mutate the (possibly replaced) settings object.
    if (args['nomove']):
        settings.output_dir = None
        settings.moveto = None
        print("No-move enabled")
    elif (args['moveto']):
        settings.moveto = args['moveto']
        print("Overriden move-to to " + args['moveto'])
    if (args['nocopy']):
        settings.copyto = None
        print("No-copy enabled")
    if (args['nodelete']):
        settings.delete = False
        print("No-delete enabled")
    if (args['convertmp4']):
        settings.processMP4 = True
        print("Reprocessing of MP4 files enabled")
    if (args['notag']):
        settings.tagfile = False
        print("No-tagging enabled")
    if (args['nopost']):
        settings.postprocess = False
        print("No post processing enabled")

    # Establish the path we will be working with (glob expands wildcards to
    # the first match; prompt interactively when no input was given).
    if (args['input']):
        path = (str(args['input']))
        try:
            path = glob.glob(path)[0]
        except:
            pass
    else:
        path = getValue("Enter path to file")

    tvdbid = int(args['tvdbid']) if args['tvdbid'] else None
    if os.path.isdir(path):
        walkDir(path, silent, tvdbid=tvdbid, preserveRelative=args['preserveRelative'], tag=settings.tagfile)
    elif (os.path.isfile(path) and MkvtoMp4(settings, logger=log).validSource(path)):
        # Single file: build tagdata from the CLI IDs when unambiguous,
        # otherwise fall back to interactive/guessed identification.
        if (not settings.tagfile):
            tagdata = None
        elif (args['tvdbid'] and not (args['imdbid'] or args['tmdbid'])):
            season = int(args['season']) if args['season'] else None
            episode = int(args['episode']) if args['episode'] else None
            if (tvdbid and season and episode):
                tagdata = [3, tvdbid, season, episode]
            else:
                tagdata = getinfo(path, silent=silent, tvdbid=tvdbid)
        elif ((args['imdbid'] or args['tmdbid']) and not args['tvdbid']):
            if (args['imdbid']):
                imdbid = args['imdbid']
                tagdata = [1, imdbid]
            elif (args['tmdbid']):
                tmdbid = int(args['tmdbid'])
                tagdata = [2, tmdbid]
        else:
            tagdata = getinfo(path, silent=silent, tvdbid=tvdbid)
        processFile(path, tagdata)
    else:
        try:
            print("File %s is not in the correct format" % (path))
        except:
            print("File is not in the correct format")
# Script entry point.
if __name__ == '__main__':
    main()
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import re
import numpy as np
import OpenGL.GL as gl
from glumpy import library
from glumpy.log import log
def remove_comments(code):
    """ Strip C-style // and /* */ comments from a GLSL source string. """
    pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)"
    # Group 1 captures quoted strings (kept verbatim so comment markers inside
    # string literals survive); group 2 captures real comments (dropped).
    regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
    return regex.sub(
        lambda m: "" if m.group(2) is not None else m.group(1), code)
def remove_version(code):
    """ Drop any #version directive, leaving a bare newline in its place. """
    return re.compile('\#\s*version[^\r\n]*\n',
                      re.MULTILINE|re.DOTALL).sub('\n', code)
def merge_includes(code):
    """ Recursively inline every `#include "file"` directive.

    Each file is inlined at most once; repeated includes are removed.  Raises
    RuntimeError when an included file cannot be located via `library.find`.
    """
    # pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\-\.\/]+)"[^\r\n]*\n'
    pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\-\.\/]+)"'
    regex = re.compile(pattern)
    includes = []

    def replace(match):
        filename = match.group("filename")
        if filename not in includes:
            includes.append(filename)
            path = library.find(filename)
            if not path:
                log.critical('"%s" not found' % filename)
                raise RuntimeError("File not found")
            text = '\n// --- start of "%s" ---\n' % filename
            # Close the file deterministically instead of leaking the handle.
            with open(path) as handle:
                text += remove_comments(handle.read())
            text += '// --- end of "%s" ---\n' % filename
            return text
        # Already inlined once: drop the duplicate directive.
        return ''

    # Limit recursion to depth 10
    for _ in range(10):
        if re.search(regex, code):
            code = re.sub(regex, replace, code)
        else:
            break
    return code
def preprocess(code):
    """ Preprocess a code by removing comments, version and merging includes """
    if not code:
        # Pass falsy input (None / "") straight through unchanged.
        return code
    return merge_includes(remove_version(remove_comments(code)))
def get_declarations(code, qualifier = ""):
    """ Extract declarations of the form ``qualifier type name[,name,...];``.

    Returns a list of (name, type) tuples; array declarations are expanded
    into one entry per element, e.g. ``name[0]``, ``name[1]``.
    """
    if not len(code):
        return []

    if qualifier:
        re_type = re.compile("""
                             %s # Variable qualifier
                             \s+(?P<type>\w+) # Variable type
                             \s+(?P<names>[\w,\[\]\n =\.$]+); # Variable name(s)
                             """ % qualifier, re.VERBOSE)
    else:
        re_type = re.compile("""
                             \s*(?P<type>\w+) # Variable type
                             \s+(?P<names>[\w\[\] ]+) # Variable name(s)
                             """, re.VERBOSE)
    re_names = re.compile("""
                          (?P<name>\w+) # Variable name
                          \s*(\[(?P<size>\d+)\])? # Variable size
                          (\s*[^,]+)?
                          """, re.VERBOSE)

    variables = []
    for decl in re.finditer(re_type, code):
        vtype = decl.group('type')
        for named in re.finditer(re_names, decl.group('names')):
            name = named.group('name')
            size = named.group('size')
            if size is None:
                variables.append((name, vtype))
                continue
            size = int(size)
            if size == 0:
                raise RuntimeError("Size of a variable array cannot be zero")
            variables.extend(('%s[%d]' % (name, i), vtype) for i in range(size))
    return variables
def get_hooks(code):
    """ Return the unique <hook> markers in *code* as (hook, None) pairs.

    Sub-hooks and hook arguments are matched but deliberately discarded --
    only the hook name is reported.
    """
    if not len(code):
        return []

    re_hooks = re.compile("""\<(?P<hook>\w+)
                             (\.(?P<subhook>.+))?
                             (\([^<>]+\))?\>""", re.VERBOSE )
    found = {(m.group('hook'), None) for m in re.finditer(re_hooks, code)}
    return list(found)
def get_args(code):
    """Declarations carrying no storage qualifier."""
    return get_declarations(code)
def get_externs(code):
    """`extern` declarations."""
    return get_declarations(code, "extern")
def get_consts(code):
    """`const` declarations."""
    return get_declarations(code, "const")
def get_uniforms(code):
    """`uniform` declarations."""
    return get_declarations(code, "uniform")
def get_attributes(code):
    """`attribute` declarations."""
    return get_declarations(code, "attribute")
def get_varyings(code):
    """`varying` declarations."""
    return get_declarations(code, "varying")
def get_functions(code):
    """ Return (rtype, name, args, body) for every function defined in *code*. """
    def nested_braces(depth):
        # Poor man's matched-brace scanning (regexes cannot truly recurse):
        # matches balanced braces up to `depth` nesting levels, non-greedy.
        return r"[^{}]*?(?:{"*depth+r"[^{}]*?"+r"}[^{}]*?)*?"*depth

    regex = re.compile("""
                       \s*(?P<type>\w+) # Function return type
                       \s+(?P<name>[\w]+) # Function name
                       \s*\((?P<args>.*?)\) # Function arguments
                       \s*\{(?P<code>%s)\} # Function content
                       """ % nested_braces(5), re.VERBOSE | re.DOTALL)

    # `if (...) {...}` / `while (...) {...}` match the pattern too -- skip them.
    return [(m.group('type'), m.group('name'), m.group('args'), m.group('code'))
            for m in re.finditer(regex, code)
            if m.group('name') not in ("if", "while")]
def parse(code):
    """ Parse a shader and return a dict describing everything it declares. """
    code = preprocess(code)

    def collect(getter):
        # preprocess() hands falsy input back unchanged; report empty lists.
        return getter(code) if code else []

    return {'externs'   : collect(get_externs),
            'consts'    : collect(get_consts),
            'uniforms'  : collect(get_uniforms),
            'attributes': collect(get_attributes),
            'varyings'  : collect(get_varyings),
            'hooks'     : collect(get_hooks),
            'functions' : collect(get_functions)}
# -----------------------------------------------------------------------------
# Ad-hoc smoke test / usage example; requires the glumpy library on the path
# because the #include below is resolved through library.find().
if __name__ == '__main__':
    code = """
    #version 120
    #include "colormaps/colormaps.glsl"
    extern float extern_a[2] /* comment */,
    extern_b, /* comment */
    extern_c /* comment */;
    const float const_a = <hook_1>;
    const float const_b = 2.0, const_c = 3.0;
    uniform float uniform_a;
    uniform float uniform_b;
    uniform float uniform_c[2];
    uniform float <hook_2>;
    attribute float attribute_a[2] , attribute_b , attribute_c;
    varying float varying_a[2];
    varying vec4 varying_b;
    varying mat4 varying_c;
    <hook_3>;
    <hook_4(args)>;
    <hook_5.subhook>;
    <hook_6.subhook(args)>;
    void
    function_a(int a, int b, int c)
    {
    float a = 1;
    }
    void function_b(int a, int b, int c) {}
    """
    code = preprocess(code)
    print(get_hooks(code))
    # for key in p.keys():
    # print key
    # if key not in["functions", "hooks"]:
    # for (name,vtype) in p[key]:
    # print " - %s (%s)"% (name,vtype)
    # print
    # elif key == "hooks":
    # for name in p[key]:
    # print " - %s " % name
    # print
    # else:
    # for (rtype,name,args,func) in p[key]:
    # print " - %s %s (%s) { ... }"% (rtype, name, args)
    # print
| |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: laran@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from datetime import date, timedelta
from monthdelta import monthdelta
from calendar import monthrange
'''
All dates are calculated raw. They are not adjusted for holidays or workdays.
Use WorkflowDateCalculator.nearest_work_day() to get a date adjusted for
weekends & holidays.
# !Note! The dates returned by these basic methods do not adjust for holidays or weekends.
# !Note! Adjusting for holidays and weekends is way too hard to test effectively, and, because
# !Note! of the fact that the dates are calculated relatively from one another, adjusting them
# !Note! as they're being calculated makes it impossible to do things like jump ahead to future
# !Note! cycles or jump back to previous ones.
# !Note! So adjust for weekends & holidays, calculate the final start/end date that you want.
# !Note! Once you have the date, adjust that final date by calling WorkflowDateCalculator.nearest_workday(your_date)
# !Note! to adjust the final date.
The calculator works in two ways:
1) Create an instance, giving it a workflow, and call instance methods on it. This is a bit more object-oriented:
# Get the boundary dates for a cycle
start_date = calculator.nearest_start_date_after_basedate(basedate)
end_date = calculator.nearest_end_date_after_start_date(start_date)
# Calculate prior cycle periods from a basedate
prior_cycle_start_date = calculator.previous_cycle_start_date_before_basedate(basedate)
prior_cycle_end_date = calculator.nearest_end_date_after_start_date(prior_cycle_start_date)
# For convenience, calculate subsequent cycle start date
next_cycle_start_date = calculator.next_cycle_start_date_after_basedate(basedate)
# This is effectively the same as doing this:
next_cycle_start_date = calculator.nearest_start_date_after_basedate(start_date + timedelta(days=1))
2) Use the static methods which calculate cycle boundaries given dates. This is a bit more utilitarian, and
is valuable in certain cases, such as when making calculations directly with TaskGroupTasks, where you may or may
not have the workflow in scope.
# Get the boundary dates for a cycle
# !Incidentally, for weekly cycles the relative_{start|end}_month can be None
# !Notice that these methods below use relative month+day instead of date values.
start_date = WorkflowDateCalculator.nearest_start_date_after_basedate_from_dates(\
basedate, frequency, relative_start_month, relative_start_day)
end_date = WorkflowDateCalculator.nearest_end_date_after_start_date_from_dates(\
frequency, start_date, end_month, end_day)
prior_cycle_start_date = WorkflowDateCalculator.previous_cycle_start_date_before_basedate_from_dates(
basedate, frequency, relative_start_month, relative_start_day)
prior_cycle_end_date = WorkflowDateCalculator.nearest_end_date_after_start_date_from_dates(\
basedate, frequency, prior_cycle_start_date.month, prior_cycle_start_date.day)
next_cycle_start_date = WorkflowDateCalculator.next_cycle_start_date_after_basedate_from_dates(\
basedate, frequency, relative_start_month, relative_start_day
# Again, this is equivalent to this
next_cycle_start_date = WorkflowDateCalculator.nearest_start_date_after_basedate(\
basedate, frequency, relative_start_month, relative_start_day+1)
Again, the dates returned by all of the methods above do not adjust for weekends or holidays.
To adjust a date for weekends & holidays you have three (static) methods to use:
# Know that direction = 1 means forward, direction = -1 means backward
WorkflowDateCalculator.nearest_work_day(your_date, direction)
# You can also use the slightly more obvious methods which hide the complexity of understanding what the direction
# values mean.
WorkflowDateCalculator.adjust_start_date(frequency, your_start_date)
WorkflowDateCalculator.adjust_end_date(frequency, your_end_date)
One thing to note is:
TaskGroupTasks for one_time workflows store their date values as start_date & end_date.
TaskGroupTasks for non-one_time workflows store their date values as
relative_start_{month|day} & relative_end_{month|day}
The logic and tests have taken this into account.
'''
class WorkflowDateCalculator(object):
    def __init__(self, workflow=None):
        # The workflow whose task dates this calculator derives; may be None
        # when only the static helper methods are used.
        self.workflow = workflow
'''
direction = 1 indicates FORWARD
direction = -1 indicates BACKWARD
'''
@staticmethod
def nearest_work_day(date_, direction, frequency):
if date_ is None:
return None
year = date.today().year
holidays = [
date(year=year, month=1, day=1), # Jan 01 New Year's Day
date(year=year, month=1, day=19), # Jan 19 Martin Luther King Day
date(year=year, month=2, day=16), # Feb 16 President's Day
date(year=year, month=5, day=25), # May 25 Memorial Day
date(year=year, month=7, day=2), # Jul 02 Independence Day Holiday
date(year=year, month=7, day=3), # Jul 03 Independence Day Eve
date(year=year, month=9, day=7), # Sep 07 Labor Day
date(year=year, month=11, day=26), # Nov 26 Thanksgiving Day
date(year=year, month=11, day=27), # Nov 27 Thanksgiving Day 2
date(year=year, month=12, day=23), # Dec 23 Christmas Holiday
date(year=year, month=12, day=24), # Dec 24 Christmas Eve
date(year=year, month=12, day=25), # Dec 25 Christmas Day
date(year=year, month=12, day=31), # Dec 31 New Year's Eve
]
if frequency != "one_time":
holidays = []
while date_.isoweekday() > 5 or date_ in holidays:
date_ = date_ + timedelta(direction)
return date_
@staticmethod
def adjust_start_date(frequency, start_date):
return WorkflowDateCalculator.nearest_work_day(start_date, 1, frequency)
@staticmethod
def adjust_end_date(frequency, end_date):
return WorkflowDateCalculator.nearest_work_day(end_date, -1, frequency)
def nearest_start_date_after_basedate(self, basedate):
frequency = self.workflow.frequency
min_relative_start_day = self._min_relative_start_day_from_tasks()
min_relative_start_month = self._min_relative_start_month_from_tasks()
# Both min_relative_start values will be None when the workflow has no tasks.
if min_relative_start_day is None and min_relative_start_month is None:
return None
return WorkflowDateCalculator.nearest_start_date_after_basedate_from_dates(
basedate, frequency, min_relative_start_month, min_relative_start_day)
    @staticmethod
    def nearest_start_date_after_basedate_from_dates(
            basedate, frequency, relative_start_month, relative_start_day):
        """First cycle start date on or after *basedate*.

        Interpretation of the relative_* values depends on *frequency*:
          one_time  -- absolute month/day within basedate's year
          weekly    -- relative_start_day is an ISO weekday (1=Mon .. 7=Sun)
          monthly   -- relative_start_day is a day of month
          quarterly -- relative_start_month is a 1-3 index inside the quarter
          annually  -- calendar month/day
        Returns None for a None basedate or an unknown frequency.  Dates are
        raw -- not adjusted for weekends/holidays (see nearest_work_day).
        """
        if basedate is None:
            return None
        if "one_time" == frequency:
            return date(year=basedate.year, month=relative_start_month, day=relative_start_day)
        elif "weekly" == frequency:
            if relative_start_day == basedate.isoweekday():
                return basedate
            elif relative_start_day > basedate.isoweekday():
                # Later this week.
                day_delta = relative_start_day - basedate.isoweekday()
                return basedate + timedelta(days=day_delta)
            elif relative_start_day < basedate.isoweekday():
                # Already passed this week: same weekday next week.
                day_delta = basedate.isoweekday() - relative_start_day
                return basedate + timedelta(days=7 - day_delta)
        elif "monthly" == frequency:
            if relative_start_day == basedate.day:
                return basedate
            elif relative_start_day > basedate.day:
                day_delta = relative_start_day - basedate.day
                return basedate + timedelta(days=day_delta)
            elif relative_start_day < basedate.day:
                # Walk back to the target day, then jump one month forward so
                # the result stays after basedate.
                start_date = basedate
                while start_date.day > relative_start_day:
                    start_date = start_date + timedelta(days=-1)
                return start_date + monthdelta(1)
        elif "quarterly" == frequency:
            base_quarter_month = basedate.month % 3
            # We want 1-3 indexing instead of 0-2
            if base_quarter_month == 0:
                base_quarter_month = 3
            min_relative_start_quarter_month = relative_start_month
            if min_relative_start_quarter_month == base_quarter_month:
                if relative_start_day == basedate.day:
                    return basedate  # Start today
                elif relative_start_day < basedate.day:
                    # Past this quarter's start day: same relative day, next
                    # quarter.
                    start_date = date(basedate.year, basedate.month, basedate.day)
                    start_date = start_date + monthdelta(3)
                    day_delta = -1 * (basedate.day - relative_start_day)
                    start_date = start_date + timedelta(days=day_delta)
                    return start_date
                else:
                    return date(year=basedate.year, month=basedate.month, day=relative_start_day)
            elif min_relative_start_quarter_month < base_quarter_month:
                start_date = date(
                    year=basedate.year,
                    month=basedate.month,
                    day=relative_start_day
                ) + monthdelta(1)
                tmp_start_date = start_date
                tmp_quarter_month = tmp_start_date.month % 3
                if tmp_quarter_month == 0:
                    tmp_quarter_month = 3
                month_counter = 1
                # NOTE(review): month_counter is never incremented, so each
                # iteration recomputes the same candidate; for the reachable
                # quarter-month combinations the loop exits after at most one
                # pass, but confirm before relying on it.
                while tmp_quarter_month < min_relative_start_quarter_month:
                    # Use start_date + monthdelta instead of adding 1 month at a time
                    # with monthdelta(1) because monthdelta(1) adjusts the end date of
                    # the month for the number of days in the month.
                    tmp_start_date = start_date + monthdelta(month_counter)
                    tmp_quarter_month = tmp_start_date.month % 3
                    if tmp_quarter_month == 0:
                        tmp_quarter_month = 3
                return tmp_start_date
            else:  # min_relative_start_quarter_month > base_quarter_month: Walk forward to a valid month
                delta = abs(relative_start_month - base_quarter_month)
                start_date = basedate + monthdelta(int(delta))  # int cast because delta is a long
                return date(
                    year=start_date.year,
                    month=start_date.month,
                    day=relative_start_day  # we are hoping the user didn't enter an invalid start_date (this pattern is used throughout this file)
                )
        elif "annually" == frequency:
            if basedate.month == relative_start_month:
                if basedate.day == relative_start_day:
                    return basedate
                elif basedate.day > relative_start_day:
                    # Missed it this year: same month/day next year.
                    return date(year=basedate.year, month=relative_start_month, day=relative_start_day) + monthdelta(12)
                elif basedate.day < relative_start_day:
                    return date(year=basedate.year, month=relative_start_month, day=relative_start_day)
            elif basedate.month > relative_start_month:
                return date(year=basedate.year, month=relative_start_month, day=relative_start_day) + monthdelta(12)
            else:
                return date(year=basedate.year, month=relative_start_month, day=relative_start_day)
        else:
            # Unknown frequency: fall through, implicitly returning None.
            pass
def nearest_end_date_after_start_date(self, start_date):
    """Return the earliest end date on/after start_date for this workflow.

    Pulls the maximum relative end month/day across all tasks and
    delegates to the static helper.

    :param start_date: datetime.date or None (workflow with no tasks).
    :returns: datetime.date or None.
    """
    frequency = self.workflow.frequency
    #TODO: fix the entire logic here. months and days can't be calculated separately
    max_relative_end_day = self._max_relative_end_day_from_tasks()
    max_relative_end_month = self._max_relative_end_month_from_tasks()
    return WorkflowDateCalculator.nearest_end_date_after_start_date_from_dates(
        frequency, start_date, max_relative_end_month, max_relative_end_day)
@staticmethod
def nearest_end_date_after_start_date_from_dates(frequency, start_date, end_month, end_day):
# Handle no start_date, which will happen when the workflow has no tasks.
if start_date is None:
return None
if "one_time" == frequency:
end_day = min(monthrange(start_date.year, end_month)[1], end_day)
end_date = date(year=start_date.year, month=end_month, day=end_day)
if end_date < start_date:
raise ValueError("End date cannot be before start date.")
return end_date
elif "weekly" == frequency:
if end_day == start_date.isoweekday():
return start_date
elif end_day < start_date.isoweekday():
return start_date + timedelta(days=end_day + (7 - start_date.isoweekday()))
else:
return start_date + timedelta(days=(end_day - start_date.isoweekday()))
elif "monthly" == frequency:
if end_day == start_date.day:
return start_date
elif end_day < start_date.day:
end_date = start_date + monthdelta(1)
while end_date.day > end_day:
end_date = end_date + timedelta(days=-1)
return end_date
else:
return start_date + timedelta(days=(end_day - start_date.day))
elif "quarterly" == frequency:
start_quarter_month = start_date.month % 3
# Offset month because we want 1-based indexing, not 0-based
if start_quarter_month == 0:
start_quarter_month = 3
if start_quarter_month == end_month:
if start_date.day == end_day:
return start_date
elif start_date.day < end_day:
return date(year=start_date.year, month=start_date.month, day=end_day)
else:
_end_month = start_date.month + 3
_year = start_date.year
if _end_month > 12:
_year += _end_month / 12
_end_month = (_end_month % 12)
return date(year=_year, month=_end_month, day=end_day)
elif start_quarter_month < end_month:
return date(
year=start_date.year,
month=start_date.month + (end_month - start_quarter_month),
day=end_day)
else:
end_date = date(
year=start_date.year,
month=start_date.month,
day=end_day
) + monthdelta(1)
tmp_end_date = end_date
tmp_quarter_month = tmp_end_date.month % 3
if tmp_quarter_month == 0:
tmp_quarter_month = 3
month_counter = 1
# Can't use less_than operator here because of the looping
# around quarters.
while tmp_quarter_month != end_month:
# Use start_date + monthdelta instead of adding 1 month at a time
# with monthdelta(1) because monthdelta(1) adjusts the end date of
# the month for the number of days in the month.
tmp_end_date = end_date + monthdelta(month_counter)
tmp_quarter_month = tmp_end_date.month % 3
if tmp_quarter_month == 0:
tmp_quarter_month = 3
return tmp_end_date
elif "annually" == frequency:
if start_date.month == end_month:
if start_date.day == end_day:
return start_date
elif start_date.day < end_day:
return date(year=start_date.year, month=start_date.month, day=end_day)
else:
return date(year=start_date.year, month=start_date.month, day=end_day) + monthdelta(12)
elif start_date.month < end_month:
return date(year=start_date.year, month=end_month, day=end_day)
else:
return date(year=start_date.year, month=end_month, day=end_day) + monthdelta(12)
else:
pass
@staticmethod
def next_cycle_start_date_after_start_date(start_date, frequency):
if start_date is None:
return None
if "one_time" == frequency:
return start_date
elif "weekly" == frequency:
return start_date + timedelta(days=7)
elif "monthly" == frequency:
return start_date + monthdelta(1)
elif "quarterly" == frequency:
return start_date + monthdelta(3)
elif "annually" == frequency:
return start_date + monthdelta(12)
else:
pass
@staticmethod
def next_cycle_start_date_after_basedate_from_dates(
        basedate, frequency, relative_start_month, relative_start_day):
    """Start date of the cycle after the one nearest to basedate."""
    nearest = (
        WorkflowDateCalculator.nearest_start_date_after_basedate_from_dates(
            basedate, frequency, relative_start_month, relative_start_day))
    return WorkflowDateCalculator.next_cycle_start_date_after_start_date(
        nearest, frequency)
def next_cycle_start_date_after_basedate(self, basedate):
    """Instance wrapper: next cycle start relative to basedate."""
    nearest = self.nearest_start_date_after_basedate(basedate)
    return WorkflowDateCalculator.next_cycle_start_date_after_start_date(
        nearest, self.workflow.frequency)
@staticmethod
def previous_cycle_start_date_before_basedate_from_dates(
        basedate, frequency, relative_start_month, relative_start_day):
    """Start date of the cycle before the one nearest to basedate."""
    nearest = (
        WorkflowDateCalculator.nearest_start_date_after_basedate_from_dates(
            basedate, frequency, relative_start_month, relative_start_day))
    return WorkflowDateCalculator.previous_cycle_start_date(nearest, frequency)
@staticmethod
def previous_cycle_start_date(start_date, frequency):
if start_date is None:
return None
if "one_time" == frequency:
return start_date
elif "weekly" == frequency:
return start_date + timedelta(days=-7)
elif "monthly" == frequency:
return start_date + monthdelta(-1)
elif "quarterly" == frequency:
return start_date + monthdelta(-3)
elif "annually" == frequency:
return start_date + monthdelta(-12)
else:
pass
@staticmethod
def relative_month_from_date(_date, frequency):
if "one_time" == frequency:
return _date.month
elif "weekly" == frequency:
return None
elif "monthly" == frequency:
return None
elif "quarterly" == frequency:
month = _date.month % 3
if month == 0:
month = 3
return month
elif "annually" == frequency:
return _date.month
else:
pass
@staticmethod
def relative_day_from_date(_date, frequency):
if "one_time" == frequency:
return _date.day
elif "weekly" == frequency:
return _date.isoweekday()
elif "monthly" == frequency:
return _date.day
elif "quarterly" == frequency:
return _date.day
elif "annually" == frequency:
return _date.day
else:
pass
def previous_cycle_start_date_before_basedate(self, basedate):
    """Instance wrapper: previous cycle start relative to basedate."""
    nearest = self.nearest_start_date_after_basedate(basedate)
    return WorkflowDateCalculator.previous_cycle_start_date(
        nearest, self.workflow.frequency)
def _min_relative_start_month_from_tasks(self):
    """Smallest relative start month across all tasks, or None if none."""
    frequency = self.workflow.frequency
    minimum = None
    for group in self.workflow.task_groups:
        for task in group.task_group_tasks:
            if frequency == "one_time":
                # One-time tasks store absolute dates; derive the month.
                candidate = WorkflowDateCalculator.relative_month_from_date(
                    task.start_date, frequency)
            else:
                candidate = task.relative_start_month
            if minimum is None or candidate < minimum:
                minimum = candidate
    return minimum
def _min_relative_start_day_from_tasks(self):
    """Smallest relative start day across all tasks, or None if none."""
    frequency = self.workflow.frequency
    minimum = None
    for group in self.workflow.task_groups:
        for task in group.task_group_tasks:
            if frequency == "one_time":
                # One-time tasks store absolute dates; derive the day.
                candidate = WorkflowDateCalculator.relative_day_from_date(
                    task.start_date, frequency)
            else:
                candidate = task.relative_start_day
            if minimum is None or candidate < minimum:
                minimum = candidate
    return minimum
def _max_relative_end_day_from_tasks(self):
    """Largest relative end day across all tasks, or None if none."""
    frequency = self.workflow.frequency
    maximum = None
    for group in self.workflow.task_groups:
        for task in group.task_group_tasks:
            if frequency == "one_time":
                # One-time tasks store absolute dates; derive the day.
                candidate = WorkflowDateCalculator.relative_day_from_date(
                    task.end_date, frequency)
            else:
                candidate = task.relative_end_day
            if maximum is None or candidate > maximum:
                maximum = candidate
    return maximum
def _max_relative_end_month_from_tasks(self):
    """Largest relative end month across all tasks, or None if none."""
    frequency = self.workflow.frequency
    maximum = None
    for group in self.workflow.task_groups:
        for task in group.task_group_tasks:
            if frequency == "one_time":
                # One-time tasks store absolute dates; derive the month.
                candidate = WorkflowDateCalculator.relative_month_from_date(
                    task.end_date, frequency)
            else:
                candidate = task.relative_end_month
            if maximum is None or candidate > maximum:
                maximum = candidate
    return maximum
| |
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
    """Tests the hypothesis that a coin is fair."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: pair of (heads, tails) counts
        """
        heads, tails = data
        return abs(heads - tails)

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated (heads, tails) counts for a fair coin
        """
        heads, tails = self.data
        flips = heads + tails
        outcomes = [random.choice('HT') for _ in range(flips)]
        hist = thinkstats2.Hist(outcomes)
        return hist['H'], hist['T']
class DiffMeansPermute(thinkstats2.HypothesisTest):
    """Tests a difference in means by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: pair of group sequences
        """
        group1, group2 = data
        return abs(group1.mean() - group2.mean())

    def MakeModel(self):
        """Build the null model: pool both groups into one array."""
        group1, group2 = self.data
        self.n, self.m = len(group1), len(group2)
        self.pool = np.hstack((group1, group2))

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data (two freshly permuted groups)
        """
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
class DiffMeansOneSided(DiffMeansPermute):
    """Tests a one-sided difference in means by permutation."""

    def TestStatistic(self, data):
        """Computes the signed (one-sided) difference in group means.

        data: pair of group sequences
        """
        group1, group2 = data
        return group1.mean() - group2.mean()
class DiffStdPermute(DiffMeansPermute):
    """Tests a one-sided difference in standard deviation by permutation."""

    def TestStatistic(self, data):
        """Computes the signed difference in group standard deviations.

        data: pair of group sequences
        """
        group1, group2 = data
        return group1.std() - group2.std()
class CorrelationPermute(thinkstats2.HypothesisTest):
    """Tests correlations by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: tuple of xs and ys
        """
        xs, ys = data
        return abs(thinkstats2.Corr(xs, ys))

    def RunModel(self):
        """Permute xs while holding ys fixed.

        returns: simulated data
        """
        xs, ys = self.data
        return np.random.permutation(xs), ys
class DiceTest(thinkstats2.HypothesisTest):
    """Tests whether a six-sided die is fair."""

    def TestStatistic(self, data):
        """Total absolute deviation of observed from expected frequencies.

        data: list of frequencies
        """
        observed = data
        total = sum(observed)
        expected = np.ones(6) * total / 6
        return sum(abs(observed - expected))

    def RunModel(self):
        """Simulate rolls of a fair die.

        returns: simulated list of frequencies
        """
        total = sum(self.data)
        faces = [1, 2, 3, 4, 5, 6]
        rolls = np.random.choice(faces, total, replace=True)
        hist = thinkstats2.Hist(rolls)
        return hist.Freqs(faces)
class DiceChiTest(DiceTest):
    """Tests a six-sided die using a chi-squared statistic."""

    def TestStatistic(self, data):
        """Chi-squared deviation of observed from expected frequencies.

        data: list of frequencies
        """
        observed = data
        total = sum(observed)
        expected = np.ones(6) * total / 6
        return sum((observed - expected)**2 / expected)
class PregLengthTest(thinkstats2.HypothesisTest):
    """Tests difference in pregnancy length using a chi-squared statistic."""

    def TestStatistic(self, data):
        """Chi-squared statistic summed over both groups.

        data: pair of lists of pregnancy lengths
        """
        firsts, others = data
        return self.ChiSquared(firsts) + self.ChiSquared(others)

    def ChiSquared(self, lengths):
        """Computes the chi-squared statistic for one group.

        lengths: sequence of lengths

        returns: float
        """
        hist = thinkstats2.Hist(lengths)
        observed = np.array(hist.Freqs(self.values))
        expected = self.expected_probs * len(lengths)
        return sum((observed - expected)**2 / expected)

    def MakeModel(self):
        """Pool both groups and record expected per-week probabilities."""
        firsts, others = self.data
        self.n = len(firsts)
        self.pool = np.hstack((firsts, others))
        pmf = thinkstats2.Pmf(self.pool)
        # Weeks 35..43 cover the bulk of the distribution.
        self.values = range(35, 44)
        self.expected_probs = np.array(pmf.Probs(self.values))

    def RunModel(self):
        """Shuffle the pool and split it back into two groups.

        returns: simulated data
        """
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
def RunDiceTest():
    """Tests whether a die is fair, with both statistics."""
    data = [8, 9, 19, 5, 8, 11]
    abs_test = DiceTest(data)
    print('dice test', abs_test.PValue(iters=10000))
    chi_test = DiceChiTest(data)
    print('dice chi test', chi_test.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
    """Computes the chance of a false negative based on resampling.

    data: pair of sequences
    num_runs: how many experiments to simulate

    returns: float false negative rate
    """
    group1, group2 = data
    misses = 0
    for _ in range(num_runs):
        resampled = (thinkstats2.Resample(group1),
                     thinkstats2.Resample(group2))
        test = DiffMeansPermute(resampled)
        # p > 0.05 means the (real) effect was not detected.
        if test.PValue(iters=101) > 0.05:
            misses += 1
    return misses / num_runs
def PrintTest(p_value, ht):
    """Prints results from a hypothesis test.

    Shows the p-value, the observed (actual) test statistic, and the
    largest simulated test statistic seen under the null hypothesis.

    p_value: float
    ht: HypothesisTest
    """
    print('p-value =', p_value)
    print('actual =', ht.actual)
    print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
    """Runs several tests on the given data.

    Runs the two-sided and one-sided difference-in-means permutation
    tests and the one-sided difference-in-std test, printing each
    result, and saves a CDF plot for the two-sided test.

    data: pair of sequences
    iters: number of iterations to run
    """
    # test the difference in means
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute two-sided')
    PrintTest(p_value, ht)
    # Save the sampling distribution of the two-sided statistic.
    ht.PlotCdf()
    thinkplot.Save(root='hypothesis1',
                   title='Permutation test',
                   xlabel='difference in means (weeks)',
                   ylabel='CDF',
                   legend=False)
    # test the difference in means one-sided
    ht = DiffMeansOneSided(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute one-sided')
    PrintTest(p_value, ht)
    # test the difference in std
    ht = DiffStdPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nstd permute one-sided')
    PrintTest(p_value, ht)
def ReplicateTests():
    """Replicates tests with the new NSFG data.

    Re-runs the pregnancy-length, birth-weight, correlation and
    chi-squared tests against the nsfg2 data set and prints results.
    """
    live, firsts, others = nsfg2.MakeFrames()
    # compare pregnancy lengths
    print('\nprglngth2')
    data = firsts.prglngth.values, others.prglngth.values
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    # compare birth weights (drop missing values first)
    print('\nbirth weight 2')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    # test correlation between mother's age and birth weight
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation 2')
    PrintTest(p_value, ht)
    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared 2')
    PrintTest(p_value, ht)
def main():
    """Run every hypothesis test in the chapter, with a fixed seed.

    Seeding makes the permutation-test output reproducible from run
    to run.
    """
    thinkstats2.RandomSeed(17)
    # run the coin test
    ct = CoinTest((140, 110))
    pvalue = ct.PValue()
    print('coin test p-value', pvalue)
    # compare pregnancy lengths
    print('\nprglngth')
    live, firsts, others = first.MakeFrames()
    data = firsts.prglngth.values, others.prglngth.values
    RunTests(data)
    # compare birth weights (drop missing values first)
    print('\nbirth weight')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    # test correlation between mother's age and birth weight
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation')
    print('n=', len(live2))
    PrintTest(p_value, ht)
    # run the dice test
    RunDiceTest()
    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared')
    PrintTest(p_value, ht)
    # compute the false negative rate for difference in pregnancy length
    data = firsts.prglngth.values, others.prglngth.values
    neg_rate = FalseNegRate(data)
    print('false neg rate', neg_rate)
    # run the tests with new nsfg data
    ReplicateTests()


if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
import sys
from gi.repository import Gtk, Gio, Gdk, GObject
from asciiplayback import *
from gtkasciiplayer import *
from revealerexpander import *
from frameedit import *
class ASCIImatorDesktop(Gtk.Window):
    """Main application window: a two-pane ASCII-animation editor.

    Left pane: a collapsible option sidebar; right pane: a Gtk.Stack
    holding the Edit and Preview panels. A split HeaderBar mirrors the
    two panes above them.
    """

    def __init__(self):
        Gtk.Window.__init__(self, title="ASCIImator Desktop")
        self.set_default_size(640, 400)
        # Load an asciimation from the command line, or fall back to a
        # small placeholder animation.
        if len(sys.argv) > 1:
            self.filename = sys.argv[1]
            self.asciimation = ASCIImation(filename=self.filename)
            self.player = ASCIIPlayback(self.asciimation, speed=0)
        else:
            self.filename = ""
            self.asciimation = ASCIImation(font_family='monospace', size=[15, 3])
            self.asciimation.frames.append(Frame(text='\nNo file loaded!\n'))
            self.player = ASCIIPlayback(asciimation=self.asciimation, speed=0)
        # print('\n'.join(Gtk.IconTheme().list_icons()))
        # Keeps the sidebar and the left header bar the same width.
        self.hsize_group = Gtk.SizeGroup(Gtk.SizeGroupMode.HORIZONTAL)
        # NOTE: main_content() must run before sidebar() — sidebar()
        # packs self.stack_switcher, which main_content() creates.
        right_box = self.main_content()
        left_box = self.sidebar()
        separator = Gtk.Separator(orientation=Gtk.Orientation.VERTICAL)
        hb = self.headerbar()
        self.set_titlebar(hb)
        box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        box.pack_start(left_box, False, False, 0)
        box.pack_start(separator, False, False, 0)
        box.pack_start(right_box, True, True, 0)
        self.add(box)

    def headerbar(self):
        """Build a split header bar (one HeaderBar per pane).

        Window-control buttons are divided between the two bars
        according to the user's gtk-decoration-layout setting; the
        right bar also carries the open/save buttons and the filename
        title.
        """
        header = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        left_header = Gtk.HeaderBar()
        left_header.props.show_close_button = True
        right_header = Gtk.HeaderBar()
        right_header.props.show_close_button = True
        # Style classes let the CSS below join the two bars visually.
        left_header.get_style_context().add_class("titlebar")
        left_header.get_style_context().add_class("titlebar-left")
        right_header.get_style_context().add_class("titlebar")
        right_header.get_style_context().add_class("titlebar-right")
        # Split the "left:right" decoration layout across the two bars.
        layout_desc = Gtk.Settings.get_default().props.gtk_decoration_layout
        tokens = layout_desc.split(":", 2)
        # NOTE(review): str.split() never returns None, so this check is
        # always true (`is not None` would at least be idiomatic); also
        # tokens[1] would raise IndexError if the layout has no ":" —
        # presumably GTK always supplies one. TODO confirm.
        if tokens != None:
            right_header.props.decoration_layout = ":" + tokens[1]
            left_header.props.decoration_layout = tokens[0]
        # The current file name doubles as the right-hand title.
        self.title = Gtk.Label(self.filename)
        self.title.get_style_context().add_class("title")
        right_header.set_custom_title(self.title)
        # Save button (no click handler connected yet).
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="document-save-symbolic"),
                                Gtk.IconSize.BUTTON))
        right_header.pack_end(button)
        # Open button.
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="document-open-symbolic"),
                                Gtk.IconSize.BUTTON))
        button.connect("clicked", self.do_open)
        right_header.pack_end(button)
        left_header.props.title = "ASCIImator Desktop"
        left_header.props.subtitle = "Offline ASCII Animator"
        header.pack_start(left_header, False, False, 0)
        header.pack_start(Gtk.Separator(orientation=Gtk.Orientation.VERTICAL),
                          False, False, 0)
        header.pack_start(right_header, True, True, 0)
        # Keep the left header exactly as wide as the sidebar below it.
        self.hsize_group.add_widget(left_header)
        return header

    def sidebar(self):
        """Build the left sidebar: expander sections over an action bar.

        The "_View" section holds playback/canvas options; the other
        sections are placeholders. The stack switcher (created in
        main_content) is docked at the bottom.
        """
        left = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        expander_scroll = Gtk.ScrolledWindow()
        expander_scroll.set_policy(Gtk.PolicyType.NEVER,
                                   Gtk.PolicyType.AUTOMATIC)
        expander_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        expander = RevealerExpander("_View")
        view = Gtk.ListBox()
        view.set_selection_mode(Gtk.SelectionMode.NONE)
        # Keeps the input widgets of the option rows equally wide.
        optionsize = Gtk.SizeGroup(Gtk.SizeGroupMode.HORIZONTAL)
        # Row: loop playback on/off.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=12)
        row.add(hbox)
        label = Gtk.Label("Loop", xalign=0)
        switch = Gtk.Switch()
        switch.set_active(True)
        hbox.pack_start(label, True, True, 12)
        hbox.pack_start(switch, False, True, 12)
        Gtk.StyleContext.add_class(row.get_style_context(), "option")
        view.add(row)
        # Row: frame duration in milliseconds.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=12)
        row.add(hbox)
        label = Gtk.Label("ms/frame", xalign=0)
        spin = Gtk.SpinButton.new_with_range(0, 1000, 5)
        spin.set_value(100)
        optionsize.add_widget(spin)
        hbox.pack_start(label, True, True, 12)
        hbox.pack_start(spin, False, True, 12)
        Gtk.StyleContext.add_class(row.get_style_context(), "option")
        view.add(row)
        # Row: display font.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=12)
        row.add(hbox)
        label = Gtk.Label("Font", xalign=0)
        font = Gtk.FontButton.new_with_font("Courier New 12")
        optionsize.add_widget(font)
        hbox.pack_start(label, True, True, 12)
        hbox.pack_start(font, False, False, 12)
        Gtk.StyleContext.add_class(row.get_style_context(), "option")
        view.add(row)
        # Row: canvas width in characters.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=12)
        row.add(hbox)
        label = Gtk.Label("Width", xalign=0)
        spin = Gtk.SpinButton.new_with_range(1, 1000, 1)
        spin.set_value(20)
        optionsize.add_widget(spin)
        hbox.pack_start(label, True, True, 12)
        hbox.pack_start(spin, False, True, 12)
        Gtk.StyleContext.add_class(row.get_style_context(), "option")
        view.add(row)
        # Row: canvas height in characters.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=12)
        row.add(hbox)
        label = Gtk.Label("Height", xalign=0)
        spin = Gtk.SpinButton.new_with_range(1, 1000, 1)
        spin.set_value(10)
        optionsize.add_widget(spin)
        hbox.pack_start(label, True, True, 12)
        hbox.pack_start(spin, False, True, 12)
        Gtk.StyleContext.add_class(row.get_style_context(), "option")
        view.add(row)
        expander.add(view)
        expander.set_expanded(True)
        expander_box.add(expander)
        # Placeholder sections (FontButtons stand in for real content).
        expander = RevealerExpander("_Color")
        expander.add(Gtk.ColorButton())
        expander_box.add(expander)
        expander = RevealerExpander("_Frame")
        expander.add(Gtk.FontButton())
        expander_box.add(expander)
        expander = RevealerExpander("_Layer")
        expander.add(Gtk.FontButton())
        expander_box.add(expander)
        expander = RevealerExpander("_Replace")
        expander.add(Gtk.FontButton())
        expander_box.add(expander)
        expander_scroll.add(expander_box)
        left.pack_start(expander_scroll, True, True, 0)
        # Dock the Edit/Preview switcher at the bottom of the sidebar.
        ab = Gtk.ActionBar()
        ab.set_center_widget(self.stack_switcher)
        left.pack_end(ab, False, False, 0)
        self.hsize_group.add_widget(left)
        return left

    def main_content(self):
        """Build the right-hand stack with the Edit and Preview panels.

        Also creates self.stack_switcher, which sidebar() packs later.
        """
        stack = Gtk.Stack()
        stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
        stack.set_transition_duration(250)
        self.stack_switcher = Gtk.StackSwitcher()
        self.stack_switcher.set_stack(stack)
        #### Edit panel ####
        edit = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        edit_window = Gtk.ScrolledWindow()
        edit_flow = Gtk.FlowBox()
        edit_flow.set_homogeneous(False)
        edit_flow.set_column_spacing(12)
        edit_flow.set_row_spacing(12)
        # Eight placeholder frame editors for now.
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        frame = FrameEdit()
        edit_flow.add(frame)
        edit_window.add(edit_flow)
        edit.pack_start(edit_window, True, True, 0)
        # Edit toolbar: undo/redo/add/remove/copy/paste (no handlers
        # connected yet).
        ab = Gtk.ActionBar()
        ab_buttons = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        Gtk.StyleContext.add_class(ab_buttons.get_style_context(), "linked")
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="edit-undo-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="edit-redo-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="list-add-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="list-remove-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="edit-copy-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        button = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                name="edit-paste-symbolic"),
                                Gtk.IconSize.BUTTON))
        ab_buttons.add(button)
        ab.set_center_widget(ab_buttons)
        edit.pack_end(ab, False, False, 0)
        stack.add_titled(edit, "edit", "Edit")
        #### Preview panel ####
        preview = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.video = GtkASCIIPlayer(self.player)
        preview.pack_start(self.video, True, False, 0)
        ab = Gtk.ActionBar()
        self.controls = GtkASCIIControls(self.player)
        ab.set_center_widget(self.controls)
        preview.pack_end(ab, False, False, 0)
        stack.add_titled(preview, "preview", "Preview")
        return stack

    def do_open(self, button):
        """Handle the Open button: pick a file and load it into the player."""
        dialog = Gtk.FileChooserDialog("Open", self,
                                       Gtk.FileChooserAction.OPEN,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        self.add_filters(dialog)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            # Swap in the newly loaded animation and point the existing
            # video/controls widgets at the new player.
            self.filename = dialog.get_filename()
            self.title.set_text(self.filename)
            self.asciimation = ASCIImation(filename=self.filename)
            self.player = ASCIIPlayback(asciimation=self.asciimation, speed=0)
            self.video.player = self.player
            self.controls.player = self.player
        elif response == Gtk.ResponseType.CANCEL:
            pass
        dialog.destroy()

    def add_filters(self, dialog):
        """Attach "JSON files" and "All files" filters to a file chooser."""
        filter_json = Gtk.FileFilter()
        filter_json.set_name("JSON files")
        filter_json.add_mime_type("application/json")
        dialog.add_filter(filter_json)
        filter_any = Gtk.FileFilter()
        filter_any.set_name("All files")
        filter_any.add_pattern("*")
        dialog.add_filter(filter_any)
win = ASCIImatorDesktop()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
style_provider = Gtk.CssProvider()
css = b"""
.titlebar-left:dir(ltr),
.titlebar-left:dir(rtl) {
border-top-right-radius: 0;
}
.titlebar-right:dir(ltr),
.titlebar-right:dir(rtl) {
border-top-left-radius: 0;
}
.option {
padding-top: 3px;
background-color: @theme_bg_color;
}
.option:hover {
background-color: @theme_bg_color;
}
"""
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
Gtk.main()
| |
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###
### Codes from neutron wsgi
###
import logging
from xml.etree import ElementTree as etree
from xml.parsers import expat
from oslo.serialization import jsonutils
import six
from neutronclient.common import constants
from neutronclient.common import exceptions as exception
from neutronclient.i18n import _
LOG = logging.getLogger(__name__)
if six.PY3:
long = int
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method.

        Looks up the method named by the 'action' keyword (popped from
        kwargs, defaulting to 'default') and invokes it with the
        remaining arguments; unknown actions fall back to default().
        """
        action = kwargs.pop('action', 'default')
        handler = getattr(self, str(action), self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        # Subclasses must supply a fallback handler.
        raise NotImplementedError()
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Serialize *data* by dispatching on *action*."""
        return self.dispatch(data, action=action)

    def default(self, data):
        # Base behavior: serialize everything to the empty string.
        return ""
class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        # Non-JSON-serializable objects are decoded as UTF-8 bytes.
        # NOTE(review): six.text_type(obj, 'utf8') only works for
        # bytes-like objects; anything else raises TypeError — verify
        # against callers before widening.
        return jsonutils.dumps(
            data, default=lambda obj: six.text_type(obj, 'utf8'))
class XMLDictSerializer(DictSerializer):
    """Serializes a dict payload into a Neutron v2.0 XML document."""

    def __init__(self, metadata=None, xmlns=None):
        """XMLDictSerializer constructor.

        :param metadata: information needed to deserialize XML into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized XML
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        # Namespace precedence: explicit arg, then metadata, then the
        # Neutron v2.0 default.
        if not xmlns:
            xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns

    def default(self, data):
        """Default serializer of XMLDictSerializer.

        :param data: expect data to contain a single key as XML root, or
                     contain another '*_links' key as atom links. Other
                     case will use 'VIRTUAL_ROOT_KEY' as XML root.
        """
        try:
            links = None
            has_atom = False
            if data is None:
                root_key = constants.VIRTUAL_ROOT_KEY
                root_value = None
            else:
                # Pull out atom links ('*_links') so they can be emitted
                # as <atom:link> child nodes instead of plain data.
                link_keys = [k for k in six.iterkeys(data) or []
                             if k.endswith('_links')]
                if link_keys:
                    links = data.pop(link_keys[0], None)
                    has_atom = True
                # A single remaining key becomes the document root;
                # otherwise a virtual root wraps the whole dict.
                root_key = (len(data) == 1 and
                            list(data.keys())[0] or constants.VIRTUAL_ROOT_KEY)
                root_value = data.get(root_key, data)
            doc = etree.Element("_temp_root")
            used_prefixes = []
            self._to_xml_node(doc, self.metadata, root_key,
                              root_value, used_prefixes)
            if links:
                self._create_link_nodes(list(doc)[0], links)
            return self.to_xml_string(list(doc)[0], used_prefixes, has_atom)
        except AttributeError as e:
            # Malformed input (e.g. non-dict data): log and return an
            # empty body instead of propagating.
            LOG.exception(str(e))
            return ''

    def __call__(self, data):
        # Provides a migration path to a cleaner WSGI layer, this
        # "default" stuff and extreme extensibility isn't being used
        # like originally intended
        return self.default(data)

    def to_xml_string(self, node, used_prefixes, has_atom=False):
        """Render *node* (with namespace declarations) to UTF-8 output."""
        self._add_xmlns(node, used_prefixes, has_atom)
        return etree.tostring(node, encoding='UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # XML serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, used_prefixes, has_atom=False):
        """Attach default, type, atom/xsi and extension namespaces."""
        node.set('xmlns', self.xmlns)
        node.set(constants.TYPE_XMLNS, self.xmlns)
        if has_atom:
            node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE)
        node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE)
        ext_ns = self.metadata.get(constants.EXT_NS, {})
        # Only declare the extension prefixes that actually occur.
        for prefix in used_prefixes:
            if prefix in ext_ns:
                node.set('xmlns:' + prefix, ext_ns[prefix])

    def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes):
        """Recursive method to convert data members to XML nodes."""
        result = etree.SubElement(parent, nodename)
        if ":" in nodename:
            # Prefixed tag: remember the prefix for xmlns declarations.
            used_prefixes.append(nodename.split(":", 1)[0])
        #TODO(bcwaldon): accomplish this without a type-check
        if isinstance(data, list):
            if not data:
                # Empty list: tag the element so the type survives.
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_LIST)
                return result
            # Element name for list items: explicit plural mapping,
            # then naive de-pluralization, then generic 'item'.
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                self._to_xml_node(result, metadata, singular, item,
                                  used_prefixes)
        #TODO(bcwaldon): accomplish this without a type-check
        elif isinstance(data, dict):
            if not data:
                # Empty dict: tag the element so the type survives.
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_DICT)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            # Sort keys for deterministic output; keys mapped as
            # attributes become XML attributes, the rest child nodes.
            for k, v in sorted(data.items()):
                if k in attrs:
                    result.set(k, str(v))
                else:
                    self._to_xml_node(result, metadata, k, v,
                                      used_prefixes)
        elif data is None:
            # Explicit null: xsi:nil="true" with no text content.
            result.set(constants.XSI_ATTR, 'true')
        else:
            # Scalar leaf: record the Python type so the deserializer
            # can restore it. bool is tested before int because bool is
            # an int subclass; `long` is aliased to int on Python 3.
            if isinstance(data, bool):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_BOOL)
            elif isinstance(data, int):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_INT)
            elif isinstance(data, long):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_LONG)
            elif isinstance(data, float):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_FLOAT)
            LOG.debug("Data %(data)s type is %(type)s",
                      {'data': data,
                       'type': type(data)})
            result.text = six.text_type(data)
        return result

    def _create_link_nodes(self, xml_doc, links):
        """Append one <atom:link rel=.. href=..> child per link dict."""
        for link in links:
            link_node = etree.SubElement(xml_doc, 'atom:link')
            link_node.set('rel', link['rel'])
            link_node.set('href', link['href'])
class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        """Deserialize *datastring* by dispatching on *action*."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        # Base behavior: deserialize everything to an empty dict.
        return {}
class JSONDeserializer(TextDeserializer):
    """Deserializes a JSON request body into {'body': ...}."""

    def _from_json(self, datastring):
        """Parse JSON text, mapping parse errors to MalformedResponseBody."""
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            raise exception.MalformedResponseBody(
                reason=_("Cannot understand JSON"))

    def default(self, datastring):
        return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
    """Deserializer that converts XML request bodies into dictionaries.

    Python 2 era code: note the use of ``long`` below and the ``expat``
    error fallback for Python 2.6.
    """

    def __init__(self, metadata=None):
        """XMLDeserializer constructor.

        :param metadata: information needed to deserialize XML into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}
        # Default document namespace; falls back to the v2.0 API
        # namespace when the metadata does not supply one.
        xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns

    def _get_key(self, tag):
        """Map a Clark-notation tag ('{ns}tag') to its dictionary key.

        Tags in the default namespace lose the namespace entirely; tags
        in a known extension namespace become 'prefix:tag'.
        """
        tags = tag.split("}", 1)
        if len(tags) == 2:
            ns = tags[0][1:]
            bare_tag = tags[1]
            ext_ns = self.metadata.get(constants.EXT_NS, {})
            if ns == self.xmlns:
                return bare_tag
            for prefix, _ns in ext_ns.items():
                if ns == _ns:
                    return prefix + ":" + bare_tag
            # NOTE(review): a namespaced tag that matches neither the
            # default nor any extension namespace falls off the end here
            # and returns None implicitly -- confirm this is intended.
        else:
            return tag

    def _get_links(self, root_tag, node):
        """Extract atom:link children of *node* into a '<tag>_links' list.

        The link nodes are removed from *node* so that _from_xml_node
        does not also process them as regular items.
        """
        link_nodes = node.findall(constants.ATOM_LINK_NOTATION)
        # NOTE(review): the root_tag parameter is immediately shadowed by
        # the recomputed value below and is effectively unused.
        root_tag = self._get_key(node.tag)
        link_key = "%s_links" % root_tag
        link_list = []
        for link in link_nodes:
            link_list.append({'rel': link.get('rel'),
                              'href': link.get('href')})
            # Remove link node in order to avoid link node being
            # processed as an item in _from_xml_node
            node.remove(link)
        return link_list and {link_key: link_list} or {}

    def _from_xml(self, datastring):
        """Parse an XML document string into a dictionary.

        :raises exception.MalformedResponseBody: on XML parse errors.
        """
        if datastring is None:
            return None
        plurals = set(self.metadata.get('plurals', {}))
        try:
            node = etree.fromstring(datastring)
            root_tag = self._get_key(node.tag)
            links = self._get_links(root_tag, node)
            result = self._from_xml_node(node, plurals)
            # There is no case where root_tag = constants.VIRTUAL_ROOT_KEY
            # and links is not None because of the way data are serialized
            if root_tag == constants.VIRTUAL_ROOT_KEY:
                return result
            return dict({root_tag: result}, **links)
        except Exception as e:
            parseError = False
            # Python2.7
            if (hasattr(etree, 'ParseError') and
                isinstance(e, getattr(etree, 'ParseError'))):
                parseError = True
            # Python2.6
            elif isinstance(e, expat.ExpatError):
                parseError = True
            if parseError:
                msg = _("Cannot understand XML")
                raise exception.MalformedResponseBody(reason=msg)
            else:
                raise

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param node: minidom node name
        :param listnames: list of XML node names whose subnodes should
                          be considered list items.
        """
        # xsi:nil="true" round-trips a serialized None.
        attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil")))
        # NOTE(review): the type attribute is resolved against the raw
        # metadata 'xmlns' value, not self.xmlns (which has the V20
        # default applied) -- confirm these are always consistent.
        attrType = node.get(str(etree.QName(
            self.metadata.get('xmlns'), "type")))
        if (attrNil and attrNil.lower() == 'true'):
            return None
        elif not len(node) and not node.text:
            # Childless, textless node: use the recorded type hint to
            # distinguish {} and [] from the empty string.
            if (attrType and attrType == constants.TYPE_DICT):
                return {}
            elif (attrType and attrType == constants.TYPE_LIST):
                return []
            else:
                return ''
        elif (len(node) == 0 and node.text):
            # Leaf node: convert text back to the recorded scalar type.
            # NOTE: 'long' is Python 2 only.
            converters = {constants.TYPE_BOOL:
                          lambda x: x.lower() == 'true',
                          constants.TYPE_INT:
                          lambda x: int(x),
                          constants.TYPE_LONG:
                          lambda x: long(x),
                          constants.TYPE_FLOAT:
                          lambda x: float(x)}
            if attrType and attrType in converters:
                return converters[attrType](node.text)
            else:
                return node.text
        elif self._get_key(node.tag) in listnames:
            return [self._from_xml_node(n, listnames) for n in node]
        else:
            result = dict()
            # Plain XML attributes become dict entries; namespace and
            # type-bookkeeping attributes are skipped.
            for attr in node.keys():
                if (attr == 'xmlns' or
                    attr.startswith('xmlns:') or
                    attr == constants.XSI_ATTR or
                    attr == constants.TYPE_ATTR):
                    continue
                result[self._get_key(attr)] = node.get(attr)
            children = list(node)
            for child in children:
                result[self._get_key(child.tag)] = self._from_xml_node(
                    child, listnames)
            return result

    def default(self, datastring):
        """Return the parsed XML document under the 'body' key."""
        return {'body': self._from_xml(datastring)}

    def __call__(self, datastring):
        # Adding a migration path to allow us to remove unnecessary classes
        return self.default(datastring)
# NOTE(maru): this class is duplicated from neutron.wsgi
class Serializer(object):
    """Serializes and deserializes dictionaries to certain MIME types."""

    def __init__(self, metadata=None, default_xmlns=None):
        """Create a serializer based on the given WSGI environment.

        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}
        # NOTE(review): default_xmlns is stored but never read in this
        # class; kept for interface compatibility.
        self.default_xmlns = default_xmlns

    def _get_serialize_handler(self, content_type):
        """Return the serializer for *content_type*.

        :raises exception.InvalidContentType: if the type is unsupported.
        """
        handlers = {
            'application/json': JSONDictSerializer(),
            'application/xml': XMLDictSerializer(self.metadata),
        }

        try:
            return handlers[content_type]
        except KeyError:
            # Only a missing key can be raised by the lookup above;
            # catching bare Exception (as before) could mask unrelated
            # programming errors.
            raise exception.InvalidContentType(content_type=content_type)

    def serialize(self, data, content_type):
        """Serialize a dictionary into the specified content type."""
        return self._get_serialize_handler(content_type).serialize(data)

    def deserialize(self, datastring, content_type):
        """Deserialize a string to a dictionary.

        The string must be in the format of a supported MIME type.
        """
        return self.get_deserialize_handler(content_type).deserialize(
            datastring)

    def get_deserialize_handler(self, content_type):
        """Return the deserializer for *content_type*.

        :raises exception.InvalidContentType: if the type is unsupported.
        """
        handlers = {
            'application/json': JSONDeserializer(),
            'application/xml': XMLDeserializer(self.metadata),
        }

        try:
            return handlers[content_type]
        except KeyError:
            raise exception.InvalidContentType(content_type=content_type)
| |
#!/usr/bin/env python2
import logging
logging.basicConfig(level=logging.WARNING)
from functools import partial
import wx
from numpy import concatenate
from spacq import VERSION
from spacq.gui.display.plot.static.delegator import formats, available_formats
from spacq.gui.display.table.filter import FilterListDialog
from spacq.gui.display.table.generic import TabularDisplayFrame
from spacq.gui.display.plot.plotmath.derivative import DerivativeMathSetupDialog
from spacq.gui.tool.box import load_csv, MessageDialog
class DataExplorerApp(wx.App):
    """GUI application for loading CSV data and displaying it in tabular
    and graphical (2D/3D/list plot) form.
    """

    default_title = 'Data Explorer'

    def OnInit(self):
        """Build the main frame, menu bar and event bindings."""
        # Filter state, managed by (and restored into) the filter dialog.
        self.filters = {}
        self.filter_columns = {}
        self.filter_dialog = None

        # Frames.
        self.csv_frame = TabularDisplayFrame(None, title=self.default_title)

        # Menu.
        menuBar = wx.MenuBar()

        ## File.
        menu = wx.Menu()
        menuBar.Append(menu, '&File')

        item = menu.Append(wx.ID_OPEN, '&Open...')
        self.Bind(wx.EVT_MENU, self.OnMenuFileOpen, item)

        item = menu.Append(wx.ID_CLOSE, '&Close')
        self.Bind(wx.EVT_MENU, self.OnMenuFileClose, item)

        menu.AppendSeparator()

        self.filter_menu_item = menu.Append(wx.ID_ANY, '&Filters...')
        self.filter_menu_item.Enable(False)
        self.Bind(wx.EVT_MENU, self.OnMenuFileFilters, self.filter_menu_item)

        menu.AppendSeparator()

        item = menu.Append(wx.ID_EXIT, 'E&xit')
        self.Bind(wx.EVT_MENU, self.OnMenuFileExit, item)

        ## Plot.
        menu = wx.Menu()
        menuBar.Append(menu, '&Plot')

        # Disabled items act as section headers within the menu.
        menu.Append(wx.ID_ANY, ' 2D:').Enable(False)
        self.two_dimensional_menu = menu.Append(wx.ID_ANY, '&Curve...')
        self.Bind(wx.EVT_MENU,
                  partial(self.create_plot, formats.two_dimensional),
                  self.two_dimensional_menu)

        menu.AppendSeparator()

        menu.Append(wx.ID_ANY, ' 3D:').Enable(False)
        self.colormapped_menu = menu.Append(wx.ID_ANY, '&Colormapped...')
        self.Bind(wx.EVT_MENU,
                  partial(self.create_plot, formats.colormapped),
                  self.colormapped_menu)
        self.surface_menu = menu.Append(wx.ID_ANY, '&Surface...')
        self.Bind(wx.EVT_MENU,
                  partial(self.create_plot, formats.surface),
                  self.surface_menu)

        menu.AppendSeparator()

        menu.Append(wx.ID_ANY, ' List:').Enable(False)
        self.waveforms_menu = menu.Append(wx.ID_ANY, '&Waveforms...')
        self.Bind(wx.EVT_MENU,
                  partial(self.create_plot, formats.waveforms, type='list'),
                  self.waveforms_menu)

        ## Math.
        menu = wx.Menu()
        menuBar.Append(menu, '&Math')
        item = menu.Append(wx.ID_ANY, '&Derivative...')
        self.Bind(wx.EVT_MENU, self.OnMenuMathDerivative, item)

        ## Help.
        menu = wx.Menu()
        menuBar.Append(menu, '&Help')

        ### About.
        item = menu.Append(wx.ID_ABOUT, '&About...')
        self.Bind(wx.EVT_MENU, self.OnMenuHelpAbout, item)

        self.csv_frame.SetMenuBar(menuBar)
        self.update_plot_menus(False)

        # Display.
        self.csv_frame.Show()
        self.csv_frame.SetSize((800, 600))
        self.SetTopWindow(self.csv_frame)
        self.csv_frame.Raise()

        return True

    def update_plot_menus(self, status):
        """
        If status is True, enable the plot menus corresponding to the
        available formats. Otherwise, disable all.
        """
        pairs = [
            (formats.two_dimensional, self.two_dimensional_menu),
            (formats.colormapped, self.colormapped_menu),
            (formats.surface, self.surface_menu),
            (formats.waveforms, self.waveforms_menu),
        ]

        for format, menu in pairs:
            if not status or format in available_formats:
                menu.Enable(status)

    def create_plot(self, format, evt=None, type='scalar'):
        """
        Open up a dialog to configure the selected plot format.

        Bound via functools.partial, so `format` is pre-bound and the wx
        event object lands in `evt`.
        """
        headings, rows, types = self.csv_frame.display_panel.GetValue(
            types=[type])
        available_formats[format](self.csv_frame, headings, rows).Show()

    def OnMenuFileOpen(self, evt=None):
        """Prompt for a CSV file and load it into the table."""
        try:
            result = load_csv(self.csv_frame)
        except IOError as e:
            MessageDialog(self.csv_frame, str(e), 'Could not load data').Show()
            return

        if result is None:
            return
        else:
            # Drop previously-loaded data and filters before loading.
            self.OnMenuFileClose()

        has_header, values, filename = result
        self.csv_frame.display_panel.from_csv_data(has_header, values)
        self.csv_frame.Title = '{0} - {1}'.format(filename, self.default_title)

        self.update_plot_menus(len(self.csv_frame.display_panel) > 0)
        self.filter_menu_item.Enable(True)

    def OnMenuFileClose(self, evt=None):
        """Clear the table, reset the title, and discard all filters."""
        self.csv_frame.display_panel.SetValue([], [])
        self.csv_frame.Title = self.default_title

        self.update_plot_menus(False)
        self.filter_menu_item.Enable(False)

        if self.filter_dialog is not None:
            self.filter_dialog.Close()

        self.filters = {}
        self.filter_columns = {}

    def OnMenuFileFilters(self, evt=None):
        """Show (or raise) the filter configuration dialog."""
        def close_callback(dlg):
            # Persist the dialog's state so it can be restored next time.
            self.filters = dlg.filters
            self.filter_columns = dlg.filter_columns

            self.filter_dialog = None

        if self.filter_dialog is None:
            self.filter_dialog = FilterListDialog(
                self.csv_frame, self.csv_frame.display_panel.table,
                close_callback, self.filters, self.filter_columns)
            self.filter_dialog.Show()

        self.filter_dialog.Raise()

    def OnMenuFileExit(self, evt=None):
        """Close the main frame, ending the application."""
        if self.csv_frame:
            self.csv_frame.Close()

    def OnMenuMathDerivative(self, evt=None, type='scalar'):
        """
        Open up a dialog to calculate a derivative and append the result
        columns to the table.

        BUGFIX: this handler is bound directly via self.Bind (no partial,
        unlike create_plot), so it previously received the wx event in a
        spurious `format` first parameter while `evt` stayed None; that
        parameter has been removed.
        """
        headings, rows, types = self.csv_frame.display_panel.GetValue(
            types=[type])
        dmath = DerivativeMathSetupDialog(self.csv_frame, headings, rows)
        dmath_open = dmath.ShowModal()
        # TODO(review): dmath_open (the ShowModal result) is ignored, so
        # the derivative columns are appended even if the dialog was
        # cancelled -- confirm whether this should be guarded.
        new_headings = concatenate([headings, [dmath.dheading]], 1)
        new_rows = concatenate([rows.astype(float), dmath.ddata], 1)
        self.csv_frame.display_panel.SetValue(new_headings, new_rows)

    def OnMenuHelpAbout(self, evt=None):
        """Show the about box."""
        info = wx.AboutDialogInfo()
        info.SetName('Data Explorer')
        info.SetDescription('An application for displaying data in tabular and graphical form.\n'
                '\n'
                'Using Spanish Acquisition version {0}.'.format(VERSION)
        )

        wx.AboutBox(info)
if __name__ == "__main__":
    # Launch the Data Explorer GUI and enter the wx event loop.
    DataExplorerApp().MainLoop()
| |
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_uvetest.py
#
# UVE and Alarm tests
#
import os
import sys
import threading
# Work around a Python 2 interpreter-shutdown crash: gevent leaves
# _DummyThread objects whose private __stop method blows up at exit;
# replace it with a harmless no-op (the 42 is arbitrary).
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
# Must run before the fixture/test imports below so their sockets and
# threads are gevent-cooperative.
monkey.patch_all()
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict, find_buildroot
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
import platform
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Root of the build tree; test daemons and libraries are located here.
builddir = find_buildroot(os.getcwd())
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
    """Check that the build library path is configured, then start a
    mock redis on a free port shared by all tests in this class.
    """
    ld_ok = 'build/lib' in os.getenv('LD_LIBRARY_PATH', '')
    dyld_ok = 'build/lib' in os.getenv('DYLD_LIBRARY_PATH', '')
    assert ld_ok or dyld_ok
    cls.redis_port = AnalyticsUveTest.get_free_port()
    mockredis.start_redis(cls.redis_port)
@classmethod
def tearDownClass(cls):
    """Shut down the mock redis started in setUpClass."""
    mockredis.stop_redis(cls.redis_port)
#@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
    """Start redis, vizd, opserver and qed (no cassandra), then check
    that the collector UVE can be accessed from opserver via redis.
    """
    logging.info("%%% test_00_nocassandra %%%")
    fixture = AnalyticsFixture(logging, builddir,
                               self.__class__.redis_port, 0)
    analytics = self.useFixture(fixture)
    assert analytics.verify_on_setup()
    return True
# end test_00_nocassandra
#@unittest.skip('Skipping VM UVE test')
def test_01_vm_uve(self):
    '''
    This test starts redis, vizd, opserver, qed, and a python generator
    that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
    Then it checks that the VM UVE (via redis) can be accessed from
    opserver, including add/delete/re-add cycles and XML-unsafe keys.
    '''
    logging.info("%%% test_01_vm_uve %%%")
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.get_collector()]
    generator_obj = self.useFixture(
        GeneratorFixture("contrail-vrouter-agent", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert generator_obj.verify_on_setup()
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    # Delete the VM UVE and verify that the deleted flag is set
    # in the UVE cache
    generator_obj.delete_vm_uve('abcd')
    assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
    # Add the VM UVE with the same vm_id and verify that the deleted flag
    # is cleared in the UVE cache
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    # Generate VM with vm_id containing XML control character
    # (exercises escaping of UVE keys on the XML path)
    generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
    assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
                                       msg_count=2)
    return True
# end test_01_vm_uve
#@unittest.skip('Skipping VM UVE test')
def test_02_vm_uve_with_password(self):
    '''
    This test starts redis, vizd, opserver, qed, and a python generator
    that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
    Then it checks that the VM UVE (via redis) can be accessed from
    opserver. Unlike test_01, redis is password-protected here.
    '''
    logging.info("%%% test_02_vm_uve_with_password %%%")
    # redis port -1: presumably tells the fixture to start its own redis
    # instance with the given password -- TODO confirm fixture semantics.
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         redis_password='contrail'))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.get_collector()]
    generator_obj = self.useFixture(
        GeneratorFixture("contrail-vrouter-agent", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert generator_obj.verify_on_setup()
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    return True
# end test_02_vm_uve_with_password
#@unittest.skip('verify redis-uve restart')
def test_03_redis_uve_restart(self):
    """Verify that an alarm raised before a redis-uve restart is present
    again after the restart (UVEs are resynced from the generators).
    """
    logging.info('%%% test_03_redis_uve_restart %%%')
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         start_kafka = True))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.get_collector()]
    alarm_gen1 = self.useFixture(
        GeneratorFixture('vrouter-agent', collectors, logging,
                         None, hostname=socket.gethostname()))
    alarm_gen1.verify_on_setup()
    # send vrouter UVE without build_info !!!
    # check for PartialSysinfo alarm
    alarm_gen1.send_vrouterinfo("myvrouter1")
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
    self.verify_uve_resync(vizd_obj)
    # Alarm should return after redis restart
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
    # should there be a return True here? (other tests in this class
    # return True, but unittest ignores the return value, so the
    # omission is harmless)
# end test_03_redis_uve_restart
#@unittest.skip('verify redis-uve restart')
def test_04_redis_uve_restart_with_password(self):
    """Verify UVE resync across a redis-uve restart when redis is
    password-protected.
    """
    # BUGFIX: this previously logged '%%% test_03_redis_uve_restart_with_
    # password %%%' (copy/paste from test_03), which made log grepping
    # misleading; log the actual test name.
    logging.info('%%% test_04_redis_uve_restart_with_password %%%')
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging,
                         builddir, -1, 0,
                         redis_password='contrail'))
    self.verify_uve_resync(vizd_obj)
    return True
# end test_04_redis_uve_restart
def verify_uve_resync(self, vizd_obj):
    """Stop and restart the redis-uve instance, verifying that the
    collector and opserver reconnect and the generator UVE list is
    resynced into redis afterwards.

    :param vizd_obj: an AnalyticsFixture already set up by the caller.
    """
    assert vizd_obj.verify_on_setup()
    assert vizd_obj.verify_collector_redis_uve_connection(
        vizd_obj.collectors[0])
    assert vizd_obj.verify_opserver_redis_uve_connection(
        vizd_obj.opserver)
    # verify redis-uve list
    host = socket.gethostname()
    gen_list = [host+':Analytics:contrail-collector:0',
                host+':Analytics:contrail-query-engine:0',
                host+':Analytics:contrail-analytics-api:0']
    assert vizd_obj.verify_generator_uve_list(gen_list)
    # stop redis-uve
    vizd_obj.redis_uves[0].stop()
    assert vizd_obj.verify_collector_redis_uve_connection(
        vizd_obj.collectors[0], False)
    assert vizd_obj.verify_opserver_redis_uve_connection(
        vizd_obj.opserver, False)
    # start redis-uve and verify that contrail-collector and Opserver are
    # connected to the redis-uve
    vizd_obj.redis_uves[0].start()
    assert vizd_obj.verify_collector_redis_uve_connection(
        vizd_obj.collectors[0])
    assert vizd_obj.verify_opserver_redis_uve_connection(
        vizd_obj.opserver)
    # verify that UVEs are resynced with redis-uve
    assert vizd_obj.verify_generator_uve_list(gen_list)
#@unittest.skip('Skipping contrail-collector HA test')
def test_05_collector_ha(self):
    """Exercise collector high availability: generators fail over
    between primary and secondary collectors as collectors are stopped
    and restarted, and the per-collector generator lists (and the
    backing redis state) track the failovers.
    """
    logging.info('%%% test_05_collector_ha %%%')
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         collector_ha_test=True))
    assert vizd_obj.verify_on_setup()
    # OpServer, AlarmGen and QE are started with collectors[0] as
    # primary and collectors[1] as secondary
    exp_genlist = ['contrail-collector', 'contrail-analytics-api',
                   'contrail-query-engine']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                          exp_genlist)
    # start the contrail-vrouter-agent with collectors[1] as primary and
    # collectors[0] as secondary
    collectors = [vizd_obj.collectors[1].get_addr(),
                  vizd_obj.collectors[0].get_addr()]
    vr_agent = self.useFixture(
        GeneratorFixture("contrail-vrouter-agent", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert vr_agent.verify_on_setup()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                          exp_genlist)
    # stop collectors[0] and verify that OpServer, AlarmGen and QE switch
    # from primary to secondary collector
    vizd_obj.collectors[0].stop()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                   'contrail-analytics-api',
                   'contrail-query-engine']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                          exp_genlist)
    # start collectors[0]
    vizd_obj.collectors[0].start()
    exp_genlist = ['contrail-collector']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                          exp_genlist)
    # verify that the old UVEs are flushed from redis when collector restarts
    exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
    assert vizd_obj.verify_generator_list_in_redis(\
        vizd_obj.collectors[0].get_redis_uve(),
        exp_genlist)
    # stop collectors[1] and verify that OpServer, AlarmGen and QE switch
    # from secondary to primary and contrail-vrouter-agent from primary to
    # secondary
    vizd_obj.collectors[1].stop()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                   'contrail-analytics-api',
                   'contrail-query-engine']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                          exp_genlist)
    # verify the generator list in redis
    exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                   vr_agent.get_generator_id(),
                   vizd_obj.opserver.get_generator_id(),
                   vizd_obj.query_engine.get_generator_id()]
    assert vizd_obj.verify_generator_list_in_redis(\
        vizd_obj.collectors[0].get_redis_uve(),
        exp_genlist)
    # stop QE
    vizd_obj.query_engine.stop()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                   'contrail-analytics-api']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                          exp_genlist)
    # verify the generator list in redis
    exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                   vizd_obj.opserver.get_generator_id(),
                   vr_agent.get_generator_id()]
    assert vizd_obj.verify_generator_list_in_redis(\
        vizd_obj.collectors[0].get_redis_uve(),
        exp_genlist)
    # start a python generator and QE with collectors[1] as the primary and
    # collectors[0] as the secondary. On generator startup, verify
    # that they connect to the secondary collector, if the
    # connection to the primary fails
    vr2_collectors = [vizd_obj.collectors[1].get_addr(),
                      vizd_obj.collectors[0].get_addr()]
    # NOTE(review): vr2_collectors is never used -- the fixture below is
    # passed `collectors`, which happens to hold the same addresses;
    # confirm which list was intended.
    vr2_agent = self.useFixture(
        GeneratorFixture("contrail-snmp-collector", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert vr2_agent.verify_on_setup()
    vizd_obj.query_engine.set_primary_collector(
        vizd_obj.collectors[1].get_addr())
    vizd_obj.query_engine.set_secondary_collector(
        vizd_obj.collectors[0].get_addr())
    vizd_obj.query_engine.start()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                   'contrail-analytics-api', 'contrail-snmp-collector',
                   'contrail-query-engine']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                          exp_genlist)
    # stop the collectors[0] - both collectors[0] and collectors[1] are down
    # send the VM UVE and verify that the VM UVE is synced after connection
    # to the collector
    vizd_obj.collectors[0].stop()
    # Make sure the connection to the collector is teared down before
    # sending the VM UVE
    # NOTE(review): unbounded busy-wait; relies on the harness timeout
    # if the connection never drops.
    while True:
        if vr_agent.verify_on_setup() is False:
            break
    vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
                         num_vm_ifs=5, msg_count=5)
    vizd_obj.collectors[1].start()
    exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                   'contrail-analytics-api', 'contrail-snmp-collector',
                   'contrail-query-engine']
    assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                          exp_genlist)
    assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
                                  num_vm_ifs=5, msg_count=5)
# end test_05_collector_ha
#@unittest.skip('Skipping AlarmGen basic test')
def test_06_alarmgen_basic(self):
    '''
    This test starts the analytics processes.
    It enables partition 0 on alarmgen, and confirms
    that it got enabled. It then raises and clears the
    partial-sysinfo-compute alarm through several paths (build_info
    update, UVE delete, generator disconnect) and verifies alarm state
    across giving up and re-acquiring alarmgen partitions.
    '''
    logging.info("%%% test_06_alarmgen_basic %%%")
    if AnalyticsUveTest._check_skip_kafka() is True:
        return True
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
                         start_kafka = True))
    assert vizd_obj.verify_on_setup()
    assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
        "ObjectCollectorInfo:" + socket.gethostname(), "process-status"))
    # setup generator for sending Vrouter build_info
    collector = vizd_obj.collectors[0].get_addr()
    alarm_gen1 = self.useFixture(
        GeneratorFixture('vrouter-agent', [collector], logging,
                         None, hostname=socket.gethostname()))
    alarm_gen1.verify_on_setup()
    # send vrouter UVE without build_info !!!
    # check for PartialSysinfo alarm
    alarm_gen1.send_vrouterinfo("myvrouter1")
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute",
        rules=[{"and_list": [{
            "condition": {
                "operation": "==",
                "operand1": "ObjectVRouter.build_info",
                "operand2": {
                    "json_value": "null"
                }
            },
            "match": [{"json_operand1_value": "null"}]
        }]}]
        ))
    # Now try to clear the alarm by sending build_info
    alarm_gen1.send_vrouterinfo("myvrouter1", b_info = True)
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
    # send vrouter UVE without build_info !!!
    # check for PartialSysinfo alarm
    alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
    alarm_gen1.send_vrouterinfo("myvrouter1")
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
    # Now try to clear the alarm by deleting the UVE
    alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
    # Second generator instance on the same host (inst = "1").
    alarm_gen2 = self.useFixture(
        GeneratorFixture('vrouter-agent', [collector], logging,
                         None, hostname=socket.gethostname(), inst = "1"))
    alarm_gen2.verify_on_setup()
    # send vrouter UVE without build_info !!!
    # check for PartialSysinfo alarm
    alarm_gen2.send_vrouterinfo("myvrouter2")
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter2", "partial-sysinfo-compute"))
    # Now try to clear the alarm by disconnecting the generator
    # (reaches into sandesh internals to force the connection down)
    alarm_gen2._sandesh_instance._client._connection.set_admin_state(\
        down=True)
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter2", "partial-sysinfo-compute", is_set = False))
    # send vrouter UVE of myvrouter without build_info again !!!
    # check for PartialSysinfo alarm
    alarm_gen1.send_vrouterinfo("myvrouter1")
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
    # Verify that we can give up partition ownership
    assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
    assert(vizd_obj.verify_alarmgen_partition(0,'false'))
    # Give up the other partitions
    assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
    assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
    assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
    # Confirm that alarms are all gone
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        None, None))
    # Get the partitions again
    assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
    assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
    assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
    assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
    assert(vizd_obj.verify_alarmgen_partition(0,'true'))
    # The PartialSysinfo alarm on myvrouter should return
    assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
        "ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
    return True
# end test_06_alarmgen_basic
#@unittest.skip('Skipping Alarm test')
def test_07_alarm(self):
    '''
    This test starts redis, collectors, analytics-api and
    python generators that simulate alarm generators. This
    test sends alarms from alarm generators and verifies the
    retrieval of alarms from analytics-api, including alarm
    deletion and generator disconnect/reconnect resync.
    '''
    logging.info('%%% test_07_alarm %%%')
    if AnalyticsUveTest._check_skip_kafka() is True:
        return True
    # collector_ha_test flag is set to True, because we wanna test
    # retrieval of alarms across multiple redis servers.
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         collector_ha_test=True,
                         start_kafka = True))
    assert vizd_obj.verify_on_setup()
    # create alarm-generator and attach it to the first collector.
    collectors = [vizd_obj.collectors[0].get_addr(),
                  vizd_obj.collectors[1].get_addr()]
    alarm_gen1 = self.useFixture(
        GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
                         None, hostname=socket.gethostname()+'_1'))
    alarm_gen1.verify_on_setup()
    # send process state alarm for analytics-node
    alarms = alarm_gen1.create_process_state_alarm(
        'contrail-query-engine')
    alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                          COLLECTOR_INFO_TABLE)
    analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
    # send process state alarm for control-node (the key deliberately
    # contains XML-unsafe characters)
    alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
    alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
                          BGP_ROUTER_TABLE)
    control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
    # create another alarm-generator and attach it to the second collector.
    alarm_gen2 = self.useFixture(
        GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
                         None, hostname=socket.gethostname()+'_2'))
    alarm_gen2.verify_on_setup()
    # send process state alarm for analytics-node
    alarms = alarm_gen2.create_process_state_alarm(
        'contrail-topology')
    alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
                          COLLECTOR_INFO_TABLE)
    # Both generators' alarms should be visible through analytics-api.
    keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
    assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
                                              expected_alarms=keys))
    assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
        alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
    assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
        alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
    keys = ['<&'+socket.gethostname()+'_1>']
    assert(vizd_obj.verify_alarm_list_include(control_tbl, expected_alarms=keys))
    assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
        alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
    # delete analytics-node alarm generated by alarm_gen2
    alarm_gen2.delete_alarm(socket.gethostname()+'_2',
                            COLLECTOR_INFO_TABLE)
    # verify analytics-node alarms
    keys = [socket.gethostname()+'_1']
    assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
                                              expected_alarms=keys))
    ukeys = [socket.gethostname()+'_2']
    assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
                                              unexpected_alms=ukeys))
    assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
        alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
    assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
    # Disconnect alarm_gen1 from Collector and verify that all
    # alarms generated by alarm_gen1 is removed by the Collector.
    alarm_gen1.disconnect_from_collector()
    ukeys = [socket.gethostname()+'_1']
    assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
                                              unexpected_alms=ukeys))
    assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
    ukeys = ['<&'+socket.gethostname()+'_1']
    assert(vizd_obj.verify_alarm_list_exclude(control_tbl,
                                              unexpected_alms=ukeys))
    assert(vizd_obj.verify_alarm(control_tbl, ukeys[0], {}))
    # update analytics-node alarm in disconnect state
    alarms = alarm_gen1.create_process_state_alarm(
        'contrail-snmp-collector')
    alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
                          COLLECTOR_INFO_TABLE)
    # Connect alarm_gen1 to Collector and verify that all
    # alarms generated by alarm_gen1 is synced with Collector.
    alarm_gen1.connect_to_collector()
    keys = [socket.gethostname()+'_1']
    assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
                                              expected_alarms=keys))
    assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
        alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
    keys = ['<&'+socket.gethostname()+'_1>']
    assert(vizd_obj.verify_alarm_list_include(control_tbl,
                                              expected_alarms=keys))
    assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
        alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# end test_07_alarm
#@unittest.skip('Skipping UVE/Alarm Filter test')
def test_08_uve_alarm_filter(self):
    '''
    This test verifies the filter options kfilt, sfilt, mfilt and cfilt
    in the UVE/Alarm GET and POST methods.

    Setup: two collectors, four generators (contrail-api on collector 0,
    contrail-vrouter-agent on collector 1, and two contrail-alarm-gen
    instances, one per collector).  Virtual-network UVEs and alarms are
    pushed, then each entry of filt_test below is replayed against the
    UVE list/GET/POST (and, when 'get_alarms' is present, the alarms
    GET) APIs and the responses are compared with the expected values.
    '''
    logging.info('%%% test_08_uve_alarm_filter %%%')
    # Alarms require Kafka; skip on platforms where Kafka is unsupported.
    if AnalyticsUveTest._check_skip_kafka() is True:
        return True
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         collector_ha_test=True, start_kafka = True))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.collectors[0].get_addr(),
                  vizd_obj.collectors[1].get_addr()]
    api_server_name = socket.gethostname()+'_1'
    api_server = self.useFixture(
        GeneratorFixture('contrail-api', [collectors[0]], logging,
                         None, node_type='Config',
                         hostname=api_server_name))
    vr_agent_name = socket.gethostname()+'_2'
    vr_agent = self.useFixture(
        GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
                         logging, None, node_type='Compute',
                         hostname=vr_agent_name))
    alarm_gen1_name = socket.gethostname()+'_1'
    alarm_gen1 = self.useFixture(
        GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
                         None, node_type='Analytics',
                         hostname=alarm_gen1_name))
    alarm_gen2_name = socket.gethostname()+'_3'
    alarm_gen2 = self.useFixture(
        GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
                         None, node_type='Analytics',
                         hostname=alarm_gen2_name))
    api_server.verify_on_setup()
    vr_agent.verify_on_setup()
    alarm_gen1.verify_on_setup()
    alarm_gen2.verify_on_setup()
    # The last VN name deliberately contains '&' to exercise URL
    # escaping in the filter paths.
    vn_list = ['default-domain:project1:vn1',
               'default-domain:project1:vn2',
               'default-domain:project2:vn1',
               'default-domain:project2:vn1&']
    # generate UVEs for the filter test
    api_server.send_vn_config_uve(name=vn_list[0],
                                  partial_conn_nw=[vn_list[1]],
                                  num_acl_rules=2)
    api_server.send_vn_config_uve(name=vn_list[1],
                                  num_acl_rules=3)
    vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
                               ipkts=2, ibytes=1024)
    vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
    vr_agent.send_vn_agent_uve(name=vn_list[3], ipkts=8, ibytes=256)
    # generate Alarms for the filter test
    alarms = alarm_gen1.create_alarm('InPktsThreshold')
    alarms += alarm_gen1.create_alarm('InBytesThreshold', ack=True)
    alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
    alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
    alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
    alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
    alarm_gen2.send_alarm(vn_list[3], alarms, VN_TABLE)
    # Each entry below is one filter combination: the filter keys
    # (kfilt/sfilt/mfilt/cfilt/ackfilt) plus the expected responses
    # ('uve_list_get', 'uve_get_post' and optionally 'get_alarms').
    filt_test = [
        # no filter
        {
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'get_alarms': {
                'virtual-network': [
                    { 'name' : 'default-domain:project1:vn2',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'InPktsThreshold',
                              },
                              {
                                  'type': 'InBytesThreshold',
                                  'ack': True
                              }
                          ]
                      } }
                    },
                    { 'name' : 'default-domain:project2:vn1',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                    { 'name' : 'default-domain:project2:vn1&',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                ]
            },
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        # kfilt
        {
            'kfilt': ['*'],
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'kfilt': ['default-domain:project1:*',
                      'default-domain:project2:*'],
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'kfilt': ['default-domain:project1:vn1',
                      'default-domain:project2:*'],
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'kfilt': [
                'default-domain:project2:*',
                'invalid-vn:*'
            ],
            'get_alarms': {
                'virtual-network': [
                    { 'name' : 'default-domain:project2:vn1',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                    { 'name' : 'default-domain:project2:vn1&',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                ]
            },
            'uve_list_get': [
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'kfilt': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1&',
                'invalid-vn'
            ],
            'uve_list_get': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'kfilt': ['invalid-vn'],
            'uve_list_get': [],
            'uve_get_post': {'value': []},
        },
        # sfilt
        {
            'sfilt': socket.gethostname()+'_1',
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'sfilt': socket.gethostname()+'_3',
            'uve_list_get': [
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'sfilt': 'invalid_source',
            'uve_list_get': [],
            'uve_get_post': {'value': []},
        },
        # mfilt
        {
            'mfilt': 'Config:contrail-api:0',
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            }
                        }
                    }
                ]
            },
        },
        {
            'mfilt': 'Analytics:contrail-alarm-gen:0',
            'uve_list_get': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'mfilt': 'Analytics:contrail-invalid:0',
            'uve_list_get': [],
            'uve_get_post': {'value': []},
        },
        # cfilt
        {
            'cfilt': ['UveVirtualNetworkAgent'],
            'uve_list_get': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            }
                        }
                    }
                ]
            },
        },
        {
            'cfilt': [
                'UveVirtualNetworkAgent:total_acl_rules',
                'UveVirtualNetworkConfig:partially_connected_networks'
            ],
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'total_acl_rules': 3
                            }
                        }
                    }
                ]
            },
        },
        {
            'cfilt': [
                'UveVirtualNetworkConfig:invalid',
                'UveVirtualNetworkAgent:in_tpkts',
                'UVEAlarms:alarms'
            ],
            'uve_list_get': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        {
            'cfilt': [
                'UveVirtualNetworkAgent:invalid',
                'UVEAlarms:invalid_alarms',
                'invalid'
            ],
            'uve_list_get': [],
            'uve_get_post': {'value': []},
        },
        # ackfilt
        {
            'ackfilt': True,
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                        }
                    }
                ]
            },
        },
        {
            'ackfilt': False,
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'get_alarms': {
                'virtual-network': [
                    { 'name' : 'default-domain:project1:vn2',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'InPktsThreshold',
                              },
                          ]
                      } }
                    },
                    { 'name' : 'default-domain:project2:vn1',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                    { 'name' : 'default-domain:project2:vn1&',
                      'value' : { 'UVEAlarms': {
                          'alarms': [
                              {
                                  'type': 'ConfigNotPresent',
                                  'ack': False
                              }
                          ]
                      } }
                    },
                ]
            },
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            },
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            },
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        # kfilt + sfilt
        {
            'kfilt': [
                'default-domain:project1:*',
                'default-domain:project2:vn1',
                'default-domain:invalid'
            ],
            'sfilt': socket.gethostname()+'_2',
            'uve_list_get': [
                'default-domain:project1:vn2',
                'default-domain:project2:vn1'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 2,
                                'in_bytes': 1024,
                                'total_acl_rules': 3
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            }
                        }
                    }
                ]
            },
        },
        # kfilt + sfilt + ackfilt
        {
            'kfilt': [
                'default-domain:project1:vn1',
                'default-domain:project2:*',
                'default-domain:invalid'
            ],
            'sfilt': socket.gethostname()+'_2',
            'ackfilt': True,
            'uve_list_get': [
                'default-domain:project2:vn1',
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 4,
                                'in_bytes': 128
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project2:vn1&',
                        'value': {
                            'UveVirtualNetworkAgent': {
                                'in_tpkts': 8,
                                'in_bytes': 256
                            }
                        }
                    }
                ]
            },
        },
        # kfilt + sfilt + cfilt
        {
            'kfilt': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:vn1'
            ],
            'sfilt': socket.gethostname()+'_1',
            'cfilt': [
                'UveVirtualNetworkAgent',
                'UVEAlarms',
                'UveVirtualNetworkConfig:Invalid'
            ],
            'uve_list_get': [
                'default-domain:project1:vn2'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'InPktsThreshold',
                                    },
                                    {
                                        'type': 'InBytesThreshold',
                                        'ack': True
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        # kfilt + mfilt + cfilt
        {
            'kfilt': ['*'],
            'mfilt': 'Config:contrail-api:0',
            'cfilt': [
                'UveVirtualNetworkAgent',
                'UVEAlarms:alarms'
            ],
            'uve_list_get': [],
            'uve_get_post': {'value': []},
        },
        # kfilt + sfilt + mfilt + cfilt
        {
            'kfilt': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2',
                'default-domain:project2:*'
            ],
            'sfilt': socket.gethostname()+'_1',
            'mfilt': 'Config:contrail-api:0',
            'cfilt': [
                'UveVirtualNetworkConfig:partially_connected_networks',
                'UveVirtualNetworkConfig:total_acl_rules',
                'UVEAlarms'
            ],
            'uve_list_get': [
                'default-domain:project1:vn1',
                'default-domain:project1:vn2'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project1:vn1',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'partially_connected_networks': [
                                    'default-domain:project1:vn2'
                                ],
                                'total_acl_rules': 2
                            }
                        }
                    },
                    {
                        'name': 'default-domain:project1:vn2',
                        'value': {
                            'UveVirtualNetworkConfig': {
                                'total_acl_rules': 3
                            },
                        }
                    }
                ]
            },
        },
        {
            'kfilt': [
                'default-domain:project1:*',
                'default-domain:project2:vn1',
                'default-domain:project2:invalid'
            ],
            'sfilt': socket.gethostname()+'_3',
            'mfilt': 'Analytics:contrail-alarm-gen:0',
            'cfilt': [
                'UveVirtualNetworkConfig',
                'UVEAlarms:alarms',
                'UveVirtualNetworkAgent'
            ],
            'uve_list_get': [
                'default-domain:project2:vn1'
            ],
            'uve_get_post': {
                'value': [
                    {
                        'name': 'default-domain:project2:vn1',
                        'value': {
                            'UVEAlarms': {
                                'alarms': [
                                    {
                                        'type': 'ConfigNotPresent',
                                        'ack': False
                                    }
                                ]
                            }
                        }
                    }
                ]
            },
        },
        # kfilt + sfilt + mfilt + cfilt + ackfilt
        {
            'kfilt': [
                'default-domain:project1:*',
                'default-domain:project2:vn1&',
                'default-domain:project2:invalid'
            ],
            'sfilt': socket.gethostname()+'_3',
            'mfilt': 'Analytics:contrail-alarm-gen:0',
            'cfilt': [
                'UveVirtualNetworkConfig',
                'UVEAlarms:alarms',
                'UveVirtualNetworkAgent'
            ],
            'ackfilt': True,
            'uve_list_get': [
                'default-domain:project2:vn1&'
            ],
            'uve_get_post': {'value': []},
        }
    ]
    vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
    # Replay every filter combination against the three query paths
    # (UVE list GET, multi-UVE GET, UVE POST) and, when expected alarm
    # output is supplied, against the alarms GET as well.
    for i in range(len(filt_test)):
        filters = dict(kfilt=filt_test[i].get('kfilt'),
                       sfilt=filt_test[i].get('sfilt'),
                       mfilt=filt_test[i].get('mfilt'),
                       cfilt=filt_test[i].get('cfilt'),
                       ackfilt=filt_test[i].get('ackfilt'))
        assert(vizd_obj.verify_uve_list(vn_table,
            filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
        assert(vizd_obj.verify_multi_uve_get(vn_table,
            filts=filters, exp_uves=filt_test[i]['uve_get_post']))
        assert(vizd_obj.verify_uve_post(vn_table,
            filts=filters, exp_uves=filt_test[i]['uve_get_post']))
        if 'get_alarms' in filt_test[i]:
            filters['tablefilt'] = 'virtual-network'
            assert(vizd_obj.verify_get_alarms(vn_table,
                filts=filters, exp_uves=filt_test[i]['get_alarms']))
# end test_08_uve_alarm_filter
@staticmethod
def get_free_port():
    '''
    Ask the OS for an unused TCP port by binding to port 0, then
    release the socket and return the port number that was assigned.
    '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(("", 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
@staticmethod
def _check_skip_kafka():
    '''
    Return True when the Kafka-dependent tests should be skipped:
    Kafka is unsupported on Ubuntu 12.x and CentOS 6.x.
    NOTE(review): platform.linux_distribution() was removed in Python
    3.8; this helper assumes the Python 2 stdlib.
    '''
    distro, version, _extra = platform.linux_distribution()
    distro = distro.lower()
    if distro == 'ubuntu' and version.startswith('12.'):
        return True
    if distro == 'centos' and version.startswith('6.'):
        return True
    return False
def _term_handler(*_):
    # SIGINT handler installed below: converts the signal into an
    # IntSignal exception (defined elsewhere in this file) so the
    # test runner can unwind and clean up fixtures.
    raise IntSignal()
if __name__ == '__main__':
    # Route Ctrl-C through gevent so blocked greenlets are interrupted
    # too, then hand control to unittest.
    gevent.signal(signal.SIGINT,_term_handler)
    unittest.main(catchbreak=True)
| |
#!/usr/bin/env python2
'''
This script collects PDBs corresponding to certain TCIDs (of any level) and assigns TMSs based on STRIDE and PDBTM (and a pinch of geometry)
'''
from __future__ import print_function, division
import xml.etree.ElementTree as ET
import os, re, subprocess, sys
import numpy as np
import yaml
import Bio.PDB
DEBUG = 1  # non-zero enables debug behavior
VERBOSITY = 0  # non-zero enables progress/status messages on stderr
MAX_POSS_HELIX = 50  # upper bound on candidate helices (unused in visible code -- confirm)
def status(line='done!'):
    '''
    Write *line* (default "done!") plus a newline to stderr.
    A dedicated helper improves greppability when debugging with
    print statements.
    '''
    sys.stderr.write('%s\n' % (line,))
def info(*lines):
    '''
    Write each argument to stderr on its own line, tagged [INFO].
    '''
    for message in lines:
        sys.stderr.write('[INFO]: %s\n' % (message,))
def warn(*lines):
    '''
    Write each argument to stderr on its own line, tagged [WARNING].
    '''
    for message in lines:
        sys.stderr.write('[WARNING]: %s\n' % (message,))
def error(*lines):
    '''
    Write each argument to stderr tagged [ERROR], then terminate.

    Exits with status 1 via sys.exit(): the original bare exit()
    terminated with status 0, so shell scripts and callers saw fatal
    errors as success (and exit() is the site.py REPL helper, not
    guaranteed outside interactive sessions).
    '''
    for l in lines: print('[ERROR]:', l, file=sys.stderr)
    sys.exit(1)
def progress(*lines):
    '''
    Like info(), but without a trailing newline so a later status()
    call can finish the line (e.g. "... done!").
    '''
    for message in lines:
        sys.stderr.write('[INFO]: %s ' % (message,))
def prompt(line, default=None):
    '''
    Ask a yes/no question on stdin and return True or False.

    default=None  -- re-ask until the reply starts with 'y' or 'n'.
    default=True  -- return False only if the reply starts with 'n'.
    default=False -- return True only if the reply starts with 'y'.

    Fix: works under both Python 2 (raw_input) and Python 3 (input);
    the original raised NameError on Python 3.  Any other value of
    `default` is treated as False instead of busy-looping forever.
    '''
    try:
        ask = raw_input  # Python 2
    except NameError:
        ask = input  # Python 3
    while 1:
        if default is None:
            x = ask('%s [y/n] ' % str(line)).lower().strip()
            if x.startswith('y'): return True
            elif x.startswith('n'): return False
        elif default is True:
            x = ask('%s [Y/n] ' % str(line)).lower().strip()
            if x.startswith('n'): return False
            else: return True
        else:
            x = ask('%s [y/N] ' % str(line)).lower().strip()
            if x.startswith('y'): return True
            else: return False
class Chain(object):
    '''
    Lightweight container for one PDB chain.

    Attributes:
        id:  chain identifier of the form "<pdbid>_<chainletter>"
        seq: one-letter amino-acid sequence (may contain 'X')
        tmh: list of 0-based (seq_beg, seq_end) intervals for
             transmembrane helix/coil regions (PDBTM types H and C)
        tms: list of 0-based (seq_beg, seq_end) intervals for
             beta-strand regions (PDBTM type B)
    '''
    def __init__(self, id):
        # NOTE(review): `id` shadows the builtin; kept for interface stability.
        self.id = id
        self.seq = ''
        self.tmh = []  # TM helix/coil intervals
        self.tms = []  # TM beta-strand intervals
class PDB:
    '''
    Container for one PDBTM entry: parses its XML file and keeps the
    transmembrane chains (sequence plus TM-region intervals).
    '''
    def __init__(self, fn):
        # fn: path to a PDBTM XML file
        self.chains = {}  # single-letter chain id -> Chain
        self.parse_xml(fn)
    def parse_xml(self, fn):
        '''
        parse pdbtm entries built by pdbtmtop/dbtool.py (which cleans up the anomalous XML in places)
        '''
        self.tree = ET.parse(fn)
        self.root = self.tree.getroot()
        self.id = self.root.attrib['ID']
        ### e.g. <BIOMATRIX> <APPLY_TO_CHAIN CHAINID="A" NEW_CHAINID="D">
        # chains produced by biomatrix operations are dropped below
        removeme = set()
        for x in self.root:
            if x.tag.endswith('CHAIN'):
                chainid = x.attrib['CHAINID']
                # chains with no TM region are irrelevant here
                if x.attrib['TYPE'] == 'non_tm': continue
                for y in x:
                    if y.tag.endswith('SEQ'):
                        # selenocysteine (U) -> unknown (X); strip whitespace/junk
                        seq = y.text.replace('U', 'X')
                        seq = re.sub('[^\nA-Z]', '', seq).strip()
                        # skip chains whose sequence is empty or all-unknown
                        if not seq.replace('X', '').strip(): break
                        self.chains[chainid] = Chain(self.id + '_' + chainid)
                        self.chains[chainid].seq = seq
                    elif y.tag.endswith('REGION'):
                        # NOTE(review): assumes the chain's SEQ element was
                        # already seen (KeyError otherwise).  PDBTM lists SEQ
                        # before REGION, but confirm for malformed entries.
                        # H/C = alpha helix / coil TM region, B = beta strand;
                        # seq_beg/seq_end are 1-based, stored 0-based.
                        if y.attrib['type'] == 'H' or y.attrib['type'] == 'C':
                            self.chains[chainid].tmh.append((int(y.attrib['seq_beg'])-1, int(y.attrib['seq_end'])-1))
                        if y.attrib['type'] == 'B':
                            self.chains[chainid].tms.append((int(y.attrib['seq_beg'])-1, int(y.attrib['seq_end'])-1))
            elif x.tag.endswith('BIOMATRIX'):
                # collect chains that are deleted or generated by the
                # biological-assembly matrices; they are removed below
                for y in x:
                    if y.tag.endswith('DELETE'): removeme.add(y.attrib['CHAINID'])
                    elif y.tag.endswith('MATRIX'):
                        for z in y:
                            if z.tag.endswith('APPLY_TO_CHAIN'):
                                removeme.add(z.attrib['NEW_CHAINID'])
        for c in list(removeme):
            try: self.chains.pop(c)
            except KeyError: continue
    def cat(self):
        '''
        Return the chains as FASTA-formatted text (">id\\nseq" records).
        '''
        out = ''
        for chain in self.chains:
            out += '>%s\n%s\n' % (self.chains[chain].id, self.chains[chain].seq)
        return out.strip()
class BLAST:
    '''
    container for blastp-related functions
    '''
    def __init__(self):
        # query id -> {target id: stats tuple} (see parse7 for the tuple layout)
        self.hits = {}
    # NOTE: kept for reference -- the actual blast is run in Protocol1.blast_pdbs
    #def blast(self, query): pass
    # #blastp -db tcdb -comp_based_stats no -outfmt 7 -max_target_seqs 3 < pdbtm.fa
    def parse7(self, results, minl=60, evalue=1e-5):
        '''
        parser for outfmt 7. May be deprecated in favor of Biopython's BLAST results parser

        results: tabular blastp output, as a string or a line iterable.
        minl:    minimum alignment length to keep a hit.
        evalue:  maximum e-value to keep a hit.

        Populates self.hits with tuples of
        (pct_identity, aln_len, mismatches, gapopens, qstart, qend,
         sstart, send, evalue, bitscore), then prunes each query to its
        single best hit by pct_identity * alignment length.
        '''
        if type(results) is str: f = iter(results.split('\n'))
        else: f = results
        blacklist = ''
        for l in f:
            if not l.strip(): continue
            elif l.strip().startswith('#'): continue
            else:
                sl = l.strip().split()
                # skip further hits of a query already matched at >=95% identity
                if sl[0] == blacklist: continue
                data = (float(sl[2]),) + tuple([int(x) for x in sl[3:10]]) + (float(sl[10]), float(sl[11]))
                if data[1] < minl: continue
                elif data[8] > evalue: continue
                # a near-identical hit makes the rest of this query's lines redundant
                if data[0] >= 95: blacklist = sl[0]
                try: self.hits[sl[0]][sl[1]] = data
                except KeyError: self.hits[sl[0]] = {sl[1]:data}
        # keep only the best target per query (identity * length product)
        for q in self.hits:
            if len(self.hits[q]) > 1:
                prods = sorted([(self.hits[q][t][0] * self.hits[q][t][1], t) for t in self.hits[q]])[::-1]
                self.hits[q] = {prods[0][1]:self.hits[q][prods[0][1]]}
    def by_target(self, namestart):
        '''
        searches for all queries matching an initial substring of the target (i.e. targets matching /^${namestart}/)
        '''
        out = {}
        for q in self.hits:
            for t in self.hits[q]:
                if t.startswith(namestart):
                    try: out[q][t] = self.hits[q][t]
                    except KeyError: out[q] = {t:self.hits[q][t]}
        return out
class Protocol1:
    '''
    Driver for the Protocol1-like pipeline: BLAST all PDBTM sequences
    against TCDB, download the matching PDB structures, assign TMSs via
    DERP (STRIDE + PDBTM), and emit loop-reduced PDB files.
    '''
    def __init__(self, pdbtmdir, outdir, force=False, offline=False):
        '''
        Set up the workspace.

        pdbtmdir: directory containing PDBTM XML files.
        outdir:   working/output directory (created if missing).
        force:    re-run blastp even if blastp.tbl already exists.
        offline:  skip blastp and wget; rely on cached files.
        '''
        self.pdbs = []
        self.outdir = outdir
        self.pdbtmdir = pdbtmdir
        self.force = force
        self.helices = {}  # "pdbid_chain" -> list of (start, end) TMS intervals
        if not os.path.isdir(outdir): os.mkdir(outdir)
        self.ntmss = {}  # "pdbid_chain" -> number of assigned TMSs
        self.offline = offline
        self.blast = BLAST()
        self.hits = []  # "pdbid_chain" ids with acceptable TCDB matches
        self.lengths = {}  # "pdbid_chain" -> sequence length
        self.fams = set()  # TCDB family prefixes queried so far
        # resume state from a previous run, if any
        try:
            with open('%s/deuterocol1.yaml' % self.outdir) as f:
                d1 = yaml.safe_load(f)
                self.lengths = d1['lengths']
                self.fams = set(d1['fams'])
        except IOError: pass
    def blast_pdbs(self):
        '''
        blasts all PDBs in PDBTM against TCDB
        TODO: optimize away identical sequences when possible, e.g. by collapsing them by BLASTing them against PDB to resolve to a single sequence and copying the results with modified query fields
        '''
        fastas = ''
        # decide whether blastp actually needs to run
        if self.offline: blasting = 0
        elif self.force: blasting = 1
        else:
            if os.path.isfile('%s/blastp.tbl' % self.outdir) and os.path.getsize('%s/blastp.tbl' % self.outdir):
                #blasting = prompt('[WARNING]: Found an existing blastp table. Overwrite?', default=False)
                blasting = 0
            else: blasting = 1
        if VERBOSITY: progress('Checking PDBTM database...')
        pdb = ''
        # load every PDBTM XML entry and record per-chain sequence lengths
        for basename in os.listdir(self.pdbtmdir):
            if basename.lower().endswith('xml'):
                self.pdbs.append(PDB(self.pdbtmdir + '/' + basename))
                seqs = self.pdbs[-1].cat().split('\n')
                for seg in seqs:
                    if seg.startswith('>'):
                        pdb = seg[1:]
                        self.lengths[pdb] = 0
                    else: self.lengths[pdb] += len(seg)
        if VERBOSITY: status()
        self.dump_inputs()
        if blasting:
            for pdb in self.pdbs:
                fastas += pdb.cat() + '\n'
            if VERBOSITY: progress('BLASTing %d sequences...' % len(self.pdbs))
            # pipe the FASTA set through blastp (tabular output, format 7)
            p = subprocess.Popen(['blastp', '-db', 'tcdb', '-comp_based_stats', 'no', '-outfmt', '7'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            out, err = p.communicate(input=fastas)
            with open('%s/blastp.tbl' % self.outdir, 'w') as f: f.write(out)
            if VERBOSITY: status()
        else:
            # reuse previously computed lengths when not blasting
            try:
                with open('%s/deuterocol1.yaml' % self.outdir) as f: self.lengths = yaml.safe_load(f)['lengths']
            except IOError: self.dump_inputs()
        with open('%s/blastp.tbl' % self.outdir) as f: self.blast.parse7(f)
    def get_queries(self, startswith):
        '''
        download the PDBs with decent correspondences to TCDB sequences
        TODO (low): skip using wget by figuring out why urllib2 misbehaves with HTTPS URLs on the Macs

        startswith: TCDB id prefix (any level); matching query chains are
        appended to self.hits and their structures fetched from RCSB.
        Entries flagged THEORETICAL MODEL are discarded.
        '''
        self.fams.add(startswith)
        self.dump_inputs()
        for q in self.blast.by_target(startswith):
            if q not in self.hits and not q.endswith('_'):
                self.hits.append(q)
        #if self.force: write = 1
        #elif os.path.isfile('%s/pdblist.wget' % self.outdir): write = 0
        #else: write = 1
        write = 1
        # unique 4-letter PDB ids, order-preserving
        pdbs = []
        for chain in self.hits:
            if chain[:4] not in pdbs: pdbs.append(chain[:4])
        with open('%s/pdblist.wget' % self.outdir, 'w') as f:
            for pdb in pdbs:
                f.write('https://files.rcsb.org/view/%s.pdb\n' % pdb[:4])
        # -nc: never re-download files already present in pdbs_raw
        if not self.offline: subprocess.check_output(['wget', '--no-check-certificate', '-nc', '-i', '%s/pdblist.wget' % self.outdir, '-P', '%s/pdbs_raw' % self.outdir])
        #if offline:
        #
        #   cache = os.listdir('%s/pdbs_raw' % self.outdir)
        #   missing = []
        #   for fn in cache:
        #       if fn.endswith('pdb') and not os.path.getsize('%s/pdbs_raw/%s' % (self.outdir, fn)): missing.append(fn)
        #   #if missing: raise IOError('Cannot run offline: Could not find %s' % missing)
        #   for pdb in pdbs:
        #       if not os.path.getsize('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb)): missing.append(pdb)
        #   #if missing: raise IOError('Cannot run offline: Could not find PDBs for %s' % missing)
        # drop theoretical models: they have no experimental coordinates
        removeme = []
        for pdb in pdbs:
            with open('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb)) as f:
                for l in f:
                    if 'THEORETICAL MODEL' in l:
                        removeme.append(pdb)
                        break
        removemefinal = []
        for pdb in removeme:
            for x in self.hits:
                if x.startswith(pdb):
                    removemefinal.append(x)
        for x in removemefinal:
            self.hits.remove(x)
        return self.hits
    def assign_helices(self):
        '''
        integrate STRIDE (assigns many small helices) and PDBTM (assigns correct but incomplete TMSs) definitions to get full TMSs
        '''
        if not os.path.isdir('%s/derp' % self.outdir): os.mkdir('%s/derp' % self.outdir)
        if VERBOSITY: progress('Computing helices...')
        removeme = []
        for pdb in self.hits:
            # NOTE(review): fn is computed but unused
            fn = '%s/pdbs_raw/%s.pdb' % (self.outdir, pdb[:4])
            try:
                d = DERP(pdb, self.outdir, self.pdbtmdir)
                self.helices[pdb] = d.get_helices()
                self.ntmss[pdb] = d.get_ntmss()
            # a failing stride run disqualifies the chain
            except subprocess.CalledProcessError: removeme.append(pdb)
        for x in removeme: self.hits.remove(x)
        if VERBOSITY: status()
        self.dump_inputs()
    def generate_loopless(self, extend=2):
        '''
        generate loopless PDBs (or loop-reduced PDBs, if extend is non-zero)

        For each hit chain, copy only the records belonging to that chain
        and only the ATOM/HETATM/TER lines whose residue number falls
        within a TMS interval (widened by `extend` residues on each side).
        '''
        for pdb in self.hits:
            out = ''
            chain = pdb[-1]
            with open('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb[:4])) as f:
                # each record type stores its chain id at a different column
                for l in f:
                    if l.startswith('DBREF'):
                        if l[11:13].strip() == chain: out += l
                    elif l.startswith('SEQADV'):
                        if l[15:17].strip() == chain: out += l
                    elif l.startswith('SEQRES'):
                        if l[10:12].strip() == chain: out += l
                    elif l.startswith('MODRES'):
                        if l[15:17].strip() == chain: out += l
                    elif l.startswith('HET '):
                        if l[11:13].strip() == chain: out += l
                    elif l.startswith('HELIX'):
                        if l[18:20].strip() == chain: out += l
                    elif l.startswith('SHEET'):
                        if l[12:14].strip() == chain: out += l
                    elif l.startswith('SSBOND'):
                        # SSBOND names two chains; keep the line if either matches
                        if l[14:16].strip() == chain: out += l
                        elif l[28:30].strip() == chain: out += l
                    elif l.startswith('SITE '):
                        if l[10:12].strip() == chain: out += l
                        elif l[21:23].strip() == chain: out += l
                    elif l.startswith('CISPEP'):
                        if l[14:16].strip() == chain: out += l
                    elif l.startswith('LINK '):
                        # LINK also names two chains
                        if l[20:22].strip() == chain: out += l
                        elif l[50:52].strip() == chain: out += l
                    elif l.startswith('ANISOU'):
                        # anisotropic B-factors are dropped entirely
                        continue
                    elif (l.startswith('ATOM  ') or l.startswith('HETATM') or l.startswith('TER   ')) and (l[20:22].strip() == chain):
                        # keep coordinates only inside an (extended) TMS interval
                        for h in self.helices[pdb]:
                            if (h[0] - extend) <= int(l[22:26].strip()) <= (h[1] + extend):
                                out += l
                                break
                    elif l[:6] not in ('DBREF ', 'SEQADV', 'SEQRES', 'HET   ', 'HELIX ', 'SHEET ', 'SSBOND', 'SITE  ', 'ATOM  ', 'HETATM', 'TER   ', 'CISPEP', 'ANISOU', 'LINK  ', 'MODRES'):
                        # any other record type is copied verbatim
                        out += l
            if not os.path.isdir('%s/pdbs_loopless' % self.outdir): os.mkdir('%s/pdbs_loopless' % self.outdir)
            with open('%s/pdbs_loopless/%s.pdb' % (self.outdir, pdb), 'w') as f: f.write(out)
    def dump_inputs(self):
        '''
        dump inputs to the deuterocol1 configuration file
        '''
        with open('%s/deuterocol1.yaml' % self.outdir, 'w') as f:
            yaml.safe_dump({'lengths':self.lengths, 'fams':list(self.fams), 'ntmss':self.ntmss}, f)
def parse_pdbtm(fn):
    '''
    Parse a PDBTM XML file and return a mapping
    {chain_id: [(pdb_beg, pdb_end), ...]} covering every alpha-helical
    or coil transmembrane region (region types 'H' and 'C').

    NOTE(review): this duplicates part of PDB.parse_xml; one of the two
    parsers should eventually be removed.
    '''
    root = ET.parse(fn).getroot()
    helices = {}
    for chain_node in root:
        if not chain_node.tag.endswith('CHAIN'):
            continue
        spans = []
        helices[chain_node.attrib['CHAINID']] = spans
        for region in chain_node:
            if not region.tag.endswith('REGION'):
                continue
            if region.attrib['type'] in ('H', 'C'):
                spans.append((int(region.attrib['pdb_beg']),
                              int(region.attrib['pdb_end'])))
    return helices
class DERP:
    '''
    Determine Egregious Rods in Proteins

    Integrates STRIDE secondary-structure assignments with PDBTM
    transmembrane annotations to derive TM helix ranges for one chain.
    '''
    def __init__(self, pdb_c, outdir, pdbtmdir):
        # pdb_c: PDB identifier plus chain suffix; the first four
        # characters are taken as the PDB ID proper — TODO confirm format.
        self.pdb_c = pdb_c
        self.pdb = pdb_c[:4]
        # Working directory holding the pdbs_raw/ and derp/ subdirectories.
        self.outdir = outdir
        # Directory containing the PDBTM XML files.
        self.pdbtmdir = pdbtmdir
    def get_tangent(self, c, interval):
        '''
        averages the Can - Can+1 - Can+2 normals to obtain an axis angle for the segment
        this works best with sufficiently long helices
        '''
        # NOTE(review): relies on self.structure, which is only assigned
        # inside get_helices(); calling this method first raises
        # AttributeError.
        coords = []
        for model in self.structure:
            for chain in model:
                if chain.id == c:
                    for residue in chain:
                        # Keep residues inside the requested [beg, end] range.
                        if interval[0] <= residue.id[1] <= interval[1]:
                            #print(dir(residue))
                            # First three atoms of each residue — presumably
                            # the N, CA, C backbone atoms; confirm ordering.
                            coords.append([atom.coord for atom in list(residue.get_iterator())[:3]])
        normal = np.zeros(3)
        for i in range(1, len(coords)-1):
            for j in range(3):
                # Accumulate cross products of consecutive inter-residue
                # vectors; the normalized sum approximates the helix axis.
                normal += np.cross(coords[i][j]-coords[i-1][j], coords[i+1][j]-coords[i][j])
        return normal/np.linalg.norm(normal)
    def get_ntmss(self):
        '''
        gets the number of TMSs assigned to a PDB chain from DERP output
        '''
        n = ''
        # NOTE(review): the assignment above is dead — immediately
        # overwritten by the counter below.
        n = 0
        with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) as f:
            for l in f:
                # One non-blank, non-comment line per TMS.
                if not l.strip(): continue
                elif l.lstrip().startswith('#'): continue
                else: n += 1
                #n = l.strip().split()[0]
        return n
    def get_helices(self, angle=45):
        '''
        attempts to get helix ranges from DERP output if possible and generates it if not
        '''
        # Fast path: reuse a cached, non-empty .derp file if present.
        if os.path.isfile('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) and os.path.getsize('%s/derp/%s.derp' % (self.outdir, self.pdb_c)):
            helices = []
            with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) as f:
                for l in f:
                    if not l.strip(): continue
                    else: helices.append([int(x) for x in l.split()[1:]])
            return helices
        # Otherwise make sure a STRIDE assignment exists for the whole PDB.
        elif not (os.path.isfile('%s/derp/%s.stride' % (self.outdir, self.pdb)) and os.path.getsize('%s/derp/%s.stride' % (self.outdir, self.pdb))):
            strideout = subprocess.check_output(['stride', '%s/pdbs_raw/%s.pdb' % (self.outdir, self.pdb)])
            # NOTE(review): on Python 3 check_output returns bytes, which
            # cannot be written to a text-mode file handle — confirm the
            # intended interpreter, or decode before writing.
            with open('%s/derp/%s.stride' % (self.outdir, self.pdb), 'w') as f: f.write(strideout)
        # Parse per-chain helix ranges out of the STRIDE output.
        stridehelices = {}
        parser = Bio.PDB.PDBParser()
        self.structure = parser.get_structure(self.pdb, '%s/pdbs_raw/%s.pdb' % (self.outdir, self.pdb))
        with open('%s/derp/%s.stride' % (self.outdir, self.pdb)) as f:
            for l in f:
                if l.startswith('LOC AlphaHelix'):
                    # Fixed-column STRIDE LOC record — column offsets below
                    # are assumed from the STRIDE format; verify against a
                    # sample file.
                    chain = l[27:29].strip()
                    start = int(l[21:27].strip())
                    end = int(l[38:45].strip())
                    try: stridehelices[chain].append((start,end))
                    except KeyError: stridehelices[chain] = [(start,end)]
        pdbtmhelices = parse_pdbtm('%s/%s.xml' % (self.pdbtmdir, self.pdb))
        truetmhelices = []
        # Grow each PDBTM segment with overlapping STRIDE helices whose
        # axes agree within `angle` degrees.
        for ph in pdbtmhelices[self.pdb_c[-1]]:
            phcandidate = ph
            try:
                for sh in stridehelices[self.pdb_c[-1]]:
                    if set(range(*sh)).intersection(set(range(*phcandidate))):
                        # Ignore implausibly long "helices".
                        if sh[1] - sh[0] > MAX_POSS_HELIX: pass
                        # Merge only if the two axis vectors are within
                        # `angle` degrees of each other.
                        elif np.dot(self.get_tangent(self.pdb_c[-1], phcandidate), self.get_tangent(self.pdb_c[-1], sh)) > np.cos(angle*np.pi/180):
                            phcandidate = (min(phcandidate[0], sh[0]), max(phcandidate[1], sh[1]))
            except KeyError: warn('Could not find chain %s of %s' % (self.pdb_c[-1], self.pdb))
            truetmhelices.append(phcandidate)
        #out = 'color red, i. '
        #for h in truetmhelices: out += '%s-%s+' % h
        #out = out[:-1]
        #out += '\ncolor yellow, i. '
        #for h in pdbtmhelices[self.pdb_c[-1]]: out += '%s-%s+' % h
        #out = out[:-1]
        #print(out)
        # Cache the merged helices so future runs hit the fast path above.
        with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c), 'w') as f:
            for i, h in enumerate(truetmhelices):
                f.write('%d\t%d\t%d\n' % ((i+1,)+h))
        return truetmhelices
def protocol1(fams, pdbtmdir='pdbtm', outdir='ubi_out', overwrite=False, extend=2):
    """Highest-level Deuterocol1 wrapper: runs the whole pipeline in one call.

    Returns a tuple (hits, relevant_hits) where relevant_hits maps each
    hit PDB to its BLAST hit record.
    """
    proto = Protocol1(pdbtmdir, outdir, force=overwrite)
    proto.blast_pdbs()
    for family in fams:
        proto.get_queries(family)
    proto.assign_helices()
    proto.generate_loopless(extend=extend)
    # Keep only the BLAST records that correspond to actual hits.
    relevant_hits = {pdb: proto.blast.hits[pdb] for pdb in proto.hits}
    return proto.hits, relevant_hits
if __name__ == '__main__':
    '''
    finally, the interface for those running this directly from the command line or a non-Python script
    '''
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', type=int, default=2, help='how many residues to extend TMSs by (for loopless cuts) {default:2}')
    parser.add_argument('-v', action='store_true', help='verbose output')
    parser.add_argument('-d', default='pdbtm', help='PDBTM database {default:./pdbtm}')
    parser.add_argument('-o', '--outdir', default='ubi_out', help='where to put everything')
    parser.add_argument('families', nargs='+', help='prefixes for family 1, e.g. 1.A.24.1. or 1.H.1. or 8.A.16')
    parser.add_argument('-F', '--force-overwrite', action='store_true', help='force overwrites/regenerations')
    args = parser.parse_args()
    # NOTE(review): VERBOSITY is presumably a module-level flag declared
    # near the top of the file — confirm it exists.
    if args.v: VERBOSITY = 1
    # NOTE(review): effectively unreachable — nargs='+' makes 'families'
    # a required positional, so argparse exits before this check can fail.
    if not args.families:
        print('[ERROR]: Family/ies must be specified!', file=sys.stderr)
        parser.print_usage()
        exit()
    protocol1(args.families, args.d, args.outdir, overwrite=args.force_overwrite, extend=args.l)
| |
import numpy as np
import unittest
from discretize import TensorMesh, CurvilinearMesh
from discretize.utils import ndgrid
class BasicCurvTests(unittest.TestCase):
    """Validates CurvilinearMesh geometry against an equivalent TensorMesh.

    The curvilinear meshes are built from the node grids of rectilinear
    tensor meshes, so areas, volumes, edge lengths, tangents, normals,
    and grid locations must all match the tensor-mesh values exactly.
    """

    def setUp(self):
        # Cell widths: x = [1, 1, 1], y = [1, 2], z = [1, 4].
        a = np.array([1, 1, 1])
        b = np.array([1, 2])
        c = np.array([1, 4])

        def gridIt(h):
            # Node coordinates from cell widths via cumulative sum from 0.
            return [np.cumsum(np.r_[0, x]) for x in h]

        X, Y = ndgrid(gridIt([a, b]), vector=False)
        self.TM2 = TensorMesh([a, b])
        self.Curv2 = CurvilinearMesh([X, Y])
        X, Y, Z = ndgrid(gridIt([a, b, c]), vector=False)
        self.TM3 = TensorMesh([a, b, c])
        self.Curv3 = CurvilinearMesh([X, Y, Z])

    def test_area_3D(self):
        # Expected face areas, presumably ordered Fx, then Fy, then Fz —
        # confirm against discretize's face ordering.
        test_area = np.array([
            1, 1, 1, 1, 2, 2, 2, 2, 4, 4, 4, 4, 8, 8, 8, 8,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4,
            1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
        ])
        self.assertTrue(np.all(self.Curv3.area == test_area))

    def test_vol_3D(self):
        # Cell volumes are products of the widths (1,1,1) x (1,2) x (1,4).
        test_vol = np.array([1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8])
        np.testing.assert_almost_equal(self.Curv3.vol, test_vol)
        self.assertTrue(True)  # Pass if you get past the assertion.

    def test_vol_2D(self):
        # Cell areas are products of the widths (1,1,1) x (1,2).
        test_vol = np.array([1, 1, 1, 2, 2, 2])
        t1 = np.all(self.Curv2.vol == test_vol)
        self.assertTrue(t1)

    def test_edge_3D(self):
        # Expected edge lengths, presumably ordered Ex, then Ey, then Ez —
        # confirm against discretize's edge ordering.
        test_edge = np.array([
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
        ])
        t1 = np.all(self.Curv3.edge == test_edge)
        self.assertTrue(t1)

    def test_edge_2D(self):
        test_edge = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2])
        t1 = np.all(self.Curv2.edge == test_edge)
        self.assertTrue(t1)

    def test_tangents(self):
        # On a rectilinear grid every edge tangent is a unit vector along
        # its own axis: 1 on the matching component, 0 elsewhere.
        T = self.Curv2.tangents
        self.assertTrue(np.all(self.Curv2.r(T, "E", "Ex", "V")[0] == np.ones(self.Curv2.nEx)))
        self.assertTrue(np.all(self.Curv2.r(T, "E", "Ex", "V")[1] == np.zeros(self.Curv2.nEx)))
        self.assertTrue(np.all(self.Curv2.r(T, "E", "Ey", "V")[0] == np.zeros(self.Curv2.nEy)))
        self.assertTrue(np.all(self.Curv2.r(T, "E", "Ey", "V")[1] == np.ones(self.Curv2.nEy)))
        T = self.Curv3.tangents
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ex", "V")[0] == np.ones(self.Curv3.nEx)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ex", "V")[1] == np.zeros(self.Curv3.nEx)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ex", "V")[2] == np.zeros(self.Curv3.nEx)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ey", "V")[0] == np.zeros(self.Curv3.nEy)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ey", "V")[1] == np.ones(self.Curv3.nEy)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ey", "V")[2] == np.zeros(self.Curv3.nEy)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ez", "V")[0] == np.zeros(self.Curv3.nEz)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ez", "V")[1] == np.zeros(self.Curv3.nEz)))
        self.assertTrue(np.all(self.Curv3.r(T, "E", "Ez", "V")[2] == np.ones(self.Curv3.nEz)))

    def test_normals(self):
        # On a rectilinear grid every face normal is a unit vector along
        # its own axis: 1 on the matching component, 0 elsewhere.
        N = self.Curv2.normals
        self.assertTrue(np.all(self.Curv2.r(N, "F", "Fx", "V")[0] == np.ones(self.Curv2.nFx)))
        self.assertTrue(np.all(self.Curv2.r(N, "F", "Fx", "V")[1] == np.zeros(self.Curv2.nFx)))
        self.assertTrue(np.all(self.Curv2.r(N, "F", "Fy", "V")[0] == np.zeros(self.Curv2.nFy)))
        self.assertTrue(np.all(self.Curv2.r(N, "F", "Fy", "V")[1] == np.ones(self.Curv2.nFy)))
        N = self.Curv3.normals
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fx", "V")[0] == np.ones(self.Curv3.nFx)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fx", "V")[1] == np.zeros(self.Curv3.nFx)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fx", "V")[2] == np.zeros(self.Curv3.nFx)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fy", "V")[0] == np.zeros(self.Curv3.nFy)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fy", "V")[1] == np.ones(self.Curv3.nFy)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fy", "V")[2] == np.zeros(self.Curv3.nFy)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fz", "V")[0] == np.zeros(self.Curv3.nFz)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fz", "V")[1] == np.zeros(self.Curv3.nFz)))
        self.assertTrue(np.all(self.Curv3.r(N, "F", "Fz", "V")[2] == np.ones(self.Curv3.nFz)))

    def test_grid(self):
        # Node/face/edge/cell-center grids must coincide with the tensor
        # meshes the curvilinear meshes were constructed from.
        self.assertTrue(np.all(self.Curv2.gridCC == self.TM2.gridCC))
        self.assertTrue(np.all(self.Curv2.gridN == self.TM2.gridN))
        self.assertTrue(np.all(self.Curv2.gridFx == self.TM2.gridFx))
        self.assertTrue(np.all(self.Curv2.gridFy == self.TM2.gridFy))
        self.assertTrue(np.all(self.Curv2.gridEx == self.TM2.gridEx))
        self.assertTrue(np.all(self.Curv2.gridEy == self.TM2.gridEy))
        self.assertTrue(np.all(self.Curv3.gridCC == self.TM3.gridCC))
        self.assertTrue(np.all(self.Curv3.gridN == self.TM3.gridN))
        self.assertTrue(np.all(self.Curv3.gridFx == self.TM3.gridFx))
        self.assertTrue(np.all(self.Curv3.gridFy == self.TM3.gridFy))
        self.assertTrue(np.all(self.Curv3.gridFz == self.TM3.gridFz))
        self.assertTrue(np.all(self.Curv3.gridEx == self.TM3.gridEx))
        self.assertTrue(np.all(self.Curv3.gridEy == self.TM3.gridEy))
        self.assertTrue(np.all(self.Curv3.gridEz == self.TM3.gridEz))
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| |
#!/usr/bin/env python
"""
Run lldb to disassemble all the available functions for an executable image.
"""
from __future__ import print_function
import os
import re
import sys
from optparse import OptionParser
def setupSysPath():
    """
    Add LLDB.framework/Resources/Python and the test dir to the sys.path.

    Searches the Xcode 3 and Xcode 4 build products directories for each
    configuration (Debug, Release, BuildAndIntegration, in that order)
    and inserts the first directory containing lldb.py into sys.path.
    Exits with -1 if the script is misplaced or lldb.py cannot be found.
    """
    # Get the directory containing the current script.
    scriptPath = sys.path[0]
    if not scriptPath.endswith(os.path.join('utils', 'test')):
        print("This script expects to reside in lldb's utils/test directory.")
        sys.exit(-1)

    # This is our base name component.
    base = os.path.abspath(os.path.join(scriptPath, os.pardir, os.pardir))

    # This is for the goodies in the test directory under base.
    sys.path.append(os.path.join(base, 'test'))

    # These are for xcode build directories.
    xcode3_build_dir = ['build']
    xcode4_build_dir = ['build', 'lldb', 'Build', 'Products']
    python_resource_dir = ['LLDB.framework', 'Resources', 'Python']

    # Candidate lldb.py locations in priority order: for each build
    # configuration, try the Xcode 3 layout first, then Xcode 4.
    # (This replaces six copy-pasted os.path.join/isfile stanzas.)
    candidates = []
    for configuration in (['Debug'], ['Release'], ['BuildAndIntegration']):
        for build_dir in (xcode3_build_dir, xcode4_build_dir):
            candidates.append(
                os.path.join(base, *(build_dir + configuration + python_resource_dir)))

    lldbPath = None
    for candidate in candidates:
        if os.path.isfile(os.path.join(candidate, 'lldb.py')):
            lldbPath = candidate
            break

    if not lldbPath:
        # Same message as before: name the Xcode 3 path for each config.
        print('This script requires lldb.py to be in either ' + candidates[0] + ',', end=' ')
        print(candidates[2] + ', or ' + candidates[4])
        sys.exit(-1)

    # This is to locate the lldb.py module. Insert it right after sys.path[0].
    sys.path[1:1] = [lldbPath]
    # print "sys.path:", sys.path
def run_command(ci, cmd, res, echo=True):
    """Run one lldb command through the interpreter, optionally echoing
    the command and its output (or error) to stdout."""
    if echo:
        print("run command:", cmd)
    ci.HandleCommand(cmd, res)
    if not res.Succeeded():
        if echo:
            print("run command failed!")
            print("run_command error:", res.GetError())
    elif echo:
        print("run_command output:", res.GetOutput())
def do_lldb_disassembly(lldb_commands, exe, disassemble_options, num_symbols,
                        symbols_to_disassemble,
                        re_symbol_pattern,
                        quiet_disassembly):
    """Load *exe* into lldb and disassemble the selected symbols.

    Args:
        lldb_commands: extra lldb commands to run before the 'file' command.
        exe: path of the executable image to disassemble.
        disassemble_options: option string passed to lldb's 'disassemble'.
        num_symbols: max number of symbols to disassemble (-1 = unlimited).
        symbols_to_disassemble: explicit symbol names; when non-empty the
            symbol-table walk is skipped entirely.
        re_symbol_pattern: optional regex that symbol names must match.
        quiet_disassembly: when True, suppress progress/echo output.
    """
    import lldb
    import atexit
    # NOTE(review): shadows the module-level 'import re'; redundant but
    # harmless.
    import re

    # Create the debugger instance now.
    dbg = lldb.SBDebugger.Create()
    if not dbg:
        raise Exception('Invalid debugger instance')

    # Register an exit callback.
    atexit.register(lambda: lldb.SBDebugger.Terminate())

    # We want our debugger to be synchronous.
    dbg.SetAsync(False)

    # Get the command interpreter from the debugger.
    ci = dbg.GetCommandInterpreter()
    if not ci:
        raise Exception('Could not get the command interpreter')

    # And the associated result object.
    res = lldb.SBCommandReturnObject()

    # See if there any extra command(s) to execute before we issue the file
    # command.
    for cmd in lldb_commands:
        run_command(ci, cmd, res, not quiet_disassembly)

    # Now issue the file command.
    run_command(ci, 'file %s' % exe, res, not quiet_disassembly)

    # Create a target.
    #target = dbg.CreateTarget(exe)
    target = dbg.GetSelectedTarget()
    stream = lldb.SBStream()

    def IsCodeType(symbol):
        """Check whether an SBSymbol represents code."""
        return symbol.GetType() == lldb.eSymbolTypeCode

    # Define a generator for the symbols to disassemble.
    def symbol_iter(num, symbols, re_symbol_pattern, target, verbose):
        # If we specify the symbols to disassemble, ignore symbol table dump.
        if symbols:
            for i in range(len(symbols)):
                if verbose:
                    print("symbol:", symbols[i])
                yield symbols[i]
        else:
            limited = True if num != -1 else False
            if limited:
                count = 0
            if re_symbol_pattern:
                pattern = re.compile(re_symbol_pattern)
            stream = lldb.SBStream()
            for m in target.module_iter():
                if verbose:
                    print("module:", m)
                for s in m:
                    if limited and count >= num:
                        return
                    # If a regexp symbol pattern is supplied, consult it.
                    if re_symbol_pattern:
                        # If the pattern does not match, look for the next
                        # symbol.
                        if not pattern.match(s.GetName()):
                            continue
                    # If we come here, we're ready to disassemble the symbol.
                    if verbose:
                        print("symbol:", s.GetName())
                    if IsCodeType(s):
                        if limited:
                            count = count + 1
                            if verbose:
                                print("returning symbol:", s.GetName())
                        yield s.GetName()
                        if verbose:
                            print("start address:", s.GetStartAddress())
                            print("end address:", s.GetEndAddress())
                            s.GetDescription(stream)
                            print("symbol description:", stream.GetData())
                            stream.Clear()

    # Disassembly time.
    for symbol in symbol_iter(
            num_symbols,
            symbols_to_disassemble,
            re_symbol_pattern,
            target,
            not quiet_disassembly):
        cmd = "disassemble %s '%s'" % (disassemble_options, symbol)
        run_command(ci, cmd, res, not quiet_disassembly)
def main():
    """Parse command-line options and drive the disassembly run."""
    # This is to set up the Python path to include the pexpect-2.4 dir.
    # Remember to update this when/if things change.
    scriptPath = sys.path[0]
    sys.path.append(
        os.path.join(
            scriptPath,
            os.pardir,
            os.pardir,
            'test',
            'pexpect-2.4'))

    parser = OptionParser(usage="""\
Run lldb to disassemble all the available functions for an executable image.
Usage: %prog [options]
""")
    parser.add_option(
        '-C',
        '--lldb-command',
        type='string',
        action='append',
        metavar='COMMAND',
        default=[],
        dest='lldb_commands',
        help='Command(s) lldb executes after starting up (can be empty)')
    parser.add_option(
        '-e',
        '--executable',
        type='string',
        action='store',
        dest='executable',
        help="""Mandatory: the executable to do disassembly on.""")
    parser.add_option(
        '-o',
        '--options',
        type='string',
        action='store',
        dest='disassemble_options',
        help="""Mandatory: the options passed to lldb's 'disassemble' command.""")
    # BUG FIX: the help text here was a copy-paste of the '-s' option's
    # text; it now describes what --quiet-disassembly actually does.
    parser.add_option(
        '-q',
        '--quiet-disassembly',
        action='store_true',
        default=False,
        dest='quiet_disassembly',
        help="""Suppress informational output while disassembling.""")
    parser.add_option(
        '-n',
        '--num-symbols',
        type='int',
        action='store',
        default=-1,
        dest='num_symbols',
        help="""The number of symbols to disassemble, if specified.""")
    parser.add_option(
        '-p',
        '--symbol_pattern',
        type='string',
        action='store',
        dest='re_symbol_pattern',
        help="""The regular expression of symbols to invoke lldb's 'disassemble' command.""")
    parser.add_option(
        '-s',
        '--symbol',
        type='string',
        action='append',
        metavar='SYMBOL',
        default=[],
        dest='symbols_to_disassemble',
        help="""The symbol(s) to invoke lldb's 'disassemble' command on, if specified.""")

    opts, args = parser.parse_args()

    lldb_commands = opts.lldb_commands

    # Both the executable and the disassemble options are mandatory.
    if not opts.executable or not opts.disassemble_options:
        parser.print_help()
        sys.exit(1)

    executable = opts.executable
    disassemble_options = opts.disassemble_options
    quiet_disassembly = opts.quiet_disassembly
    num_symbols = opts.num_symbols
    symbols_to_disassemble = opts.symbols_to_disassemble
    re_symbol_pattern = opts.re_symbol_pattern

    # We have parsed the options.
    if not quiet_disassembly:
        print("lldb commands:", lldb_commands)
        print("executable:", executable)
        print("disassemble options:", disassemble_options)
        print("quiet disassembly output:", quiet_disassembly)
        print("num of symbols to disassemble:", num_symbols)
        print("symbols to disassemble:", symbols_to_disassemble)
        print("regular expression of symbols to disassemble:", re_symbol_pattern)

    setupSysPath()
    do_lldb_disassembly(lldb_commands, executable, disassemble_options,
                        num_symbols,
                        symbols_to_disassemble,
                        re_symbol_pattern,
                        quiet_disassembly)
if __name__ == '__main__':
    # Script entry point.
    main()
| |
#!python
"""
A simple highly available heartbeat implementation.
An instance of this is started on each host. One is elected as the
worker, executing a command in a loop. The other instance of the
script loops and checks by connecting to a network port if the worker
is still available. Once it detects that the worker is down, it
assumes the worker role, starts its own network listener and executes
the command in a loop.
Some examples:
Heartbeat command ("dir c:") executed every 30 seconds, pinned to
one machine (m2), i.e., m1 takes over if m2 is down, as m2 comes
back, m1 becomes supervisor again:
h@m1 $ python ha_heartbeat.py -l m1:22221 -r m2:22222 -fallback "dir c:"
h@m2 $ python ha_heartbeat.py -l m2:22222 -r m1:22221 -mode WORKER "dir c:"
Heartbeat command ("ls -l") executed every 10 seconds, peer status
checked every 5 seconds, no fallback once the supervisor takes over
from a failed worker:
h@m1 $ python ha_heartbeat.py -i 10 -t 5 -l m1:22221 -r m2:22222 "ls -l"
h@m2 $ python ha_heartbeat.py -i 10 -t 5 -l m2:22222 -r m1:22221 "ls -l"
"""
import socket
import threading
import SocketServer
import random
import time
from optparse import OptionParser
import logging
import subprocess
import sys
# Role identifiers for the heartbeat state machine below.
WORKER="WORKER"
SUPERVISOR="SUPERVISOR"
# Seconds to wait between attempts to bind the listener socket
# (e.g. while a previous socket lingers in TIME_WAIT).
BIND_RETRY_WAIT = 30
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Handles one heartbeat probe: the peer only connects to prove the
    # worker is alive; no payload is exchanged.
    def handle(self):
        try:
            # The client only connects and does not send any data.
            pass
        except Exception as e:
            # NOTE(review): 'pass' cannot raise, so this except clause is
            # dead code — confirm whether real handling was intended here.
            log.debug("Error in server handle(): %s" % (e,))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # TCP server that serves each heartbeat connection on its own thread.
    pass
def client(ip, port, message="\n"):
    """Probe the peer's heartbeat listener.

    Returns True if a TCP connection to (ip, port) succeeds, False
    otherwise. The `message` parameter is unused; it is kept for
    backward compatibility with existing callers.
    """
    result = False
    # BUG FIX: initialize sock so the finally block does not raise
    # NameError when socket.socket() itself fails.
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((ip, port))
        result = True
    except Exception as e:
        log.debug("%s" % (e,))
    finally:
        if sock:
            sock.close()
    return result
def stop_listener_thread(listener_details):
    """Shut down the network listener and join its serving thread.

    Accepts a (listener, listener_thread) tuple, exactly as callers
    already pass it. Returns None on success, or the (now shut-down)
    tuple if the thread is still alive after a 2-second join.

    BUG FIX: the original signature used tuple-parameter unpacking
    (`def f((a, b)):`), which is Python 2-only syntax removed by
    PEP 3113; unpacking inside the body keeps the call contract
    identical while remaining valid on Python 3.
    """
    (listener, listener_thread) = listener_details
    if listener:
        listener.shutdown()
        listener = None
    if listener_thread:
        if listener_thread.is_alive():
            listener_thread.join(2)
        if listener_thread.is_alive():
            log.warning("Network listener thread still running.")
            return (listener, listener_thread)
        else:
            log.debug("Network listener thread successfully stopped.")
            return None
def start_listener_thread(local_host, local_port, wait_for_serversocket=300, bind_retry_wait=30):
    """Start the heartbeat TCP listener on a daemon thread, retrying binds.

    Retries for roughly wait_for_serversocket seconds, sleeping
    bind_retry_wait seconds between attempts (the socket may still be in
    TIME_WAIT from a previous run). Returns a (listener, listener_thread)
    tuple; both elements are None if the listener never came up.

    BUG FIXES: the bind_retry_wait parameter was previously accepted but
    ignored (the global BIND_RETRY_WAIT was used instead), and the retry
    count used `/`, which produces a float (a TypeError for range()) on
    Python 3 — it now uses integer division.
    """
    listener = listener_thread = None
    for retry in range(1, int(wait_for_serversocket) // bind_retry_wait):
        try:
            listener = ThreadedTCPServer((local_host, local_port), ThreadedTCPRequestHandler)
            # Start a thread with the listener -- that thread
            # will then start one more thread for each request
            listener_thread = threading.Thread(target=listener.serve_forever)
            # Exit the listener thread when the main thread terminates
            listener_thread.setDaemon(True)
            listener_thread.start()
            log.debug("TCP Server loop running on host %s, port %s, in thread:%s" % (local_host, local_port, listener_thread.getName()))
            break
        except Exception as e:
            # This may happen if the socket is still in
            # TIME_WAIT mode. This can be tuned on the OS
            # level.
            log.info("Listener not running: %s" % (e))
            log.debug("Will try to start again in %d seconds." % (bind_retry_wait))
            time.sleep(bind_retry_wait)
    return (listener, listener_thread)
if __name__ == "__main__":
    usage_message = "Usage: %prog [options] command"
    parser = OptionParser(usage=usage_message)
    parser.add_option("-l", "--local", dest="local_host_port",
                      help="local host and port in <host>:<port> format (default: localhost:22221)", default="localhost:22221")
    parser.add_option("-r", "--remote", dest="remote_host_port",
                      help="remote host and port in <host>:<port> format (default: localhost:22222)", default="localhost:22222")
    parser.add_option("-t", "--interval-test", dest="interval_test",
                      help="Maximum interval between peer checks (default: 10)", default="10")
    parser.add_option("-c", "--check-count", dest="check_count",
                      help="Number of times a supervisor will check a dead peer before failing over. (default: 3)", default="3")
    parser.add_option("-i", "--interval-commands", dest="interval_command",
                      help="Interval between command executions in seconds (default: 30)", default="30")
    parser.add_option("-w", "--wait-for-serversocket", dest="wait_for_serversocket",
                      help="Wait seconds for the serversocket to become available (default: 300)", default="300")
    parser.add_option("-m", "--mode", dest="mode",
                      help="Start process in WORKER or SUPERVISOR mode (default: SUPERVISOR)", default=SUPERVISOR)
    parser.add_option("-f", "--fallback", dest="fallback", action="store_true",
                      help="Fallback to peer once it is up again (default: False)", default=False)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="Verbose output", default=False)
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments. Try the -h or --help options for help.")
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
    log = logging.getLogger(__file__)

    peer_host, peer_port = options.remote_host_port.split(':')
    local_host, local_port = options.local_host_port.split(':')
    log.debug("Peer host = %s, peer port = %s" % (peer_host, peer_port,))
    log.debug("Local host = %s, local port = %s" % (local_host, local_port,))

    checked = 0
    state = options.mode
    listener_details = None
    # Main state machine: SUPERVISOR watches the peer; WORKER runs the
    # command in a loop while serving heartbeat probes.
    while True:
        if state == SUPERVISOR:
            # Randomized sleep keeps the two peers from probing in lock-step.
            sleepsecs = (random.random() * int(options.interval_test)) or 1
            # BUG FIX: client() returns True/False, not a shell-style exit
            # code. The original compared the result against 0, which
            # inverted the alive/dead logic (True != 0 is True, so the
            # "peer is dead" branch ran while the peer was alive).
            if not client(peer_host, int(peer_port)):
                if checked >= int(options.check_count):
                    log.warning("Peer is dead, now becoming a WORKER.")
                    state = WORKER
                else:
                    checked += 1
                    log.info("Peer is dead, check again in %f secs" % (sleepsecs,))
                    time.sleep(sleepsecs)
            else:
                log.debug("Peer is alive, next check in %f secs" % (sleepsecs,))
                checked = 0
                time.sleep(sleepsecs)
        elif state == WORKER:
            if not listener_details:
                listener_details = start_listener_thread(local_host, int(local_port), int(options.wait_for_serversocket))
                if listener_details[0] is None:
                    log.warning("Listener not started.")
            log.debug("Sanity check if peer is a WORKER ...:")
            # BUG FIX: as above — a successful connect means the peer is alive.
            if client(peer_host, int(peer_port)):
                if options.fallback:
                    log.info("Peer is alive, falling back to SUPERVISOR mode")
                    listener_details = stop_listener_thread(listener_details)
                    state = SUPERVISOR
                    continue
                else:
                    # Stay in WORKER mode, the other process should shut down.
                    log.info("Peer is an alive WORKER, but this process is also a WORKER")
            else:
                log.debug("Peer is still dead or in SUPERVISOR mode.")
            log.debug("Executing command ...")
            try:
                p = subprocess.Popen(args[0], shell=True, close_fds=True)
                p.communicate()
                rc = p.returncode
                if rc < 0:
                    log.warning("Command was terminated by signal %d" % (-rc,))
                else:
                    log.debug("Command executed with return code = %d" % (rc,))
            # BUG FIX: 'except OSError, e' is Python 2-only syntax; 'as'
            # works on Python 2.6+ and Python 3.
            except OSError as e:
                log.warning("Command execution failed (%s)" % (e,))
            log.debug("Next job will run in %f seconds" % int(options.interval_command))
            time.sleep(int(options.interval_command))
        else:
            log.error("Unknown state %s. Exiting." % (state,))
            sys.exit(1)
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dumb configuration.rst generator that relies on source comments."""
import io
import os
TARGET = 'docs/source/configuration.rst'
ROOT = 'cartographer'
PREFIX = """.. Copyright 2016 The Cartographer Authors
.. Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============
Configuration
=============
.. DO NOT EDIT! This documentation is AUTOGENERATED, please edit .proto files as
.. needed and run scripts/update_configuration_doc.py.
"""
SUFFIX = """
"""
NODOC = 'Not yet documented.'
class Message(object):
    """Holds one ...Options proto message: its name, package, comments,
    and the (type, name, comments) triples of its options."""

    def __init__(self, name, package, preceding_comments):
        # Fully qualified message name, e.g. 'cartographer.mapping.FooOptions'.
        self.name = name
        # Proto package the message was declared in.
        self.package = package
        # Comment lines found immediately before the message declaration.
        self.preceding_comments = preceding_comments
        # Comment lines found just before the closing brace (set later).
        self.trailing_comments = None
        # List of (option_type, option_name, comments) triples.
        self.options = []

    def AddTrailingComments(self, comments):
        self.trailing_comments = comments

    def AddOption(self, option_type, name, comments):
        self.options.append((option_type, name, comments))
def ParseProtoFile(proto_file):
    """Computes the list of Message objects of the option messages in a file."""
    line_iter = iter(proto_file)
    # We ignore the license header and search for the 'package' line.
    for line in line_iter:
        line = line.strip()
        if line.startswith('package'):
            assert line[-1] == ';'
            package = line[7:-1].strip()
            break
        else:
            # A closing brace before the package line would mean we are
            # mis-parsing the file.
            assert '}' not in line
    message_list = []
    while True:
        # Search for the next options message and capture preceding comments.
        message_comments = []
        for line in line_iter:
            line = line.strip()
            if '}' in line:
                # The preceding comments were for a different message it seems.
                message_comments = []
            elif line.startswith('//'):
                # We keep comments preceding an options message.
                comment = line[2:].strip()
                if not comment.startswith('NEXT ID:'):
                    message_comments.append(comment)
            elif line.startswith('message') and line.endswith('Options {'):
                message_name = package + '.' + line[7:-1].strip()
                break
        else:
            # We reached the end of file.
            break
        print(" Found '%s'." % message_name)
        message = Message(message_name, package, message_comments)
        message_list.append(message)
        # We capture the contents of this message.
        option_comments = []
        multiline = ''
        for line in line_iter:
            line = line.strip()
            if '}' in line:
                # We reached the end of this message.
                message.AddTrailingComments(option_comments)
                break
            elif line.startswith('//'):
                comment = line[2:].strip()
                if not comment.startswith('NEXT ID:'):
                    option_comments.append(comment)
            else:
                assert not line.startswith('required')
                # Options may span several physical lines; accumulate until
                # the terminating semicolon.
                multiline += ' ' + line
                if not multiline.endswith(';'):
                    continue
                assert len(multiline) < 200
                # Strip the '; ' and the trailing field number to leave
                # '<type> <name> ='.
                option = multiline[:-1].strip().rstrip('0123456789').strip()
                assert option.endswith('=')
                if option.startswith('repeated'):
                    option = option[8:]
                # NOTE(review): stray trailing semicolon on the next line
                # (legal but unidiomatic Python).
                option_type, option_name = option[:-1].strip().split();
                print(" Option '%s'." % option_name)
                multiline = ''
                message.AddOption(option_type, option_name, option_comments)
                option_comments = []
    return message_list
def ParseProtoFilesRecursively(root):
    """Recursively parses all proto files into a list of Message objects."""
    messages = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if not filename.endswith('.proto'):
                continue
            path = os.path.join(dirpath, filename)
            print("Found '%s'..." % path)
            # Symlinked protos would be parsed twice; refuse them outright.
            assert not os.path.islink(path)
            messages.extend(ParseProtoFile(io.open(path, encoding='UTF-8')))
    return messages
class ResolutionError(Exception):
    """Raised when resolving a message name fails.

    Thrown by Resolver.Resolve when a type name cannot be found in any
    enclosing package scope.
    """
class Resolver(object):
    """Resolves relative proto type names to fully qualified ones."""

    def __init__(self, name_set):
        # All fully-qualified message names discovered in the protos.
        self.name_set = set(name_set)

    def Resolve(self, message_name, package_name):
        """Return the fully qualified name for *message_name* as seen from
        *package_name*, searching innermost package scope first."""
        # Scalar types resolve to themselves.
        if message_name in ('bool', 'double', 'float', 'int32'):
            return message_name
        # A leading dot marks an already fully-qualified name.
        if message_name.startswith('.'):
            return message_name[1:]
        scopes = package_name.split('.')
        for depth in reversed(range(len(scopes) + 1)):
            candidate = '.'.join(scopes[:depth]) + '.' + message_name
            if candidate in self.name_set:
                return candidate
        raise ResolutionError(
            'Resolving %s in %s failed.' % (message_name, package_name))
def GenerateDocumentation(output_file, root):
    """Recursively generates documentation, sorts and writes it."""
    message_list = ParseProtoFilesRecursively(root)
    resolver = Resolver(message.name for message in message_list)
    output_dict = {}
    for message in message_list:
        # reST section header: message name underlined with '='.
        content = [message.name, '=' * len(message.name), '']
        assert message.name not in output_dict
        output_dict[message.name] = content
        if message.preceding_comments:
            content.extend(message.preceding_comments)
            content.append('')
        for option_type, option_name, option_comments in message.options:
            # TODO(whess): For now we exclude InitialTrajectoryPose from the
            # documentation. It is documented itself (since it has no Options suffix)
            # and is not parsed from the Lua files.
            if option_type in ('InitialTrajectoryPose',):
                continue
            content.append(
                resolver.Resolve(option_type, message.package) + ' ' + option_name)
            # Options without comments get an explicit placeholder.
            if not option_comments:
                option_comments.append(NODOC)
            for comment in option_comments:
                content.append(' ' + comment)
            content.append('')
        if message.trailing_comments:
            content.extend(message.trailing_comments)
            content.append('')
    # Sort sections by message name for a stable document.
    output = ['\n'.join(doc) for key, doc in sorted(list(output_dict.items()))]
    print('\n\n'.join(output), file=output_file)
def main():
    """Regenerate TARGET from the protos under ROOT."""
    # Refuse to clobber anything unexpected.
    assert not os.path.islink(TARGET) and os.path.isfile(TARGET)
    assert not os.path.islink(ROOT) and os.path.isdir(ROOT)
    # BUG FIX: use a context manager so the handle is flushed and closed
    # even if documentation generation raises part-way through.
    with io.open(TARGET, mode='w', encoding='UTF-8', newline='\n') as output_file:
        output_file.write(PREFIX)
        GenerateDocumentation(output_file, ROOT)
        output_file.write(SUFFIX)
if __name__ == "__main__":
    # Regenerate the configuration docs when run as a script.
    main()
| |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
    """Preprocess argument namespace to apply default behaviors.

    Mutates ``args`` in place after parsing. Statement order is
    significant: flags set early (e.g. ``build_lldb``) can be cleared by
    later sections (e.g. the global ``--skip-build`` propagation), and the
    test-skipping logic reads build flags assigned above it.
    """
    # Build cmark if any cmark-related options were specified.
    if (args.cmark_build_variant is not None):
        args.build_cmark = True
    # Build LLDB if any LLDB-related options were specified.
    if args.lldb_build_variant is not None or \
       args.lldb_assertions is not None or \
       args.lldb_build_with_xcode is not None:
        args.build_lldb = True
    # Set the default build variant.
    if args.build_variant is None:
        args.build_variant = 'Debug'
    # Per-project build variants default to the global build variant,
    # except cmark, which follows the Swift variant.
    if args.llvm_build_variant is None:
        args.llvm_build_variant = args.build_variant
    if args.swift_build_variant is None:
        args.swift_build_variant = args.build_variant
    if args.swift_stdlib_build_variant is None:
        args.swift_stdlib_build_variant = args.build_variant
    if args.cmark_build_variant is None:
        args.cmark_build_variant = args.swift_build_variant
    if args.lldb_build_variant is None:
        args.lldb_build_variant = args.build_variant
    # '0'/'1' string flag: LLDB builds with CMake unless explicitly
    # requested to build with xcodebuild.
    if args.lldb_build_with_xcode is None:
        args.lldb_build_with_xcode = '0'
    if args.foundation_build_variant is None:
        args.foundation_build_variant = args.build_variant
    if args.libdispatch_build_variant is None:
        args.libdispatch_build_variant = args.build_variant
    if args.libicu_build_variant is None:
        args.libicu_build_variant = args.build_variant
    # Assertions are enabled by default.
    if args.assertions is None:
        args.assertions = True
    # Propagate the default assertions setting.
    if args.cmark_assertions is None:
        args.cmark_assertions = args.assertions
    if args.llvm_assertions is None:
        args.llvm_assertions = args.assertions
    if args.swift_assertions is None:
        args.swift_assertions = args.assertions
    if args.swift_stdlib_assertions is None:
        args.swift_stdlib_assertions = args.assertions
    if args.llbuild_assertions is None:
        args.llbuild_assertions = args.assertions
    if args.lldb_assertions is None:
        args.lldb_assertions = args.assertions
    # Set the default CMake generator.
    if args.cmake_generator is None:
        args.cmake_generator = 'Ninja'
    # --ios-all etc are not supported by open-source Swift.
    if args.ios_all:
        raise ValueError('error: --ios-all is unavailable in open-source '
                         'Swift.\nUse --ios to skip iOS device tests.')
    if args.tvos_all:
        raise ValueError('error: --tvos-all is unavailable in open-source '
                         'Swift.\nUse --tvos to skip tvOS device tests.')
    if args.watchos_all:
        raise ValueError('error: --watchos-all is unavailable in open-source '
                         'Swift.\nUse --watchos to skip watchOS device tests.')
    # Propagate global --skip-build. Note this overrides any build flags
    # implied above (e.g. build_lldb from the LLDB-related options).
    if args.skip_build:
        args.build_linux = False
        args.build_freebsd = False
        args.build_cygwin = False
        args.build_osx = False
        args.build_ios = False
        args.build_tvos = False
        args.build_watchos = False
        args.build_android = False
        args.build_benchmarks = False
        args.build_external_benchmarks = False
        args.build_lldb = False
        args.build_llbuild = False
        args.build_libcxx = False
        args.build_swiftpm = False
        args.build_xctest = False
        args.build_foundation = False
        args.build_libdispatch = False
        args.build_libicu = False
        args.build_playgroundsupport = False
    # --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
    # merely shorthands for --skip-build-{**os}-{device,simulator}
    if not args.ios or not args.build_ios:
        args.build_ios_device = False
        args.build_ios_simulator = False
    if not args.tvos or not args.build_tvos:
        args.build_tvos_device = False
        args.build_tvos_simulator = False
    if not args.watchos or not args.build_watchos:
        args.build_watchos_device = False
        args.build_watchos_simulator = False
    if not args.android or not args.build_android:
        args.build_android = False
    # --test-paths implies --test and/or --validation-test
    # depending on what directories/files have been specified.
    if args.test_paths:
        for path in args.test_paths:
            if path.startswith('test'):
                args.test = True
            elif path.startswith('validation-test'):
                args.test = True
                args.validation_test = True
    # --validation-test implies --test.
    if args.validation_test:
        args.test = True
    # --test-optimized implies --test.
    if args.test_optimized:
        args.test = True
    # --test-optimize-size implies --test.
    if args.test_optimize_for_size:
        args.test = True
    # --test-optimize-none-with-implicit-dynamic implies --test.
    if args.test_optimize_none_with_implicit_dynamic:
        args.test = True
    # If none of tests specified skip swift stdlib test on all platforms
    if not args.test and not args.validation_test and not args.long_test:
        args.test_linux = False
        args.test_freebsd = False
        args.test_cygwin = False
        args.test_osx = False
        args.test_ios = False
        args.test_tvos = False
        args.test_watchos = False
        args.test_android = False
        args.test_swiftpm = False
        args.test_swiftsyntax = False
        args.test_indexstoredb = False
        args.test_sourcekitlsp = False
        args.test_skstresstester = False
        args.test_swiftevolve = False
        args.test_toolchainbenchmarks = False
    # --skip-test-ios is merely a shorthand for host and simulator tests.
    if not args.test_ios:
        args.test_ios_host = False
        args.test_ios_simulator = False
    # --skip-test-tvos is merely a shorthand for host and simulator tests.
    if not args.test_tvos:
        args.test_tvos_host = False
        args.test_tvos_simulator = False
    # --skip-test-watchos is merely a shorthand for host and simulator
    # tests.
    if not args.test_watchos:
        args.test_watchos_host = False
        args.test_watchos_simulator = False
    # --skip-build-{ios,tvos,watchos}-{device,simulator} implies
    # --skip-test-{ios,tvos,watchos}-{host,simulator}
    if not args.build_ios_device:
        args.test_ios_host = False
    if not args.build_ios_simulator:
        args.test_ios_simulator = False
    if not args.build_tvos_device:
        args.test_tvos_host = False
    if not args.build_tvos_simulator:
        args.test_tvos_simulator = False
    if not args.build_watchos_device:
        args.test_watchos_host = False
    if not args.build_watchos_simulator:
        args.test_watchos_simulator = False
    if not args.build_android:
        args.test_android = False
        args.test_android_host = False
    if not args.test_android:
        args.test_android_host = False
    # Host-side device tests require --host-test on top of the per-OS flags.
    if not args.host_test:
        args.test_ios_host = False
        args.test_tvos_host = False
        args.test_watchos_host = False
        args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require an '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
default_max_lto_link_job_counts = host.max_lto_link_job_counts()
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=default_max_lto_link_job_counts['llvm'],
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=default_max_lto_link_job_counts['swift'],
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], store_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], store_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], store_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftevolve'], store_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', store_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], store_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
' SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
'mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='skip building Swift Benchmark Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
# Text blocks consumed by argparse when building the top-level parser.
# argparse substitutes %(prog)s with the invoked program name.
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""

# Short description shown at the top of --help output.
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""

# Long-form help appended after the option listing: documents preset mode,
# recognized environment variables, and worked examples.
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects.
If you intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
| |
"""The tests for the utility_meter component."""
from datetime import timedelta
from unittest.mock import patch
from homeassistant.components.utility_meter.const import (
ATTR_TARIFF,
DOMAIN,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
)
import homeassistant.components.utility_meter.sensor as um_sensor
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
CONF_PLATFORM,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
Platform,
)
from homeassistant.core import State
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import mock_restore_cache
async def test_restore_state(hass):
    """Verify a tariff utility meter restores its cached tariff on startup."""
    # Seed the restore cache before setup so the entity comes back as
    # "midpeak" instead of defaulting to the first configured tariff.
    mock_restore_cache(
        hass,
        [State("utility_meter.energy_bill", "midpeak")],
    )

    config = {
        "utility_meter": {
            "energy_bill": {
                "source": "sensor.energy",
                "tariffs": ["onpeak", "midpeak", "offpeak"],
            }
        }
    }
    assert await async_setup_component(hass, DOMAIN, config)
    assert await async_setup_component(hass, Platform.SENSOR, config)
    await hass.async_block_till_done()

    # restore from cache
    restored = hass.states.get("utility_meter.energy_bill")
    assert restored.state == "midpeak"
async def test_services(hass):
    """Test energy sensor reset service.

    Exercises select_next_tariff, select_tariff (including an invalid
    tariff name, which must be ignored) and reset against a two-tariff
    hourly meter.
    """
    config = {
        "utility_meter": {
            "energy_bill": {
                "source": "sensor.energy",
                "cycle": "hourly",
                "tariffs": ["peak", "offpeak"],
            }
        }
    }

    assert await async_setup_component(hass, DOMAIN, config)
    assert await async_setup_component(hass, Platform.SENSOR, config)
    await hass.async_block_till_done()

    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    entity_id = config[DOMAIN]["energy_bill"]["source"]
    # Baseline reading: the meter accumulates deltas from this value.
    hass.states.async_set(
        entity_id, 1, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
    )
    await hass.async_block_till_done()

    now = dt_util.utcnow() + timedelta(seconds=10)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id,
            3,
            {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
            force_update=True,
        )
        await hass.async_block_till_done()

    # 1 -> 3 kWh: the delta (2) accrues on the initially-active "peak" tariff.
    state = hass.states.get("sensor.energy_bill_peak")
    assert state.state == "2"

    state = hass.states.get("sensor.energy_bill_offpeak")
    assert state.state == "0"

    # Next tariff
    data = {ATTR_ENTITY_ID: "utility_meter.energy_bill"}
    await hass.services.async_call(DOMAIN, SERVICE_SELECT_NEXT_TARIFF, data)
    await hass.async_block_till_done()

    now += timedelta(seconds=10)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id,
            4,
            {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
            force_update=True,
        )
        await hass.async_block_till_done()

    # 3 -> 4 kWh now lands on "offpeak"; "peak" stays unchanged.
    state = hass.states.get("sensor.energy_bill_peak")
    assert state.state == "2"

    state = hass.states.get("sensor.energy_bill_offpeak")
    assert state.state == "1"

    # Change tariff
    data = {ATTR_ENTITY_ID: "utility_meter.energy_bill", ATTR_TARIFF: "wrong_tariff"}
    await hass.services.async_call(DOMAIN, SERVICE_SELECT_TARIFF, data)
    await hass.async_block_till_done()

    # Inexisting tariff, ignoring
    assert hass.states.get("utility_meter.energy_bill").state != "wrong_tariff"

    data = {ATTR_ENTITY_ID: "utility_meter.energy_bill", ATTR_TARIFF: "peak"}
    await hass.services.async_call(DOMAIN, SERVICE_SELECT_TARIFF, data)
    await hass.async_block_till_done()

    now += timedelta(seconds=10)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id,
            5,
            {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
            force_update=True,
        )
        await hass.async_block_till_done()

    # Back on "peak": 4 -> 5 kWh adds 1 there.
    state = hass.states.get("sensor.energy_bill_peak")
    assert state.state == "3"

    state = hass.states.get("sensor.energy_bill_offpeak")
    assert state.state == "1"

    # Reset meters
    data = {ATTR_ENTITY_ID: "utility_meter.energy_bill"}
    await hass.services.async_call(DOMAIN, SERVICE_RESET, data)
    await hass.async_block_till_done()

    state = hass.states.get("sensor.energy_bill_peak")
    assert state.state == "0"

    state = hass.states.get("sensor.energy_bill_offpeak")
    assert state.state == "0"
async def test_cron(hass, legacy_patchable_time):
    """Test that a meter configured with only a valid cron pattern sets up."""
    # NOTE(review): the original docstring said "cron pattern and offset
    # fails", but this case asserts that a cron-only config succeeds.
    config = {
        "utility_meter": {
            "energy_bill": {
                "source": "sensor.energy",
                "cron": "*/5 * * * *",
            }
        }
    }

    assert await async_setup_component(hass, DOMAIN, config)
async def test_cron_and_meter(hass, legacy_patchable_time):
    """A meter combining a cron pattern with a cycle must fail validation."""
    meter_options = {
        "source": "sensor.energy",
        "cycle": "hourly",
        "cron": "0 0 1 * *",
    }
    config = {"utility_meter": {"energy_bill": meter_options}}

    assert not await async_setup_component(hass, DOMAIN, config)
async def test_both_cron_and_meter(hass, legacy_patchable_time):
    """Separate meters may use cron and cycle independently of each other."""
    config = {
        "utility_meter": {
            # cron-driven meter
            "energy_bill": {"source": "sensor.energy", "cron": "0 0 1 * *"},
            # cycle-driven meter
            "water_bill": {"source": "sensor.water", "cycle": "hourly"},
        }
    }

    assert await async_setup_component(hass, DOMAIN, config)
async def test_cron_and_offset(hass, legacy_patchable_time):
    """A meter combining a cron pattern with an offset must fail validation."""
    meter_options = {
        "source": "sensor.energy",
        "offset": {"days": 1},
        "cron": "0 0 1 * *",
    }
    config = {"utility_meter": {"energy_bill": meter_options}}

    assert not await async_setup_component(hass, DOMAIN, config)
async def test_bad_cron(hass, legacy_patchable_time):
    """A syntactically invalid cron pattern must be rejected at setup."""
    bad_config = {
        "utility_meter": {"energy_bill": {"source": "sensor.energy", "cron": "*"}}
    }

    assert not await async_setup_component(hass, DOMAIN, bad_config)
async def test_setup_missing_discovery(hass):
    """The sensor platform refuses manual setup without discovery_info."""
    setup_result = await um_sensor.async_setup_platform(
        hass, {CONF_PLATFORM: DOMAIN}, None
    )
    assert not setup_result
| |
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
import argparse
import glob
import itertools
import json
import multiprocessing
import os
import platform
import random
import re
import subprocess
import sys
import time
import xml.etree.cElementTree as ET
import jobset
import watch_dirs
# Run everything relative to the repository root (two levels above this
# script), regardless of where the script was invoked from.
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)

# Extra environment variables forced onto the language-wrapper test runners
# (populated later when --travis is set).
_FORCE_ENVIRON_FOR_WRAPPERS = {}
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class SimpleConfig(object):
  """Build configuration that runs test binaries directly.

  Attributes:
    build_config: the CONFIG= value passed to make.
    allow_hashing: whether test results may be cached by binary hash
      (disabled for 'gcov', whose runs must not be skipped).
    environ: environment variables applied to every test job.
  """

  def __init__(self, config, environ=None):
    if environ is None:
      environ = {}
    self.build_config = config
    self.allow_hashing = (config != 'gcov')
    self.environ = environ
    self.environ['CONFIG'] = config

  def job_spec(self, cmdline, hash_targets, shortname=None, environ=None):
    """Construct a jobset.JobSpec for a test under this config

    Args:
      cmdline: a list of strings specifying the command line the test
               would like to run
      hash_targets: either None (don't do caching of test results), or
                    a list of strings specifying files to include in a
                    binary hash to check if a test has changed
                    -- if used, all artifacts needed to run the test must
                       be listed
      shortname: optional short display name for the job.
      environ: optional per-job environment overrides.
    """
    # BUG FIX: `environ` previously defaulted to a shared mutable dict
    # ({}); use the None-default idiom so calls cannot leak state.
    if environ is None:
      environ = {}
    actual_environ = self.environ.copy()
    # .items() behaves identically to the old .iteritems() here and also
    # keeps the method usable under Python 3.
    for k, v in environ.items():
      actual_environ[k] = v
    return jobset.JobSpec(cmdline=cmdline,
                          shortname=shortname,
                          environ=actual_environ,
                          hash_targets=hash_targets
                          if self.allow_hashing else None)
# ValgrindConfig: compile with some CONFIG=config, but use valgrind to run
class ValgrindConfig(object):
  """Build configuration that wraps every test binary in valgrind."""

  def __init__(self, config, tool, args=None):
    self.build_config = config
    self.tool = tool
    self.args = [] if args is None else args
    # Results are never cached for valgrind runs.
    self.allow_hashing = False

  def job_spec(self, cmdline, hash_targets):
    wrapped = ['valgrind', '--tool=%s' % self.tool] + self.args + cmdline
    return jobset.JobSpec(cmdline=wrapped,
                          shortname='valgrind %s' % cmdline[0],
                          hash_targets=None)
class CLanguage(object):
  """C/C++ test targets enumerated from tools/run_tests/tests.json."""

  def __init__(self, make_target, test_lang):
    self.make_target = make_target
    self.platform = 'windows' if platform.system() == 'Windows' else 'posix'
    with open('tools/run_tests/tests.json') as f:
      all_targets = json.load(f)
    # Keep only targets for this language that run on the current platform.
    self.binaries = [tgt for tgt in all_targets
                     if tgt['language'] == test_lang
                     and self.platform in tgt['platforms']]

  def test_specs(self, config, travis):
    specs = []
    for target in self.binaries:
      # Flaky tests are excluded from CI runs.
      if travis and target['flaky']:
        continue
      if self.platform == 'windows':
        binary = 'vsprojects/test_bin/%s.exe' % (target['name'])
      else:
        binary = 'bins/%s/%s' % (config.build_config, target['name'])
      specs.append(config.job_spec([binary], [binary]))
    return sorted(specs)

  def make_targets(self):
    return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]

  def build_steps(self):
    return []

  def supports_multi_config(self):
    return True

  def __str__(self):
    return self.make_target
class NodeLanguage(object):
  """Node.js wrapper tests, driven by a single shell runner."""

  def test_specs(self, config, travis):
    runner = ['tools/run_tests/run_node.sh']
    return [config.job_spec(runner, None,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def make_targets(self):
    # Node links against the C core libraries.
    return ['static_c', 'shared_c']

  def build_steps(self):
    return [['tools/run_tests/build_node.sh']]

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'node'
class PhpLanguage(object):
  """PHP wrapper tests, driven by a single shell runner."""

  def test_specs(self, config, travis):
    runner = ['src/php/bin/run_tests.sh']
    return [config.job_spec(runner, None,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def make_targets(self):
    # PHP links against the C core libraries.
    return ['static_c', 'shared_c']

  def build_steps(self):
    return [['tools/run_tests/build_php.sh']]

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'php'
class PythonLanguage(object):
  """Python wrapper tests.

  Probes for the interpreters in _build_python_versions and records the
  ones actually present in _has_python_versions.
  """

  def __init__(self):
    self._build_python_versions = ['2.7']
    self._has_python_versions = []

  def test_specs(self, config, travis):
    environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
    environment['PYVER'] = '2.7'
    return [config.job_spec(
        ['tools/run_tests/run_python.sh'],
        None,
        environ=environment,
        shortname='py.test',
    )]

  def make_targets(self):
    return ['static_c', 'grpc_python_plugin', 'shared_c']

  def build_steps(self):
    commands = []
    for python_version in self._build_python_versions:
      try:
        with open(os.devnull, 'w') as output:
          subprocess.check_call(['which', 'python' + python_version],
                                stdout=output, stderr=output)
        commands.append(['tools/run_tests/build_python.sh', python_version])
        self._has_python_versions.append(python_version)
      # BUG FIX: previously a bare `except:`, which also swallowed
      # KeyboardInterrupt/SystemExit. Only interpreter-lookup failures
      # (missing `which`, or a nonzero exit) mean the version is absent.
      except (OSError, subprocess.CalledProcessError):
        jobset.message('WARNING', 'Missing Python ' + python_version,
                       do_newline=True)
    return commands

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'python'
class RubyLanguage(object):
  """Ruby wrapper tests, driven by a single shell runner."""

  def test_specs(self, config, travis):
    runner = ['tools/run_tests/run_ruby.sh']
    return [config.job_spec(runner, None,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def make_targets(self):
    return ['static_c']

  def build_steps(self):
    return [['tools/run_tests/build_ruby.sh']]

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'ruby'
class CSharpLanguage(object):
  """C# tests: one job per NUnit assembly, via a platform-specific runner."""

  def __init__(self):
    self.platform = 'windows' if platform.system() == 'Windows' else 'posix'

  def test_specs(self, config, travis):
    assemblies = ['Grpc.Core.Tests',
                  'Grpc.Examples.Tests',
                  'Grpc.HealthCheck.Tests',
                  'Grpc.IntegrationTesting']
    if self.platform == 'windows':
      runner = 'tools\\run_tests\\run_csharp.bat'
    else:
      runner = 'tools/run_tests/run_csharp.sh'
    return [config.job_spec([runner, assembly],
                            None, shortname=assembly,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            for assembly in assemblies]

  def make_targets(self):
    # For Windows, this target doesn't really build anything,
    # everything is build by buildall script later.
    return ['grpc_csharp_ext']

  def build_steps(self):
    if self.platform == 'windows':
      return [['src\\csharp\\buildall.bat']]
    return [['tools/run_tests/build_csharp.sh']]

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'csharp'
class ObjCLanguage(object):
  """Objective-C wrapper tests, driven by a single shell runner."""

  def test_specs(self, config, travis):
    runner = ['src/objective-c/tests/run_tests.sh']
    return [config.job_spec(runner, None,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def make_targets(self):
    # The tests also need a running interop server.
    return ['grpc_objective_c_plugin', 'interop_server']

  def build_steps(self):
    return [['src/objective-c/tests/build_tests.sh']]

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'objc'
class Sanity(object):
  """Repository sanity checks (not tied to a build configuration)."""

  def test_specs(self, config, travis):
    # Note: cmdlines here are bare strings (single scripts), not lists.
    checks = ['tools/run_tests/run_sanity.sh',
              'tools/run_tests/check_sources_and_headers.py']
    return [config.job_spec(check, None) for check in checks]

  def make_targets(self):
    return ['run_dep_checks']

  def build_steps(self):
    return []

  def supports_multi_config(self):
    return False

  def __str__(self):
    return 'sanity'
class Build(object):
  """Pseudo-language that only builds the `static` target and runs no tests."""

  def test_specs(self, config, travis):
    return []

  def make_targets(self):
    return ['static']

  def build_steps(self):
    return []

  def supports_multi_config(self):
    return True

  def __str__(self):
    # BUG FIX: previously returned self.make_target, but Build never
    # defines that attribute (copy-paste from CLanguage), so str(Build())
    # raised AttributeError. Return the registry name instead.
    return 'build'
# different configurations we can run under
_CONFIGS = {
    'dbg': SimpleConfig('dbg'),
    'opt': SimpleConfig('opt'),
    'tsan': SimpleConfig('tsan', environ={
        'TSAN_OPTIONS': 'suppressions=tools/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1'}),
    'msan': SimpleConfig('msan'),
    'ubsan': SimpleConfig('ubsan'),
    # NOTE(review): both ASAN configs point `suppressions=` at
    # tools/tsan_suppressions.txt -- looks like a copy of the TSAN path;
    # confirm whether an ASAN-specific suppressions file was intended.
    'asan': SimpleConfig('asan', environ={
        'ASAN_OPTIONS': 'detect_leaks=1:color=always:suppressions=tools/tsan_suppressions.txt',
        'LSAN_OPTIONS': 'report_objects=1'}),
    'asan-noleaks': SimpleConfig('asan', environ={
        'ASAN_OPTIONS': 'detect_leaks=0:color=always:suppressions=tools/tsan_suppressions.txt'}),
    'gcov': SimpleConfig('gcov'),
    'memcheck': ValgrindConfig('valgrind', 'memcheck', ['--leak-check=full']),
    'helgrind': ValgrindConfig('dbg', 'helgrind')
    }

# Config(s) used when -c/--config is not given.
_DEFAULT = ['opt']

# Registry mapping -l/--language values to their handler objects.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'node': NodeLanguage(),
    'php': PhpLanguage(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc' : ObjCLanguage(),
    'sanity': Sanity(),
    'build': Build(),
    }

# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
                  choices=['all'] + sorted(_CONFIGS.keys()),
                  nargs='+',
                  default=_DEFAULT)
def runs_per_test_type(arg_str):
  """Auxilary function to parse the "runs_per_test" flag.

  Args:
    arg_str: the raw command-line value ('inf' or a positive integer).

  Returns:
    A positive integer or 0, the latter indicating an infinite number of
    runs.

  Raises:
    argparse.ArgumentTypeError: Upon invalid input.
  """
  if arg_str == 'inf':
    return 0
  try:
    n = int(arg_str)
    if n <= 0:
      raise ValueError
    return n
  # BUG FIX: previously a bare `except:`; only ValueError (non-numeric
  # input, or the explicit raise above for n <= 0) should be mapped to an
  # argparse error -- everything else should propagate.
  except ValueError:
    msg = "'{}' isn't a positive integer or 'inf'".format(arg_str)
    raise argparse.ArgumentTypeError(msg)
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
                  help='A positive integer or "inf". If "inf", all tests will run in an '
                  'infinite loop. Especially useful in combination with "-f"')
# Only tests whose shortname matches this regex are run.
argp.add_argument('-r', '--regex', default='.*', type=str)
# Oversubscribe job slots (2x CPUs): test jobs are often I/O bound.
argp.add_argument('-j', '--jobs', default=2 * multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-f', '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t', '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l', '--language',
                  choices=['all'] + sorted(_LANGUAGES.keys()),
                  nargs='+',
                  default=['all'])
argp.add_argument('-S', '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
# Number of background CPU-load ("antagonist") processes to run while testing.
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
                  help='Generates a JUnit-compatible XML report')
args = argp.parse_args()
# grab config
# NOTE: this file is Python 2 (print statement, iterkeys below).
run_configs = set(_CONFIGS[cfg]
                  for cfg in itertools.chain.from_iterable(
                      _CONFIGS.iterkeys() if x == 'all' else [x]
                      for x in args.config))
build_configs = set(cfg.build_config for cfg in run_configs)

if args.travis:
  # Force gRPC tracing on in wrapper-language runs for debuggability on CI.
  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'surface,batch'}

make_targets = []
languages = set(_LANGUAGES[l]
                for l in itertools.chain.from_iterable(
                    _LANGUAGES.iterkeys() if x == 'all' else [x]
                    for x in args.language))

# Multiple build configs in one invocation are only supported for languages
# whose binaries are laid out per-config (C/C++ and 'build').
if len(build_configs) > 1:
  for language in languages:
    if not language.supports_multi_config():
      print language, 'does not support multiple build configurations'
      sys.exit(1)

if platform.system() == 'Windows':
  def make_jobspec(cfg, targets):
    # make.bat (under vsprojects) replaces make on Windows.
    return jobset.JobSpec(['make.bat', 'CONFIG=%s' % cfg] + targets,
                          cwd='vsprojects', shell=True)
else:
  def make_jobspec(cfg, targets):
    return jobset.JobSpec([os.getenv('MAKE', 'make'),
                           '-j', '%d' % (multiprocessing.cpu_count() + 1),
                           'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                           args.slowdown,
                           'CONFIG=%s' % cfg] + targets)

# One `make` invocation per build config covering the union of every
# language's make targets, followed by per-language build scripts.
build_steps = [make_jobspec(cfg,
                            list(set(itertools.chain.from_iterable(
                                l.make_targets() for l in languages))))
               for cfg in build_configs]
build_steps.extend(set(
    jobset.JobSpec(cmdline, environ={'CONFIG': cfg})
    for cfg in build_configs
    for l in languages
    for cmdline in l.build_steps()))

# The full set of test specs for a single pass, filtered by --regex.
one_run = set(
    spec
    for config in run_configs
    for language in languages
    for spec in language.test_specs(config, args.travis)
    if re.search(args.regex, spec.shortname))

runs_per_test = args.runs_per_test
forever = args.forever
class TestCache(object):
  """Cache for running tests.

  Maps a test cmdline to the binary hash of its last successful run so
  unchanged tests can be skipped; persisted to `.run_tests_cache`.
  """

  def __init__(self, use_cache_results):
    self._last_successful_run = {}
    self._use_cache_results = use_cache_results
    self._last_save = time.time()

  def should_run(self, cmdline, bin_hash):
    # Run unless caching is enabled AND this exact binary already passed.
    if not self._use_cache_results:
      return True
    if cmdline not in self._last_successful_run:
      return True
    return self._last_successful_run[cmdline] != bin_hash

  def finished(self, cmdline, bin_hash):
    self._last_successful_run[cmdline] = bin_hash
    # Throttle disk writes to roughly once per second.
    if time.time() - self._last_save > 1:
      self.save()

  def dump(self):
    return [{'cmdline': cmd, 'hash': digest}
            for cmd, digest in self._last_successful_run.items()]

  def parse(self, exdump):
    self._last_successful_run = dict((o['cmdline'], o['hash']) for o in exdump)

  def save(self):
    with open('.run_tests_cache', 'w') as f:
      f.write(json.dumps(self.dump()))
    self._last_save = time.time()

  def maybe_load(self):
    if os.path.exists('.run_tests_cache'):
      with open('.run_tests_cache') as f:
        self.parse(json.loads(f.read()))
def _build_and_run(check_cancelled, newline_on_success, travis, cache, xml_report=None):
  """Do one pass of building & running tests.

  Args:
    check_cancelled: zero-arg callable polled by jobset; truthy aborts.
    newline_on_success: forwarded to jobset output formatting.
    travis: if True, run tests in deterministic (sorted) order.
    cache: TestCache used to skip unchanged tests (disabled for XML runs).
    xml_report: optional path; when set, writes a JUnit-style XML report.

  Returns:
    0 on success, 1 if the build failed, 2 if any test failed.
  """
  # build latest sequentially
  if not jobset.run(build_steps, maxjobs=1,
                    newline_on_success=newline_on_success, travis=travis):
    return 1

  # start antagonists
  antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                 for _ in range(0, args.antagonists)]
  try:
    infinite_runs = runs_per_test == 0
    # When running on travis, we want out test runs to be as similar as possible
    # for reproducibility purposes.
    if travis:
      massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
    else:
      # whereas otherwise, we want to shuffle things up to give all tests a
      # chance to run.
      massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
      random.shuffle(massaged_one_run)  # which it modifies in-place.
    if infinite_runs:
      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                     else itertools.repeat(massaged_one_run, runs_per_test))
    all_runs = itertools.chain.from_iterable(runs_sequence)

    root = ET.Element('testsuites') if xml_report else None
    testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests') if xml_report else None

    if not jobset.run(all_runs, check_cancelled,
                      newline_on_success=newline_on_success, travis=travis,
                      infinite_runs=infinite_runs,
                      maxjobs=args.jobs,
                      stop_on_failure=args.stop_on_failure,
                      # result caching and XML reporting are mutually exclusive
                      cache=cache if not xml_report else None,
                      xml_report=testsuite):
      return 2
  finally:
    # Always stop the CPU-load antagonists, even on failure/abort.
    for antagonist in antagonists:
      antagonist.kill()
    if xml_report:
      tree = ET.ElementTree(root)
      tree.write(xml_report, encoding='UTF-8')
    if cache: cache.save()

  return 0
# Cache results only when each test runs exactly once; with multiple runs
# per test, a single pass is not representative.
test_cache = TestCache(runs_per_test == 1)
test_cache.maybe_load()

if forever:
  # -f/--forever mode: rebuild and retest whenever watched sources change.
  success = True
  while True:
    dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
    initial_time = dw.most_recent_change()
    have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    success = _build_and_run(check_cancelled=have_files_changed,
                             newline_on_success=False,
                             travis=args.travis,
                             cache=test_cache) == 0
    if not previous_success and success:
      jobset.message('SUCCESS',
                     'All tests are now passing properly',
                     do_newline=True)
    jobset.message('IDLE', 'No change detected')
    # Poll until something changes, then loop back and rebuild.
    while not have_files_changed():
      time.sleep(1)
else:
  # One-shot mode: single build+test pass, exit code reflects the result.
  result = _build_and_run(check_cancelled=lambda: False,
                          newline_on_success=args.newline_on_success,
                          travis=args.travis,
                          cache=test_cache,
                          xml_report=args.xml_report)
  if result == 0:
    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
  else:
    jobset.message('FAILED', 'Some tests failed', do_newline=True)
  sys.exit(result)
| |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Autoencoder model for training on spectrograms."""
from magenta.contrib import training as contrib_training
from magenta.models.nsynth import utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
def get_hparams(config_name):
  """Build the default hyperparameters, overridden by the named config.

  Args:
    config_name: Name of config module to use.

  Returns:
    A HParams object (magenta) with defaults.
  """
  defaults = dict(
      # Optimization
      batch_size=16,
      learning_rate=1e-4,
      adam_beta=0.5,
      max_steps=6000 * 50000,
      samples_per_second=16000,
      num_samples=64000,
      # Preprocessing
      n_fft=1024,
      hop_length=256,
      mask=True,
      log_mag=True,
      use_cqt=False,
      re_im=False,
      dphase=True,
      mag_only=False,
      pad=True,
      mu_law_num=0,
      raw_audio=False,
      # Graph
      num_latent=64,  # dimension of z.
      cost_phase_mask=False,
      phase_loss_coeff=1.0,
      fw_loss_coeff=1.0,  # Frequency weighted cost
      fw_loss_cutoff=1000,
  )
  hparams = contrib_training.HParams(**defaults)
  # Let the named config module override any of the defaults above via its
  # optional `config_hparams` dictionary.
  config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
  if hasattr(config, "config_hparams"):
    hparams.update(config.config_hparams)
  return hparams
def compute_mse_loss(x, xhat, hparams):
  """MSE loss function.

  Args:
    x: Input data tensor.
    xhat: Reconstruction tensor.
    hparams: Hyperparameters.

  Returns:
    total_loss: MSE loss scalar.
  """
  # NOTE(review): the indexing below assumes channel 0 is magnitude and
  # channel 1 is phase of the spectrogram, i.e. x is (batch, time, freq, 2)
  # unless raw_audio — confirm against the dataset pipeline.
  with tf.name_scope("Losses"):
    if hparams.raw_audio:
      # Plain waveform MSE; no spectral weighting applies.
      total_loss = tf.reduce_mean((x - xhat)**2)
    else:
      # Magnitude
      # Optional masking of the phase term by the target magnitude, so
      # phase errors in silent bins don't contribute.
      m = x[:, :, :, 0] if hparams.cost_phase_mask else 1.0
      # Per-frequency weights emphasizing bins below fw_loss_cutoff Hz.
      fm = utils.frequency_weighted_cost_mask(
          hparams.fw_loss_coeff,
          hz_flat=hparams.fw_loss_cutoff,
          n_fft=hparams.n_fft)
      mag_loss = tf.reduce_mean(fm * (x[:, :, :, 0] - xhat[:, :, :, 0])**2)
      if hparams.mag_only:
        # Magnitude-only training: no phase term, no summaries.
        total_loss = mag_loss
      else:
        # Phase
        if hparams.dphase:
          # MSE on phase derivative representation.
          phase_loss = tf.reduce_mean(fm * m *
                                      (x[:, :, :, 1] - xhat[:, :, :, 1])**2)
        else:
          # Von Mises Distribution "Circular Normal"
          # Added constant to keep positive (Same Probability) range [0, 2]
          phase_loss = 1 - tf.reduce_mean(fm * m * tf.cos(
              (x[:, :, :, 1] - xhat[:, :, :, 1]) * np.pi))
        total_loss = mag_loss + hparams.phase_loss_coeff * phase_loss
        tf.summary.scalar("Loss/Mag", mag_loss)
        tf.summary.scalar("Loss/Phase", phase_loss)
        tf.summary.scalar("Loss/Total", total_loss)
  return total_loss
def train_op(batch, hparams, config_name):
  """Define a training op, including summaries and optimization.

  Args:
    batch: Dictionary produced by NSynthDataset.
    hparams: Hyperparameters dictionary.
    config_name: Name of config module.

  Returns:
    train_op: A complete iteration of training with summaries.
  """
  config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
  if hparams.raw_audio:
    # Raw waveform: add height and channel dims so conv layers apply.
    x = tf.expand_dims(tf.expand_dims(batch["audio"], 1), -1)
  else:
    x = batch["spectrogram"]
  # Define the model
  with tf.name_scope("Model"):
    z = config.encode(x, hparams)
    xhat = config.decode(z, batch, hparams)
  # Stash tensors in graph collections for later interpolation experiments.
  for key, tensor in [("x", x), ("pitch", batch["pitch"]), ("z", z),
                      ("xhat", xhat)]:
    tf.add_to_collection(key, tensor)
  # Compute losses
  total_loss = compute_mse_loss(x, xhat, hparams)
  # Apply optimizer
  with tf.name_scope("Optimizer"):
    global_step = tf.get_variable(
        "global_step", [],
        tf.int64,
        initializer=tf.constant_initializer(0),
        trainable=False)
    optimizer = tf.train.AdamOptimizer(hparams.learning_rate, hparams.adam_beta)
    step = slim.learning.create_train_op(total_loss,
                                         optimizer,
                                         global_step=global_step)
  return step
def eval_op(batch, hparams, config_name):
  """Define a evaluation op.

  Args:
    batch: Batch produced by NSynthReader.
    hparams: Hyperparameters.
    config_name: Name of config module.

  Returns:
    eval_op: A complete evaluation op with summaries.
  """
  # Phase summaries only make sense for complex spectrogram inputs.
  phase = not (hparams.mag_only or hparams.raw_audio)
  config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
  if hparams.raw_audio:
    x = batch["audio"]
    # Add height and channel dims
    x = tf.expand_dims(tf.expand_dims(x, 1), -1)
  else:
    x = batch["spectrogram"]
  # Define the model
  with tf.name_scope("Model"):
    z = config.encode(x, hparams, is_training=False)
    xhat = config.decode(z, batch, hparams, is_training=False)
  # For interpolation
  tf.add_to_collection("x", x)
  tf.add_to_collection("pitch", batch["pitch"])
  tf.add_to_collection("z", z)
  tf.add_to_collection("xhat", xhat)
  total_loss = compute_mse_loss(x, xhat, hparams)
  # Define the metrics:
  names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
      "Loss": slim.metrics.mean(total_loss),
  })
  # Define the summaries
  for name, value in names_to_values.items():
    slim.summaries.add_scalar_summary(value, name, print_summary=True)
  # Interpolate
  with tf.name_scope("Interpolation"):
    # Re-decode with reuse=True; this rebinds `xhat` to the reused-variable
    # decode, which is what the summaries below visualize.
    xhat = config.decode(z, batch, hparams, reuse=True, is_training=False)
    # Linear interpolation
    # Pair each latent with the next example's latent (wrapping around) and
    # decode the midpoint.
    z_shift_one_example = tf.concat([z[1:], z[:1]], 0)
    z_linear_half = (z + z_shift_one_example) / 2.0
    xhat_linear_half = config.decode(z_linear_half, batch, hparams, reuse=True,
                                     is_training=False)
    # Pitch shift
    pitch_plus_2 = tf.clip_by_value(batch["pitch"] + 2, 0, 127)
    pitch_minus_2 = tf.clip_by_value(batch["pitch"] - 2, 0, 127)
    # NOTE: batch["pitch"] is mutated in place before each decode so the
    # decoder conditions on the shifted pitch; the order of these four
    # statements is significant.
    batch["pitch"] = pitch_minus_2
    xhat_pitch_minus_2 = config.decode(z, batch, hparams,
                                       reuse=True, is_training=False)
    batch["pitch"] = pitch_plus_2
    xhat_pitch_plus_2 = config.decode(z, batch, hparams,
                                      reuse=True, is_training=False)
  utils.specgram_summaries(x, "Training Examples", hparams, phase=phase)
  utils.specgram_summaries(xhat, "Reconstructions", hparams, phase=phase)
  utils.specgram_summaries(
      x - xhat, "Difference", hparams, audio=False, phase=phase)
  utils.specgram_summaries(
      xhat_linear_half, "Linear Interp. 0.5", hparams, phase=phase)
  utils.specgram_summaries(xhat_pitch_plus_2, "Pitch +2", hparams, phase=phase)
  utils.specgram_summaries(xhat_pitch_minus_2, "Pitch -2", hparams, phase=phase)
  return list(names_to_updates.values())
| |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from ...query.expression import QueryExpression
from ..collection import Collection
from ..builder import Builder
class Relation(object):
    """Abstract base class for ORM relationships (has-one, has-many, etc.).

    Concrete subclasses implement the constraint/matching hooks below.
    Unknown attribute access is forwarded to the underlying query builder
    via ``__getattr__``, so a Relation can be used fluently like a Builder.
    """

    # Class-level switch: when False, __init__-time constraints are skipped.
    # Toggled (globally, per class) by the no_constraints() context manager.
    _constraints = True

    def __init__(self, query, parent):
        """
        :param query: A Builder instance
        :type query: orm.orator.Builder

        :param parent: The parent model
        :type parent: Model
        """
        self._query = query
        self._parent = parent
        # The related model instance, derived from the query's model.
        self._related = query.get_model()
        # Optional extra query merged into new queries (see set_extra_query).
        self._extra_query = None
        self.add_constraints()

    def add_constraints(self):
        """
        Set the base constraints on the relation query.

        :rtype: None
        """
        raise NotImplementedError

    def add_eager_constraints(self, models):
        """
        Set the constraints for an eager load of the relation.

        :type models: list
        """
        raise NotImplementedError

    def init_relation(self, models, relation):
        """
        Initialize the relation on a set of models.

        :type models: list
        :type relation: str
        """
        raise NotImplementedError

    def match(self, models, results, relation):
        """
        Match the eagerly loaded results to their parents.

        :type models: list
        :type results: Collection
        :type relation: str
        """
        raise NotImplementedError

    def get_results(self):
        """
        Get the results of the relationship.
        """
        raise NotImplementedError

    def get_eager(self):
        """
        Get the relationship for eager loading.

        :rtype: Collection
        """
        # get() is not defined on Relation; it is dispatched to the
        # underlying Builder through __getattr__.
        return self.get()

    def touch(self):
        """
        Touch all of the related models for the relationship.
        """
        column = self.get_related().get_updated_at_column()
        self.raw_update({column: self.get_related().fresh_timestamp()})

    def raw_update(self, attributes=None):
        """
        Run a raw update against the base query.

        :type attributes: dict

        :rtype: int
        """
        if attributes is None:
            attributes = {}
        if self._query is not None:
            return self._query.update(attributes)

    def get_relation_count_query(self, query, parent):
        """
        Add the constraints for a relationship count query.

        :type query: Builder
        :type parent: Builder

        :rtype: Builder
        """
        query.select(QueryExpression("COUNT(*)"))
        # Compare the relation's key against the (grammar-quoted) parent key.
        key = self.wrap(self.get_qualified_parent_key_name())
        return query.where(self.get_has_compare_key(), "=", QueryExpression(key))

    @classmethod
    @contextmanager
    def no_constraints(cls, with_subclasses=False):
        """
        Runs a callback with constraints disabled on the relation.
        """
        # NOTE: this mutates class-level state, so it affects every Relation
        # of this class (and optionally its subclasses) for the duration of
        # the with-block; the finally clause always restores it.
        cls._constraints = False
        if with_subclasses:
            for klass in cls.__subclasses__():
                klass._constraints = False
        try:
            yield cls
        except Exception:
            raise
        finally:
            cls._constraints = True
            if with_subclasses:
                for klass in cls.__subclasses__():
                    klass._constraints = True

    def get_keys(self, models, key=None):
        """
        Get all the primary keys for an array of models.

        :type models: list
        :type key: str

        :rtype: list
        """
        # Deduplicated via set(); note this does not preserve model order.
        return list(
            set(
                map(
                    lambda value: value.get_attribute(key) if key else value.get_key(),
                    models,
                )
            )
        )

    def get_query(self):
        return self._query

    def get_base_query(self):
        return self._query.get_query()

    def merge_query(self, query):
        # Accept either an ORM Builder or a raw query builder.
        if isinstance(query, Builder):
            query = query.get_query()
        self._query.merge(query)

    def get_parent(self):
        return self._parent

    def get_qualified_parent_key_name(self):
        return self._parent.get_qualified_key_name()

    def get_related(self):
        return self._related

    def created_at(self):
        """
        Get the name of the "created at" column.

        :rtype: str
        """
        return self._parent.get_created_at_column()

    def updated_at(self):
        """
        Get the name of the "updated at" column.

        :rtype: str
        """
        return self._parent.get_updated_at_column()

    def get_related_updated_at(self):
        """
        Get the name of the related model's "updated at" column.

        :rtype: str
        """
        return self._related.get_updated_at_column()

    def wrap(self, value):
        """
        Wrap the given value with the parent's query grammar.

        :rtype: str
        """
        return self._parent.new_query().get_query().get_grammar().wrap(value)

    def set_parent(self, parent):
        self._parent = parent

    def set_extra_query(self, query):
        self._extra_query = query

    def new_query(self, related=None):
        # Fresh query on the related model, with any extra query merged in.
        if related is None:
            related = self._related
        query = related.new_query()
        if self._extra_query:
            query.merge(self._extra_query.get_query())
        return query

    def new_instance(self, model, **kwargs):
        # _new_instance is not defined here; presumably provided by
        # subclasses — TODO confirm.
        new = self._new_instance(model, **kwargs)
        if self._extra_query:
            new.set_extra_query(self._extra_query)
        return new

    def __dynamic(self, method):
        # Proxy attribute access to the Builder.  If a proxied call returns
        # the builder itself (fluent API), return this Relation instead so
        # chaining stays on the relation.
        attribute = getattr(self._query, method)

        def call(*args, **kwargs):
            result = attribute(*args, **kwargs)
            if result is self._query:
                return self
            return result

        # Non-callable attributes are returned as-is.
        if not callable(attribute):
            return attribute
        return call

    def __getattr__(self, item):
        return self.__dynamic(item)
| |
from Module import AbstractModule
class Module(AbstractModule):
    """Betsy module: call somatic variants with Radia from DNA + RNA BAMs.

    Pipeline: radia.py (call) -> filterRadia.py (filter) -> mergeChroms.py
    (merge per-chromosome VCFs) -> clean_radia_vcf (fix VCFs into out_path).
    """

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
        """Run the Radia pipeline; returns a metadata dict (tool, commands)."""
        import os
        from genomicode import filelib
        from genomicode import parallel
        from genomicode import alignlib
        from Betsy import module_utils as mlib
        # For debugging.
        # Flip these to False to skip a completed stage when re-running.
        RUN_VARIANT_CALLING = True
        FILTER_CALLS = True
        MERGE_CALLS = True
        FIX_VCF_FILES = True
        # Antecedents: DNA BAM folder, RNA BAM folder, normal/cancer match
        # file, reference genome.
        dna_bam_node, rna_bam_node, nc_node, ref_node = antecedents
        dna_bam_filenames = mlib.find_bam_files(dna_bam_node.identifier)
        assert dna_bam_filenames, "No DNA .bam files."
        rna_bam_filenames = mlib.find_bam_files(rna_bam_node.identifier)
        assert rna_bam_filenames, "No RNA .bam files."
        nc_match = mlib.read_normal_cancer_file(nc_node.identifier)
        ref = alignlib.create_reference_genome(ref_node.identifier)
        filelib.safe_mkdir(out_path)
        metadata = {}
        metadata["tool"] = "Radia %s" % alignlib.get_radia_version()
        ## Make sure the BAM files do not contain spaces in the
        ## filenames.  Radia doesn't work well with spaces.
        #filenames = dna_bam_filenames + rna_bam_filenames
        #has_spaces = []
        #for filename in filenames:
        #    if filename.find(" ") >= 0:
        #        has_spaces.append(filename)
        #x = has_spaces
        #if len(x) > 5:
        #    x = x[:5] + ["..."]
        #x = ", ".join(x)
        #msg = "Radia breaks if there are spaces in filenames: %s" % x
        #assert not has_spaces, msg
        # sample -> bam filename
        dnasample2bamfile = mlib.root2filename(dna_bam_filenames)
        rnasample2bamfile = mlib.root2filename(rna_bam_filenames)
        # Make sure files exist for all the samples.  The DNA-Seq
        # should have both normal and cancer.  RNA is not needed for
        # normal sample.
        mlib.assert_normal_cancer_samples(nc_match, dnasample2bamfile)
        mlib.assert_normal_cancer_samples(
            nc_match, rnasample2bamfile, ignore_normal_sample=True)
        # Make sure Radia and snpEff are configured.
        radia_genome_assembly = mlib.get_user_option(
            user_options, "radia_genome_assembly", not_empty=True)
        assert radia_genome_assembly == "hg19", "Only hg19 handled."
        snp_eff_genome = mlib.get_user_option(
            user_options, "snp_eff_genome", not_empty=True)
        radia_path = mlib.get_config("radia_path", assert_exists=True)
        snp_eff_path = mlib.get_config("snp_eff_path", assert_exists=True)
        radia_files = get_radia_files(radia_path, radia_genome_assembly)
        # Make a list of the chromosomes to use.  Pick an arbitrarily
        # BAM file.  Look at only the chromosomes that are present in
        # all files.
        # NOTE: py2-only list concatenation of dict.values().
        all_bamfiles = dnasample2bamfile.values() + rnasample2bamfile.values()
        chroms = list_common_chromosomes(all_bamfiles)
        assert chroms, "No chromosomes found in all files."
        # Only use the chromosomes that can be filtered by Radia.
        chroms = filter_radia_chromosomes(chroms, radia_files)
        # Make output directories.
        radia_outpath = "radia1.tmp"
        filter_outpath = "radia2.tmp"
        merge_outpath = "radia3.tmp"
        if not os.path.exists(radia_outpath):
            os.mkdir(radia_outpath)
        if not os.path.exists(filter_outpath):
            os.mkdir(filter_outpath)
        if not os.path.exists(merge_outpath):
            os.mkdir(merge_outpath)
        # Steps:
        # 1.  Call variants (radia.py)
        #     -o <file.vcf>
        # 2.  Filter variants (filterRadia.py)
        #     <outpath>
        #     Creates a file: <filter_outpath>/<patient_id>_chr<chrom>.vcf
        # 3.  Merge (mergeChroms.py)
        #     Takes as input: <filter_outpath>
        #     Produces: <merge_outpath>/<patient_id>.vcf
        # list of (normal_sample, cancer_sample, chrom,
        #    normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile,
        #    radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile,
        #    final_vcf_outfile,
        #    radia_logfile, filter_logfile, merge_logfile)
        # Jobs are per (sample pair, chromosome); indices into the 13-tuple
        # are used below (e.g. x[7]=filter_vcf_outfile, x[9]=final_vcf_outfile,
        # x[10..12]=the three logfiles).
        opj = os.path.join
        jobs = []
        for i, (normal_sample, cancer_sample) in enumerate(nc_match):
            normal_bamfile = dnasample2bamfile[normal_sample]
            dna_tumor_bamfile = dnasample2bamfile[cancer_sample]
            rna_tumor_bamfile = rnasample2bamfile[cancer_sample]
            merge_vcf_outfile = opj(merge_outpath, "%s.vcf" % cancer_sample)
            merge_logfile = opj(merge_outpath, "%s.log" % cancer_sample)
            final_vcf_outfile = opj(out_path, "%s.vcf" % cancer_sample)
            for chrom in chroms:
                radia_vcf_outfile = opj(
                    radia_outpath, "%s_chr%s.vcf" % (cancer_sample, chrom))
                filter_vcf_outfile = opj(
                    filter_outpath, "%s_chr%s.vcf" % (cancer_sample, chrom))
                radia_logfile = opj(
                    radia_outpath, "%s_chr%s.log" % (cancer_sample, chrom))
                filter_logfile = opj(
                    filter_outpath, "%s_chr%s.log" % (cancer_sample, chrom))
                x = normal_sample, cancer_sample, chrom, \
                    normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                    radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                    final_vcf_outfile, \
                    radia_logfile, filter_logfile, merge_logfile
                jobs.append(x)
        # Since Radia doesn't work well if there are spaces in the
        # filenames, symlink these files here to guarantee that there
        # are no spaces.
        normal_path = "normal.bam"
        dna_path = "dna.bam"
        rna_path = "rna.bam"
        if not os.path.exists(normal_path):
            os.mkdir(normal_path)
        if not os.path.exists(dna_path):
            os.mkdir(dna_path)
        if not os.path.exists(rna_path):
            os.mkdir(rna_path)
        # Rewrite each job in place, swapping the raw BAM paths for the
        # space-free symlinked paths.
        for i, x in enumerate(jobs):
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            x1 = hash_and_symlink_bamfile(normal_bamfile, normal_path)
            x2 = hash_and_symlink_bamfile(dna_tumor_bamfile, dna_path)
            x3 = hash_and_symlink_bamfile(rna_tumor_bamfile, rna_path)
            clean_normal, clean_dna, clean_rna = x1, x2, x3
            x = normal_sample, cancer_sample, chrom, \
                clean_normal, clean_dna, clean_rna, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile
            jobs[i] = x
        # Generate the commands for doing variant calling.
        python = mlib.get_config("python", which_assert_file=True)
        # filterRadia.py calls the "blat" command, and there's no way
        # to set the path.  Make sure "blat" is executable.
        if not filelib.which("blat"):
            # Find "blat" in the configuration and add it to the path.
            x = mlib.get_config("blat", which_assert_file=True)
            path, x = os.path.split(x)
            if os.environ["PATH"]:
                path = "%s:%s" % (os.environ["PATH"], path)
            os.environ["PATH"] = path
            # Make sure it's findable now.
            filelib.which_assert("blat")
        # STEP 1.  Call variants with radia.py.
        # python radia.py test31 5 \
        # -n bam04/PIM001_G.bam \
        # -t bam04/196B-MG.bam \
        # -r bam34/196B-MG.bam \
        # -f genomes/Broad.hg19/Homo_sapiens_assembly19.fa \
        # -o test32.vcf
        # --dnaTumorMitochon MT \
        # --rnaTumorMitochon MT \
        sq = mlib.sq
        commands = []
        for x in jobs:
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            x = [
                sq(python),
                sq(radia_files.radia_py),
                cancer_sample,
                chrom,
                "-n", sq(normal_bamfile),
                "-t", sq(dna_tumor_bamfile),
                "-r", sq(rna_tumor_bamfile),
                "-f", sq(ref.fasta_file_full),
                "-o", radia_vcf_outfile,
                ]
            if "MT" in chroms:
                x += [
                    "--dnaNormalMitochon MT",
                    "--dnaTumorMitochon MT",
                    "--rnaTumorMitochon MT",
                    ]
            x = " ".join(x)
            # >& redirects both stdout and stderr (csh-style) to the log.
            x = "%s >& %s" % (x, radia_logfile)
            commands.append(x)
        assert len(commands) == len(jobs)
        # Only uses ~200 Mb of ram.
        if RUN_VARIANT_CALLING:
            parallel.pshell(commands, max_procs=num_cores)
        metadata["num_cores"] = num_cores
        metadata["commands"] = commands
        # Make sure log files are empty.
        # (Radia writes nothing to the log on success; x[10] is
        # radia_logfile.)
        logfiles = [x[10] for x in jobs]
        filelib.assert_exists_z_many(logfiles)
        # STEP 2.  Filter variants with filterRadia.py.
        commands = []
        for x in jobs:
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            x = [
                sq(python),
                sq(radia_files.filterRadia_py),
                cancer_sample,
                chrom,
                sq(radia_vcf_outfile),
                sq(filter_outpath),
                sq(radia_files.scripts_dir),
                "-b", sq(radia_files.blacklist_dir),
                "-d", sq(radia_files.snp_dir),
                "-r", sq(radia_files.retro_dir),
                "-p", sq(radia_files.pseudo_dir),
                "-c", sq(radia_files.cosmic_dir),
                "-t", sq(radia_files.target_dir),
                "-s", sq(snp_eff_path),
                "-e", snp_eff_genome,
                "--rnaGeneBlckFile", sq(radia_files.rnageneblck_file),
                "--rnaGeneFamilyBlckFile", sq(
                    radia_files.rnagenefamilyblck_file),
                ]
            x = " ".join(x)
            x = "%s >& %s" % (x, filter_logfile)
            commands.append(x)
        assert len(commands) == len(jobs)
        # Sometimes samtools crashes in the middle of a run.  Detect
        # this case, and re-run the analysis if needed.
        assert len(commands) == len(jobs)
        py_commands = []
        for x, cmd in zip(jobs, commands):
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            args = cmd, cancer_sample, chrom, filter_logfile
            # (function, args, keywds) triples for parallel.pyfun.
            x = _run_filterRadia_with_restart, args, {}
            py_commands.append(x)
        # Takes ~10 Gb each.
        # Cap the number of parallel filter jobs by available RAM.
        nc = mlib.calc_max_procs_from_ram(25, upper_max=num_cores)
        if FILTER_CALLS:
            parallel.pyfun(py_commands, num_procs=nc)
        metadata["commands"] += commands
        # Make sure log files are empty.
        # x[11] is filter_logfile.
        logfiles = [x[11] for x in jobs]
        filelib.assert_exists_z_many(logfiles)
        # Make sure filter_vcf_outfile exists.
        # x[7] is filter_vcf_outfile.
        outfiles = [x[7] for x in jobs]
        filelib.assert_exists_nz_many(outfiles)
        # STEP 3.  Merge the results.
        commands = []
        for x in jobs:
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            # python /usr/local/radia/scripts/mergeChroms.py 196B-MG \
            #   radia2.tmp/ radia3.tmp
            # The "/" after radia2.tmp is important.  If not given,
            # will generate some files with only newlines.
            fo = filter_outpath
            if not fo.endswith("/"):
                fo = "%s/" % fo
            x = [
                sq(python),
                sq(radia_files.mergeChroms_py),
                cancer_sample,
                fo,
                merge_outpath,
                ]
            x = " ".join(x)
            x = "%s >& %s" % (x, merge_logfile)
            commands.append(x)
        assert len(commands) == len(jobs)
        # Since the chromosomes were separated for the previous steps,
        # this will generate one merge for each chromosome.  This is
        # unnecessary, since we only need to merge once per sample.
        # Get rid of duplicates.
        commands = sorted({}.fromkeys(commands))
        if MERGE_CALLS:
            parallel.pshell(commands, max_procs=num_cores)
        metadata["commands"] += commands
        # Make sure log files are empty.
        # x[12] is merge_logfile; de-duplicated like the merge commands.
        logfiles = [x[12] for x in jobs]
        logfiles = sorted({}.fromkeys(logfiles))
        filelib.assert_exists_z_many(logfiles)
        # Fix the VCF files.
        commands = []
        for x in jobs:
            normal_sample, cancer_sample, chrom, \
                normal_bamfile, dna_tumor_bamfile, rna_tumor_bamfile, \
                radia_vcf_outfile, filter_vcf_outfile, merge_vcf_outfile, \
                final_vcf_outfile, \
                radia_logfile, filter_logfile, merge_logfile = x
            args = normal_sample, cancer_sample, \
                   merge_vcf_outfile, final_vcf_outfile
            x = alignlib.clean_radia_vcf, args, {}
            commands.append(x)
        if FIX_VCF_FILES:
            parallel.pyfun(commands, num_procs=num_cores)
        # Make sure output VCF files exist.
        # x[9] is final_vcf_outfile.
        x = [x[9] for x in jobs]
        filelib.assert_exists_nz_many(x)
        return metadata

    def name_outfile(self, antecedents, user_options):
        # Name of the output folder/file for this module.
        return "radia.vcf"
class RadiaFiles:
    """Record of the paths to Radia's scripts and reference data."""

    def __init__(
        self, radia_py, filterRadia_py, mergeChroms_py,
        scripts_dir,
        blacklist_dir, snp_dir, retro_dir, pseudo_dir, cosmic_dir, target_dir,
        rnageneblck_file, rnagenefamilyblck_file):
        # The three entry-point scripts.
        self.radia_py, self.filterRadia_py, self.mergeChroms_py = (
            radia_py, filterRadia_py, mergeChroms_py)
        self.scripts_dir = scripts_dir
        # Data directories consumed by filterRadia.py.
        (self.blacklist_dir, self.snp_dir, self.retro_dir, self.pseudo_dir,
         self.cosmic_dir, self.target_dir) = (
             blacklist_dir, snp_dir, retro_dir, pseudo_dir, cosmic_dir,
             target_dir)
        # RNA gene blacklist tables.
        self.rnageneblck_file = rnageneblck_file
        self.rnagenefamilyblck_file = rnagenefamilyblck_file
def get_radia_files(radia_path, assembly):
    """Locate Radia's scripts and data for *assembly*; assert they exist.

    Returns a RadiaFiles object.  Raises (via filelib asserts) if any
    required script, table, or directory is missing.
    """
    import os
    from genomicode import filelib

    scripts_dir = os.path.join(radia_path, "scripts")
    radia_py = os.path.join(scripts_dir, "radia.py")
    filterRadia_py = os.path.join(scripts_dir, "filterRadia.py")
    mergeChroms_py = os.path.join(scripts_dir, "mergeChroms.py")

    def data(relpath):
        # Resolve a data path relative to the Radia installation.
        return os.path.join(radia_path, relpath)

    # For hg19 only.
    blacklist_dir = data("data/%s/blacklists/1000Genomes/phase1" % assembly)
    snp_dir = data("data/%s/snp135" % assembly)
    retro_dir = data("data/%s/retroGenes" % assembly)
    pseudo_dir = data("data/%s/pseudoGenes" % assembly)
    cosmic_dir = data("data/%s/cosmic" % assembly)
    target_dir = data("data/%s/gaf/2_1" % assembly)
    rnageneblck_file = data("data/rnaGeneBlacklist.tab")
    rnagenefamilyblck_file = data("data/rnaGeneFamilyBlacklist.tab")

    # Fail fast if the installation is incomplete.
    filelib.assert_exists_nz_many([
        radia_py,
        filterRadia_py,
        mergeChroms_py,
        rnageneblck_file,
        rnagenefamilyblck_file,
        ])
    filelib.assert_exists_many([
        scripts_dir,
        blacklist_dir,
        snp_dir,
        retro_dir,
        pseudo_dir,
        cosmic_dir,
        target_dir,
        ])
    return RadiaFiles(
        radia_py, filterRadia_py, mergeChroms_py,
        scripts_dir,
        blacklist_dir, snp_dir, retro_dir, pseudo_dir, cosmic_dir, target_dir,
        rnageneblck_file, rnagenefamilyblck_file)
def list_common_chromosomes(bam_filenames):
    """Return the chromosomes present in every BAM file.

    Uses samtools idxstats; the "*" (unmapped) entry is dropped.  Returns
    a sorted list, or None if *bam_filenames* is empty.
    """
    from genomicode import alignlib
    common_chroms = None
    for bam_file in bam_filenames:
        stats = alignlib.call_samtools_idxstats(bam_file)
        # First column of each idxstats row is the chromosome name.
        chroms = sorted(row[0] for row in stats if row[0] != "*")
        if common_chroms is None:
            common_chroms = chroms
        # Intersect, preserving the order of the running common list.
        common_chroms = [c for c in common_chroms if c in chroms]
    return common_chroms
def filter_radia_chromosomes(chroms, radia_files):
    """Keep only the chromosomes that Radia has a blacklist file for.

    The blacklist directory contains files named like <blacklist_dir>/
    chr1.bed.gz; a chromosome is kept if its name (or "chr"-prefixed name)
    matches one of those file stems.  Order and duplicates in *chroms* are
    preserved.
    """
    import os

    # File stems: "chr1.bed.gz" -> "chr1".
    blacklisted = set()
    for filename in os.listdir(radia_files.blacklist_dir):
        blacklisted.add(filename.replace(".gz", "").replace(".bed", ""))

    kept = []
    for chrom in chroms:
        if chrom in blacklisted:
            kept.append(chrom)
        elif not chrom.startswith("chr") and "chr%s" % chrom in blacklisted:
            # Bare name like "1" matches a "chr1" blacklist file.
            kept.append(chrom)
    return kept
def hash_and_symlink_bamfile(bam_filename, out_path):
    """Symlink a BAM (and its .bai index) under *out_path* with a safe name.

    The basename is hashed to alphanumeric characters so the link contains
    no spaces.  Existing links are left untouched.  Returns the link path.
    """
    import os
    from genomicode import hashlib

    basename = os.path.split(bam_filename)[1]
    safe_name = hashlib.hash_alnum(basename)
    outfile = os.path.join(out_path, safe_name)
    if not os.path.exists(outfile):
        os.symlink(bam_filename, outfile)

    # Also symlink the index, if one exists next to the BAM.
    index_filename = "%s.bai" % bam_filename
    index_outfile = "%s.bai" % outfile
    if os.path.exists(index_filename) and not os.path.exists(index_outfile):
        os.symlink(index_filename, index_outfile)
    return outfile
def _run_filterRadia_with_restart(cmd, cancer_sample, chrom, logfile):
    # Run one filterRadia.py shell command, retrying (up to 4 attempts)
    # when the log shows the known transient samtools crash.
    # Sometimes samtools crashes in the middle of a run.  Detect this
    # case, and re-run the analysis if needed.
    # NOTE: Python 2 only (py2 raise syntax at the bottom).
    from genomicode import parallel
    from genomicode import filelib
    num_tries = 0
    while num_tries <= 3:
        num_tries += 1
        # ignore_nonzero_exit: failure is diagnosed from the logfile below.
        parallel.sshell(cmd, ignore_nonzero_exit=True)
        filelib.assert_exists(logfile)
        log = open(logfile).read()
        # Empty logfile means cmd completed successfully.
        if not log.strip():
            break
        # Look for evidence that samtools died.  If this occurs, try again.
        # 06/29/2016 09:57:16 AM ERROR The return code of '1' from the
        #   following filter command indicates an error.
        # 06/29/2016 09:57:16 AM ERROR Error from /usr/bin/python
        #   /usr/local/radia/scripts/createBlatFile.pyc 196C-lung2
        #   radia2.tmp/196C-lung2_dnaFiltered_chr1.vcf
        #   radia2.tmp/196C-lung2_mpileup_rna_origin_chr1.vcf
        #   -o radia2.tmp/196C-lung2_blatInput_chr1.fa
        #   --allVCFCalls --blatRnaNormalReads --blatRnaTumorReads:
        #   <Traceback>
        #   [...]
        #     samtoolsCall.kill()
        #   [...]
        #   OSError: [Errno 3] No such process
        if log.find("samtoolsCall.kill") >= 0 \
           and log.find("No such process") >= 0:
            continue
        # Otherwise, the process failed for some other reason.  Raise
        # an exception.
        raise AssertionError, "Problem filtering: %s %s\n%s" % (
            cancer_sample, chrom, log)
| |
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# how does the raise helper look like?
# Feature-detect the interpreter's raise syntax: the py2 comma form is a
# SyntaxError on Python 3 (-> single-argument raise), while on Python 2 the
# exec runs and raises TypeError (-> two-argument raise).  The chosen
# statement is later compiled into fake traceback frames (see fake_exc_info).
try:
    exec "raise TypeError, 'foo'"
except SyntaxError:
    raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
    """Proxies a traceback frame."""

    def __init__(self, tb):
        # The real traceback object being wrapped.
        self.tb = tb

    def _set_tb_next(self, next):
        # If the ctypes-based patcher is available (module global
        # tb_set_next), also rewire the *real* traceback's tb_next so
        # re-raised exceptions show the faked chain.
        if tb_set_next is not None:
            tb_set_next(self.tb, next and next.tb or None)
        self._tb_next = next

    def _get_tb_next(self):
        return self._tb_next

    tb_next = property(_get_tb_next, _set_tb_next)
    # Remove the helper functions so they don't leak as class attributes.
    del _get_tb_next, _set_tb_next

    @property
    def is_jinja_frame(self):
        # Template-generated modules inject __jinja_template__ into their
        # globals (see fake_exc_info), which marks the frame as Jinja's.
        return '__jinja_template__' in self.tb.tb_frame.f_globals

    def __getattr__(self, name):
        # Everything else behaves like the underlying traceback object.
        return getattr(self.tb, name)
class ProcessedTraceback(object):
    """Holds a Jinja preprocessed traceback for printing or reraising."""

    def __init__(self, exc_type, exc_value, frames):
        assert frames, 'no frames for this traceback?'
        self.exc_type = exc_type
        self.exc_value = exc_value
        # List of TracebackFrameProxy objects, outermost first.
        self.frames = frames

    def chain_frames(self):
        """Chains the frames.  Requires ctypes or the debugsupport extension."""
        prev_tb = None
        for tb in self.frames:
            if prev_tb is not None:
                prev_tb.tb_next = tb
            prev_tb = tb
        # Terminate the chain at the last frame.
        prev_tb.tb_next = None

    def render_as_text(self, limit=None):
        """Return a string with the traceback."""
        lines = traceback.format_exception(self.exc_type, self.exc_value,
                                           self.frames[0], limit=limit)
        return ''.join(lines).rstrip()

    def render_as_html(self, full=False):
        """Return a unicode string with the traceback as rendered HTML."""
        from jinja2.debugrenderer import render_traceback
        # The plain-text version is embedded as an HTML comment for
        # copy/paste debugging.
        return u'%s\n\n<!--\n%s\n-->' % (
            render_traceback(self, full=full),
            self.render_as_text().decode('utf-8', 'replace')
        )

    @property
    def is_template_syntax_error(self):
        """`True` if this is a template syntax error."""
        return isinstance(self.exc_value, TemplateSyntaxError)

    @property
    def exc_info(self):
        """Exception info tuple with a proxy around the frame objects."""
        return self.exc_type, self.exc_value, self.frames[0]

    @property
    def standard_exc_info(self):
        """Standard python exc_info for re-raising"""
        # Unwraps the proxy: .tb is the real traceback object.
        return self.exc_type, self.exc_value, self.frames[0].tb
def make_traceback(exc_info, source_hint=None):
    """Create a processed traceback object from a raw ``exc_info`` tuple."""
    exc_value = exc_info[1]
    is_syntax_error = isinstance(exc_value, TemplateSyntaxError)
    if is_syntax_error:
        # Syntax errors get a rebuilt exc_info pointing at the template
        # source, so no internal frame needs to be skipped.
        exc_info = translate_syntax_error(exc_value, source_hint)
    # Otherwise skip one internal frame (the render call itself).
    return translate_exception(exc_info, 0 if is_syntax_error else 1)
def translate_syntax_error(error, source=None):
    """Rewrite a syntax error so standard traceback machinery renders it."""
    # Attach the template source and mark the error as already translated.
    error.translated = True
    error.source = source
    filename = '<unknown>' if error.filename is None else error.filename
    # No real traceback exists for a syntax error; fabricate one at the
    # offending template line.
    return fake_exc_info((error.__class__, error, None), filename,
                         error.lineno)
def translate_exception(exc_info, initial_skip=0):
    """If passed an exc_info it will automatically rewrite the exceptions
    all the way down to the correct line numbers and frames.

    NOTE: Python 2 only (xrange, three-argument raise).
    """
    tb = exc_info[2]
    frames = []
    # skip some internal frames if wanted
    for x in xrange(initial_skip):
        if tb is not None:
            tb = tb.tb_next
    # NOTE(review): initial_tb is assigned but never read below.
    initial_tb = tb
    while tb is not None:
        # skip frames decorated with @internalcode.  These are internal
        # calls we can't avoid and that are useless in template debugging
        # output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue
        # save a reference to the next frame if we override the current
        # one with a faked one.
        # (shadows the builtin `next`; intentional in this codebase)
        next = tb.tb_next
        # fake template exceptions
        template = tb.tb_frame.f_globals.get('__jinja_template__')
        if template is not None:
            # Map the compiled-code line number back to the template line
            # and substitute a fabricated frame at that location.
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
                               lineno)[2]
        frames.append(TracebackFrameProxy(tb))
        tb = next
    # if we don't have any exceptions in the frames left, we have to
    # reraise it unchanged.
    # XXX: can we backup here?  when could this happen?
    if not frames:
        raise exc_info[0], exc_info[1], exc_info[2]
    traceback = ProcessedTraceback(exc_info[0], exc_info[1], frames)
    # Only chain the real traceback objects when the ctypes patcher (or the
    # C debugsupport extension) is available.
    if tb_set_next is not None:
        traceback.chain_frames()
    return traceback
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`.

    Re-raises the exception inside a freshly compiled fake module so the
    resulting traceback frame reports *filename*:*lineno* and carries the
    template's locals.  NOTE: Python 2 only (iteritems, exec statement).
    """
    exc_type, exc_value, tb = exc_info

    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            # Prefer the template context's variables as the frame locals.
            locals = ctx.get_all()
        else:
            locals = {}
        # Compiled templates store template variables as l_<name>; expose
        # them under their template names.
        for name, value in real_locals.iteritems():
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value

        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}

    # assemble fake globals we need
    globals = {
        '__name__':             filename,
        '__file__':             filename,
        '__jinja_exception__':  exc_info[:2],

        # we don't want to keep the reference to the template around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__':   None
    }

    # and fake the exception
    # Pad with newlines so the raise statement lands on *lineno*.
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')

    # if it's possible, change the name of the code.  This won't work
    # on some python environments such as google appengine
    try:
        if tb is None:
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level template code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'template'
        # Rebuild the code object with a descriptive co_name.
        code = CodeType(0, code.co_nlocals, code.co_stacksize,
                        code.co_flags, code.co_code, code.co_consts,
                        code.co_names, code.co_varnames, filename,
                        location, code.co_firstlineno,
                        code.co_lnotab, (), ())
    except:
        pass

    # execute the code and catch the new traceback
    try:
        exec code in globals, locals
    except:
        exc_info = sys.exc_info()
        # Drop this helper's own frame from the chain.
        new_tb = exc_info[2].tb_next

    # return without this frame
    return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
    """This function implements a few ugly things so that we can patch the
    traceback objects.  The function returned allows resetting `tb_next` on
    any python traceback object.

    CPython-specific: it mirrors the interpreter's object and traceback
    struct layouts with ctypes and rewrites the ``tb_next`` pointer in
    place, doing the reference-count bookkeeping by hand.
    """
    import ctypes
    from types import TracebackType
    # figure out the size of _Py_ssize_t: on 64-bit builds the
    # Py_InitModule4 symbol is exported with a _64 suffix.
    if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
        _Py_ssize_t = ctypes.c_int64
    else:
        _Py_ssize_t = ctypes.c_int
    # regular python
    class _PyObject(ctypes.Structure):
        pass
    _PyObject._fields_ = [
        ('ob_refcnt', _Py_ssize_t),
        ('ob_type', ctypes.POINTER(_PyObject))
    ]
    # python with trace: Py_TRACE_REFS builds prepend two link pointers to
    # every object header, detected by comparing the basic object size.
    if object.__basicsize__ != ctypes.sizeof(_PyObject):
        class _PyObject(ctypes.Structure):
            pass
        _PyObject._fields_ = [
            ('_ob_next', ctypes.POINTER(_PyObject)),
            ('_ob_prev', ctypes.POINTER(_PyObject)),
            ('ob_refcnt', _Py_ssize_t),
            ('ob_type', ctypes.POINTER(_PyObject))
        ]
    # ctypes mirror of the CPython traceback struct so its fields can be
    # rewritten in place.
    class _Traceback(_PyObject):
        pass
    _Traceback._fields_ = [
        ('tb_next', ctypes.POINTER(_Traceback)),
        ('tb_frame', ctypes.POINTER(_PyObject)),
        ('tb_lasti', ctypes.c_int),
        ('tb_lineno', ctypes.c_int)
    ]
    def tb_set_next(tb, next):
        """Set the tb_next attribute of a traceback object."""
        if not (isinstance(tb, TracebackType) and
                (next is None or isinstance(next, TracebackType))):
            raise TypeError('tb_set_next arguments must be traceback objects')
        obj = _Traceback.from_address(id(tb))
        if tb.tb_next is not None:
            # drop the reference to the traceback being unlinked
            old = _Traceback.from_address(id(tb.tb_next))
            old.ob_refcnt -= 1
        if next is None:
            obj.tb_next = ctypes.POINTER(_Traceback)()
        else:
            # manual incref: we bypass the interpreter's assignment path
            next = _Traceback.from_address(id(next))
            next.ob_refcnt += 1
            obj.tb_next = ctypes.pointer(next)
    return tb_set_next
# try to get a tb_set_next implementation: prefer the compiled C speedup
# module when it is available, otherwise fall back to the ctypes hack.
try:
    from jinja2._debugsupport import tb_set_next
except ImportError:
    try:
        tb_set_next = _init_ugly_crap()
    except:
        # the ctypes hackery failed (e.g. non-CPython interpreter);
        # traceback chaining is simply disabled in that case.
        tb_set_next = None
del _init_ugly_crap
| |
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""circuit training agent definition and utility functions."""
from typing import Optional, Text, Tuple
from absl import logging
from circuit_training.model import model
import tensorflow as tf
from tf_agents.agents.ppo import ppo_agent
from tf_agents.agents.ppo import ppo_utils
from tf_agents.networks import network
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
from tf_agents.utils import object_identity
from tf_agents.utils import value_ops
def _normalize_advantages(advantages, axes=(0,), variance_epsilon=1e-8):
  """Standardizes advantages to zero mean and unit variance.

  Normalization is done manually (rather than with
  `tf.nn.batch_normalization`) because the latter leads to different
  results in TPU setups (see the `CircuitPPOAgent` class docstring).

  Args:
    advantages: Tensor of advantage values.
    axes: Axes over which to compute the moments. Note: the previous default
      `(0)` was the plain int `0` (a parenthesized expression, not a tuple);
      `(0,)` is the intended one-element tuple and is treated identically by
      `tf.nn.moments`.
    variance_epsilon: Small float added to the standard deviation to avoid
      division by zero.

  Returns:
    A tensor of the same shape as `advantages`, normalized over `axes`.
  """
  adv_mean, adv_var = tf.nn.moments(x=advantages, axes=axes, keepdims=True)
  normalized_advantages = ((advantages - adv_mean) /
                           (tf.sqrt(adv_var) + variance_epsilon))
  return normalized_advantages
class CircuitPPOAgent(ppo_agent.PPOAgent):
  """A PPO Agent for circuit training aligned with Menger.

  Major differences between this and ppo_agent.PPOAgent:
  - Loss aggregation uses reduce_mean instead of common.aggregate_losses which
    handles aggregation across multiple accelerator cores.
  - Value bootstrapping uses the second to last observation, instead of the
    last one. This is likely temporary, for aligning with Menger.
  - The additional time dimension ([B, 1, ...]) is squeezed at the beginning,
    which eventually leads to different behavior when generating the action
    distribution. b/202055908 tracks the work on fully understanding and
    documenting this.
  - Normalization is done manually as opposed to `tf.nn.batch_normalization`
    which leads to different results in TPU setups.
  """

  def __init__(self,
               time_step_spec: ts.TimeStep,
               action_spec: types.NestedTensorSpec,
               optimizer: Optional[types.Optimizer] = None,
               actor_net: Optional[network.Network] = None,
               value_net: Optional[network.Network] = None,
               importance_ratio_clipping: types.Float = 0.2,
               discount_factor: types.Float = 1.0,
               entropy_regularization: types.Float = 0.01,
               value_pred_loss_coef: types.Float = 0.5,
               gradient_clipping: Optional[types.Float] = 1.0,
               value_clipping: Optional[types.Float] = None,
               check_numerics: bool = False,
               debug_summaries: bool = False,
               summarize_grads_and_vars: bool = False,
               train_step_counter: Optional[tf.Variable] = None,
               aggregate_losses_across_replicas=False,
               loss_scaling_factor=1.,
               name: Optional[Text] = 'PPOClipAgent'):
    """Creates a PPO Agent implementing the clipped probability ratios.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      optimizer: Optimizer to use for the agent.
      actor_net: A function actor_net(observations, action_spec) that returns
        tensor of action distribution params for each observation. Takes nested
        observation and returns nested action.
      value_net: A function value_net(time_steps) that returns value tensor from
        neural net predictions for each observation. Takes nested observation
        and returns batch of value_preds.
      importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective.
        For more detail, see explanation at the top of the doc.
      discount_factor: Discount factor for return computation.
      entropy_regularization: Coefficient for entropy regularization loss term.
      value_pred_loss_coef: Multiplier for value prediction loss to balance with
        policy gradient loss.
      gradient_clipping: Norm length to clip gradients. Default: no clipping.
      value_clipping: Difference between new and old value predictions are
        clipped to this threshold. Value clipping could be helpful when training
        very deep networks. Default: no clipping.
      check_numerics: If true, adds tf.debugging.check_numerics to help find NaN
        / Inf values. For debugging only.
      debug_summaries: A bool to gather debug summaries.
      summarize_grads_and_vars: If true, gradient summaries will be written.
      train_step_counter: An optional counter to increment every time the train
        op is run. Defaults to the global_step.
      aggregate_losses_across_replicas: only applicable to setups using multiple
        replicas. Default to aggregating across multiple cores using
        common.aggregate_losses. If set to `False`, use `reduce_mean` directly,
        which is faster but may impact learning results.
      loss_scaling_factor: the multiplier for scaling the loss, oftentimes
        1/num_replicas_in_sync.
      name: The name of this agent. All variables in this module will fall under
        that name. Defaults to the class name.

    Raises:
      ValueError: If the actor_net is not a DistributionNetwork.
    """
    self._loss_scaling_factor = loss_scaling_factor
    # Cached once: used to skip histogram summaries, which TPUs don't support.
    self._use_tpu = bool(tf.config.list_logical_devices('TPU'))
    super(CircuitPPOAgent, self).__init__(
        time_step_spec,
        action_spec,
        optimizer,
        actor_net,
        value_net,
        importance_ratio_clipping=importance_ratio_clipping,
        discount_factor=discount_factor,
        entropy_regularization=entropy_regularization,
        value_pred_loss_coef=value_pred_loss_coef,
        gradient_clipping=gradient_clipping,
        value_clipping=value_clipping,
        check_numerics=check_numerics,
        debug_summaries=debug_summaries,
        summarize_grads_and_vars=summarize_grads_and_vars,
        train_step_counter=train_step_counter,
        name=name,
        aggregate_losses_across_replicas=aggregate_losses_across_replicas,
        # Epochs are set through the tf.Data pipeline outside of the agent.
        num_epochs=1,
        # Value and advantages are computed as part of the data pipeline, this
        # is set to False for all setups using minibatching and PPOLearner.
        compute_value_and_advantage_in_train=False,
        # Skips GAE, TD lambda returns, rewards and observations normalization.
        use_gae=False,
        use_td_lambda_return=False,
        normalize_rewards=False,
        normalize_observations=False,
        update_normalizers_in_train=False,
        # Skips log probability clipping and L2 losses.
        log_prob_clipping=0.0,
        policy_l2_reg=0.,
        value_function_l2_reg=0.,
        shared_vars_l2_reg=0.,
        # Skips parameters used for the adaptive KL loss penalty version of PPO.
        kl_cutoff_factor=0.0,
        kl_cutoff_coef=0.0,
        initial_adaptive_kl_beta=0.0,
        adaptive_kl_target=0.0,
        adaptive_kl_tolerance=0.0)

  def compute_return_and_advantage(
      self, next_time_steps: ts.TimeStep,
      value_preds: types.Tensor) -> Tuple[types.Tensor, types.Tensor]:
    """Compute the Monte Carlo return and advantage.

    Args:
      next_time_steps: batched tensor of TimeStep tuples after action is taken.
      value_preds: Batched value prediction tensor. Should have one more entry
        in time index than time_steps, with the final value corresponding to the
        value prediction of the final state.

    Returns:
      tuple of (return, advantage), both are batched tensors.
    """
    discounts = next_time_steps.discount * tf.constant(
        self._discount_factor, dtype=tf.float32)
    rewards = next_time_steps.reward
    # TODO(b/202226773): Move debugging to helper function for clarity.
    if self._debug_summaries:
      # Summarize rewards before they get normalized below.
      # TODO(b/171573175): remove the condition once histograms are
      # supported on TPUs.
      if not self._use_tpu:
        tf.compat.v2.summary.histogram(
            name='rewards', data=rewards, step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name='rewards_mean',
          data=tf.reduce_mean(rewards),
          step=self.train_step_counter)
    # Normalize rewards if self._reward_normalizer is defined.
    if self._reward_normalizer:
      rewards = self._reward_normalizer.normalize(
          rewards, center_mean=False, clip_value=self._reward_norm_clipping)
      if self._debug_summaries:
        # TODO(b/171573175): remove the condition once histograms are
        # supported on TPUs.
        if not self._use_tpu:
          tf.compat.v2.summary.histogram(
              name='rewards_normalized',
              data=rewards,
              step=self.train_step_counter)
        tf.compat.v2.summary.scalar(
            name='rewards_normalized_mean',
            data=tf.reduce_mean(rewards),
            step=self.train_step_counter)
    # Make discount 0.0 at end of each episode to restart the cumulative sum
    # at the end of each episode.
    episode_mask = common.get_episode_mask(next_time_steps)
    discounts *= episode_mask
    # Compute Monte Carlo returns. Data from incomplete trajectories, not
    # containing the end of an episode will also be used, with a bootstrapped
    # estimation from the last value.
    # Note that when a trajectory driver is used, then the final step is
    # terminal, the bootstrapped estimation will not be used, as it will be
    # multiplied by zero (the discount on the last step).
    # TODO(b/202055908): Use -1 instead to bootstrap from the last step, once
    # we verify that it has no negative impact on learning.
    final_value_bootstrapped = value_preds[:, -2]
    returns = value_ops.discounted_return(
        rewards,
        discounts,
        time_major=False,
        final_value=final_value_bootstrapped)
    # TODO(b/171573175): remove the condition once histograms are
    # supported on TPUs.
    if self._debug_summaries and not self._use_tpu:
      tf.compat.v2.summary.histogram(
          name='returns', data=returns, step=self.train_step_counter)
    # Compute advantages.
    advantages = self.compute_advantages(rewards, returns, discounts,
                                         value_preds)
    # TODO(b/171573175): remove the condition once histograms are
    # supported on TPUs.
    if self._debug_summaries and not self._use_tpu:
      tf.compat.v2.summary.histogram(
          name='advantages', data=advantages, step=self.train_step_counter)
    # Return TD-Lambda returns if both use_td_lambda_return and use_gae.
    if self._use_td_lambda_return:
      if not self._use_gae:
        logging.warning('use_td_lambda_return was True, but use_gae was '
                        'False. Using Monte Carlo return.')
      else:
        returns = tf.add(
            advantages, value_preds[:, :-1], name='td_lambda_returns')
    return returns, advantages

  def _train(self, experience, weights):
    """Runs a single gradient update over a batch of experience.

    Args:
      experience: A batched `Trajectory` with an extra time dimension of 1
        (squeezed below); returns, advantages and old value predictions are
        read from `policy_info`.
      weights: Optional per-sample weight tensor, combined with the
        trajectory validity mask.

    Returns:
      A `LossInfo` from the PPO loss computation.
    """
    experience = self._as_trajectory(experience)
    if self._compute_value_and_advantage_in_train:
      processed_experience = self._preprocess(experience)
    else:
      processed_experience = experience

    # Drop the singleton time dimension ([B, 1, ...] -> [B, ...]).
    def squeeze_time_dim(t):
      return tf.squeeze(t, axis=[1])

    processed_experience = tf.nest.map_structure(squeeze_time_dim,
                                                 processed_experience)
    valid_mask = ppo_utils.make_trajectory_mask(processed_experience)
    masked_weights = valid_mask
    if weights is not None:
      masked_weights *= weights
    # Reconstruct per-timestep policy distribution from stored distribution
    # parameters.
    old_action_distribution_parameters = (
        processed_experience.policy_info['dist_params'])
    old_actions_distribution = (
        ppo_utils.distribution_from_spec(
            self._action_distribution_spec,
            old_action_distribution_parameters,
            legacy_distribution_network=isinstance(
                self._actor_net, network.DistributionNetwork)))
    # Compute log probability of actions taken during data collection, using the
    # collect policy distribution.
    old_act_log_probs = common.log_probability(
        old_actions_distribution, processed_experience.action,
        self._action_spec)
    # TODO(b/171573175): remove the condition once histograms are
    # supported on TPUs.
    if self._debug_summaries and not self._use_tpu:
      actions_list = tf.nest.flatten(processed_experience.action)
      show_action_index = len(actions_list) != 1
      for i, single_action in enumerate(actions_list):
        action_name = ('actions_{}'.format(i)
                       if show_action_index else 'actions')
        tf.compat.v2.summary.histogram(
            name=action_name, data=single_action, step=self.train_step_counter)
    time_steps = ts.TimeStep(
        step_type=processed_experience.step_type,
        reward=processed_experience.reward,
        discount=processed_experience.discount,
        observation=processed_experience.observation)
    actions = processed_experience.action
    # Returns/advantages were precomputed by the data pipeline (see
    # compute_value_and_advantage_in_train=False in __init__).
    returns = processed_experience.policy_info['return']
    advantages = processed_experience.policy_info['advantage']
    normalized_advantages = _normalize_advantages(
        advantages, variance_epsilon=1e-8)
    # TODO(b/171573175): remove the condition once histograms are
    # supported on TPUs.
    if self._debug_summaries and not self._use_tpu:
      tf.compat.v2.summary.histogram(
          name='advantages_normalized',
          data=normalized_advantages,
          step=self.train_step_counter)
    old_value_predictions = processed_experience.policy_info[
        'value_prediction']
    batch_size = nest_utils.get_outer_shape(time_steps, self._time_step_spec)[0]
    loss_info = None  # TODO(b/123627451): Remove.
    variables_to_train = list(
        object_identity.ObjectIdentitySet(self._actor_net.trainable_weights +
                                          self._value_net.trainable_weights))
    # Sort to ensure tensors on different processes end up in same order.
    variables_to_train = sorted(variables_to_train, key=lambda x: x.name)
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(variables_to_train)
      loss_info = self.get_loss(
          time_steps,
          actions,
          old_act_log_probs,
          returns,
          normalized_advantages,
          old_action_distribution_parameters,
          masked_weights,
          self.train_step_counter,
          self._debug_summaries,
          old_value_predictions=old_value_predictions,
          training=True)
    # Scales the loss, often set to 1/num_replicas, which results in using
    # the average loss across all of the replicas for backprop.
    scaled_loss = loss_info.loss * self._loss_scaling_factor
    grads = tape.gradient(scaled_loss, variables_to_train)
    if self._gradient_clipping > 0:
      grads, _ = tf.clip_by_global_norm(grads, self._gradient_clipping)
    # Tuple is used for py3, where zip is a generator producing values once.
    grads_and_vars = tuple(zip(grads, variables_to_train))
    # If summarize_gradients, create functions for summarizing both
    # gradients and variables.
    if self._summarize_grads_and_vars and self._debug_summaries:
      eager_utils.add_gradients_summaries(grads_and_vars,
                                          self.train_step_counter)
      eager_utils.add_variables_summaries(grads_and_vars,
                                          self.train_step_counter)
    self._optimizer.apply_gradients(grads_and_vars)
    self.train_step_counter.assign_add(1)
    # TODO(b/161365079): Move this logic to PPOKLPenaltyAgent.
    if self._initial_adaptive_kl_beta > 0:
      # After update epochs, update adaptive kl beta, then update observation
      # normalizer and reward normalizer.
      policy_state = self._collect_policy.get_initial_state(batch_size)
      # Compute the mean kl from previous action distribution.
      kl_divergence = self._kl_divergence(
          time_steps, old_action_distribution_parameters,
          self._collect_policy.distribution(time_steps, policy_state).action)
      self.update_adaptive_kl_beta(kl_divergence)
    if self.update_normalizers_in_train:
      self.update_observation_normalizer(time_steps.observation)
      self.update_reward_normalizer(processed_experience.reward)
    loss_info = tf.nest.map_structure(tf.identity, loss_info)
    with tf.name_scope('Losses/'):
      tf.compat.v2.summary.scalar(
          name='policy_gradient_loss',
          data=loss_info.extra.policy_gradient_loss,
          step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name='value_estimation_loss',
          data=loss_info.extra.value_estimation_loss,
          step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name='l2_regularization_loss',
          data=loss_info.extra.l2_regularization_loss,
          step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name='entropy_regularization_loss',
          data=loss_info.extra.entropy_regularization_loss,
          step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name='kl_penalty_loss',
          data=loss_info.extra.kl_penalty_loss,
          step=self.train_step_counter)
      total_abs_loss = (
          tf.abs(loss_info.extra.policy_gradient_loss) +
          tf.abs(loss_info.extra.value_estimation_loss) +
          tf.abs(loss_info.extra.entropy_regularization_loss) +
          tf.abs(loss_info.extra.l2_regularization_loss) +
          tf.abs(loss_info.extra.kl_penalty_loss))
      tf.compat.v2.summary.scalar(
          name='total_abs_loss',
          data=total_abs_loss,
          step=self.train_step_counter)
    with tf.name_scope('LearningRate/'):
      learning_rate = ppo_utils.get_learning_rate(self._optimizer)
      tf.compat.v2.summary.scalar(
          name='learning_rate',
          data=learning_rate,
          step=self.train_step_counter)
    # TODO(b/171573175): remove the condition once histograms are
    # supported on TPUs.
    if self._summarize_grads_and_vars and not self._use_tpu:
      with tf.name_scope('Variables/'):
        all_vars = (
            self._actor_net.trainable_weights +
            self._value_net.trainable_weights)
        for var in all_vars:
          tf.compat.v2.summary.histogram(
              name=var.name.replace(':', '_'),
              data=var,
              step=self.train_step_counter)
    return loss_info
def create_circuit_ppo_grl_agent(
    train_step: tf.Variable, observation_tensor_spec: types.NestedTensorSpec,
    action_tensor_spec: types.NestedTensorSpec,
    time_step_tensor_spec: types.TimeStep, strategy: tf.distribute.Strategy,
    static_features=None,
    use_model_tpu=False,
    **kwargs) -> CircuitPPOAgent:
  """Builds a `CircuitPPOAgent` wired up with the GRL networks.

  Args:
    train_step: Variable used as the agent's train step counter.
    observation_tensor_spec: Spec of the environment observations.
    action_tensor_spec: Spec of the environment actions.
    time_step_tensor_spec: Spec of the environment time steps.
    strategy: Distribution strategy; its replica count scales the loss.
    static_features: Optional static features forwarded to the GRL model.
    use_model_tpu: Whether to build the TPU variant of the GRL model.
    **kwargs: Additional keyword arguments forwarded to `CircuitPPOAgent`.

  Returns:
    A configured `CircuitPPOAgent`.
  """
  # The policy and value heads share a single GRL trunk.
  shared_model = model.GrlModel(
      observation_tensor_spec,
      action_tensor_spec,
      static_features=static_features,
      use_model_tpu=use_model_tpu,
  )
  policy_network = model.GrlPolicyModel(shared_model, observation_tensor_spec,
                                        action_tensor_spec)
  value_network = model.GrlValueModel(observation_tensor_spec, shared_model)
  # Scale the loss by 1/num_replicas so backprop uses the replica average.
  replica_loss_scale = 1. / float(strategy.num_replicas_in_sync)
  return CircuitPPOAgent(
      time_step_tensor_spec,
      action_tensor_spec,
      optimizer=tf.keras.optimizers.Adam(learning_rate=4e-4, epsilon=1e-5),
      actor_net=policy_network,
      value_net=value_network,
      value_pred_loss_coef=0.5,
      entropy_regularization=0.01,
      importance_ratio_clipping=0.2,
      discount_factor=1.0,
      gradient_clipping=1.0,
      debug_summaries=False,
      train_step_counter=train_step,
      value_clipping=None,
      aggregate_losses_across_replicas=False,
      loss_scaling_factor=replica_loss_scale,
      **kwargs)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An Optional type for representing potentially missing values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
class Optional(object):
  """Wraps a nested structure of tensors that may/may not be present at runtime.

  An `Optional` can represent the result of an operation that may fail as a
  value, rather than raising an exception and halting execution. For example,
  `tf.contrib.data.get_next_as_optional` returns an `Optional` that either
  contains the next value from a `tf.data.Iterator` if one exists, or a "none"
  value that indicates the end of the sequence has been reached.
  """

  @abc.abstractmethod
  def has_value(self, name=None):
    """Returns a tensor that evaluates to `True` if this optional has a value.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A scalar `tf.Tensor` of type `tf.bool`.
    """
    raise NotImplementedError("Optional.has_value()")

  @abc.abstractmethod
  def get_value(self, name=None):
    """Returns a nested structure of values wrapped by this optional.

    If this optional does not have a value (i.e. `self.has_value()` evaluates
    to `False`), this operation will raise `tf.errors.InvalidArgumentError`
    at runtime.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.
    """
    raise NotImplementedError("Optional.get_value()")

  # NOTE(review): `abc.abstractproperty` is deprecated in Python 3 in favor
  # of stacking `@property` with `@abc.abstractmethod`; kept as-is here.
  @abc.abstractproperty
  def output_classes(self):
    """Returns the class of each component of this optional.

    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of this optional.
    """
    raise NotImplementedError("Optional.output_classes")

  @abc.abstractproperty
  def output_shapes(self):
    """Returns the shape of each component of this optional.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of this optional.
    """
    raise NotImplementedError("Optional.output_shapes")

  @abc.abstractproperty
  def output_types(self):
    """Returns the type of each component of this optional.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of this optional.
    """
    raise NotImplementedError("Optional.output_types")

  @staticmethod
  def from_value(value):
    """Returns an `Optional` that wraps the given value.

    Args:
      value: A nested structure of `tf.Tensor` and/or `tf.SparseTensor` objects.

    Returns:
      An `Optional` that wraps `value`.
    """
    # TODO(b/110122868): Consolidate this destructuring logic with the
    # similar code in `Dataset.from_tensors()`.
    with ops.name_scope("optional") as scope:
      with ops.name_scope("value"):
        # Convert each leaf to a Tensor (or SparseTensor) so the structure
        # can be serialized into a single variant-typed tensor below.
        value = nest.pack_sequence_as(value, [
            sparse_tensor_lib.SparseTensor.from_value(t)
            if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(
                t, name="component_%d" % i)
            for i, t in enumerate(nest.flatten(value))
        ])
        encoded_value = nest.flatten(sparse.serialize_sparse_tensors(value))
        output_classes = sparse.get_classes(value)
        output_shapes = nest.pack_sequence_as(
            value, [t.get_shape() for t in nest.flatten(value)])
        output_types = nest.pack_sequence_as(
            value, [t.dtype for t in nest.flatten(value)])
    return _OptionalImpl(
        gen_dataset_ops.optional_from_value(encoded_value, name=scope),
        output_shapes, output_types, output_classes)

  @staticmethod
  def none_from_structure(output_shapes, output_types, output_classes):
    """Returns an `Optional` that has no value.

    NOTE: This method takes arguments that define the structure of the value
    that would be contained in the returned `Optional` if it had a value.

    Args:
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of this optional.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of this optional.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of this optional.

    Returns:
      An `Optional` that has no value.
    """
    return _OptionalImpl(gen_dataset_ops.optional_none(), output_shapes,
                         output_types, output_classes)
class _OptionalImpl(Optional):
  """Concrete implementation of `tf.contrib.data.Optional`.

  Wraps a variant-typed tensor produced by the `optional_*` dataset ops,
  together with the structure metadata needed to decode it.

  NOTE(mrry): This implementation is kept private, to avoid defining
  `Optional.__init__()` in the public API.
  """

  def __init__(self, variant_tensor, output_shapes, output_types,
               output_classes):
    # TODO(b/110122868): Consolidate the structure validation logic with the
    # similar logic in `Iterator.from_structure()` and
    # `Dataset.from_generator()`.
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    output_shapes = nest.map_structure_up_to(
        output_types, tensor_shape.as_shape, output_shapes)
    # The three structures must mirror each other leaf-for-leaf.
    nest.assert_same_structure(output_types, output_shapes)
    nest.assert_same_structure(output_types, output_classes)
    self._variant_tensor = variant_tensor
    self._output_shapes = output_shapes
    self._output_types = output_types
    self._output_classes = output_classes

  def has_value(self, name=None):
    return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)

  def get_value(self, name=None):
    # TODO(b/110122868): Consolidate the restructuring logic with similar logic
    # in `Iterator.get_next()` and `StructuredFunctionWrapper`.
    with ops.name_scope(name, "OptionalGetValue",
                        [self._variant_tensor]) as scope:
      # Decode the flat dense components, then re-pack them into the
      # declared nested structure and restore any SparseTensors.
      return sparse.deserialize_sparse_tensors(
          nest.pack_sequence_as(
              self._output_types,
              gen_dataset_ops.optional_get_value(
                  self._variant_tensor,
                  name=scope,
                  output_types=nest.flatten(
                      sparse.as_dense_types(self._output_types,
                                            self._output_classes)),
                  output_shapes=nest.flatten(
                      sparse.as_dense_shapes(self._output_shapes,
                                             self._output_classes)))),
          self._output_types, self._output_shapes, self._output_classes)

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
| |
"""
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# NOTE(review): stale comment — "avoid division truncation" most likely
# annotated a removed `from __future__ import division` import; the true
# divisions below rely on Python 3 semantics.
import warnings
import numpy as np
from . import empirical_covariance, EmpiricalCovariance
from .._config import config_context
from ..utils import check_array
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
    """Shrink a covariance matrix towards a scaled identity.

    Computes the convex combination
    ``(1 - shrinkage) * emp_cov + shrinkage * mu * identity``
    with ``mu = trace(emp_cov) / n_features``.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Covariance matrix to be shrunk.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.
    """
    emp_cov = check_array(emp_cov)
    n_features = emp_cov.shape[0]
    # Shrinkage target intensity: mean of the eigenvalues (trace / p).
    mu = emp_cov.trace() / n_features
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    # Write shrinkage * mu onto every diagonal entry in place; assigning
    # through `.flat` with stride n_features + 1 hits exactly the diagonal.
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
    return shrunk_cov
class ShrunkCovariance(EmpiricalCovariance):
    """Covariance estimator with shrinkage.

    The estimated covariance is the empirical covariance shrunk towards a
    scaled identity:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where ``mu = trace(cov) / n_features``.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero. If False, data will be centered before computation.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import ShrunkCovariance
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = ShrunkCovariance().fit(X)
    >>> cov.covariance_
    array([[0.7387..., 0.2536...],
           [0.2536..., 0.4110...]])
    >>> cov.location_
    array([0.0622..., 0.0193...])
    """

    def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.shrinkage = shrinkage

    def fit(self, X, y=None):
        """Fit the shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = self._validate_data(X)
        # The parent's fit() is deliberately bypassed: it would also set the
        # precision matrix, triggering a needless matrix inversion here.
        self.location_ = (
            np.zeros(X.shape[1]) if self.assume_centered else X.mean(0)
        )
        cov = empirical_covariance(X, assume_centered=self.assume_centered)
        cov = shrunk_covariance(cov, self.shrinkage)
        self._set_covariance(cov)
        return self
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
    """Estimates the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.
    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.

    Returns
    -------
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """
    X = check_array(X)
    # for only one feature, the result is the same whatever the shrinkage
    if len(X.shape) == 2 and X.shape[1] == 1:
        return 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    n_samples, n_features = X.shape

    # optionally center data
    if not assume_centered:
        X = X - X.mean(0)

    # A non-blocked version of the computation is present in the tests
    # in tests/test_covariance.py

    # Number of full blocks the feature axis is split into; the ragged
    # remainder (features beyond block_size * n_splits) is handled by the
    # dedicated edge computations below.  The accumulators beta_ and
    # delta_ sum, block by block, the entries of <X2.T, X2> and the
    # squared entries of <X.T, X> respectively.
    n_splits = int(n_features / block_size)
    X2 = X ** 2
    emp_cov_trace = np.sum(X2, axis=0) / n_samples
    mu = np.sum(emp_cov_trace) / n_features
    beta_ = 0.0  # sum of the coefficients of <X2.T, X2>
    delta_ = 0.0  # sum of the *squared* coefficients of <X.T, X>
    # starting block computation
    for i in range(n_splits):
        for j in range(n_splits):
            rows = slice(block_size * i, block_size * (i + 1))
            cols = slice(block_size * j, block_size * (j + 1))
            beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
            delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
        # remainder columns for block-row i
        rows = slice(block_size * i, block_size * (i + 1))
        beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
        delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
    # remainder rows against all full column blocks
    for j in range(n_splits):
        cols = slice(block_size * j, block_size * (j + 1))
        beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
        delta_ += np.sum(np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
    # bottom-right remainder block (remainder rows x remainder columns)
    delta_ += np.sum(
        np.dot(X.T[block_size * n_splits:], X[:, block_size * n_splits:]) ** 2
    )
    delta_ /= n_samples ** 2
    beta_ += np.sum(
        np.dot(X2.T[block_size * n_splits:], X2[:, block_size * n_splits:])
    )
    # use delta_ to compute beta
    beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
    # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
    delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu ** 2
    delta /= n_features
    # get final beta as the min between beta and delta
    # We do this to prevent shrinking more than "1", which would invert
    # the value of covariances
    beta = min(beta, delta)
    # finally get shrinkage
    shrinkage = 0 if beta == 0 else beta / delta
    return shrinkage
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
    """Estimates the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.  If False, data will be centered
        before computation.
    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.
        This is purely a memory optimization and does not affect results.

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where ``mu = trace(cov) / n_features``.
    """
    X = check_array(X)
    # Degenerate case: a single feature.  Shrinkage has no effect, so the
    # covariance is just the (possibly centered) second moment.
    if len(X.shape) == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X ** 2).mean()), 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
        n_features = X.size
    else:
        _, n_features = X.shape

    # Shrinkage coefficient from the Ledoit-Wolf closed formula.
    shrinkage = ledoit_wolf_shrinkage(
        X, assume_centered=assume_centered, block_size=block_size
    )
    sample_cov = empirical_covariance(X, assume_centered=assume_centered)
    mu = np.trace(sample_cov) / n_features
    # Convex combination of the sample covariance and mu * identity; the
    # flat-stride assignment adds shrinkage * mu to the diagonal in place.
    shrunk_cov = (1.0 - shrinkage) * sample_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
    return shrunk_cov, shrinkage
class LedoitWolf(EmpiricalCovariance):
    """LedoitWolf Estimator.

    Covariance estimator with automatic shrinkage: the shrinkage
    coefficient is computed from the data with the closed formula of
    O. Ledoit and M. Wolf, "A Well-Conditioned Estimator for
    Large-Dimensional Covariance Matrices", Journal of Multivariate
    Analysis, Volume 88, Issue 2, February 2004, pages 365-411.

    The regularised covariance is::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where ``mu = trace(cov) / n_features`` and ``shrinkage`` is given by
    the Ledoit and Wolf formula (see References).

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not
        exactly zero.  If False (default), data will be centered before
        computation.
    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split
        during its Ledoit-Wolf estimation.  This is purely a memory
        optimization and does not affect results.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].
    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    References
    ----------
    "A Well-Conditioned Estimator for Large-Dimensional Covariance
    Matrices", Ledoit and Wolf, Journal of Multivariate Analysis,
    Volume 88, Issue 2, February 2004, pages 365-411.
    """

    def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
        # Common options go to the EmpiricalCovariance base class; only
        # the block size is specific to this estimator.
        super().__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
        )
        self.block_size = block_size

    def fit(self, X, y=None):
        """Fit the Ledoit-Wolf shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # The parent ``fit`` is intentionally skipped: it would compute
        # the covariance matrix (and potentially the precision) again.
        X = self._validate_data(X)
        self.location_ = (
            np.zeros(X.shape[1]) if self.assume_centered else X.mean(0)
        )
        # Data are pre-centered above, hence ``assume_centered=True``.
        with config_context(assume_finite=True):
            covariance, shrinkage = ledoit_wolf(
                X - self.location_, assume_centered=True, block_size=self.block_size
            )
        self.shrinkage_ = shrinkage
        self._set_covariance(covariance)
        return self
# OAS estimator
def oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage algorithm.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.  If False, data will be centered
        before computation.

    Returns
    -------
    shrunk_cov : array-like of shape (n_features, n_features)
        Shrunk covariance.
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularised (shrunk) covariance is::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where ``mu = trace(cov) / n_features``.  The formula we used to
    implement the OAS is slightly modified compared to the one given in
    the article. See :class:`OAS` for more details.
    """
    X = np.asarray(X)
    # Degenerate case: a single feature -- shrinkage cannot change the
    # result, so return the (possibly centered) second moment directly.
    if len(X.shape) == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X ** 2).mean()), 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
        n_samples, n_features = 1, X.size
    else:
        n_samples, n_features = X.shape

    emp_cov = empirical_covariance(X, assume_centered=assume_centered)
    mu = np.trace(emp_cov) / n_features

    # formula from Chen et al.'s **implementation** (not eq. 23 of the paper)
    alpha = np.mean(emp_cov ** 2)
    numerator = alpha + mu ** 2
    denominator = (n_samples + 1.0) * (alpha - (mu ** 2) / n_features)
    shrinkage = 1.0 if denominator == 0 else min(numerator / denominator, 1.0)

    # Convex combination; the flat-stride assignment adds shrinkage * mu
    # to the diagonal in place.
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
    return shrunk_cov, shrinkage
class OAS(EmpiricalCovariance):
    """Oracle Approximating Shrinkage Estimator.

    OAS is a particular form of shrinkage described in
    "Shrinkage Algorithms for MMSE Covariance Estimation",
    Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10,
    October 2010.

    The formula used here does not correspond to the one given in the
    article.  In the original article, formula (23) states that 2/p is
    multiplied by Trace(cov*cov) in both the numerator and denominator,
    but this operation is omitted because for a large p, the value of
    2/p is so small that it doesn't affect the value of the estimator.

    The regularised covariance is::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where ``mu = trace(cov) / n_features`` and ``shrinkage`` is given by
    the OAS formula (see References).

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not
        exactly zero.  If False (default), data will be centered before
        computation.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].
    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    References
    ----------
    "Shrinkage Algorithms for MMSE Covariance Estimation",
    Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10,
    October 2010.
    """

    def fit(self, X, y=None):
        """Fit the Oracle Approximating Shrinkage covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = self._validate_data(X)
        # The parent ``fit`` is skipped on purpose: it would compute the
        # covariance matrix (and potentially the precision) redundantly.
        self.location_ = (
            np.zeros(X.shape[1]) if self.assume_centered else X.mean(0)
        )
        # Data are centered above, hence ``assume_centered=True``.
        covariance, shrinkage = oas(X - self.location_, assume_centered=True)
        self.shrinkage_ = shrinkage
        self._set_covariance(covariance)
        return self
| |
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import get_blas_funcs
from .utils import make_system
from ._gcrotmk import _fgmres
__all__ = ['lgmres']
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
           prepend_outer_v=False, atol=None):
    """
    Solve a matrix equation using the LGMRES algorithm.

    The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
    in the convergence in restarted GMRES, and often converges in fewer
    iterations.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}, optional
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A.  Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    inner_m : int, optional
        Number of inner GMRES iterations per each outer iteration.
    outer_k : int, optional
        Number of vectors to carry between inner GMRES iterations.
        According to [1]_, good values are in the range of 1...3.
        However, note that if you want to use the additional vectors to
        accelerate solving multiple similar problems, larger values may
        be beneficial.
    outer_v : list of tuples, optional
        List containing tuples ``(v, Av)`` of vectors and corresponding
        matrix-vector products, used to augment the Krylov subspace, and
        carried between inner GMRES iterations.  The element ``Av`` can
        be `None` if the matrix-vector product should be re-evaluated.
        This parameter is modified in-place by `lgmres`, and can be used
        to pass "guess" vectors in and out of the algorithm when solving
        similar problems.
    store_outer_Av : bool, optional
        Whether LGMRES should store also A*v in addition to vectors `v`
        in the `outer_v` list. Default is True.
    prepend_outer_v : bool, optional
        Whether to put outer_v augmentation vectors before Krylov iterates.
        In standard LGMRES, prepend_outer_v=False.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:

            - 0 : successful exit
            - >0 : convergence to tolerance not achieved, number of
              iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The LGMRES algorithm [1]_ [2]_ is designed to avoid the
    slowing of convergence in restarted GMRES, due to alternating
    residual vectors. Typically, it often outperforms GMRES(m) of
    comparable memory requirements by some measure, or at least is not
    much worse.

    Another advantage in this algorithm is that you can supply it with
    'guess' vectors in the `outer_v` argument that augment the Krylov
    subspace. If the solution lies close to the span of these vectors,
    the algorithm converges faster. This can be useful if several very
    similar matrices need to be inverted one after another, such as in
    Newton-Krylov iteration where the Jacobian matrix often changes
    little in the nonlinear steps.

    References
    ----------
    .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
             Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
             Anal. Appl. 26, 962 (2005).
    .. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
             restarted GMRES", PhD thesis, University of Colorado (2003).

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lgmres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = lgmres(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    if atol is None:
        warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
                      "The default value will change in the future. To preserve "
                      "current behavior, set ``atol=tol``.",
                      category=DeprecationWarning, stacklevel=2)
        atol = tol

    matvec = A.matvec
    psolve = M.matvec

    if outer_v is None:
        outer_v = []

    # BLAS routines are bound lazily below, once the working dtype is
    # known (it may widen to complex after the first matvec).
    axpy, dot, scal = None, None, None
    nrm2 = get_blas_funcs('nrm2', [b])

    b_norm = nrm2(b)
    # Adaptive cap for the inner-solve tolerance; tightened or relaxed
    # each outer iteration based on the inner residual actually achieved.
    ptol_max_factor = 1.0

    for k_outer in range(maxiter):
        r_outer = matvec(x) - b

        # -- callback
        if callback is not None:
            callback(x)

        # -- determine input type routines
        if axpy is None:
            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
                x = x.astype(r_outer.dtype)
            axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
                                                   (x, r_outer))

        # -- check stopping condition
        r_norm = nrm2(r_outer)
        if r_norm <= max(atol, tol * b_norm):
            break

        # -- inner LGMRES iteration
        # Initial inner direction: preconditioned (negated) residual.
        v0 = -psolve(r_outer)
        inner_res_0 = nrm2(v0)

        if inner_res_0 == 0:
            rnorm = nrm2(r_outer)
            raise RuntimeError("Preconditioner returned a zero vector; "
                               "|v| ~ %.1g, |M v| = 0" % rnorm)

        v0 = scal(1.0/inner_res_0, v0)
        # The inner solve only needs to reduce the residual enough for
        # the outer convergence test; ptol is that relative target.
        ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)

        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               v0,
                                               inner_m,
                                               lpsolve=psolve,
                                               atol=ptol,
                                               outer_v=outer_v,
                                               prepend_outer_v=prepend_outer_v)
            y *= inner_res_0
            if not np.isfinite(y).all():
                # Overflow etc. in computation. There's no way to
                # recover from this, so we have to bail out.
                raise LinAlgError()
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            return postprocess(x), k_outer + 1

        # Inner loop tolerance control: if the inner solve missed its
        # target, allow a looser target next time; otherwise tighten.
        if pres > ptol:
            ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
        else:
            ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)

        # -- GMRES terminated: eval solution
        dx = zs[0]*y[0]
        for w, yc in zip(zs[1:], y[1:]):
            dx = axpy(w, dx, dx.shape[0], yc)  # dx += w*yc

        # -- Store LGMRES augmentation vectors
        nx = nrm2(dx)
        if nx > 0:
            if store_outer_Av:
                # A @ dx reconstructed from the inner factorization,
                # avoiding an extra matvec.
                q = Q.dot(R.dot(y))
                ax = vs[0]*q[0]
                for v, qc in zip(vs[1:], q[1:]):
                    ax = axpy(v, ax, ax.shape[0], qc)
                outer_v.append((dx/nx, ax/nx))
            else:
                outer_v.append((dx/nx, None))

        # -- Retain only a finite number of augmentation vectors
        while len(outer_v) > outer_k:
            del outer_v[0]

        # -- Apply step
        x += dx
    else:
        # didn't converge ...
        return postprocess(x), maxiter

    return postprocess(x), 0
| |
#!/usr/bin/python
#import urllib.request, urllib.parse, urllib.error #Use this for Python > 3
import urllib #Use this line instead of the previous for Python < 3.0
import xml.etree.ElementTree as elementree
import re
import string
class QBConn:
    """Lightweight connection to the QuickBase HTTP API.

    This client targets Python 2 (``urllib.urlencode`` /
    ``urllib.FancyURLopener``); the commented-out lines show the
    Python 3 equivalents.
    """

    def __init__(self, url, appid, token=None, user_token=None, realm=""):
        self.url = url
        self.token = token
        self.user_token = user_token
        self.appid = appid
        self.ticket = None
        self.realm = realm  # allows one QuickBase realm to proxy for another
        # Set after every API call.  A non-zero value indicates an error.
        # A negative value indicates an error raised by this library.
        self.error = 0
        self.tables = {}

    def authenticate(self, username=None, password=None):
        """Obtain a session ticket (or rely on the user token) and load tables."""
        if self.user_token:
            self.tables = self._getTables()
            return
        params = {'act': 'API_Authenticate', 'username': username, 'password': password}
        resp = self.request(params, 'main')
        if self.error != 0:
            return
        self.ticket = resp.find("ticket").text
        self.tables = self._getTables()

    def request(self, params, url_ext):
        """Add the appropriate auth fields to the request and send it to QB.

        Takes a dict of parameter:value pairs and the URL extension
        ('main' or your table ID, mostly).  Returns the parsed XML tree,
        or None when no XML came back (``self.error`` is set to -1).
        """
        url = self.url + url_ext
        if self.user_token:
            params['usertoken'] = self.user_token
        else:
            params['ticket'] = self.ticket
            params['apptoken'] = self.token
        params['realmhost'] = self.realm
        # urlparams = urllib.parse.urlencode(params)  # Use this line for Python > 3
        urlparams = urllib.urlencode(params)  # use this line for < Python 3
        # resp = urllib.request.FancyURLopener().open(url+"?"+urlparams).read()  # Use this line for Python > 3
        resp = urllib.FancyURLopener().open(url + "?" + urlparams).read()  # use this line for < Python 3
        if re.match(r'^<\?xml version=', resp.decode("utf-8")) is None:
            print("No useful data received")
            self.error = -1  # no XML data returned
            return None
        tree = elementree.fromstring(resp)
        self.error = int(tree.find('errcode').text)
        return tree

    def addRecord(self, tableID, data):
        """Create a record with the given data in table ``tableID``.

        ``data`` is a dict of field name:field value pairs (table IDs can
        be obtained from ``self.tables``).  Unknown field names are
        silently ignored, matching the original behavior.
        """
        fields = self.getFields(tableID)
        params = {'act': 'API_AddRecord'}
        for field in data:
            if field in fields:
                params["_fid_" + fields[field]] = data[field]
        return self.request(params, tableID)

    def editRecord(self, tableID, rid, newdata, options=None):
        """Update record ``rid`` with the field:newvalue pairs in ``newdata``.

        Keys made of digits are treated as field IDs directly; other keys
        are resolved through the table schema.  ``options`` is an optional
        dict of extra request parameters.
        """
        # BUGFIX: ``options`` previously defaulted to a shared mutable {};
        # use None and substitute a fresh dict per call.
        params = {'act': 'API_EditRecord', 'rid': rid}
        fields = self.getFields(tableID)
        for key, value in list(newdata.items()):
            if key.isdigit():
                params["_fid_" + key] = value
            elif key in fields:
                params["_fid_" + fields[key]] = value
        params = dict(params, **(options or {}))
        return self.request(params, tableID)

    def deleteRecord(self, tableID, rid):
        """Delete the record specified by ``rid`` from table ``tableID``."""
        params = {'act': 'API_DeleteRecord', 'rid': rid}
        return self.request(params, tableID)

    def purgeRecords(self, tableID, query):
        """Delete every record from ``tableID`` selected by ``query``."""
        params = {'act': 'API_PurgeRecords', 'query': query}
        return self.request(params, tableID)

    def getFields(self, tableID):
        """Return a dict of fieldname:fieldid pairs for ``tableID``.

        Field names are lowercased and have their spaces removed.
        """
        params = {'act': 'API_GetSchema'}
        schema = self.request(params, tableID)
        fields = schema.find('table').find('fields')
        fieldlist = {}
        for field in fields:
            label = field.find('label').text.lower().replace(' ', '')
            fieldlist[label] = field.attrib['id']
        return fieldlist

    def _getTables(self):
        """Return a dict of tablename:tableID pairs.

        Called automatically after successful authentication.
        """
        if self.appid is None:
            return {}
        params = {'act': 'API_GetSchema'}
        schema = self.request(params, self.appid)
        chdbs = schema.find('table').find('chdbids')
        tables = {}
        for chdb in chdbs:
            # Strip the 6-character prefix from the child table name.
            tables[chdb.attrib['name'][6:]] = chdb.text
        return tables

    def query(self, tableID, query):
        """Execute ``query`` (a dict of API_DoQuery parameters) on ``tableID``.

        Returns a list of dicts containing fieldname:value pairs; the
        record ID is always available under the "rid" key.
        """
        params = dict(query)
        params['act'] = "API_DoQuery"
        params['includeRids'] = '1'
        params['fmt'] = "structured"
        records = self.request(params, tableID).find('table').find('records')
        data = []
        fields = {fid: name for name, fid in list(self.getFields(tableID).items())}
        for record in records:
            temp = {'rid': record.attrib['rid']}
            for field in record:
                if field.tag == "f":
                    temp[fields[field.attrib['id']]] = field.text
            data.append(temp)
        return data

    def sql(self, querystr):
        """Emulate the syntax of basic (SELECT, DELETE) SQL queries.

        Examples::

            qb.sql("SELECT * FROM users WHERE name`EX`John\\_Doe OR role`EX`fakeperson")
            qb.sql("SELECT firstname|lastname FROM users WHERE paid`EX`true ORDER BY lastname ASC LIMIT 100")
            qb.sql("DELETE FROM assets WHERE value`BF`0")

        ``\\_`` represents a space.  This is a very basic tokenizer (not a
        state machine): field and table names must not contain spaces.
        """
        tokens = querystr.split(" ")
        if tokens[0] == "SELECT":
            query = {}
            tid = self.tables[tokens[3]]
            tfields = self.getFields(tid)
            if tokens[1] != "*":
                # Build the column list (clist) from "a|b|c" field names.
                clist = ""
                for field in tokens[1].split("|"):
                    clist += tfields[field] + "."
                query['clist'] = clist[:len(clist) - 1]
            if len(tokens) > 4:
                # WHERE clause -> QuickBase query string
                try:
                    where = tokens.index("WHERE")
                    querystr = ""
                    for i in range(where + 1, len(tokens)):
                        if (i - where + 1) % 2 == 0:
                            filt = tokens[i].split("`")
                            querystr += "{'" + tfields[filt[0]] + "'." + filt[1] + ".'" + filt[2].replace("\\_", " ") + "'}"
                        elif tokens[i] == "AND" or tokens[i] == "OR":
                            querystr += tokens[i]
                        else:
                            break
                    query['query'] = querystr
                except ValueError:
                    # No WHERE clause present.
                    pass
                except Exception:
                    print("SQL error near WHERE")
                    self.error = -2
                    return
                # ORDER BY clause -> slist / sortorder option
                try:
                    orderby = tokens.index("ORDER") + 1
                    orderings = tokens[orderby + 1].split("|")
                    slist = ""
                    for ordering in orderings:
                        slist += tfields[ordering] + "."
                    query['slist'] = slist[:len(slist) - 1]
                    query['options'] = (query['options'] + "." if 'options' in query else "") + "sortorder-" + ("A" if tokens[orderby + 2] == "ASC" else "D")
                except ValueError:
                    # No ORDER BY clause present.
                    pass
                except Exception:
                    print("SQL error near ORDER")
                    self.error = -2
                    return
                # LIMIT clause -> num/skp options ("LIMIT skip,count" supported)
                try:
                    limit = tokens[tokens.index("LIMIT") + 1]
                    limit = limit.split(",")
                    if len(limit) > 1:
                        query['options'] = (query['options'] + "." if 'options' in query else "") + "skp-" + limit[0] + ".num-" + limit[1]
                    else:
                        query['options'] = (query['options'] + "." if 'options' in query else "") + "num-" + limit[0]
                except ValueError:
                    # No LIMIT clause present.
                    pass
                except Exception:
                    print("SQL error near LIMIT")
                    self.error = -2
                    return
            return self.query(tid, query)
        elif tokens[0] == "DELETE":
            tid = self.tables[tokens[2]]
            tfields = self.getFields(tid)
            where = 3
            querystr = ""
            for i in range(where + 1, len(tokens)):
                if (i - where + 1) % 2 == 0:
                    filt = tokens[i].split("`")
                    querystr += "{'" + tfields[filt[0]] + "'." + filt[1] + ".'" + filt[2] + "'}"
                elif tokens[i] == "AND" or tokens[i] == "OR":
                    querystr += tokens[i]
                else:
                    break
            return self.purgeRecords(tid, querystr)
| |
import datetime
from mock import Mock, MagicMock
from elasticmagic import Index
from elasticmagic import (
Index, Document, DynamicDocument,
SearchQuery, Params, Term, Bool, MultiMatch,
FunctionScore, Sort, QueryRescorer, agg
)
from elasticmagic.compiler import QueryCompiled20
from elasticmagic.util import collect_doc_classes
from elasticmagic.types import String, Integer, Float, Object
from elasticmagic.expression import Field
from .base import BaseTestCase
class SearchQueryTest(BaseTestCase):
def test_search_query_compile(self):
f = DynamicDocument.fields
sq = SearchQuery()
self.assert_expression(sq, {})
self.assertEqual(collect_doc_classes(sq), set())
sq = SearchQuery(Term(f.user, 'kimchy')).limit(10).offset(0)
self.assert_expression(
sq,
{
"from": 0,
"size": 10,
"query": {
"term": {"user": "kimchy"}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = SearchQuery(Term(f.user, 'kimchy')).filter(f.age >= 16)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"term": {"user": "kimchy"}
},
"filter": {
"range": {
"age": {"gte": 16}
}
}
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = SearchQuery(Term(f.user, 'kimchy'), _compiler=QueryCompiled20).filter(f.age >= 16)
self.assert_expression(
sq,
{
"query": {
"bool": {
"must": {
"term": {"user": "kimchy"}
},
"filter": {
"range": {
"age": {"gte": 16}
}
}
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery(Term(f.user, 'kimchy'))
.query(f.user != 'kimchy')
)
self.assert_expression(
sq,
{
"query": {
"bool": {
"must_not": [
{
"term": {"user": "kimchy"}
}
]
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery(Term(f.user, 'kimchy'))
.query(None)
)
self.assert_expression(sq, {})
self.assertEqual(collect_doc_classes(sq), set())
sq = (
SearchQuery(Term(f.user, 'kimchy'))
.filter(f.age >= 16)
.filter(f.lang == 'English')
)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"term": {"user": "kimchy"}
},
"filter": {
"bool": {
"must": [
{
"range": {
"age": {"gte": 16}
}
},
{
"term": {
"lang": "English"
}
}
]
}
}
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.order_by(
f.opinion_rating.desc(missing='_last'),
f.opinion_count.desc(),
f.id
)
)
self.assert_expression(
sq,
{
"sort": [
{
"opinion_rating": {
"order": "desc",
"missing": "_last"
}
},
{
"opinion_count": "desc"
},
"id"
]
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.order_by(
f.opinion_rating.desc(missing='_last'),
f.opinion_count.desc(),
f.id
)
.order_by(None)
.order_by(None)
)
self.assert_expression(sq, {})
self.assertEqual(collect_doc_classes(sq), set())
sq = SearchQuery().source(f.name, f.company)
self.assert_expression(
sq,
{
"_source": ["name", "company"]
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = SearchQuery().source(exclude=[f.name, f.company])
self.assert_expression(
sq,
{
"_source": {
"exclude": ["name", "company"]
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.source(
include=[f.obj1.wildcard('*'), f.obj2.wildcard('*')],
# FIXME: f.wildcard('*')
exclude=DynamicDocument.wildcard('*').description
)
)
self.assert_expression(
sq,
{
"_source": {
"include": ["obj1.*", "obj2.*"],
"exclude": "*.description"
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.source(None)
.source(f.name, f.company)
.source(None)
)
self.assert_expression(sq, {})
self.assertEqual(collect_doc_classes(sq), set())
sq = (
SearchQuery()
.source(f.name, f.company)
.source(False)
)
self.assert_expression(
sq,
{
"_source": False
}
)
self.assertEqual(collect_doc_classes(sq), set())
sq = (
SearchQuery()
.source(True)
)
self.assert_expression(
sq,
{
"_source": True
}
)
self.assertEqual(collect_doc_classes(sq), set())
sq = SearchQuery().fields(f.name, f.company)
self.assert_expression(
sq,
{
"fields": ["name", "company"]
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.fields(True)
)
self.assert_expression(
sq,
{
"fields": '*'
}
)
self.assertEqual(collect_doc_classes(sq), set())
sq = (
SearchQuery()
.fields(None)
.fields(f.name, f.company)
.fields(None)
)
self.assert_expression(sq, {})
self.assertEqual(collect_doc_classes(sq), set())
sq = (
SearchQuery()
.fields(f.name, f.company)
.fields(False)
)
self.assert_expression(
sq,
{
"fields": []
}
)
self.assertEqual(collect_doc_classes(sq), set())
self.assert_expression(
SearchQuery()
.function_score({'random_score': {"seed": 1234}}),
{
"query": {
"function_score": {
"functions": [
{
"random_score": {"seed": 1234}
}
],
}
}
}
)
sq = (
SearchQuery(MultiMatch('Iphone 6', fields=[f.name, f.description]))
.filter(f.status == 0)
.function_score(None)
.function_score({'_score': {"seed": 1234}})
.function_score(None)
.function_score({'field_value_factor': {'field': f.popularity,
'factor': 1.2,
'modifier': 'sqrt'}},
boost_mode='sum')
.function_score({'boost_factor': 3,
'filter': f.region == 12})
)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"function_score": {
"query": {
"multi_match": {
"query": "Iphone 6",
"fields": ["name", "description"]
}
},
"functions": [
{
"field_value_factor": {
"field": "popularity",
"factor": 1.2,
"modifier": "sqrt"
}
},
{
"filter": {
"term": {"region": 12}
},
"boost_factor": 3
}
],
"boost_mode": "sum"
}
},
"filter": {
"term": {"status": 0}
}
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
sq = (
SearchQuery()
.filter(f.status == 0)
.boost_score(
{'filter': f.discount_percent >= 10, 'weight': 1000},
{'filter': f.discount_percent >= 50, 'weight': 2000},
{'filter': f.presence == 'available', 'weight': 10000},
)
)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"function_score": {
"functions": [
{
"filter": {"range": {"discount_percent": {"gte": 10}}},
"weight": 1000
},
{
"filter": {"range": {"discount_percent": {"gte": 50}}},
"weight": 2000
},
{
"filter": {"term": {"presence": "available"}},
"weight": 10000
},
],
"score_mode": "sum",
"boost_mode": "sum"
}
},
"filter": {
"term": {"status": 0}
}
}
}
}
)
sq = (
SearchQuery(f.name.match('test'))
.filter(f.status == 0)
.function_score(
{'field_value_factor': {'field': f.popularity}},
)
.boost_score(
{'filter': f.discount_percent >= 10, 'weight': 100},
)
.boost_score(None)
.boost_score(
{'filter': f.discount_percent >= 10, 'weight': 1000},
{'filter': f.discount_percent >= 50, 'weight': 2000},
score_mode='max',
)
)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"function_score": {
"query": {
"function_score": {
"query": {
"match": {
"name": "test"
}
},
"functions": [
{
"field_value_factor": {
"field": "popularity"
}
}
]
}
},
"functions": [
{
"filter": {"range": {"discount_percent": {"gte": 10}}},
"weight": 1000
},
{
"filter": {"range": {"discount_percent": {"gte": 50}}},
"weight": 2000
},
],
"score_mode": "max",
"boost_mode": "sum"
}
},
"filter": {
"term": {"status": 0}
}
}
}
}
)
sq = (
SearchQuery()
.rescore(
QueryRescorer(
self.index.t.field1.match('the quick brown', type='phrase', slop=2)
)
)
.rescore(None)
.rescore(
QueryRescorer(
self.index.t.field1.match('the quick brown fox', type='phrase', slop=2),
window_size=100,
query_weight=0.7,
rescore_query_weight=1.2
),
)
.rescore(
QueryRescorer(
FunctionScore(script_score={'script': "log10(doc['numeric'].value + 2)"}),
window_size=10,
score_mode='multiply'
),
)
)
self.assert_expression(
sq,
{
"rescore": [
{
"window_size": 100,
"query": {
"rescore_query": {
"match": {
"field1": {
"query": "the quick brown fox",
"type": "phrase",
"slop": 2
}
}
},
"query_weight": 0.7,
"rescore_query_weight": 1.2
}
},
{
"window_size": 10,
"query": {
"score_mode": "multiply",
"rescore_query": {
"function_score": {
"script_score": {
"script": "log10(doc['numeric'].value + 2)"
}
}
}
}
}
]
}
)
self.assertEqual(collect_doc_classes(sq), {self.index.t})
sq = SearchQuery().post_filter(self.index.shirt.color == 'red')
self.assert_expression(
sq,
{
"post_filter": {
"term": {"color": "red"}
}
}
)
self.assertEqual(collect_doc_classes(sq), {self.index.shirt})
sq = (
SearchQuery()
.filter(self.index.shirt.brand == 'gucci')
.post_filter(self.index.shirt.color == 'red')
.post_filter(self.index.shirt.model == 't-shirt')
)
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"term": {"brand": "gucci"}
}
}
},
"post_filter": {
"bool": {
"must": [
{"term": {"color": "red"}},
{"term": {"model": "t-shirt"}}
]
}
}
}
)
self.assertEqual(collect_doc_classes(sq), {self.index.shirt})
def test_aggregations(self):
    """Compilation of ``aggregations`` clauses and the doc classes they touch."""
    f = DynamicDocument.fields

    # Single metric aggregation.
    sq = SearchQuery().aggregations(min_price=agg.Min(f.price))
    self.assert_expression(
        sq,
        {
            "aggregations": {
                "min_price": {
                    "min": {"field": "price"}
                }
            }
        }
    )
    self.assertEqual(collect_doc_classes(sq), {DynamicDocument})

    # Single bucket aggregation.
    sq = SearchQuery().aggregations(genders=agg.Terms(f.gender))
    self.assert_expression(
        sq,
        {
            "aggregations": {
                "genders": {
                    "terms": {"field": "gender"}
                }
            }
        }
    )
    self.assertEqual(collect_doc_classes(sq), {DynamicDocument})

    # Nested sub-aggregation via the ``aggs`` keyword.
    sq = (
        SearchQuery()
        .aggregations(
            type=agg.Terms(f.type, aggs={'min_price': agg.Min(f.price)})
        )
    )
    self.assert_expression(
        sq,
        {
            "aggregations": {
                "type": {
                    "terms": {"field": "type"},
                    "aggregations": {
                        "min_price": {
                            "min": {"field": "price"}
                        }
                    }
                }
            }
        },
    )
    self.assertEqual(collect_doc_classes(sq), {DynamicDocument})

    # top_hits sub-aggregation with sort / size / _source options.
    sq = (
        SearchQuery()
        .aggregations(
            top_tags=(
                agg.Terms(
                    f.tags,
                    size=3,
                    aggs={
                        'top_tag_hits': agg.TopHits(
                            sort=f.last_activity_date.desc(),
                            size=1,
                            _source=Params(include=[f.title]))
                    }
                )
            )
        )
    )
    self.assert_expression(
        sq,
        {
            "aggregations": {
                "top_tags": {
                    "terms": {
                        "field": "tags",
                        "size": 3
                    },
                    "aggregations": {
                        "top_tag_hits": {
                            "top_hits": {
                                "sort": {
                                    "last_activity_date": "desc"
                                },
                                "_source": {
                                    "include": ["title"]
                                },
                                "size": 1
                            }
                        }
                    }
                }
            }
        }
    )
    self.assertEqual(collect_doc_classes(sq), {DynamicDocument})

    # Aggregations may also be passed as a plain dict; ordering a terms
    # aggregation by a sibling metric aggregation.
    sq = (
        SearchQuery()
        .aggregations({
            'top_sites': agg.Terms(
                f.domain,
                order=Sort('top_hit', 'desc'),
                aggs={
                    'top_tags_hits': agg.TopHits(),
                    'top_hit': agg.Max(script='_doc.score'),
                }
            )
        })
    )
    self.assert_expression(
        sq,
        {
            "aggregations": {
                "top_sites": {
                    "terms": {
                        "field": "domain",
                        "order": {
                            "top_hit": "desc"
                        }
                    },
                    "aggregations": {
                        "top_tags_hits": {
                            "top_hits": {}
                        },
                        "top_hit": {
                            "max": {
                                "script": "_doc.score"
                            }
                        }
                    }
                }
            }
        }
    )
    self.assertEqual(collect_doc_classes(sq), {DynamicDocument})
def test_count(self):
    """count() must call the count API; scoring-only clauses stay out of the body."""
    shards = {"total": 5, "successful": 5, "failed": 0}

    # A query without clauses sends no body at all.
    self.client.count.return_value = {"count": 1024, "_shards": shards}
    query = SearchQuery(index=self.index, doc_cls=self.index.car)
    self.assertEqual(query.count(), 1024)
    self.client.count.assert_called_with(
        index='test',
        doc_type='car',
        body=None,
    )

    # The filter is kept, but (per the expected body) function_score is
    # not part of the count request.
    self.client.count.return_value = {"count": 2, "_shards": shards}
    query = (
        SearchQuery(index=self.index)
        .filter(self.index.car.status == 1)
        .function_score({'boost_factor': 3})
    )
    self.assertEqual(query.count(), 2)
    self.client.count.assert_called_with(
        index='test',
        doc_type='car',
        body={
            "query": {
                "filtered": {
                    "filter": {
                        "term": {"status": 1}
                    }
                }
            }
        }
    )
def test_exists(self):
    """exists() must call the search_exists API and return its boolean."""
    # Keyword arguments such as refresh are forwarded verbatim.
    self.client.search_exists.return_value = {"exists": True}
    query = SearchQuery(index=self.index, doc_cls=self.index.car)
    self.assertEqual(query.exists(refresh=True), True)
    self.client.search_exists.assert_called_with(
        index='test',
        doc_type='car',
        body=None,
        refresh=True
    )

    # The filter is compiled into the body; function_score is not
    # (per the expected body below).
    self.client.search_exists.return_value = {"exists": False}
    query = (
        SearchQuery(index=self.index)
        .filter(self.index.car.status == 1)
        .function_score({'boost_factor': 3})
    )
    self.assertEqual(query.exists(), False)
    self.client.search_exists.assert_called_with(
        index='test',
        doc_type='car',
        body={
            "query": {
                "filtered": {
                    "filter": {
                        "term": {"status": 1}
                    }
                }
            }
        }
    )
def test_search(self):
    """get_result() must parse hits into documents and map app instances.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated since Python 3.2 and removed in 3.12).
    """

    class CarObject(object):
        def __init__(self, id):
            self.id = id
            self.name = '{0}:{0}'.format(id)

    def _obj_mapper(ids):
        return {id: CarObject(int(id)) for id in ids}

    obj_mapper = Mock(wraps=_obj_mapper)

    class NameDocument(Document):
        first = Field(String)
        last = Field(String)

    class CarSellerDocument(Document):
        name = Field(Object(NameDocument))
        rating = Field(Float)

    class CarDocument(Document):
        __doc_type__ = 'car'

        vendor = Field(String)
        model = Field(String)
        year = Field(Integer)
        seller = Field(Object(CarSellerDocument))

    self.client.search = MagicMock(
        return_value={
            'hits': {
                'hits': [
                    {
                        '_id': '31888815',
                        '_type': 'car',
                        '_index': 'ads',
                        '_score': 4.675524,
                        '_source': {
                            'vendor': 'Subaru',
                            'model': 'Imprezza',
                            'year': 2004,
                        },
                    },
                    {
                        '_id': '987321',
                        '_type': 'car',
                        '_index': 'ads',
                        '_score': 3.654321,
                        '_source': {
                            'vendor': 'Subaru',
                            'model': 'Forester',
                            'year': 2007,
                        },
                    }
                ],
                'max_score': 4.675524,
                'total': 6234
            },
            'timed_out': False,
            'took': 47
        }
    )
    sq = (
        self.index.query(
            CarDocument.seller.name.first.match('Alex'),
            search_type='dfs_query_then_fetch',
        )
        .filter(CarDocument.seller.rating > 4)
        .with_instance_mapper(obj_mapper)
    )
    self.assertEqual(collect_doc_classes(sq), {CarDocument})

    results = sq.get_result()
    self.assertEqual(len(results), 2)
    self.client.search.assert_called_with(
        index='test',
        doc_type='car',
        body={
            'query': {
                'filtered': {
                    'query': {
                        'match': {'seller.name.first': 'Alex'}
                    },
                    'filter': {
                        'range': {'seller.rating': {'gt': 4.0}}
                    }
                }
            }
        },
        search_type='dfs_query_then_fetch',
    )
    self.assertEqual(len(sq.get_result().hits), 2)

    # First hit: raw fields plus the mapped application instance.
    doc = sq.get_result().hits[0]
    self.assertIsInstance(doc, CarDocument)
    self.assertEqual(doc._id, '31888815')
    self.assertEqual(doc._type, 'car')
    self.assertEqual(doc._index, 'ads')
    self.assertAlmostEqual(doc._score, 4.675524)
    self.assertEqual(doc.vendor, 'Subaru')
    self.assertEqual(doc.model, 'Imprezza')
    self.assertEqual(doc.year, 2004)
    self.assertEqual(doc.instance.id, 31888815)
    self.assertEqual(doc.instance.name, '31888815:31888815')

    doc = sq.get_result().hits[1]
    self.assertIsInstance(doc, CarDocument)
    self.assertEqual(doc._id, '987321')
    self.assertEqual(doc._type, 'car')
    self.assertEqual(doc._index, 'ads')
    self.assertAlmostEqual(doc._score, 3.654321)
    self.assertEqual(doc.vendor, 'Subaru')
    self.assertEqual(doc.model, 'Forester')
    self.assertEqual(doc.year, 2007)
    self.assertEqual(doc.instance.id, 987321)
    self.assertEqual(doc.instance.name, '987321:987321')

    # The mapper must run exactly once even though hits were accessed
    # several times above.
    self.assertEqual(obj_mapper.call_count, 1)
def test_multi_type_search(self):
    """Hits of different doc types must map to their own classes and mappers.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated since Python 3.2 and removed in 3.12).
    """
    def seller_mapper(ids):
        return {id: '{0}-{0}'.format(id) for id in ids}

    def customer_mapper(ids):
        return {id: '{0}:{0}'.format(id) for id in ids}

    sq = (
        self.index.query(
            self.index.seller.name.first.match('Alex'),
            doc_cls=(self.index.seller, self.index.customer)
        )
        .with_instance_mapper({self.index.seller: seller_mapper,
                               self.index.customer: customer_mapper})
        .filter(self.index.customer.birthday >= datetime.date(1960, 1, 1))
        .limit(2)
    )
    self.assertEqual(collect_doc_classes(sq), {self.index.seller, self.index.customer})

    self.client.search = MagicMock(
        return_value={
            'hits': {
                'hits': [
                    {
                        '_id': '3',
                        '_type': 'customer',
                        '_index': 'test',
                        '_score': 2.437682,
                        '_source': {
                            'name': {
                                'first': 'Alex',
                                'last': 'Exler'
                            },
                            'birthday': '1966-10-04'
                        },
                    },
                    {
                        '_id': '21',
                        '_type': 'seller',
                        '_index': 'test',
                        '_score': 2.290845,
                        '_source': {
                            'name': {
                                'first': 'Alexa',
                                'last': 'Chung'
                            },
                            'birthday': '1983-10-05',
                            'rating': 4.8
                        },
                    }
                ],
                'max_score': 2.437682,
                'total': 73
            },
            'timed_out': False,
            'took': 25
        }
    )
    results = sq.get_result()
    self.assertEqual(len(results), 2)
    self.client.search.assert_called_with(
        index='test',
        doc_type='seller,customer',
        body={
            'query': {
                'filtered': {
                    'query': {
                        'match': {'name.first': 'Alex'}
                    },
                    'filter': {
                        'range': {'birthday': {'gte': datetime.date(1960, 1, 1)}}
                    }
                }
            },
            'size': 2
        },
    )
    self.assertEqual(len(sq.get_result().hits), 2)

    # '_type' selects the document class and its per-type instance mapper.
    doc = sq.get_result().hits[0]
    self.assertIsInstance(doc, self.index.customer)
    self.assertEqual(doc._id, '3')
    self.assertEqual(doc._type, 'customer')
    self.assertEqual(doc._index, 'test')
    self.assertAlmostEqual(doc._score, 2.437682)
    self.assertEqual(doc.name.first, 'Alex')
    self.assertEqual(doc.name.last, 'Exler')
    self.assertEqual(doc.birthday, '1966-10-04')
    self.assertEqual(doc.instance, '3:3')

    doc = sq.get_result().hits[1]
    self.assertIsInstance(doc, self.index.seller)
    self.assertEqual(doc._id, '21')
    self.assertEqual(doc._type, 'seller')
    self.assertEqual(doc._index, 'test')
    self.assertAlmostEqual(doc._score, 2.290845)
    self.assertEqual(doc.name.first, 'Alexa')
    self.assertEqual(doc.name.last, 'Chung')
    self.assertEqual(doc.birthday, '1983-10-05')
    self.assertAlmostEqual(doc.rating, 4.8)
    self.assertEqual(doc.instance, '21-21')
def test_search_scroll(self):
    """Scan/scroll parameters must be forwarded and scroll_id exposed."""
    scroll_id = 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1'
    self.client.search = MagicMock(
        return_value={
            '_scroll_id': scroll_id,
            'hits': {
                'total': 93570,
                'max_score': 0,
                'hits': []
            },
            'timed_out': False,
            'took': 90
        }
    )
    result = (
        self.index.search_query(search_type='scan', scroll='1m')
        .limit(1000)
        .get_result()
    )
    self.client.search.assert_called_with(
        index='test',
        body={
            'size': 1000
        },
        search_type='scan',
        scroll='1m',
    )
    # A scan pass returns no hits, but the scroll id must be available.
    self.assertEqual(result.scroll_id, scroll_id)
    self.assertEqual(list(result), [])
def test_delete(self):
    """delete() must translate the query into a delete_by_query request."""
    self.index.query(self.index.car.vendor == 'Focus').delete()
    self.client.delete_by_query.assert_called_with(
        index='test',
        doc_type='car',
        body={
            'query': {
                'term': {'vendor': 'Focus'}
            }
        },
    )

    # Extra kwargs are passed through; note the expected body carries no
    # 'size' clause for the limit().
    (
        self.index.query(self.index.car.vendor == 'Focus')
        .filter(self.index.car.status == 0)
        .limit(20)
        .delete(timeout='1m', replication='async')
    )
    self.client.delete_by_query.assert_called_with(
        index='test',
        doc_type='car',
        body={
            "query": {
                "filtered": {
                    "query": {
                        "term": {"vendor": "Focus"}
                    },
                    "filter": {
                        "term": {"status": 0}
                    }
                }
            }
        },
        timeout='1m',
        replication='async',
    )
def test_as_bool(self):
    """Truth-testing a query must not trigger any request to the cluster."""
    query = self.index.search_query()
    self.assertTrue(bool(query))
    # Neither the count nor the search endpoint may have been hit.
    self.assertFalse(self.client.count.called)
    self.assertFalse(self.client.search.called)
def test_search_params(self):
    """with_* methods accumulate search params; passing None removes one."""
    sq = SearchQuery()
    self.assertEqual(sq._search_params, {})

    sq = SearchQuery(search_type='count')
    self.assertEqual(sq._search_params, {'search_type': 'count'})

    # None clears a previously set parameter.
    sq = sq.with_search_type(None)
    self.assertEqual(sq._search_params, {})

    # Unknown names are accepted and merged verbatim.
    sq = sq.with_search_params(
        {'search_type': 'count', 'query_cache': True},
        unknown_param='none',
    )
    self.assertEqual(
        sq._search_params,
        {
            'search_type': 'count',
            'query_cache': True,
            'unknown_param': 'none',
        }
    )

    # Later calls add to, rather than replace, the accumulated params.
    sq = sq.with_routing(1234)
    self.assertEqual(
        sq._search_params,
        {
            'routing': 1234,
            'search_type': 'count',
            'query_cache': True,
            'unknown_param': 'none',
        }
    )
def test_suggest(self):
    """suggest() merges suggesters across calls; suggest(None) clears them."""
    sq = SearchQuery()
    sq = sq.suggest(text="Complete",
                    in_title={'term': {'size': 3, 'field': 'title'}})
    self.assert_expression(
        sq,
        {
            'suggest': {
                'text': 'Complete',
                'in_title': {
                    'term': {
                        'size': 3,
                        'field': 'title',
                    }
                }
            }
        }
    )
    # A second call adds suggesters without discarding earlier ones.
    sq = sq.suggest(in_body={'completion': {'field': 'body'}})
    self.assert_expression(
        sq,
        {
            'suggest': {
                'text': 'Complete',
                'in_title': {
                    'term': {
                        'size': 3,
                        'field': 'title',
                    }
                },
                'in_body': {
                    'completion': {
                        'field': 'body',
                    }
                },
            }
        }
    )
    # suggest(None) resets the whole clause.
    sq = sq.suggest(None)
    self.assert_expression(sq, {})
def test_highlight(self):
    """Compilation of the ``highlight`` clause in its three supported forms."""
    # 1) Raw dict mapping field names to options; no doc classes involved.
    sq = SearchQuery()
    sq = sq.highlight(fields={'content': {}})
    self.assertEqual(collect_doc_classes(sq), set())
    self.assert_expression(
        sq,
        {
            "highlight": {
                "fields": {
                    "content": {}
                }
            }
        }
    )
    # 2) Field objects plus global highlight options.
    sq = SearchQuery()
    sq = sq.highlight(
        fields=[self.index.test.content],
        pre_tags=['[em]'],
        post_tags=['[/em]']
    )
    self.assertEqual(collect_doc_classes(sq), {self.index.test})
    self.assert_expression(
        sq,
        {
            "highlight": {
                "fields": [
                    {
                        "content": {}
                    }
                ],
                "pre_tags": ["[em]"],
                "post_tags": ["[/em]"]
            }
        }
    )
    # 3) Per-field options via Field.highlight().
    sq = SearchQuery()
    sq = sq.highlight(
        fields=[
            self.index.test.content.highlight(
                matched_fields=[self.index.test.content, self.index.test.content.plain],
                type='fvh',
            )
        ]
    )
    self.assertEqual(collect_doc_classes(sq), {self.index.test})
    self.assert_expression(
        sq,
        {
            "highlight": {
                "fields": [
                    {
                        "content": {
                            "matched_fields": ["content", "content.plain"],
                            "type": "fvh"
                        }
                    }
                ]
            }
        }
    )
| |
from torch.utils.data import Dataset
import os
import collections
from gyro import (
LoadGyroData,
LoadOISData,
LoadFrameData,
GetGyroAtTimeStamp,
get_static,
GetMetadata,
GetProjections,
train_GetGyroAtTimeStamp,
QuaternionProduct,
QuaternionReciprocal,
FindOISAtTimeStamp,
norm_quat
)
import random
import numpy as np
import torchvision.transforms as transforms
import torch
from flownet2 import flow_utils
from scipy import ndimage, misc
from numpy import linalg as LA
def get_data_loader(cf, no_flo=False):
    """Build (train, test) DataLoaders from the config dict `cf`.

    The training loader shuffles; the test loader does not. Both pin
    memory and use cf["data"]["num_workers"] worker processes.
    """
    batch_size = cf["data"]["batch_size"]
    workers = cf["data"]["num_workers"]
    train_set, test_set = get_dataset(cf, no_flo)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        pin_memory=True, num_workers=workers)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=batch_size, shuffle=False,
        pin_memory=True, num_workers=workers)
    return train_loader, test_loader
def get_dataset(cf, no_flo=False):
    """Create the training and test Dataset_Gyro instances described by `cf`.

    Falls back to `data_dir` itself when the conventional `training` /
    `test` sub-directories do not exist. Config times are given in ms and
    converted to ns here (the *1000000 factor).
    """
    data_cfg = cf["data"]
    train_transform, test_transform = _data_transforms()

    root = data_cfg["data_dir"]
    train_path = os.path.join(root, "training")
    test_path = os.path.join(root, "test")
    if not os.path.exists(train_path):
        train_path = root
    if not os.path.exists(test_path):
        test_path = root

    common = dict(
        sample_freq=data_cfg["sample_freq"] * 1000000,
        number_real=data_cfg["number_real"],
        time_train=data_cfg["time_train"] * 1000000,
        resize_ratio=data_cfg["resize_ratio"],
        no_flo=no_flo,
    )
    train_data = Dataset_Gyro(train_path, transform=train_transform, **common)
    test_data = Dataset_Gyro(test_path, transform=test_transform, **common)
    return train_data, test_data
def get_inference_data_loader(cf, data_path, no_flo=False):
    """Wrap the inference dataset for `data_path` in a single-item DataLoader."""
    dataset = get_inference_dataset(cf, data_path, no_flo)
    return torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=1)
def get_inference_dataset(cf, data_path, no_flo=False):
    """Build a Dataset_Gyro over the single clip at `data_path` (inference mode)."""
    data_cfg = cf["data"]
    _, test_transform = _data_transforms()
    return Dataset_Gyro(
        data_path,
        sample_freq=data_cfg["sample_freq"] * 1000000,  # ms -> ns
        number_real=data_cfg["number_real"],
        time_train=data_cfg["time_train"] * 1000000,  # ms -> ns
        transform=test_transform,
        resize_ratio=data_cfg["resize_ratio"],
        inference_only=True,
        no_flo=no_flo,
    )
def _data_transforms():
    """Return (train_transform, test_transform); both are plain ToTensor pipelines."""
    train_transform = transforms.Compose([transforms.ToTensor()])
    test_transform = transforms.Compose([transforms.ToTensor()])
    return train_transform, test_transform
class DVS_data():
    """Plain container for the raw inputs of one video clip.

    Attributes are filled in by Dataset_Gyro.process_one_video; all start
    out empty.
    """

    def __init__(self):
        # gyroscope samples, OIS readings, per-frame metadata
        self.gyro = None
        self.ois = None
        self.frame = None
        # forward / backward optical-flow file lists and the flow image shape
        self.flo_path = None
        self.flo_shape = None
        self.flo_back_path = None
        # number of usable (frame, flow) pairs in the clip
        self.length = 0
class Dataset_Gyro(Dataset):
    """Dataset of gyro / OIS / frame / optical-flow data for video stabilization.

    One item corresponds to one clip: flattened gyro-quaternion inputs per
    frame, frame timestamps, forward/backward optical flow, real-camera
    projection matrices, real gyro orientations and OIS offsets.
    """

    def __init__(self, path, sample_freq = 33*1000000, number_real = 10, time_train = 2000*1000000,
                 transform = None, inference_only = False, no_flo = False, resize_ratio = 1):
        r"""
        Arguments:
            path: root directory containing one sub-directory per clip
                (or a single clip directory when inference_only=True)
            sample_freq: real quaternions [t-sample_freq*number_real, t+sample_freq*number_real] ns
            number_real: real gyro num in half time_interval
            time_train: time for a batch ns
            transform: stored for interface compatibility (not used in this class body)
            inference_only: treat `path` as a single clip and use its full length
            no_flo: skip loading optical flow in __getitem__
            resize_ratio: stored resize factor (not used in this class body)
        """
        self.sample_freq = sample_freq
        self.number_real = number_real
        self.no_flo = no_flo
        self.resize_ratio = resize_ratio
        self.static_options = get_static()
        self.inference_only = inference_only
        # OIS readings are in percent of the crop window; this ratio rescales
        # them (the * 0.01 converts percent to a fraction).
        self.ois_ratio = np.array([self.static_options["crop_window_width"] / self.static_options["width"],
                                   self.static_options["crop_window_height"] / self.static_options["height"]]) * 0.01
        # one sample unit is a quaternion (4 floats)
        self.unit_size = 4
        if inference_only:
            # single clip, consumed over its full usable length
            self.length = 1
            self.data = [self.process_one_video(path)]
            self.number_train = self.data[0].length
            return
        self.time_train = time_train
        # frames per training window
        self.number_train = time_train//self.sample_freq
        self.data_name = sorted(os.listdir(path))
        self.length = len(self.data_name)
        self.data = []
        for i in range(self.length):
            self.data.append(self.process_one_video(os.path.join(path,self.data_name[i])))

    def process_one_video(self, path):
        """Load gyro, OIS, frame metadata and flow file lists for one clip directory.

        Files are recognised by name: "frame*txt", "*gyro*", "*ois*txt" and the
        "flo" / "flo_back" sub-directories; anything containing "gimbal" is skipped.
        """
        dvs_data = DVS_data()
        files = sorted(os.listdir(path))
        print(path)
        for f in files:
            file_path = os.path.join(path,f)
            if "gimbal" in file_path.lower():
                continue
            if "frame" in f and "txt" in f:
                dvs_data.frame = LoadFrameData(file_path)
                print("frame:", dvs_data.frame.shape, end=" ")
            elif "gyro" in f:
                dvs_data.gyro = LoadGyroData(file_path)
                # mirror-extend the gyro track before t0 so early frames can
                # still look backwards in time (see preprocess_gyro)
                dvs_data.gyro = preprocess_gyro(dvs_data.gyro)
                print("gyro:", dvs_data.gyro.shape, end=" ")
            elif "ois" in f and "txt" in f:
                dvs_data.ois = LoadOISData(file_path)
                print("ois:", dvs_data.ois.shape, end=" ")
            elif f == "flo":
                dvs_data.flo_path, dvs_data.flo_shape = LoadFlow(file_path)
                print("flo_path:", len(dvs_data.flo_path), end=" ")
                print("flo_shape:", dvs_data.flo_shape, end=" ")
            elif f == "flo_back":
                dvs_data.flo_back_path, _ = LoadFlow(file_path)
        print()
        # usable length: frame pairs, bounded by the available flow files if any
        if dvs_data.flo_path is not None:
            dvs_data.length = min(dvs_data.frame.shape[0] - 1, len(dvs_data.flo_path))
        else:
            dvs_data.length = dvs_data.frame.shape[0] - 1
        return dvs_data

    def generate_quaternions(self, dvs_data):
        """Sample a random window of number_train frames and build gyro inputs.

        Returns:
            sample_data: (number_train, (2*number_real+1)*4) flattened relative
                quaternions around each frame timestamp
            sample_time: (number_train+1,) frame-center timestamps, ns
            first_id: index of the first sampled frame
            real_postion: (number_train, 4) real gyro quaternion one
                sample_freq before each frame time
            sample_ois: (number_train, 2) rescaled OIS offsets
        """
        first_id = random.randint(0, dvs_data.length - self.number_train) + 1  # skip the first frame
        sample_data = np.zeros((self.number_train, 2 * self.number_real + 1, self.unit_size), dtype=np.float32)
        sample_ois = np.zeros((self.number_train, 2), dtype=np.float32)
        sample_time = np.zeros((self.number_train+1), dtype=np.float32)
        sample_time[0] = get_timestamp(dvs_data.frame, first_id - 1)
        real_postion = np.zeros((self.number_train, 4), dtype=np.float32)
        time_start = sample_time[0]
        for i in range(self.number_train):
            sample_time[i+1] = get_timestamp(dvs_data.frame, first_id + i)
            real_postion[i] = GetGyroAtTimeStamp(dvs_data.gyro, sample_time[i+1] - self.sample_freq)
            sample_ois[i] = self.get_ois_at_timestamp(dvs_data.ois, sample_time[i+1])
            # window of 2*number_real+1 relative quaternions centred on the
            # frame timestamp, spaced sample_freq apart
            for j in range(-self.number_real, self.number_real+1):
                index = j + self.number_real
                time_stamp = sample_time[i+1] + self.sample_freq * j
                sample_data[i, index] = self.get_data_at_timestamp(dvs_data.gyro, dvs_data.ois, time_stamp, real_postion[i])
        sample_data = np.reshape(sample_data, (self.number_train, (2*self.number_real+1) * self.unit_size))
        return sample_data, sample_time, first_id, real_postion, sample_ois

    def load_flo(self, idx, first_id):
        """Load forward and backward optical flow for the sampled window.

        NOTE(review): flow file index is frame_id - 1; presumably flow i
        describes the pair (frame i, frame i+1) — confirm against the flow
        extraction pipeline.
        """
        shape = self.data[idx].flo_shape
        h, w = shape[0], shape[1]
        flo = np.zeros((self.number_train, h, w, 2))
        flo_back = np.zeros((self.number_train, h, w, 2))
        for i in range(self.number_train):
            frame_id = i + first_id
            f = flow_utils.readFlow(self.data[idx].flo_path[frame_id-1]).astype(np.float32)
            flo[i] = f
            f_b = flow_utils.readFlow(self.data[idx].flo_back_path[frame_id-1]).astype(np.float32)
            flo_back[i] = f_b
        return flo, flo_back

    def load_real_projections(self, idx, first_id):
        """Compute real-camera projection matrices for the window plus one frame."""
        real_projections = np.zeros((self.number_train + 1, self.static_options["num_grid_rows"], 3, 3))
        for i in range(self.number_train + 1):
            frame_id = i + first_id
            metadata = GetMetadata(self.data[idx].frame, frame_id - 1)
            # OIS is zeroed and the rolling shutter disabled (no_shutter=True)
            real_projections[i] = np.array(GetProjections(self.static_options, metadata, self.data[idx].gyro, np.zeros(self.data[idx].ois.shape), no_shutter = True))
        return real_projections

    def __getitem__(self, idx):
        """Return one training sample for clip `idx`."""
        inputs, times, first_id, real_postion, ois = self.generate_quaternions(self.data[idx])
        real_projections = self.load_real_projections(idx, first_id)
        if self.no_flo:
            # integer placeholders keep the return tuple's arity stable
            flo, flo_back = 0, 0
        else:
            flo, flo_back = self.load_flo(idx, first_id)
        return inputs, times, flo, flo_back, real_projections, real_postion, ois, idx

    def __len__(self):
        # number of clips (1 in inference mode)
        return self.length

    def get_virtual_data(self, virtual_queue, real_queue_idx, pre_times, cur_times, time_start, batch_size, number_virtual, quat_t_1):
        """Build the virtual-camera history input for each batch element.

        virtual_queue: per-element array [num, 5] of (timestamp, quaternion);
        falls back to the real gyro track where the queue has no sample.

        Returns:
            virtual_data: (batch_size, number_virtual*4) relative quaternions
                quat_t * quat_t_1^-1 of the last number_virtual virtual samples
            vt_1: (batch_size, 4) absolute virtual orientation at pre_times
        """
        virtual_data = np.zeros((batch_size, number_virtual, 4), dtype=np.float32)
        vt_1 = np.zeros((batch_size, 4), dtype=np.float32)
        quat_t_1 = quat_t_1.numpy()
        for i in range(batch_size):
            sample_time = cur_times[i]
            for j in range(number_virtual):
                # j-th sample is (number_virtual - j) steps before the frame time
                time_stamp = sample_time - self.sample_freq * (number_virtual - j)
                virtual_data[i, j] = get_virtual_at_timestamp(virtual_queue[i], self.data[real_queue_idx[i]].gyro, time_stamp, time_start[i], quat_t_1[i])
            vt_1[i] = get_virtual_at_timestamp(virtual_queue[i], self.data[real_queue_idx[i]].gyro, pre_times[i], time_start[i], None)
        virtual_data = np.reshape(virtual_data, (batch_size, number_virtual * 4))
        return torch.tensor(virtual_data, dtype=torch.float), torch.tensor(vt_1, dtype=torch.float)

    def update_virtual_queue(self, batch_size, virtual_queue, out, times):
        """Append the newly predicted virtual poses `out` at `times` to the queue."""
        virtual_data = np.zeros((batch_size, 5))
        virtual_data[:,0] = times
        virtual_data[:, 1:] = out
        virtual_data = np.expand_dims(virtual_data, axis = 1)
        # NOTE(review): `None in virtual_queue` marks an uninitialised queue;
        # on an ndarray this relies on elementwise comparison — confirm that
        # callers seed the queue with a container holding None.
        if None in virtual_queue:
            virtual_queue = virtual_data
        else:
            virtual_queue = np.concatenate((virtual_queue, virtual_data), axis = 1)
        return virtual_queue

    def random_init_virtual_queue(self, batch_size, real_postion, times):
        """Seed the virtual queue with three equal poses slightly before `times`.

        Each pose is the real orientation perturbed by a small random rotation.
        """
        virtual_queue = np.zeros((batch_size, 3, 5))
        # three timestamps one sample_freq apart, all just before `times`
        virtual_queue[:, 2, 0] = times - 0.1 * self.sample_freq
        virtual_queue[:, 1, 0] = times - 1.1 * self.sample_freq
        virtual_queue[:, 0, 0] = times - 2.1 * self.sample_freq
        for i in range(batch_size):
            quat = np.random.uniform(low=-0.06, high= 0.06, size=4)  # transfer to angle # 0.05
            quat[3] = 1
            quat = quat / LA.norm(quat)
            quat = norm_quat(QuaternionProduct(real_postion[i], quat))
            # all three history slots start from the same perturbed pose
            virtual_queue[i, 2, 1:] = quat
            virtual_queue[i, 1, 1:] = quat
            virtual_queue[i, 0, 1:] = quat
        return virtual_queue

    def get_data_at_timestamp(self, gyro_data, ois_data, time_stamp, quat_t_1):
        """Relative rotation quat_t * quat_t_1^-1 at `time_stamp` (ois_data unused here)."""
        quat_t = GetGyroAtTimeStamp(gyro_data, time_stamp)
        quat_dif = QuaternionProduct(quat_t, QuaternionReciprocal(quat_t_1))
        return quat_dif

    def get_ois_at_timestamp(self, ois_data, time_stamp):
        """OIS offset at `time_stamp`, rescaled by 1/ois_ratio (percent -> pixels)."""
        ois_t = FindOISAtTimeStamp(ois_data, time_stamp)
        ois_t = np.array(ois_t) / self.ois_ratio
        return ois_t
def get_timestamp(frame_data, idx):
    """Return the mid-exposure timestamp (ns) of frame `idx`.

    The stored timestamp marks the start of the rolling-shutter readout, so
    half of the readout time (rs_time_ns) is added to get the frame-center
    time.

    Fix: removed the dead local `sample_time` (an unused read of
    frame_data[idx, 0]) and the misspelled intermediate `timestmap_ns`.
    """
    metadata = GetMetadata(frame_data, idx)
    return metadata["timestamp_ns"] + metadata["rs_time_ns"] * 0.5
def preprocess_gyro(gyro, extend = 200):
    """Prepend `extend` mirrored samples before the first gyro timestamp.

    Rows 1..extend of `gyro` (each [t, x, y, z, w]) are reflected about the
    first timestamp: times are mirrored to t0 - (t - t0), the vector part
    (cols 1:4) is negated and column 4 is copied unchanged. The mirrored
    rows are returned in chronological order, concatenated before `gyro`.
    """
    t0 = gyro[0, 0]
    src = gyro[1:extend + 1]
    mirror = np.zeros((extend, 5))
    # assign through a reversed view so mirror stays time-ordered
    mirror[::-1, 0] = t0 - (src[:, 0] - t0)
    mirror[::-1, 1:4] = -src[:, 1:4]
    mirror[::-1, 4] = src[:, 4]
    return np.concatenate((mirror, gyro), axis=0)
def LoadFlow(path):
    """List the flow files under `path`.

    Returns (sorted list of full file paths, shape of the first flow image).
    """
    paths = [os.path.join(path, name) for name in sorted(os.listdir(path))]
    # read one file only to discover the flow resolution
    return paths, flow_utils.readFlow(paths[0]).shape
def get_virtual_at_timestamp(virtual_queue, real_queue, time_stamp, time_start, quat_t_1 = None, sample_freq = None):
    """Look up the virtual-camera quaternion at `time_stamp`.

    Falls back to the real gyro track when the virtual queue is missing or
    has no sample at the requested time. When `quat_t_1` is given, the
    relative rotation quat_t * quat_t_1^-1 is returned instead of the
    absolute orientation. `time_start` and `sample_freq` are unused in this
    body; they are kept for interface compatibility with callers.
    """
    quat_t = None
    if virtual_queue is not None:
        quat_t = train_GetGyroAtTimeStamp(virtual_queue, time_stamp)
    if quat_t is None:
        quat_t = GetGyroAtTimeStamp(real_queue, time_stamp)
    if quat_t_1 is None:
        return quat_t
    return QuaternionProduct(quat_t, QuaternionReciprocal(quat_t_1))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def input_from_feature_columns(columns_to_tensors,
                               feature_columns,
                               weight_collections=None,
                               trainable=True,
                               scope=None):
  """A tf.contrib.layer style input layer builder based on FeatureColumns.

  Generally a single example in training data is described with feature columns.
  At the first layer of the model, this column oriented data should be converted
  to a single tensor. Each feature column needs a different kind of operation
  during this conversion. For example sparse features need a totally different
  handling than continuous features.

  An example usage of input_from_feature_columns is as follows:

    # Building model for training
    columns_to_tensor = tf.parse_example(...)
    first_layer = input_from_feature_columns(
        columns_to_tensors=columns_to_tensor,
        feature_columns=feature_columns)
    second_layer = fully_connected(first_layer, ...)
    ...

    where feature_columns can be defined as follows:

    occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)
    occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                     combiner="sum")
    age = real_valued_column("age")
    age_buckets = bucketized_column(
        source_column=age,
        boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    occupation_x_age = crossed_column(columns=[occupation, age_buckets],
                                      hash_bucket_size=10000)

    feature_columns=[occupation_emb, occupation_x_age]

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string' key
      means a base feature (not-transformed). It can have FeatureColumn as a
      key too. That means that FeatureColumn is already transformed by input
      pipeline. For example, `inflow` may have handled transformations.
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived by FeatureColumn.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_op_scope.

  Returns:
    A Tensor which can be consumed by hidden layers in the neural network.

  Raises:
    ValueError: if FeatureColumn cannot be consumed by a neural network.
  """
  check_feature_columns(feature_columns)
  with variable_scope.variable_op_scope(columns_to_tensors.values(), scope,
                                        'input_from_feature_columns'):
    output_tensors = []
    transformer = _Transformer(columns_to_tensors)
    if weight_collections:
      # Always track created weights in the global VARIABLES collection as
      # well; the set() deduplicates user-supplied collection names.
      weight_collections = list(set(list(weight_collections) +
                                    [ops.GraphKeys.VARIABLES]))
    # Iterate in key order so variable creation is deterministic across runs.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      # Each column gets its own sub-scope named after the column.
      with variable_scope.variable_op_scope(
          columns_to_tensors.values(), None, column.name):
        try:
          transformed_tensor = transformer.transform(column)
          output_tensors.append(column.to_dnn_input_layer(
              transformed_tensor, weight_collections, trainable))
        except ValueError as e:
          # Re-raise with the offending column's name for easier debugging.
          raise ValueError('Error creating input layer for column: {}.\n'
                           '{}'.format(column.name, e))
    # Concatenate the per-column dense tensors along the feature axis
    # (legacy TF concat signature: axis argument comes first).
    return array_ops.concat(1, output_tensors)
def weighted_sum_from_feature_columns(columns_to_tensors,
                                      feature_columns,
                                      num_outputs,
                                      weight_collections=None,
                                      trainable=True,
                                      scope=None):
  """A tf.contrib.layer style linear prediction builder based on FeatureColumns.

  Generally a single example in training data is described with feature columns.
  This function generates weighted sum for each num_outputs. Weighted sum refers
  to logits in classification problems. It refers to prediction itself for
  linear regression problems.

  An example usage of weighted_sum_from_feature_columns is as follows:

    # Building model for training
    columns_to_tensor = tf.parse_example(...)
    logits = weighted_sum_from_feature_columns(
        columns_to_tensors=columns_to_tensor,
        feature_columns=feature_columns,
        num_outputs=1)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)

    where feature_columns can be defined as follows:

    occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                                hash_bucket_size=1000)
    occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                      combiner="sum")
    age = real_valued_column("age")
    age_buckets = bucketized_column(
        source_column=age,
        boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    occupation_x_age = crossed_column(columns=[occupation, age_buckets],
                                      hash_bucket_size=10000)

    feature_columns = [occupation_emb, occupation_x_age]

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string' key
      means a base feature (not-transformed). It can have FeatureColumn as a
      key too. That means that FeatureColumn is already transformed by input
      pipeline. For example, `inflow` may have handled transformations.
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
    num_outputs: An integer specifying number of outputs. Default value is 1.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_op_scope.

  Returns:
    A tuple containing:

      * A Tensor which represents predictions of a linear model.
      * A dictionary which maps feature_column to corresponding Variable.
      * A Variable which is used for bias.

  Raises:
    ValueError: if FeatureColumn cannot be used for linear predictions.
  """
  check_feature_columns(feature_columns)
  with variable_scope.variable_op_scope(columns_to_tensors.values(), scope,
                                        'weighted_sum_from_feature_columns'):
    output_tensors = []
    column_to_variable = dict()
    transformer = _Transformer(columns_to_tensors)
    # Iterate in key-sorted order so the variable creation order -- and
    # therefore the generated variable names -- is deterministic across runs.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      # Each column gets its own sub-scope, named after the column.
      with variable_scope.variable_op_scope(
          columns_to_tensors.values(), None, column.name):
        try:
          transformed_tensor = transformer.transform(column)
          predictions, variable = column.to_weighted_sum(transformed_tensor,
                                                         num_outputs,
                                                         weight_collections,
                                                         trainable)
        except ValueError as e:
          # Re-raise with the offending column's name for easier debugging.
          raise ValueError('Error creating weighted sum for column: {}.\n'
                           '{}'.format(column.name, e))
        output_tensors.append(predictions)
        column_to_variable[column] = variable
        _log_variable(variable)
    # Sum the per-column contributions, then add a single shared bias term.
    predictions_no_bias = math_ops.add_n(output_tensors)
    bias = contrib_variables.model_variable(
        'bias_weight',
        shape=[num_outputs],
        initializer=init_ops.zeros_initializer,
        collections=fc._add_variable_collection(weight_collections))  # pylint: disable=protected-access
    _log_variable(bias)
    predictions = nn_ops.bias_add(predictions_no_bias, bias)

    return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
                                        feature_columns,
                                        name=None,
                                        example_names=None):
  """Parses tf.Examples to extract tensors for given feature_columns.

  This is a wrapper of 'tf.parse_example'. A typical usage is as follows:

  ```python
  columns_to_tensor = parse_feature_columns_from_examples(
      serialized=my_data,
      feature_columns=my_features)

  # Where my_features are:
  # Define features and transformations
  country = sparse_column_with_keys(column_name="native_country",
                                    keys=["US", "BRA", ...])
  country_emb = embedding_column(sparse_id_column=country, dimension=3,
                                 combiner="sum")
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                    combiner="sum")
  occupation_x_country = crossed_column(columns=[occupation, country],
                                        hash_bucket_size=10000)
  age = real_valued_column("age")
  age_buckets = bucketized_column(
      source_column=age,
      boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
  my_features = [occupation_emb, age_buckets, country_emb]
  ```

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
    name: A name for this operation (optional).
    example_names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos in the batch.

  Returns:
    A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
  """
  check_feature_columns(feature_columns)
  # Build the parsing spec from the columns, then let parse_example produce
  # the base (untransformed) tensors.
  feature_spec = fc.create_feature_spec_for_parsing(feature_columns)
  columns_to_tensors = parsing_ops.parse_example(
      serialized=serialized,
      features=feature_spec,
      name=name,
      example_names=example_names)

  # Eagerly apply every column's transformation; results are inserted into
  # columns_to_tensors as a side effect. Sorted order keeps op naming stable.
  transformer = _Transformer(columns_to_tensors)
  for feature_column in sorted(set(feature_columns), key=lambda c: c.key):
    transformer.transform(feature_column)
  return columns_to_tensors
def _log_variable(variable):
  """Logs the name and device of each created variable.

  Args:
    variable: A `Variable`, or a list of `Variable`s (e.g. the per-shard
      variables returned for a partitioned column).
  """
  if isinstance(variable, list):
    for var in variable:
      # Bug fix: the original tested `isinstance(variable, ...)` here, i.e.
      # the enclosing list, which is never a Variable -- so elements of a
      # variable list were silently never logged. Check each element instead.
      if isinstance(var, variables.Variable):
        logging.info('Created variable %s, with device=%s', var.name,
                     var.device)
  elif isinstance(variable, variables.Variable):
    logging.info('Created variable %s, with device=%s', variable.name,
                 variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
  """Creates a real_valued_column for given tensor and name.

  Args:
    name: String name for the inferred column.
    tensor: A dense `Tensor` with an integer or floating dtype.

  Returns:
    A `real_valued_column` whose dimension is the product of all non-batch
    dimensions of `tensor`.

  Raises:
    ValueError: If `tensor` is sparse or has a non-numeric dtype.
  """
  if isinstance(tensor, ops.SparseTensor):
    # Bug fix: the original passed `name` and `tensor` as extra ValueError
    # arguments next to a '{} {}' template, so the message was never
    # formatted. Format it explicitly instead.
    raise ValueError(
        'SparseTensor is not supported for auto detection. Please define '
        'corresponding FeatureColumn for tensor {} {}.'.format(name, tensor))

  if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
    raise ValueError(
        'Non integer or non floating types are not supported for auto detection'
        '. Please define corresponding FeatureColumn for tensor {} {}.'.format(
            name, tensor))

  # Flatten all non-batch dimensions into a single feature dimension.
  shape = tensor.get_shape().as_list()
  dimension = 1
  for i in range(1, len(shape)):
    dimension *= shape[i]
  return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
  """Infers a real_valued_column for each given feature.

  A bare tensor produces a single column with an empty name; a dict produces
  one column per (name, tensor) entry.
  """
  if not isinstance(features, dict):
    return [_infer_real_valued_column_for_tensor('', features)]
  return [_infer_real_valued_column_for_tensor(key, value)
          for key, value in features.items()]
def check_feature_columns(feature_columns):
  """Checks the validity of the set of FeatureColumns.

  Args:
    feature_columns: A set of instances or subclasses of FeatureColumn.

  Raises:
    ValueError: If there are duplicate feature column keys.
  """
  keys_seen = set()
  for column in feature_columns:
    if column.key in keys_seen:
      raise ValueError('Duplicate feature column key found for column: {}. '
                       'This usually means that the column is almost identical '
                       'to another column, and one must be discarded.'.format(
                           column.name))
    keys_seen.add(column.key)
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
Some features may be used in more than one places. For example one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
An example usage of Transformer is as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
occupation_x_age_tensor = transformer.transform(occupation_x_age)
occupation_tensor = transformer.transform(occupation)
age_buckets_tensor = transformer.transform(age_buckets)
"""
def __init__(self, columns_to_tensors):
"""Initializes transfomer.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have FeatureColumn as
a key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.info('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
| |
"""The tests for the Recorder component."""
# pylint: disable=protected-access
from datetime import datetime, timedelta
import unittest
import pytest
from homeassistant.components.recorder import (
CONFIG_SCHEMA,
DOMAIN,
Recorder,
run_information,
run_information_from_instance,
run_information_with_session,
)
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import Events, RecorderRuns, States
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import MATCH_ALL, STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from .common import wait_recording_done
from tests.async_mock import patch
from tests.common import (
async_fire_time_changed,
get_test_home_assistant,
init_recorder_component,
)
class TestRecorder(unittest.TestCase):
    """Test the recorder module."""

    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        # Full (threaded) test Home Assistant instance with a recorder.
        self.hass = get_test_home_assistant()
        init_recorder_component(self.hass)
        self.hass.start()
        # addCleanup (rather than tearDown) so the instance is stopped even
        # if setUp itself fails after this point.
        self.addCleanup(self.tear_down_cleanup)

    def tear_down_cleanup(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_saving_state(self):
        """Test saving and restoring a state."""
        entity_id = "test.recorder"
        state = "restoring_from_db"
        attributes = {"test_attr": 5, "test_attr_10": "nice"}

        self.hass.states.set(entity_id, state, attributes)

        # Block until the recorder thread has committed the state row.
        wait_recording_done(self.hass)

        with session_scope(hass=self.hass) as session:
            db_states = list(session.query(States))
            assert len(db_states) == 1
            # A state_changed event row must have been written alongside.
            assert db_states[0].event_id > 0
            # to_native() must run inside the session scope while the row is
            # still attached.
            state = db_states[0].to_native()

        # Restored states carry no context, so compare against a copy of the
        # live state with its context stripped.
        assert state == _state_empty_context(self.hass, entity_id)

    def test_saving_event(self):
        """Test saving and restoring an event."""
        event_type = "EVENT_TEST"
        event_data = {"test_attr": 5, "test_attr_10": "nice"}

        events = []

        @callback
        def event_listener(event):
            """Record events from eventbus."""
            if event.event_type == event_type:
                events.append(event)

        self.hass.bus.listen(MATCH_ALL, event_listener)

        self.hass.bus.fire(event_type, event_data)

        wait_recording_done(self.hass)

        assert len(events) == 1
        event = events[0]

        self.hass.data[DATA_INSTANCE].block_till_done()

        with session_scope(hass=self.hass) as session:
            db_events = list(session.query(Events).filter_by(event_type=event_type))
            assert len(db_events) == 1
            db_event = db_events[0].to_native()

        assert event.event_type == db_event.event_type
        assert event.data == db_event.data
        assert event.origin == db_event.origin

        # Recorder uses SQLite and stores datetimes as integer unix timestamps,
        # so sub-second precision is lost on the round trip.
        assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace(
            microsecond=0
        )
@pytest.fixture
def hass_recorder():
    """Home Assistant fixture with in-memory recorder.

    Yields a factory so each test can pass its own recorder configuration.
    """
    hass = get_test_home_assistant()

    def setup_recorder(config=None):
        """Set up with params."""
        init_recorder_component(hass, config)
        hass.start()
        hass.block_till_done()
        # Also wait for the recorder's own thread so the database is fully
        # initialized before the test writes any state.
        hass.data[DATA_INSTANCE].block_till_done()
        return hass

    yield setup_recorder
    # Teardown: runs after the test completes (pass or fail).
    hass.stop()
def _add_entities(hass, entity_ids):
    """Set a state for each entity id and return the recorded States rows."""
    attributes = {"test_attr": 5, "test_attr_10": "nice"}
    for idx, entity_id in enumerate(entity_ids):
        hass.states.set(entity_id, f"state{idx}", attributes)
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        return [db_state.to_native() for db_state in session.query(States)]
def _add_events(hass, events):
    """Fire the given event types and return the recorded Events rows."""
    # Start from a clean events table so only the fired events remain.
    with session_scope(hass=hass) as session:
        session.query(Events).delete(synchronize_session=False)
    for event_type in events:
        hass.bus.fire(event_type)
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        return [db_event.to_native() for db_event in session.query(Events)]
def _state_empty_context(hass, entity_id):
    """Return the live state of entity_id with its context stripped.

    We don't restore context unless we need it by joining the events table on
    the event_id for state_changed events, so restored states compare equal
    only to a context-less copy.
    """
    current = hass.states.get(entity_id)
    current.context = Context(id=None)
    return current
# pylint: disable=redefined-outer-name,invalid-name
def test_saving_state_include_domains(hass_recorder):
    """Only states from included domains are recorded."""
    hass = hass_recorder({"include": {"domains": "test2"}})
    saved = _add_entities(hass, ["test.recorder", "test2.recorder"])
    assert len(saved) == 1
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
def test_saving_state_include_domains_globs(hass_recorder):
    """Domain includes and glob includes are both honored."""
    hass = hass_recorder(
        {"include": {"domains": "test2", "entity_globs": "*.included_*"}}
    )
    saved = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test3.included_entity"]
    )
    assert len(saved) == 2
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
    assert saved[1] == _state_empty_context(hass, "test3.included_entity")
def test_saving_state_incl_entities(hass_recorder):
    """Only explicitly included entities are recorded."""
    hass = hass_recorder({"include": {"entities": "test2.recorder"}})
    saved = _add_entities(hass, ["test.recorder", "test2.recorder"])
    assert len(saved) == 1
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
def test_saving_event_exclude_event_type(hass_recorder):
    """Excluded event types are not written to the database."""
    hass = hass_recorder({"exclude": {"event_types": "test"}})
    recorded = _add_events(hass, ["test", "test2"])
    assert len(recorded) == 1
    assert recorded[0].event_type == "test2"
def test_saving_state_exclude_domains(hass_recorder):
    """States from excluded domains are not recorded."""
    hass = hass_recorder({"exclude": {"domains": "test"}})
    saved = _add_entities(hass, ["test.recorder", "test2.recorder"])
    assert len(saved) == 1
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
def test_saving_state_exclude_domains_globs(hass_recorder):
    """Domain excludes and glob excludes are both honored."""
    hass = hass_recorder(
        {"exclude": {"domains": "test", "entity_globs": "*.excluded_*"}}
    )
    saved = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test2.excluded_entity"]
    )
    assert len(saved) == 1
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
def test_saving_state_exclude_entities(hass_recorder):
    """Explicitly excluded entities are not recorded."""
    hass = hass_recorder({"exclude": {"entities": "test.recorder"}})
    saved = _add_entities(hass, ["test.recorder", "test2.recorder"])
    assert len(saved) == 1
    assert saved[0] == _state_empty_context(hass, "test2.recorder")
def test_saving_state_exclude_domain_include_entity(hass_recorder):
    """An entity include overrides its domain's exclude."""
    hass = hass_recorder(
        {"include": {"entities": "test.recorder"}, "exclude": {"domains": "test"}}
    )
    saved = _add_entities(hass, ["test.recorder", "test2.recorder"])
    assert len(saved) == 2
def test_saving_state_exclude_domain_glob_include_entity(hass_recorder):
    """Entity includes override domain and glob excludes.

    NOTE(review): the exclude glob is "*._excluded_*" (with a leading
    underscore) while the entity is "test.excluded_entity" -- presumably the
    include list is what keeps all three states; confirm the glob is intended.
    """
    hass = hass_recorder(
        {
            "include": {"entities": ["test.recorder", "test.excluded_entity"]},
            "exclude": {"domains": "test", "entity_globs": "*._excluded_*"},
        }
    )
    saved = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test.excluded_entity"]
    )
    assert len(saved) == 3
def test_saving_state_include_domain_exclude_entity(hass_recorder):
    """An entity exclude overrides its domain's include."""
    hass = hass_recorder(
        {"exclude": {"entities": "test.recorder"}, "include": {"domains": "test"}}
    )
    saved = _add_entities(hass, ["test.recorder", "test2.recorder", "test.ok"])
    assert len(saved) == 1
    expected = _state_empty_context(hass, "test.ok")
    assert saved[0] == expected
    assert expected.state == "state2"
def test_saving_state_include_domain_glob_exclude_entity(hass_recorder):
    """Entity excludes override domain and glob includes."""
    hass = hass_recorder(
        {
            "exclude": {"entities": ["test.recorder", "test2.included_entity"]},
            "include": {"domains": "test", "entity_globs": "*._included_*"},
        }
    )
    saved = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test.ok", "test2.included_entity"]
    )
    assert len(saved) == 1
    expected = _state_empty_context(hass, "test.ok")
    assert saved[0] == expected
    assert expected.state == "state2"
def test_saving_state_and_removing_entity(hass_recorder):
    """Test saving the state of a removed entity.

    Fix: the original signature also requested the ``hass`` fixture, but the
    name was immediately rebound to the instance returned by
    ``hass_recorder()``, so that fixture was set up for nothing; the unused
    request is dropped.
    """
    hass = hass_recorder()
    entity_id = "lock.mine"
    hass.states.set(entity_id, STATE_LOCKED)
    hass.states.set(entity_id, STATE_UNLOCKED)
    hass.states.async_remove(entity_id)

    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        states = list(session.query(States))
        assert len(states) == 3
        assert states[0].entity_id == entity_id
        assert states[0].state == STATE_LOCKED
        assert states[1].entity_id == entity_id
        assert states[1].state == STATE_UNLOCKED
        assert states[2].entity_id == entity_id
        # Removal is recorded as a row with a NULL state.
        assert states[2].state is None
def test_recorder_setup_failure():
    """Test some exceptions."""
    hass = get_test_home_assistant()
    # Patch time.sleep so the connection-retry loop does not actually wait
    # between the (failing) attempts.
    with patch.object(Recorder, "_setup_connection") as setup, patch(
        "homeassistant.components.recorder.time.sleep"
    ):
        setup.side_effect = ImportError("driver not found")
        rec = Recorder(
            hass,
            auto_purge=True,
            keep_days=7,
            commit_interval=1,
            uri="sqlite://",
            db_max_retries=10,
            db_retry_wait=3,
            entity_filter=CONFIG_SCHEMA({DOMAIN: {}}),
            exclude_t=[],
        )
        rec.start()
        # The recorder thread must terminate on its own (join returns) even
        # though every connection attempt raises ImportError.
        rec.join()

    hass.stop()
async def test_defaults_set(hass):
    """Test the config defaults are set."""
    recorder_config = None

    async def mock_setup(hass, config):
        """Mock setup."""
        # Capture the fully-validated recorder config handed to async_setup.
        nonlocal recorder_config
        recorder_config = config["recorder"]
        return True

    with patch("homeassistant.components.recorder.async_setup", side_effect=mock_setup):
        # Setting up `history` pulls in `recorder` as a dependency with its
        # default (empty) configuration.
        assert await async_setup_component(hass, "history", {})

    assert recorder_config is not None
    # pylint: disable=unsubscriptable-object
    assert recorder_config["auto_purge"]
    assert recorder_config["purge_keep_days"] == 10
def test_auto_purge(hass_recorder):
    """Test that the scheduled nightly purge fires exactly once.

    Fix: the original restored the global default timezone only on the happy
    path; a failing assertion would leak Europe/Copenhagen into every later
    test. The restore is now in a ``finally`` block.
    """
    hass = hass_recorder()

    original_tz = dt_util.DEFAULT_TIME_ZONE
    tz = dt_util.get_time_zone("Europe/Copenhagen")
    dt_util.set_default_time_zone(tz)
    try:
        # The purge is scheduled at 4:12am every day. Move to the next year
        # so the trigger time is unambiguously in the future.
        now = dt_util.utcnow()
        test_time = tz.localize(datetime(now.year + 1, 1, 1, 4, 12, 0))
        async_fire_time_changed(hass, test_time)

        with patch(
            "homeassistant.components.recorder.purge.purge_old_data", return_value=True
        ) as purge_old_data:
            # Fire around the trigger time; only one purge may result.
            for delta in (-1, 0, 1):
                async_fire_time_changed(hass, test_time + timedelta(seconds=delta))
                hass.block_till_done()
                hass.data[DATA_INSTANCE].block_till_done()
            assert len(purge_old_data.mock_calls) == 1
    finally:
        # Always restore the process-wide default timezone.
        dt_util.set_default_time_zone(original_tz)
def test_saving_sets_old_state(hass_recorder):
    """Each new state row links to its predecessor via old_state_id."""
    hass = hass_recorder()

    hass.states.set("test.one", "on", {})
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.one", "off", {})
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        rows = list(session.query(States))
        assert len(rows) == 4
        assert [row.entity_id for row in rows] == [
            "test.one",
            "test.two",
            "test.one",
            "test.two",
        ]
        # First rows have no predecessor; later rows point back at them.
        assert rows[0].old_state_id is None
        assert rows[1].old_state_id is None
        assert rows[2].old_state_id == rows[0].state_id
        assert rows[3].old_state_id == rows[1].state_id
def test_saving_state_with_serializable_data(hass_recorder, caplog):
    """Test saving data that cannot be serialized does not crash."""
    hass = hass_recorder()

    # This state carries an attribute the JSON encoder cannot handle.
    hass.states.set("test.one", "on", {"fail": CannotSerializeMe()})
    wait_recording_done(hass)
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        rows = list(session.query(States))
        assert len(rows) == 2
        assert [row.entity_id for row in rows] == ["test.two", "test.two"]
        assert rows[0].old_state_id is None
        assert rows[1].old_state_id == rows[0].state_id

    assert "State is not JSON serializable" in caplog.text
def test_run_information(hass_recorder):
    """Ensure run_information returns expected data."""

    def _assert_valid_run(run_info):
        assert isinstance(run_info, RecorderRuns)
        assert run_info.closed_incorrect is False

    before_start_recording = dt_util.utcnow()

    hass = hass_recorder()
    _assert_valid_run(run_information_from_instance(hass))

    with session_scope(hass=hass) as session:
        _assert_valid_run(run_information_with_session(session))

    _assert_valid_run(run_information(hass))

    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    _assert_valid_run(run_information(hass))

    # No run existed before the recorder started.
    assert run_information(hass, before_start_recording) is None

    _assert_valid_run(run_information(hass, dt_util.utcnow()))
class CannotSerializeMe:
    """Dummy object that the JSONEncoder deliberately cannot serialize."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.