# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic templating functionality."""
from collections import deque
import os
import sys
from genshi.compat import StringIO, BytesIO
from genshi.core import Attrs, Stream, StreamEventKind, START, TEXT, _ensure
from genshi.input import ParseError
__all__ = ['Context', 'DirectiveFactory', 'Template', 'TemplateError',
'TemplateRuntimeError', 'TemplateSyntaxError', 'BadDirectiveError']
__docformat__ = 'restructuredtext en'
class TemplateError(Exception):
"""Base exception class for errors related to template processing."""
def __init__(self, message, filename=None, lineno=-1, offset=-1):
"""Create the exception.
:param message: the error message
:param filename: the filename of the template
        :param lineno: the number of the line in the template at which the
                       error occurred
:param offset: the column number at which the error occurred
"""
if filename is None:
filename = '<string>'
self.msg = message #: the error message string
if filename != '<string>' or lineno >= 0:
message = '%s (%s, line %d)' % (self.msg, filename, lineno)
Exception.__init__(self, message)
self.filename = filename #: the name of the template file
self.lineno = lineno #: the number of the line containing the error
self.offset = offset #: the offset on the line
class TemplateSyntaxError(TemplateError):
"""Exception raised when an expression in a template causes a Python syntax
error, or the template is not well-formed.
"""
    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the exception.
:param message: the error message
:param filename: the filename of the template
        :param lineno: the number of the line in the template at which the
                       error occurred
:param offset: the column number at which the error occurred
"""
if isinstance(message, SyntaxError) and message.lineno is not None:
message = str(message).replace(' (line %d)' % message.lineno, '')
TemplateError.__init__(self, message, filename, lineno)
class BadDirectiveError(TemplateSyntaxError):
"""Exception raised when an unknown directive is encountered when parsing
a template.
An unknown directive is any attribute using the namespace for directives,
with a local name that doesn't match any registered directive.
"""
    def __init__(self, name, filename=None, lineno=-1):
        """Create the exception.
:param name: the name of the directive
:param filename: the filename of the template
        :param lineno: the number of the line in the template at which the
                       error occurred
"""
TemplateSyntaxError.__init__(self, 'bad directive "%s"' % name,
filename, lineno)
class TemplateRuntimeError(TemplateError):
    """Exception raised when the evaluation of a Python expression in a
    template causes an error.
"""
class Context(object):
"""Container for template input data.
A context provides a stack of scopes (represented by dictionaries).
Template directives such as loops can push a new scope on the stack with
data that should only be available inside the loop. When the loop
terminates, that scope can get popped off the stack again.
>>> ctxt = Context(one='foo', other=1)
>>> ctxt.get('one')
'foo'
>>> ctxt.get('other')
1
>>> ctxt.push(dict(one='frost'))
>>> ctxt.get('one')
'frost'
>>> ctxt.get('other')
1
>>> ctxt.pop()
{'one': 'frost'}
>>> ctxt.get('one')
'foo'
"""
def __init__(self, **data):
"""Initialize the template context with the given keyword arguments as
data.
"""
self.frames = deque([data])
self.pop = self.frames.popleft
self.push = self.frames.appendleft
self._match_templates = []
self._choice_stack = []
# Helper functions for use in expressions
def defined(name):
"""Return whether a variable with the specified name exists in the
expression scope."""
return name in self
def value_of(name, default=None):
"""If a variable of the specified name is defined, return its value.
Otherwise, return the provided default value, or ``None``."""
return self.get(name, default)
data.setdefault('defined', defined)
data.setdefault('value_of', value_of)
def __repr__(self):
return repr(list(self.frames))
def __contains__(self, key):
"""Return whether a variable exists in any of the scopes.
:param key: the name of the variable
"""
return self._find(key)[1] is not None
has_key = __contains__
def __delitem__(self, key):
"""Remove a variable from all scopes.
:param key: the name of the variable
"""
for frame in self.frames:
if key in frame:
del frame[key]
    def __getitem__(self, key):
        """Get a variable's value, starting at the current scope and going
upward.
:param key: the name of the variable
:return: the variable value
:raises KeyError: if the requested variable wasn't found in any scope
"""
value, frame = self._find(key)
if frame is None:
raise KeyError(key)
return value
def __len__(self):
"""Return the number of distinctly named variables in the context.
:return: the number of variables in the context
"""
return len(list(self.items()))
def __setitem__(self, key, value):
"""Set a variable in the current scope.
:param key: the name of the variable
:param value: the variable value
"""
self.frames[0][key] = value
def _find(self, key, default=None):
"""Retrieve a given variable's value and the frame it was found in.
Intended primarily for internal use by directives.
:param key: the name of the variable
:param default: the default value to return when the variable is not
found
"""
for frame in self.frames:
if key in frame:
return frame[key], frame
return default, None
def get(self, key, default=None):
"""Get a variable's value, starting at the current scope and going
upward.
:param key: the name of the variable
:param default: the default value to return when the variable is not
found
"""
for frame in self.frames:
if key in frame:
return frame[key]
return default
    def keys(self):
        """Return the names of all variables in the context.
:return: a list of variable names
"""
keys = []
for frame in self.frames:
keys += [key for key in frame if key not in keys]
return keys
def items(self):
"""Return a list of ``(name, value)`` tuples for all variables in the
context.
:return: a list of variables
"""
return [(key, self.get(key)) for key in list(self.keys())]
def update(self, mapping):
"""Update the context from the mapping provided."""
self.frames[0].update(mapping)
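    # The push() and pop() definitions below serve as documentation only;
    # at runtime they are shadowed by the bound deque methods assigned in
    # __init__().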
def push(self, data):
"""Push a new scope on the stack.
:param data: the data dictionary to push on the context stack.
"""
def pop(self):
"""Pop the top-most scope from the stack."""
def copy(self):
"""Create a copy of this Context object."""
# required to make f_locals a dict-like object
# See http://genshi.edgewall.org/ticket/249 for
# example use case in Twisted tracebacks
ctxt = Context()
ctxt.frames.pop() # pop empty dummy context
ctxt.frames.extend(self.frames)
ctxt._match_templates.extend(self._match_templates)
ctxt._choice_stack.extend(self._choice_stack)
return ctxt
def _apply_directives(stream, directives, ctxt, vars):
"""Apply the given directives to the stream.
:param stream: the stream the directives should be applied to
:param directives: the list of directives to apply
:param ctxt: the `Context`
:param vars: additional variables that should be available when Python
code is executed
:return: the stream with the given directives applied
"""
if directives:
stream = directives[0](iter(stream), directives[1:], ctxt, **vars)
return stream
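# Illustration (directive names hypothetical): for a SUB event carrying
# [for_directive, if_directive], the call above expands to
#     for_directive(iter(stream), [if_directive], ctxt, **vars)
# where each directive is responsible for invoking the remainder of the
# chain on the substream it produces.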
def _eval_expr(expr, ctxt, vars=None):
"""Evaluate the given `Expression` object.
:param expr: the expression to evaluate
:param ctxt: the `Context`
:param vars: additional variables that should be available to the
expression
:return: the result of the evaluation
"""
if vars:
ctxt.push(vars)
retval = expr.evaluate(ctxt)
if vars:
ctxt.pop()
return retval
def _exec_suite(suite, ctxt, vars=None):
"""Execute the given `Suite` object.
:param suite: the code suite to execute
:param ctxt: the `Context`
:param vars: additional variables that should be available to the
code
"""
if vars:
ctxt.push(vars)
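    # Execute the suite in a fresh frame so that any names it binds can be
    # collected and merged back into the caller's scope afterwards.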
ctxt.push({})
suite.execute(ctxt)
if vars:
top = ctxt.pop()
ctxt.pop()
ctxt.frames[0].update(top)
class DirectiveFactoryMeta(type):
"""Meta class for directive factories."""
def __new__(cls, name, bases, d):
if 'directives' in d:
d['_dir_by_name'] = dict(d['directives'])
d['_dir_order'] = [directive[1] for directive in d['directives']]
return type.__new__(cls, name, bases, d)
class DirectiveFactory(object, metaclass=DirectiveFactoryMeta):
"""Base for classes that provide a set of template directives.
:since: version 0.6
"""
directives = []
"""A list of ``(name, cls)`` tuples that define the set of directives
provided by this factory.
"""
def get_directive(self, name):
"""Return the directive class for the given name.
:param name: the directive name as used in the template
:return: the directive class
:see: `Directive`
"""
return self._dir_by_name.get(name)
def get_directive_index(self, dir_cls):
"""Return a key for the given directive class that should be used to
sort it among other directives on the same `SUB` event.
The default implementation simply returns the index of the directive in
the `directives` list.
:param dir_cls: the directive class
:return: the sort key
"""
if dir_cls in self._dir_order:
return self._dir_order.index(dir_cls)
return len(self._dir_order)
class Template(DirectiveFactory):
"""Abstract template base class.
This class implements most of the template processing model, but does not
specify the syntax of templates.
"""
EXEC = StreamEventKind('EXEC')
"""Stream event kind representing a Python code suite to execute."""
EXPR = StreamEventKind('EXPR')
"""Stream event kind representing a Python expression."""
INCLUDE = StreamEventKind('INCLUDE')
"""Stream event kind representing the inclusion of another template."""
SUB = StreamEventKind('SUB')
"""Stream event kind representing a nested stream to which one or more
directives should be applied.
"""
serializer = None
_number_conv = str # function used to convert numbers to event data
def __init__(self, source, filepath=None, filename=None, loader=None,
encoding=None, lookup='strict', allow_exec=True):
"""Initialize a template from either a string, a file-like object, or
an already parsed markup stream.
:param source: a string, file-like object, or markup stream to read the
template from
:param filepath: the absolute path to the template file
:param filename: the path to the template file relative to the search
path
:param loader: the `TemplateLoader` to use for loading included
templates
:param encoding: the encoding of the `source`
:param lookup: the variable lookup mechanism; either "strict" (the
default), "lenient", or a custom lookup class
:param allow_exec: whether Python code blocks in templates should be
allowed
:note: Changed in 0.5: Added the `allow_exec` argument
"""
self.filepath = filepath or filename
self.filename = filename
self.loader = loader
self.lookup = lookup
self.allow_exec = allow_exec
self._init_filters()
self._init_loader()
self._prepared = False
if not isinstance(source, Stream) and not hasattr(source, 'read'):
if isinstance(source, str):
source = StringIO(source)
else:
source = BytesIO(source)
try:
self._stream = self._parse(source, encoding)
except ParseError as e:
raise TemplateSyntaxError(e.msg, self.filepath, e.lineno, e.offset)
def __getstate__(self):
state = self.__dict__.copy()
state['filters'] = []
return state
def __setstate__(self, state):
self.__dict__ = state
self._init_filters()
def __repr__(self):
return '<%s "%s">' % (type(self).__name__, self.filename)
def _init_filters(self):
self.filters = [self._flatten, self._include]
def _init_loader(self):
if self.loader is None:
from genshi.template.loader import TemplateLoader
if self.filename:
if self.filepath != self.filename:
                    basedir = os.path.normpath(self.filepath)[
                        :-len(os.path.normpath(self.filename))]
else:
basedir = os.path.dirname(self.filename)
else:
basedir = '.'
self.loader = TemplateLoader([os.path.abspath(basedir)])
@property
def stream(self):
if not self._prepared:
self._stream = list(self._prepare(self._stream))
self._prepared = True
return self._stream
def _parse(self, source, encoding):
"""Parse the template.
The parsing stage parses the template and constructs a list of
directives that will be executed in the render stage. The input is
split up into literal output (text that does not depend on the context
data) and directives or expressions.
:param source: a file-like object containing the XML source of the
template, or an XML event stream
:param encoding: the encoding of the `source`
"""
raise NotImplementedError
def _prepare(self, stream):
"""Call the `attach` method of every directive found in the template.
:param stream: the event stream of the template
"""
from genshi.template.loader import TemplateNotFound
for kind, data, pos in stream:
if kind is SUB:
directives = []
substream = data[1]
for _, cls, value, namespaces, pos in sorted(data[0]):
directive, substream = cls.attach(self, substream, value,
namespaces, pos)
if directive:
directives.append(directive)
substream = self._prepare(substream)
if directives:
yield kind, (directives, list(substream)), pos
else:
for event in substream:
yield event
else:
if kind is INCLUDE:
href, cls, fallback = data
if isinstance(href, str) and \
not getattr(self.loader, 'auto_reload', True):
# If the path to the included template is static, and
# auto-reloading is disabled on the template loader,
# the template is inlined into the stream
try:
tmpl = self.loader.load(href, relative_to=pos[0],
cls=cls or self.__class__)
for event in tmpl.stream:
yield event
except TemplateNotFound:
if fallback is None:
raise
for event in self._prepare(fallback):
yield event
continue
elif fallback:
# Otherwise the include is performed at run time
data = href, cls, list(self._prepare(fallback))
yield kind, data, pos
def generate(self, *args, **kwargs):
"""Apply the template to the given context data.
Any keyword arguments are made available to the template as context
data.
Only one positional argument is accepted: if it is provided, it must be
an instance of the `Context` class, and keyword arguments are ignored.
This calling style is used for internal processing.
:return: a markup event stream representing the result of applying
the template to the context data.
"""
vars = {}
if args:
assert len(args) == 1
ctxt = args[0]
if ctxt is None:
ctxt = Context(**kwargs)
else:
vars = kwargs
assert isinstance(ctxt, Context)
else:
ctxt = Context(**kwargs)
stream = self.stream
for filter_ in self.filters:
stream = filter_(iter(stream), ctxt, **vars)
return Stream(stream, self.serializer)
def _flatten(self, stream, ctxt, **vars):
number_conv = self._number_conv
stack = []
push = stack.append
pop = stack.pop
stream = iter(stream)
while 1:
for kind, data, pos in stream:
if kind is START and data[1]:
# Attributes may still contain expressions in start tags at
# this point, so do some evaluation
tag, attrs = data
new_attrs = []
for name, value in attrs:
if type(value) is list: # this is an interpolated string
values = [event[1]
for event in self._flatten(value, ctxt, **vars)
if event[0] is TEXT and event[1] is not None
]
if not values:
continue
value = ''.join(values)
new_attrs.append((name, value))
yield kind, (tag, Attrs(new_attrs)), pos
elif kind is EXPR:
result = _eval_expr(data, ctxt, vars)
if result is not None:
# First check for a string, otherwise the iterable test
# below succeeds, and the string will be chopped up into
# individual characters
if isinstance(result, str):
yield TEXT, result, pos
elif isinstance(result, (int, float)):
yield TEXT, number_conv(result), pos
elif hasattr(result, '__iter__'):
push(stream)
stream = _ensure(result)
break
else:
yield TEXT, str(result), pos
elif kind is SUB:
# This event is a list of directives and a list of nested
# events to which those directives should be applied
push(stream)
stream = _apply_directives(data[1], data[0], ctxt, vars)
break
elif kind is EXEC:
_exec_suite(data, ctxt, vars)
else:
yield kind, data, pos
else:
if not stack:
break
stream = pop()
def _include(self, stream, ctxt, **vars):
"""Internal stream filter that performs inclusion of external
template files.
"""
from genshi.template.loader import TemplateNotFound
for event in stream:
if event[0] is INCLUDE:
href, cls, fallback = event[1]
if not isinstance(href, str):
parts = []
for subkind, subdata, subpos in self._flatten(href, ctxt,
**vars):
if subkind is TEXT:
parts.append(subdata)
href = ''.join([x for x in parts if x is not None])
try:
tmpl = self.loader.load(href, relative_to=event[2][0],
cls=cls or self.__class__)
for event in tmpl.generate(ctxt, **vars):
yield event
except TemplateNotFound:
if fallback is None:
raise
for filter_ in self.filters:
fallback = filter_(iter(fallback), ctxt, **vars)
for event in fallback:
yield event
else:
yield event
EXEC = Template.EXEC
EXPR = Template.EXPR
INCLUDE = Template.INCLUDE
SUB = Template.SUB
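
# A minimal usage sketch of the Context scope stack (illustrative addition,
# not part of the original API): push() shadows variables for the duration
# of a directive, and pop() restores the enclosing scope.
if __name__ == '__main__':
    _ctxt = Context(title='outer', count=1)
    _ctxt.push({'title': 'inner'})            # e.g. entering a loop body
    assert _ctxt.get('title') == 'inner'      # the innermost frame wins
    assert _ctxt.get('count') == 1            # lookup falls through to outer
    assert _ctxt.pop() == {'title': 'inner'}  # pop() returns the scope dict
    assert _ctxt.get('title') == 'outer'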

# ---------------------------------------------------------------------------

import hashlib
import logging
import math
import os
import pickle
import re
import requests
from retrying import retry
import xdg.BaseDirectory
from lobster.core.dataset import DatasetInfo
from lobster.util import Configurable
from dbs.apis.dbsClient import DbsApi
from WMCore.Credential.Proxy import Proxy
from WMCore.DataStructs.LumiList import LumiList
logger = logging.getLogger('lobster.cmssw.dataset')
class DASWrapper(DbsApi):
@retry(stop_max_attempt_number=10)
def listFileLumis(self, *args, **kwargs):
return super(DASWrapper, self).listFileLumis(*args, **kwargs)
@retry(stop_max_attempt_number=10)
def listFileSummaries(self, *args, **kwargs):
return super(DASWrapper, self).listFileSummaries(*args, **kwargs)
@retry(stop_max_attempt_number=10)
def listFiles(self, *args, **kwargs):
return super(DASWrapper, self).listFiles(*args, **kwargs)
@retry(stop_max_attempt_number=10)
def listBlocks(self, *args, **kwargs):
return super(DASWrapper, self).listBlocks(*args, **kwargs)
class Cache(object):
def __init__(self):
self.cachedir = xdg.BaseDirectory.save_cache_path('lobster')
def __cachename(self, name, mask):
m = hashlib.sha256()
m.update(name)
if mask:
m.update(mask)
return os.path.join(self.cachedir,
"{}-{}.pkl".format(name.strip('/').split('/')[0], m.hexdigest()))
def cache(self, name, mask, baseinfo, dataset):
logger.debug("writing dataset '{}' to cache".format(name))
with open(self.__cachename(name, mask), 'wb') as fd:
pickle.dump((baseinfo, dataset), fd)
def cached(self, name, mask, baseinfo):
try:
with open(self.__cachename(name, mask), 'rb') as fd:
info, dset = pickle.load(fd)
if baseinfo == info:
logger.debug("retrieved dataset '{}' from cache".format(name))
return dset
return None
except Exception:
return None
class Dataset(Configurable):
"""
Specification for processing a dataset stored in DBS.
Parameters
----------
dataset : str
The full dataset name as in DBS.
lumis_per_task : int
How many luminosity sections to process in one task. May be
modified by Lobster to match the user-specified task runtime.
    events_per_task : int
        Adjust `lumis_per_task` so that each task processes approximately
        the specified number of events.
lumi_mask : str
The URL or filename of a JSON luminosity section mask, as
customary in CMS.
file_based : bool
Process whole files instead of single luminosity sections.
dbs_instance : str
Which DBS instance to query for the `dataset`.
"""
_mutable = {}
__apis = {}
__dsets = {}
__cache = Cache()
def __init__(self, dataset, lumis_per_task=25, events_per_task=None, lumi_mask=None, file_based=False, dbs_instance='global'):
self.dataset = dataset
self.lumi_mask = lumi_mask
self.lumis_per_task = lumis_per_task
self.events_per_task = events_per_task
self.file_based = file_based
self.dbs_instance = 'https://cmsweb.cern.ch/dbs/prod/{0}/DBSReader'.format(dbs_instance)
self.total_units = 0
def __get_mask(self, url):
if not re.match(r'https?://', url):
return url
fn = os.path.basename(url)
cached = os.path.join(Dataset.__cache.cachedir, fn)
if not os.path.isfile(cached):
r = requests.get(url)
if not r.ok:
raise IOError("unable to retrieve '{0}'".format(url))
with open(cached, 'w') as f:
f.write(r.text)
return cached
def validate(self):
if self.dataset in Dataset.__dsets:
return True
if self.lumi_mask:
self.lumi_mask = self.__get_mask(self.lumi_mask)
cred = Proxy({'logger': logging.getLogger("WMCore")})
dbs = DASWrapper(self.dbs_instance, ca_info=cred.getProxyFilename())
baseinfo = dbs.listFileSummaries(dataset=self.dataset)
if baseinfo is None or (len(baseinfo) == 1 and baseinfo[0] is None):
return False
return True
def get_info(self):
if self.dataset not in Dataset.__dsets:
if self.lumi_mask:
self.lumi_mask = self.__get_mask(self.lumi_mask)
res = self.query_database()
if self.events_per_task:
if res.total_events > 0:
res.tasksize = int(math.ceil(self.events_per_task / float(res.total_events) * res.total_units))
else:
res.tasksize = 1
else:
res.tasksize = self.lumis_per_task
Dataset.__dsets[self.dataset] = res
self.total_units = Dataset.__dsets[self.dataset].total_units
return Dataset.__dsets[self.dataset]
def query_database(self):
cred = Proxy({'logger': logging.getLogger("WMCore")})
dbs = DASWrapper(self.dbs_instance, ca_info=cred.getProxyFilename())
baseinfo = dbs.listFileSummaries(dataset=self.dataset)
if baseinfo is None or (len(baseinfo) == 1 and baseinfo[0] is None):
            raise ValueError('unable to retrieve information for dataset {}'.format(self.dataset))
if not self.file_based:
result = self.__cache.cached(self.dataset, self.lumi_mask, baseinfo)
if result:
return result
total_lumis = sum([info['num_lumi'] for info in baseinfo])
result = DatasetInfo()
result.total_events = sum([info['num_event'] for info in baseinfo])
for info in dbs.listFiles(dataset=self.dataset, detail=True):
fn = info['logical_file_name']
result.files[fn].events = info['event_count']
result.files[fn].size = info['file_size']
if self.file_based:
for info in dbs.listFiles(dataset=self.dataset):
fn = info['logical_file_name']
result.files[fn].lumis = [(-2, -2)]
else:
blocks = dbs.listBlocks(dataset=self.dataset)
if self.lumi_mask:
unmasked_lumis = LumiList(filename=self.lumi_mask)
for block in blocks:
runs = dbs.listFileLumis(block_name=block['block_name'])
for run in runs:
fn = run['logical_file_name']
for lumi in run['lumi_section_num']:
                        if not self.lumi_mask or ((run['run_num'], lumi) in unmasked_lumis):
                            result.files[fn].lumis.append((run['run_num'], lumi))
                        else:
                            result.masked_units += 1
result.unmasked_units = sum([len(f.lumis) for f in result.files.values()])
result.total_units = result.unmasked_units + result.masked_units
if not self.file_based:
self.__cache.cache(self.dataset, self.lumi_mask, baseinfo, result)
result.stop_on_file_boundary = (result.total_units != total_lumis) and not self.file_based
if result.stop_on_file_boundary:
logger.debug("split lumis detected in {} - "
"{} unique (run, lumi) but "
"{} unique (run, lumi, file) - "
"enforcing a limit of one file per task".format(self.dataset, total_lumis, result.total_units))
return result
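
# Usage sketch (dataset name and mask URL are hypothetical; querying DBS
# requires a valid grid proxy, so this is illustrative rather than runnable
# as-is):
#
#     dset = Dataset(
#         dataset='/SingleMuon/Run2016B-v1/MINIAOD',
#         lumis_per_task=25,
#         lumi_mask='https://example.invalid/lumi_mask.json',
#     )
#     if dset.validate():
#         info = dset.get_info()      # cached per dataset within the process
#         print(info.tasksize, dset.total_units)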

# ---------------------------------------------------------------------------

"""Logic Gate Classes. Multiple inheritance practice."""
class LogicGate:
"""Logic Gate Class."""
def __init__(self, n):
"""Initialization for Logic Gate."""
self.label = n
self.output = None
    def getLabel(self):
        """Getter for gate name."""
return self.label
    def getOutput(self):
        """Getter for gate output."""
self.output = self.performGateLogic()
return self.output
class BinaryGate(LogicGate):
"""Binary Gate class -- two inputs, one output."""
    def __init__(self, n):
        """Initializing the inherited class's data items."""
LogicGate.__init__(self, n)
        # The above can be rewritten as super(BinaryGate, self).__init__(n)
self.pinA = None
self.pinB = None
def getPinA(self):
"""Getter for pin A."""
if self.pinA is None:
return int(input("Enter Pin A input " + self.getLabel() + "-->"))
return self.pinA.getFrom().getOutput()
def getPinB(self):
"""Getter for pin B."""
if self.pinB is None:
return int(input("Enter Pin B input " + self.getLabel() + "-->"))
return self.pinB.getFrom().getOutput()
def setNextPin(self, source):
"""Setter for next pin."""
if self.pinA is None:
self.pinA = source
elif self.pinB is None:
self.pinB = source
else:
raise RuntimeError("Error: NO EMPTY PINS")
class UnaryGate(LogicGate):
"""Unary Gate Class."""
    def __init__(self, n):
        """Initializing the inherited class's data items."""
LogicGate.__init__(self, n)
        # The above can be rewritten as super(UnaryGate, self).__init__(n)
self.pin = None
def getPin(self):
"""Getter for pin."""
if self.pin is None:
            return int(input("Enter Pin input " + self.getLabel() + "-->"))
else:
return self.pin.getFrom().getOutput()
def setNextPin(self, source):
"""Setter for pin."""
if self.pin is None:
self.pin = source
else:
raise RuntimeError("Cannot Connect: NO EMPTY PINS on this gate.")
class AndGate(BinaryGate):
"""And Gate Class."""
    def __init__(self, n):
        """Initializing the parent class's data items."""
BinaryGate.__init__(self, n)
def performGateLogic(self):
"""Perform gate logic."""
a = self.getPinA()
b = self.getPinB()
return ((a == 1) and (b == 1))
class OrGate(BinaryGate):
"""Or Gate Class."""
def __init__(self, n):
"""Initialization for Binary gate."""
BinaryGate.__init__(self, n)
def performGateLogic(self):
"""Perform gate logic."""
a = self.getPinA()
b = self.getPinB()
return ((a == 1) or (b == 1))
class NotGate(UnaryGate):
"""Not Gate Class."""
def __init__(self, n):
"""Initialization for Unary Gate."""
UnaryGate.__init__(self, n)
def performGateLogic(self):
"""Perform gate logic."""
a = self.getPin()
return (not a)
class NorGate(OrGate):
"""NOR Gate Class."""
def performGateLogic(self):
"""Perform gate logic."""
return not (super().performGateLogic())
class NandGate(AndGate):
"""Nand Gate Class."""
def performGateLogic(self):
"""Perform gate logic."""
return not (super().performGateLogic())
class XorGate(BinaryGate):
"""XOR Gate Class."""
    def __init__(self, n):
        """Initializing the parent class's data items."""
BinaryGate.__init__(self, n)
def performGateLogic(self):
"""Perform gate logic."""
a = self.getPinA()
b = self.getPinB()
return ((a == 1 and b == 0) or (a == 0 and b == 1))
class HalfAdder(XorGate, AndGate):
"""Half Adder Class."""
    def __init__(self, n):
        """Initializing the parent classes' data items."""
XorGate.__init__(self, n)
AndGate.__init__(self, n)
self.sum = XorGate.performGateLogic(self)
self.carry = AndGate.performGateLogic(self)
    def performGateLogic(self):
        """Perform half adder logic."""
return self.carry, self.sum
class FullAdder(HalfAdder):
"""Full Adder Class."""
    def __init__(self, n):
        """Initializing the parent class's data items."""
HalfAdder.__init__(self, n)
self.carry = AndGate.performGateLogic(self)
self.sum = XorGate.performGateLogic(self)
    def performGateLogic(self):
        """Perform full adder logic."""
return self.carry, self.sum
class Connector:
"""Connector Gate Class."""
def __init__(self, fgate, tgate):
"""Initialization for Connector class."""
self.fromgate = fgate
self.togate = tgate
tgate.setNextPin(self)
def getFrom(self):
"""Getter for input gate."""
return self.fromgate
def getTo(self):
"""Getter for output gate."""
return self.togate
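
# A minimal wiring sketch (gate labels illustrative). Evaluating the circuit
# prompts for the AND gate's two pin values on stdin, so run it
# interactively:
#
#     g1 = AndGate("G1")
#     g2 = NotGate("G2")
#     Connector(g1, g2)        # G1's output feeds G2's single pin
#     print(g2.getOutput())    # NOT(A AND B), i.e. NAND of the two inputs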
def main():
"""Testing Classes.
g1 = AndGate("G1")
g2 = AndGate("G2")
g3 = OrGate("G3")
g4 = NotGate("G4")
Connector(g1, g3)
Connector(g2, g3)
Connector(g3, g4)
print(g4.getOutput())
g5 = NandGate("G5")
g6 = NandGate("G6")
g7 = AndGate("G7")
Connector(g5, g7)
Connector(g6, g7)
print(g7.getOutput())
g8 = XorGate("G8")
print(g8.getOutput())"""
g9 = HalfAdder("G9")
print(g9.getOutput())
if __name__ == '__main__':
    main()

# ---------------------------------------------------------------------------

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
import testtools
from heat.common import exception
from heat.common import identifier
from heat.engine import parameters
from heat.engine import template
class ParameterTest(testtools.TestCase):
def new_parameter(self, name, schema, value=None,
validate_value=True):
tmpl = template.Template({'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {name: schema}})
schema = tmpl.param_schemata()[name]
param = parameters.Parameter(name, schema, value)
param.validate(validate_value)
return param
def test_new_string(self):
p = self.new_parameter('p', {'Type': 'String'}, validate_value=False)
self.assertIsInstance(p, parameters.StringParam)
def test_new_number(self):
p = self.new_parameter('p', {'Type': 'Number'}, validate_value=False)
self.assertIsInstance(p, parameters.NumberParam)
def test_new_list(self):
p = self.new_parameter('p', {'Type': 'CommaDelimitedList'},
validate_value=False)
self.assertIsInstance(p, parameters.CommaDelimitedListParam)
def test_new_json(self):
p = self.new_parameter('p', {'Type': 'Json'}, validate_value=False)
self.assertIsInstance(p, parameters.JsonParam)
def test_json_return(self):
p = self.new_parameter('p', {'Type': 'Json'}, {"a": 1, "b": "a"})
self.assertEqual('{"a": 1, "b": "a"}', str(p))
def test_json_return_no_echo_true(self):
p = self.new_parameter(
'p', {'Type': 'Json', 'NoEcho': 'true'}, {"a": 1})
self.assertTrue(p.hidden())
self.assertEqual(str(p), '******')
def test_new_bad_type(self):
self.assertRaises(exception.InvalidSchemaError, self.new_parameter,
'p', {'Type': 'List'}, validate_value=False)
def test_list_as_str(self):
p = self.new_parameter('p', {'Type': 'CommaDelimitedList'}, 'a,b,c')
self.assertEqual(['a', 'b', 'c'], p.value())
self.assertEqual('a,b,c', str(p))
def test_default_no_override(self):
p = self.new_parameter('defaulted', {'Type': 'String',
'Default': 'blarg'})
self.assertTrue(p.has_default())
self.assertEqual('blarg', p.default())
self.assertEqual('blarg', p.value())
def test_default_override(self):
p = self.new_parameter('defaulted',
{'Type': 'String',
'Default': 'blarg'},
'wibble')
self.assertTrue(p.has_default())
self.assertEqual('blarg', p.default())
self.assertEqual('wibble', p.value())
def test_default_invalid(self):
schema = {'Type': 'String',
'AllowedValues': ['foo'],
'ConstraintDescription': 'wibble',
'Default': 'bar'}
err = self.assertRaises(exception.InvalidSchemaError,
self.new_parameter, 'p', schema, 'foo')
self.assertIn('wibble', six.text_type(err))
def test_no_echo_true(self):
p = self.new_parameter('anechoic',
{'Type': 'String',
'NoEcho': 'true'},
'wibble')
self.assertTrue(p.hidden())
self.assertNotEqual(str(p), 'wibble')
def test_no_echo_true_caps(self):
p = self.new_parameter('anechoic',
{'Type': 'String',
'NoEcho': 'TrUe'},
'wibble')
self.assertTrue(p.hidden())
self.assertNotEqual(str(p), 'wibble')
def test_no_echo_false(self):
p = self.new_parameter('echoic',
{'Type': 'String',
'NoEcho': 'false'},
'wibble')
self.assertFalse(p.hidden())
self.assertEqual('wibble', str(p))
def test_description(self):
description = 'Description of the parameter'
p = self.new_parameter('p', {'Type': 'String',
'Description': description},
validate_value=False)
self.assertEqual(description, p.description())
def test_no_description(self):
p = self.new_parameter('p', {'Type': 'String'}, validate_value=False)
self.assertEqual('', p.description())
def test_string_len_good(self):
schema = {'Type': 'String',
'MinLength': '3',
'MaxLength': '3'}
p = self.new_parameter('p', schema, 'foo')
self.assertEqual('foo', p.value())
def test_string_underflow(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'MinLength': '4'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, 'foo')
self.assertIn('wibble', six.text_type(err))
def test_string_overflow(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'MaxLength': '2'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, 'foo')
self.assertIn('wibble', six.text_type(err))
def test_string_pattern_good(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = self.new_parameter('p', schema, 'foo')
self.assertEqual('foo', p.value())
def test_string_pattern_bad_prefix(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, '1foo')
self.assertIn('wibble', six.text_type(err))
def test_string_pattern_bad_suffix(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, 'foo1')
self.assertIn('wibble', six.text_type(err))
def test_string_value_list_good(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = self.new_parameter('p', schema, 'bar')
self.assertEqual('bar', p.value())
def test_string_value_unicode(self):
schema = {'Type': 'String'}
p = self.new_parameter('p', schema, u'test\u2665')
self.assertEqual(u'test\u2665', p.value())
def test_string_value_list_bad(self):
schema = {'Type': 'String',
'ConstraintDescription': 'wibble',
'AllowedValues': ['foo', 'bar', 'baz']}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, 'blarg')
self.assertIn('wibble', six.text_type(err))
def test_number_int_good(self):
schema = {'Type': 'Number',
'MinValue': '3',
'MaxValue': '3'}
p = self.new_parameter('p', schema, '3')
self.assertEqual(3, p.value())
def test_number_float_good_string(self):
schema = {'Type': 'Number',
'MinValue': '3.0',
'MaxValue': '4.0'}
p = self.new_parameter('p', schema, '3.5')
self.assertEqual(3.5, p.value())
def test_number_float_good_number(self):
schema = {'Type': 'Number',
'MinValue': '3.0',
'MaxValue': '4.0'}
p = self.new_parameter('p', schema, 3.5)
self.assertEqual(3.5, p.value())
def test_number_low(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'MinValue': '4'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, '3')
self.assertIn('wibble', six.text_type(err))
def test_number_high(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'MaxValue': '2'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, '3')
self.assertIn('wibble', six.text_type(err))
def test_number_bad(self):
schema = {'Type': 'Number'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, 'str')
self.assertIn('float', six.text_type(err))
def test_number_value_list_good(self):
schema = {'Type': 'Number',
'AllowedValues': ['1', '3', '5']}
p = self.new_parameter('p', schema, '5')
self.assertEqual(5, p.value())
def test_number_value_list_bad(self):
schema = {'Type': 'Number',
'ConstraintDescription': 'wibble',
'AllowedValues': ['1', '3', '5']}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, '2')
self.assertIn('wibble', six.text_type(err))
def test_list_value_list_default_empty(self):
schema = {'Type': 'CommaDelimitedList', 'Default': ''}
p = self.new_parameter('p', schema)
self.assertEqual([], p.value())
def test_list_value_list_good(self):
schema = {'Type': 'CommaDelimitedList',
'AllowedValues': ['foo', 'bar', 'baz']}
p = self.new_parameter('p', schema, 'baz,foo,bar')
self.assertEqual('baz,foo,bar'.split(','), p.value())
schema['Default'] = []
p = self.new_parameter('p', schema)
self.assertEqual([], p.value())
schema['Default'] = 'baz,foo,bar'
p = self.new_parameter('p', schema)
self.assertEqual('baz,foo,bar'.split(','), p.value())
def test_list_value_list_bad(self):
schema = {'Type': 'CommaDelimitedList',
'ConstraintDescription': 'wibble',
'AllowedValues': ['foo', 'bar', 'baz']}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema,
'foo,baz,blarg')
self.assertIn('wibble', six.text_type(err))
def test_map_value(self):
'''Happy path for value that's already a map.'''
schema = {'Type': 'Json'}
val = {"foo": "bar", "items": [1, 2, 3]}
p = self.new_parameter('p', schema, val)
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_map_value_bad(self):
'''Map value is not JSON parsable.'''
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = {"foo": "bar", "not_json": len}
err = self.assertRaises(ValueError,
self.new_parameter, 'p', schema, val)
self.assertIn('Value must be valid JSON', six.text_type(err))
def test_map_value_parse(self):
'''Happy path for value that's a string.'''
schema = {'Type': 'Json'}
val = {"foo": "bar", "items": [1, 2, 3]}
val_s = json.dumps(val)
p = self.new_parameter('p', schema, val_s)
self.assertEqual(val, p.value())
self.assertEqual(val, p.parsed)
def test_map_value_bad_parse(self):
'''Test value error for unparsable string value.'''
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = "I am not a map"
err = self.assertRaises(ValueError,
self.new_parameter, 'p', schema, val)
self.assertIn('Value must be valid JSON', six.text_type(err))
def test_map_underrun(self):
'''Test map length under MIN_LEN.'''
schema = {'Type': 'Json',
'MinLength': 3}
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, val)
self.assertIn('out of range', six.text_type(err))
def test_map_overrun(self):
'''Test map length over MAX_LEN.'''
schema = {'Type': 'Json',
'MaxLength': 1}
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'p', schema, val)
self.assertIn('out of range', six.text_type(err))
def test_json_list(self):
schema = {'Type': 'Json'}
val = ["fizz", "buzz"]
p = self.new_parameter('p', schema, val)
self.assertIsInstance(p.value(), list)
self.assertIn("fizz", p.value())
self.assertIn("buzz", p.value())
def test_json_string_list(self):
schema = {'Type': 'Json'}
val = '["fizz", "buzz"]'
p = self.new_parameter('p', schema, val)
self.assertIsInstance(p.value(), list)
self.assertIn("fizz", p.value())
self.assertIn("buzz", p.value())
def test_bool_value_true(self):
schema = {'Type': 'Boolean'}
for val in ('1', 't', 'true', 'on', 'y', 'yes', True, 1):
bo = self.new_parameter('bo', schema, val)
self.assertEqual(True, bo.value())
def test_bool_value_false(self):
schema = {'Type': 'Boolean'}
for val in ('0', 'f', 'false', 'off', 'n', 'no', False, 0):
bo = self.new_parameter('bo', schema, val)
self.assertEqual(False, bo.value())
def test_bool_value_invalid(self):
schema = {'Type': 'Boolean'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'bo', schema, 'foo')
        self.assertIn("Unrecognized value 'foo'", six.text_type(err))
def test_missing_param(self):
'''Test missing user parameter.'''
self.assertRaises(exception.UserParameterMissing,
self.new_parameter, 'p',
{'Type': 'String'})
def test_param_name_in_error_message(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
self.new_parameter, 'testparam', schema, '234')
expected = 'Parameter \'testparam\' is invalid: '\
'"234" does not match pattern "[a-z]*"'
        self.assertEqual(expected, six.text_type(err))
params_schema = json.loads('''{
"Parameters" : {
"User" : { "Type": "String" },
"Defaulted" : {
"Type": "String",
"Default": "foobar"
}
}
}''')
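
# For illustration (identifier and value hypothetical): with this schema,
# 'User' must be supplied by the caller while 'Defaulted' falls back to
# "foobar" when omitted:
#
#     params = tmpl.parameters(ident, {'User': 'alice'})
#     params['Defaulted']   # -> "foobar"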
class ParametersTest(testtools.TestCase):
def new_parameters(self, stack_name, tmpl, user_params=None,
stack_id=None, validate_value=True):
user_params = user_params or {}
tmpl.update({'HeatTemplateFormatVersion': '2012-12-12'})
tmpl = template.Template(tmpl)
params = tmpl.parameters(
identifier.HeatIdentifier('', stack_name, stack_id),
user_params)
params.validate(validate_value)
return params
def test_pseudo_params(self):
stack_name = 'test_stack'
params = self.new_parameters(stack_name, {"Parameters": {}})
self.assertEqual('test_stack', params['AWS::StackName'])
self.assertEqual(
'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'None'),
params['AWS::StackId'])
self.assertIn('AWS::Region', params)
def test_pseudo_param_stackid(self):
stack_name = 'test_stack'
params = self.new_parameters(stack_name, {'Parameters': {}},
stack_id='abc123')
self.assertEqual(
'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'abc123'),
params['AWS::StackId'])
stack_identifier = identifier.HeatIdentifier('', '', 'def456')
params.set_stack_id(stack_identifier)
self.assertEqual(stack_identifier.arn(), params['AWS::StackId'])
def test_schema_invariance(self):
params1 = self.new_parameters('test', params_schema,
{'User': 'foo',
'Defaulted': 'wibble'})
self.assertEqual('wibble', params1['Defaulted'])
params2 = self.new_parameters('test', params_schema, {'User': 'foo'})
self.assertEqual('foobar', params2['Defaulted'])
def test_to_dict(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number', 'Default': '42'}}}
params = self.new_parameters('test_params', template, {'Foo': 'foo'})
as_dict = dict(params)
self.assertEqual('foo', as_dict['Foo'])
self.assertEqual(42, as_dict['Bar'])
self.assertEqual('test_params', as_dict['AWS::StackName'])
self.assertIn('AWS::Region', as_dict)
def test_map(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number', 'Default': '42'}}}
params = self.new_parameters('test_params', template, {'Foo': 'foo'})
expected = {'Foo': False,
'Bar': True,
'AWS::Region': True,
'AWS::StackId': True,
'AWS::StackName': True}
self.assertEqual(expected, params.map(lambda p: p.has_default()))
def test_map_str(self):
template = {'Parameters': {'Foo': {'Type': 'String'},
'Bar': {'Type': 'Number'},
'Uni': {'Type': 'String'}}}
stack_name = 'test_params'
params = self.new_parameters(stack_name, template,
{'Foo': 'foo',
'Bar': '42',
'Uni': u'test\u2665'})
expected = {'Foo': 'foo',
'Bar': '42',
'Uni': 'test\xe2\x99\xa5',
'AWS::Region': 'ap-southeast-1',
'AWS::StackId':
'arn:openstack:heat:::stacks/{0}/{1}'.format(
stack_name,
'None'),
'AWS::StackName': 'test_params'}
self.assertEqual(expected, params.map(str))
def test_unknown_params(self):
user_params = {'Foo': 'wibble'}
self.assertRaises(exception.UnknownUserParameter,
self.new_parameters,
'test',
params_schema,
user_params)
def test_missing_params(self):
user_params = {}
self.assertRaises(exception.UserParameterMissing,
self.new_parameters,
'test',
params_schema,
user_params)
def test_missing_attribute_params(self):
params = {'Parameters': {'Foo': {'Type': 'String'},
'NoAttr': 'No attribute.',
'Bar': {'Type': 'Number', 'Default': '1'}}}
self.assertRaises(exception.InvalidSchemaError,
self.new_parameters,
'test',
params)
class ParameterSchemaTest(testtools.TestCase):
def test_validate_schema_wrong_key(self):
error = self.assertRaises(exception.InvalidSchemaError,
parameters.Schema.from_dict, 'param_name',
{"foo": "bar"})
self.assertEqual("Invalid key 'foo' for parameter (param_name)",
six.text_type(error))
def test_validate_schema_no_type(self):
error = self.assertRaises(exception.InvalidSchemaError,
parameters.Schema.from_dict,
'broken',
{"Description": "Hi!"})
self.assertEqual("Missing parameter type for parameter: broken",
six.text_type(error))

# ---------------------------------------------------------------------------

#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import datetime
import inspect
import logging
import os
import random
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import test_env
test_env.setup_test_env()
from google.appengine.api import datastore_errors
from google.appengine.api import search
from google.appengine.ext import deferred
from google.appengine.ext import ndb
# From tools/third_party/
import webtest
from components import auth_testing
from components import datastore_utils
from components import stats_framework
from components import utils
from server import config
from server import stats
from server import task_pack
from server import task_request
from server import task_result
from server import task_scheduler
from server import task_to_run
from support import test_case
from server.task_result import State
# pylint: disable=W0212,W0612
def _gen_request_data(name='Request name', properties=None, **kwargs):
# Do not include optional arguments.
base_data = {
'name': name,
'user': 'Jesus',
'properties': {
'commands': [[u'command1']],
'data': [],
'dimensions': {},
'env': {},
'execution_timeout_secs': 24*60*60,
'io_timeout_secs': None,
},
'priority': 50,
'scheduling_expiration_secs': 60,
'tags': [u'tag:1'],
}
base_data.update(kwargs)
base_data['properties'].update(properties or {})
return base_data
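
# Example (override values hypothetical): keyword arguments replace top-level
# keys while `properties` is merged into the defaults, e.g.
#     _gen_request_data(priority=10, properties={'idempotent': True})
# returns the base dict with priority 10 and the extra property merged in.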
def get_results(request_key):
"""Fetches all task results for a specified TaskRequest ndb.Key.
Returns:
tuple(TaskResultSummary, list of TaskRunResult that exist).
"""
result_summary_key = task_pack.request_key_to_result_summary_key(request_key)
result_summary = result_summary_key.get()
  # There are two ways to look at it: either use a DB query or fetch all the
  # entities that could exist, at most 255. In general there will be fewer
  # than 3 entities, so just fetching them by key would be faster. This
  # function is exclusively used in unit tests so it's not performance
  # critical.
q = task_result.TaskRunResult.query(ancestor=result_summary_key)
q = q.order(task_result.TaskRunResult.key)
return result_summary, q.fetch()
def _quick_reap():
"""Reaps a task."""
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
return run_result
class TaskSchedulerApiTest(test_case.TestCase):
APP_DIR = ROOT_DIR
def setUp(self):
super(TaskSchedulerApiTest, self).setUp()
self.testbed.init_search_stub()
self.now = datetime.datetime(2014, 1, 2, 3, 4, 5, 6)
self.mock_now(self.now)
self.app = webtest.TestApp(
deferred.application,
extra_environ={
'REMOTE_ADDR': '1.0.1.2',
'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
})
self.mock(stats_framework, 'add_entry', self._parse_line)
auth_testing.mock_get_current_identity(self)
def _parse_line(self, line):
# pylint: disable=W0212
actual = stats._parse_line(line, stats._Snapshot(), {}, {}, {})
self.assertIs(True, actual, line)
def test_all_apis_are_tested(self):
# Ensures there's a test for each public API.
# TODO(maruel): Remove this once coverage is asserted.
module = task_scheduler
expected = set(
i for i in dir(module)
if i[0] != '_' and hasattr(getattr(module, i), 'func_name'))
missing = expected - set(i[5:] for i in dir(self) if i.startswith('test_'))
self.assertFalse(missing)
def test_bot_reap_task(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
actual_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(request, actual_request)
self.assertEqual('localhost', run_result.bot_id)
self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_exponential_backoff(self):
self.mock(
task_scheduler.random, 'random',
lambda: task_scheduler._PROBABILITY_OF_QUICK_COMEBACK)
self.mock(utils, 'is_canary', lambda: False)
data = [
(0, 2),
(1, 2),
(2, 3),
(3, 5),
(4, 8),
(5, 11),
(6, 17),
(7, 26),
(8, 38),
(9, 58),
(10, 60),
(11, 60),
]
for value, expected in data:
actual = int(round(task_scheduler.exponential_backoff(value)))
self.assertEqual(expected, actual, (value, expected, actual))
def test_exponential_backoff_quick(self):
self.mock(
task_scheduler.random, 'random',
lambda: task_scheduler._PROBABILITY_OF_QUICK_COMEBACK - 0.01)
self.assertEqual(1.0, task_scheduler.exponential_backoff(235))
def _task_ran_successfully(self):
"""Runs a task successfully and returns the task_id."""
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
actual_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(request, actual_request)
self.assertEqual('localhost', run_result.bot_id)
self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
# It's important to terminate the task with success.
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False,
0.1))
return unicode(run_result.key_string)
def _task_deduped(
self, new_ts, deduped_from, task_id='1d8dc670a0008810', now=None):
data = _gen_request_data(
name='yay',
user='Raoul',
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
actual_request_2, run_result_2 = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(None, actual_request_2)
result_summary_duped, run_results_duped = get_results(request.key)
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': now or self.now,
'costs_usd': [],
'cost_saved_usd': 0.1,
'created_ts': new_ts,
'deduped_from': deduped_from,
'durations': [0.1],
'exit_codes': [0],
'failure': False,
'id': task_id,
'internal_failure': False,
# Only this value is updated to 'now', the rest uses the previous run
# timestamps.
'modified_ts': new_ts,
'name': u'yay',
# A deduped task cannot be deduped against.
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': now or self.now,
'state': State.COMPLETED,
'try_number': 0,
'user': u'Raoul',
}
self.assertEqual(expected, result_summary_duped.to_dict())
self.assertEqual([], run_results_duped)
def test_task_idempotent(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
# First task is idempotent.
task_id = self._task_ran_successfully()
# Second task is deduped against first task.
new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs-1)
self._task_deduped(new_ts, task_id)
def test_task_idempotent_old(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
# First task is idempotent.
self._task_ran_successfully()
# Second task is scheduled, first task is too old to be reused.
new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs)
data = _gen_request_data(
name='yay',
user='Raoul',
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
# The task was enqueued for execution.
self.assertNotEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_task_idempotent_three(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
# First task is idempotent.
task_id = self._task_ran_successfully()
# Second task is deduped against first task.
new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs-1)
self._task_deduped(new_ts, task_id)
# Third task is scheduled, second task is not dedupable, first task is too
# old.
new_ts = self.mock_now(self.now, config.settings().reusable_task_age_secs)
data = _gen_request_data(
name='yay',
user='Jesus',
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
# The task was enqueued for execution.
self.assertNotEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_task_idempotent_variable(self):
    # Test the edge case where GlobalConfig.reusable_task_age_secs is being
    # modified. This ensures TaskResultSummary.order(TRS.key) works.
self.mock(random, 'getrandbits', lambda _: 0x88)
cfg = config.settings()
cfg.reusable_task_age_secs = 10
cfg.store()
# First task is idempotent.
self._task_ran_successfully()
# Second task is scheduled, first task is too old to be reused.
second_ts = self.mock_now(self.now, 10)
task_id = self._task_ran_successfully()
# Now any of the 2 tasks could be reused. Assert the right one (the most
# recent) is reused.
cfg = config.settings()
cfg.reusable_task_age_secs = 100
cfg.store()
# Third task is deduped against second task. That ensures ordering works
# correctly.
third_ts = self.mock_now(self.now, 20)
self._task_deduped(third_ts, task_id, '1d69ba3ea8008810', second_ts)
def test_task_parent_children(self):
# Parent task creates a child task.
parent_id = self._task_ran_successfully()
data = _gen_request_data(
parent_task_id=parent_id,
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
self.assertEqual([], result_summary.children_task_ids)
self.assertEqual(parent_id, request.parent_task_id)
parent_run_result_key = task_pack.unpack_run_result_key(parent_id)
parent_res_summary_key = task_pack.run_result_key_to_result_summary_key(
parent_run_result_key)
expected = [result_summary.key_string]
self.assertEqual(expected, parent_run_result_key.get().children_task_ids)
self.assertEqual(expected, parent_res_summary_key.get().children_task_ids)
def test_get_results(self):
# TODO(maruel): Split in more focused tests.
self.mock(random, 'getrandbits', lambda _: 0x88)
created_ts = self.now
self.mock_now(created_ts)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
# The TaskRequest was enqueued, the TaskResultSummary was created but no
# TaskRunResult exist yet since the task was not scheduled on any bot.
result_summary, run_results = get_results(request.key)
expected = {
'abandoned_ts': None,
'bot_id': None,
'bot_version': None,
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [],
'cost_saved_usd': None,
'created_ts': created_ts,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': created_ts,
'name': u'Request name',
'properties_hash': None,
'server_versions': [],
'started_ts': None,
'state': State.PENDING,
'try_number': None,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.to_dict())
self.assertEqual([], run_results)
# A bot reaps the TaskToRun.
reaped_ts = self.now + datetime.timedelta(seconds=60)
self.mock_now(reaped_ts)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
self.assertEqual(request, reaped_request)
self.assertTrue(run_result)
result_summary, run_results = get_results(request.key)
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0.],
'cost_saved_usd': None,
'created_ts': created_ts, # Time the TaskRequest was created.
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': reaped_ts,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': reaped_ts,
'state': State.RUNNING,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.to_dict())
expected = [
{
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'cost_usd': 0.,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008811',
'internal_failure': False,
'modified_ts': reaped_ts,
'server_versions': [u'default-version'],
'started_ts': reaped_ts,
'state': State.RUNNING,
'try_number': 1,
},
]
self.assertEqual(expected, [i.to_dict() for i in run_results])
# The bot completes the task.
done_ts = self.now + datetime.timedelta(seconds=120)
self.mock_now(done_ts)
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False,
0.1))
self.assertEqual(
(True, False),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'Bar22', 0, 0, 0.2, False, False, 0.1))
result_summary, run_results = get_results(request.key)
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': done_ts,
'costs_usd': [0.1],
'cost_saved_usd': None,
'created_ts': created_ts,
'deduped_from': None,
'durations': [0.1, 0.2],
'exit_codes': [0, 0],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': done_ts,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': reaped_ts,
'state': State.COMPLETED,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.to_dict())
expected = [
{
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': done_ts,
'cost_usd': 0.1,
'durations': [0.1, 0.2],
'exit_codes': [0, 0],
'failure': False,
'id': '1d69b9f088008811',
'internal_failure': False,
'modified_ts': done_ts,
'server_versions': [u'default-version'],
'started_ts': reaped_ts,
'state': State.COMPLETED,
'try_number': 1,
},
]
self.assertEqual(expected, [t.to_dict() for t in run_results])
def test_exit_code_failure(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
self.assertEqual(request, reaped_request)
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'Foo1', 0, 1, 0.1, False, False, 0.1))
result_summary, run_results = get_results(request.key)
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': self.now,
'costs_usd': [0.1],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [0.1],
'exit_codes': [1],
'failure': True,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': self.now,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.COMPLETED,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.to_dict())
expected = [
{
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': self.now,
'cost_usd': 0.1,
'durations': [0.1],
'exit_codes': [1],
'failure': True,
'id': '1d69b9f088008811',
'internal_failure': False,
'modified_ts': self.now,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.COMPLETED,
'try_number': 1,
},
]
self.assertEqual(expected, [t.to_dict() for t in run_results])
def test_schedule_request(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
# It is tested indirectly in the other functions.
request = task_request.make_request(data)
self.assertTrue(task_scheduler.schedule_request(request))
def test_bot_update_task(self):
run_result = _quick_reap()
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hi', 0, 0, 0.1, False, False, 0.1))
self.assertEqual(
(True, False),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hey', 2, 0, 0.1, False, False,
0.1))
self.assertEqual(['hihey'], list(run_result.key.get().get_outputs()))
def test_bot_update_task_new_overwrite(self):
run_result = _quick_reap()
self.assertEqual(
(True, False),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hi', 0, None, None, False, False,
0.1))
self.assertEqual(
(True, False),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hey', 1, None, None, False, False,
0.1))
self.assertEqual(['hhey'], list(run_result.key.get().get_outputs()))
def test_bot_update_exception(self):
run_result = _quick_reap()
def r(*_):
raise datastore_utils.CommitError('Sorry!')
self.mock(ndb, 'put_multi', r)
self.assertEqual(
(False, False),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hi', 0, 0, 0.1, False, False, 0.1))
def _bot_update_timeouts(self, hard, io):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost', 'hi', 0, 0, 0.1, hard, io, 0.1))
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': self.now,
'costs_usd': [0.1],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [0.1],
'exit_codes': [0],
'failure': True,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': self.now,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.TIMED_OUT,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.key.get().to_dict())
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': self.now,
'cost_usd': 0.1,
'durations': [0.1],
'exit_codes': [0],
'failure': True,
'id': '1d69b9f088008811',
'internal_failure': False,
'modified_ts': self.now,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.TIMED_OUT,
'try_number': 1,
}
self.assertEqual(expected, run_result.key.get().to_dict())
def test_bot_update_hard_timeout(self):
self._bot_update_timeouts(True, False)
def test_bot_update_io_timeout(self):
self._bot_update_timeouts(False, True)
def test_bot_kill_task(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
self.assertEqual(
None, task_scheduler.bot_kill_task(run_result.key, 'localhost'))
expected = {
'abandoned_ts': self.now,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0.],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': True,
'modified_ts': self.now,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.BOT_DIED,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.key.get().to_dict())
expected = {
'abandoned_ts': self.now,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'cost_usd': 0.,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008811',
'internal_failure': True,
'modified_ts': self.now,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': State.BOT_DIED,
'try_number': 1,
}
self.assertEqual(expected, run_result.key.get().to_dict())
def test_bot_kill_task_wrong_bot(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
expected = (
'Bot bot1 sent task kill for task 1d69b9f088008811 owned by bot '
'localhost')
self.assertEqual(
expected, task_scheduler.bot_kill_task(run_result.key, 'bot1'))
def test_cancel_task(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
ok, was_running = task_scheduler.cancel_task(result_summary.key)
self.assertEqual(True, ok)
self.assertEqual(False, was_running)
result_summary = result_summary.key.get()
self.assertEqual(task_result.State.CANCELED, result_summary.state)
def test_cancel_task_running(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
reaped_request, run_result = task_scheduler.bot_reap_task(
{'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
ok, was_running = task_scheduler.cancel_task(result_summary.key)
self.assertEqual(False, ok)
self.assertEqual(True, was_running)
result_summary = result_summary.key.get()
self.assertEqual(task_result.State.RUNNING, result_summary.state)
def test_cron_abort_expired_task_to_run(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
abandoned_ts = self.mock_now(self.now, data['scheduling_expiration_secs']+1)
self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
self.assertEqual([], task_result.TaskRunResult.query().fetch())
expected = {
'abandoned_ts': abandoned_ts,
'bot_id': None,
'bot_version': None,
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': abandoned_ts,
'name': u'Request name',
'properties_hash': None,
'server_versions': [],
'started_ts': None,
'state': task_result.State.EXPIRED,
'try_number': None,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_abort_expired_task_to_run_retry(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
scheduling_expiration_secs=600)
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
    # Fake the bot dying on the first try.
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
self.assertEqual(task_result.State.BOT_DIED, run_result.key.get().state)
self.assertEqual(
task_result.State.PENDING, run_result.result_summary_key.get().state)
# BOT_DIED is kept instead of EXPIRED.
abandoned_ts = self.mock_now(self.now, data['scheduling_expiration_secs']+1)
self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
self.assertEqual(1, len(task_result.TaskRunResult.query().fetch()))
expected = {
'abandoned_ts': abandoned_ts,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0.],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': True,
'modified_ts': abandoned_ts,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': task_result.State.BOT_DIED,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_handle_bot_died(self):
# Test first retry, then success.
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
scheduling_expiration_secs=600)
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(1, run_result.try_number)
self.assertEqual(task_result.State.RUNNING, run_result.state)
now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
# Refresh and compare:
expected = {
'abandoned_ts': now_1,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'cost_usd': 0.,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008811',
'internal_failure': True,
'modified_ts': now_1,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': task_result.State.BOT_DIED,
'try_number': 1,
}
self.assertEqual(expected, run_result.key.get().to_dict())
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0.],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': now_1,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': None,
'state': task_result.State.PENDING,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
# Task was retried.
now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost-second', 'abc')
logging.info('%s', [t.to_dict() for t in task_to_run.TaskToRun.query()])
self.assertEqual(2, run_result.try_number)
self.assertEqual(
(True, True),
task_scheduler.bot_update_task(
run_result.key, 'localhost-second', 'Foo1', 0, 0, 0.1, False, False,
0.1))
expected = {
'abandoned_ts': None,
'bot_id': u'localhost-second',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': now_2,
'costs_usd': [0., 0.1],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [0.1],
'exit_codes': [0],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': now_2,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': now_2,
'state': task_result.State.COMPLETED,
'try_number': 2,
'user': u'Jesus',
}
self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
self.assertEqual(0.1, run_result.key.get().cost_usd)
def test_cron_handle_bot_died_same_bot_denied(self):
    # Test that after the first try's bot dies, the same bot is denied the retry.
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
scheduling_expiration_secs=600)
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(1, run_result.try_number)
self.assertEqual(task_result.State.RUNNING, run_result.state)
now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
# Refresh and compare:
expected = {
'abandoned_ts': now_1,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'cost_usd': 0.,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008811',
'internal_failure': True,
'modified_ts': now_1,
'server_versions': [u'default-version'],
'started_ts': self.now,
'state': task_result.State.BOT_DIED,
'try_number': 1,
}
self.assertEqual(expected, run_result.key.get().to_dict())
expected = {
'abandoned_ts': None,
'bot_id': u'localhost',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0.],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': False,
'modified_ts': now_1,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': None,
'state': task_result.State.PENDING,
'try_number': 1,
'user': u'Jesus',
}
self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    # The task was retried, but when the same bot polls again it is denied the task.
now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(None, request)
self.assertEqual(None, run_result)
logging.info('%s', [t.to_dict() for t in task_to_run.TaskToRun.query()])
def test_cron_handle_bot_died_second(self):
    # Test two tries, each ending in internal_failure, leading to a BOT_DIED status.
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
scheduling_expiration_secs=600)
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(1, run_result.try_number)
self.assertEqual(task_result.State.RUNNING, run_result.state)
self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
# It must be a different bot.
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost-second', 'abc')
now_2 = self.mock_now(self.now + 2 * task_result.BOT_PING_TOLERANCE, 3)
self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
self.assertEqual((0, 0, 0), task_scheduler.cron_handle_bot_died())
expected = {
'abandoned_ts': now_2,
'bot_id': u'localhost-second',
'bot_version': u'abc',
'children_task_ids': [],
'completed_ts': None,
'costs_usd': [0., 0.],
'cost_saved_usd': None,
'created_ts': self.now,
'deduped_from': None,
'durations': [],
'exit_codes': [],
'failure': False,
'id': '1d69b9f088008810',
'internal_failure': True,
'modified_ts': now_2,
'name': u'Request name',
'properties_hash': None,
'server_versions': [u'default-version'],
'started_ts': now_1,
'state': task_result.State.BOT_DIED,
'try_number': 2,
'user': u'Jesus',
}
self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
def test_cron_handle_bot_died_ignored_expired(self):
self.mock(random, 'getrandbits', lambda _: 0x88)
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
scheduling_expiration_secs=600)
request = task_request.make_request(data)
_result_summary = task_scheduler.schedule_request(request)
bot_dimensions = {
u'OS': [u'Windows', u'Windows-3.1.1'],
u'hostname': u'localhost',
u'foo': u'bar',
}
_request, run_result = task_scheduler.bot_reap_task(
bot_dimensions, 'localhost', 'abc')
self.assertEqual(1, run_result.try_number)
self.assertEqual(task_result.State.RUNNING, run_result.state)
self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 601)
self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
def test_search_by_name(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
# Assert that search is not case-sensitive by using unexpected casing.
actual, _cursor = task_result.search_by_name('requEST', None, 10)
self.assertEqual([result_summary], actual)
actual, _cursor = task_result.search_by_name('name', None, 10)
self.assertEqual([result_summary], actual)
def test_search_by_name_failures(self):
data = _gen_request_data(
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
actual, _cursor = task_result.search_by_name('foo', None, 10)
self.assertEqual([], actual)
# Partial match doesn't work.
actual, _cursor = task_result.search_by_name('nam', None, 10)
self.assertEqual([], actual)
def test_search_by_name_broken_tasks(self):
# Create tasks where task_scheduler.schedule_request() fails in the middle.
# This is done by mocking the functions to fail every SKIP call and running
# it in a loop.
class RandomFailure(Exception):
pass
    # First call fails ndb.put_multi(), second call fails search.Index.put(),
    # third call works.
index = [0]
SKIP = 3
def put_multi(*args, **kwargs):
callers = [i[3] for i in inspect.stack()]
self.assertTrue(
'make_request' in callers or 'schedule_request' in callers, callers)
if (index[0] % SKIP) == 1:
raise RandomFailure()
return old_put_multi(*args, **kwargs)
def put_async(*args, **kwargs):
callers = [i[3] for i in inspect.stack()]
self.assertIn('schedule_request', callers)
out = ndb.Future()
if (index[0] % SKIP) == 2:
out.set_exception(search.Error())
else:
out.set_result(old_put_async(*args, **kwargs).get_result())
return out
old_put_multi = self.mock(ndb, 'put_multi', put_multi)
old_put_async = self.mock(search.Index, 'put_async', put_async)
saved = []
for i in xrange(100):
index[0] = i
data = _gen_request_data(
name='Request %d' % i,
properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
try:
request = task_request.make_request(data)
result_summary = task_scheduler.schedule_request(request)
saved.append(result_summary)
except RandomFailure:
pass
self.assertEqual(67, len(saved))
self.assertEqual(67, task_request.TaskRequest.query().count())
self.assertEqual(67, task_result.TaskResultSummary.query().count())
# Now the DB is full of half-corrupted entities.
cursor = None
actual, cursor = task_result.search_by_name('Request', cursor, 31)
self.assertEqual(31, len(actual))
actual, cursor = task_result.search_by_name('Request', cursor, 31)
self.assertEqual(3, len(actual))
actual, cursor = task_result.search_by_name('Request', cursor, 31)
self.assertEqual(0, len(actual))
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
unittest.main()
|
|
import numpy
import scipy.signal as signal
from scipy.interpolate import griddata
import Shadow
from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D
from wofry.propagator.decorators import WavefrontDecorator
from wofry.propagator.wavefront import WavefrontDimension
class SHADOW3Wavefront(Shadow.Beam, WavefrontDecorator):
def __init__(self, N=250000, user_units_to_meters = 0.01):
Shadow.Beam.__init__(self, N=N)
self._user_units_to_meters = user_units_to_meters
@classmethod
def initialize_from_shadow3_beam(cls, shadow3_beam, user_units_to_meters = 0.01):
wf3 = SHADOW3Wavefront(N=shadow3_beam.nrays(), user_units_to_meters=user_units_to_meters)
wf3.rays = shadow3_beam.rays.copy()
return wf3
def get_mean_wavelength(self, nolost=True): # meters
wavelength_in_angstroms = self.getshcol(19, nolost=nolost)
return 1e-10*wavelength_in_angstroms.mean()
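    # E.g. a beam whose rays all carry 1.54 Angstrom photons (roughly Cu
    # K-alpha) yields get_mean_wavelength() == 1.54e-10, i.e. meters.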
def toGenericWavefront(self, pixels_h=None, pixels_v=None, range_h=None, range_v=None, shadow_to_meters=1e-2):
# guess number of pixels (if not defined)
        if pixels_h is None or pixels_v is None:
            pixels_estimated = int(numpy.sqrt(self.nrays()))
            if pixels_h is None:
                pixels_h = pixels_estimated
            if pixels_v is None:
                pixels_v = pixels_estimated
# guess definition limits (if not defined)
        if range_h is None or range_v is None:
            intensity_histogram = self.histo2(1, 3,
                                              nbins_h=pixels_h,
                                              nbins_v=pixels_v,
                                              nolost=1,
                                              calculate_widths=1)
            if range_h is None:
                try:
                    range_h = 3 * intensity_histogram['fwhm_h']
                except (KeyError, TypeError):  # no fwhm available: fall back to the full histogram width
                    shadow_x = intensity_histogram['bin_h_center']
                    range_h = numpy.abs(shadow_x[-1] - shadow_x[0])
            if range_v is None:
                try:
                    range_v = 3 * intensity_histogram['fwhm_v']
                except (KeyError, TypeError):
                    shadow_y = intensity_histogram['bin_v_center']
                    range_v = numpy.abs(shadow_y[-1] - shadow_y[0])
intensity_histogram = self.histo2(1, 3,
nbins_h=pixels_h,
nbins_v=pixels_v,
ref=23,
xrange=[-0.5*range_h, 0.5*range_h],
yrange=[-0.5*range_v, 0.5*range_v],
nolost=1,
calculate_widths=1)
wavelength = self.get_mean_wavelength() # meters
x = intensity_histogram['bin_h_center'] * shadow_to_meters # in meters
z = intensity_histogram['bin_v_center'] * shadow_to_meters # in meters
number_of_rays_histogram = self.histo2(1, 3,
nbins_h=pixels_h,
nbins_v=pixels_v,
ref=0,
xrange=[-0.5*range_h, 0.5*range_h],
yrange=[-0.5*range_v, 0.5*range_v],
nolost=1)
        good = numpy.where(number_of_rays_histogram['histogram'] > 0)
phase_histogram = self.histo2(1, 3,
nbins_h=pixels_h,
nbins_v=pixels_v,
ref=40,
xrange=[-0.5*range_h, 0.5*range_h],
yrange=[-0.5*range_v, 0.5*range_v],
nolost=1)
phase = numpy.zeros(phase_histogram['histogram'].shape)
phase[good] = phase_histogram['histogram'][good] / number_of_rays_histogram['histogram'][good]
#
# AMPLITUDE (NORMALIZATION AND SMOOTHING)
#
amplitude = numpy.sqrt(intensity_histogram['histogram'] / intensity_histogram['histogram'].max())
amplitude = SHADOW3Wavefront.smooth_amplitude(amplitude=amplitude,
pixels_h=pixels_h,
pixels_v=pixels_v)
wavefront = GenericWavefront2D.initialize_wavefront_from_range(x[0],
x[-1],
z[0],
z[-1],
number_of_points=amplitude.shape,
wavelength=wavelength)
        #
        # PHASE (consider Kx and Kz as the partial derivatives of the phase)
        #
complex_amplitude = amplitude * numpy.exp(1j*phase)
wavefront.set_complex_amplitude(complex_amplitude)
return wavefront
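    # A minimal usage sketch (assumes a shadow3 ray dump "begin.dat" on disk;
    # Shadow.Beam.load() is the standard shadow3 loader):
    #   beam = Shadow.Beam()
    #   beam.load("begin.dat")
    #   wf3 = SHADOW3Wavefront.initialize_from_shadow3_beam(beam)
    #   generic_wf = wf3.toGenericWavefront(pixels_h=200, pixels_v=200)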
def getshonecol(self, col, nolost=0):
if col == 40:
optical_path = self.rays[:, 12] * self._user_units_to_meters * 100 # to cm
            k = self.rays[:, 10] # wavenumber, in cm^-1
column = ((optical_path * k) % (2*numpy.pi)) - numpy.pi
if nolost == 0:
return column.copy()
if nolost == 1:
f = numpy.where(self.rays[:,9] > 0.0)
if len(f[0]) == 0:
return numpy.empty(0)
return column[f].copy()
if nolost == 2:
f = numpy.where(self.rays[:,9] < 0.0)
if len(f[0]) == 0:
return numpy.empty(0)
return column[f].copy()
else:
return super().getshonecol(col, nolost)
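    # The col == 40 branch above wraps the accumulated phase optical_path * k
    # into [-pi, pi). E.g. a ray with optical_path * k == 3*pi yields
    # ((3*pi) % (2*pi)) - pi == 0.0, i.e. zero wrapped phase.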
@classmethod
def fromGenericWavefront(cls, wavefront, shadow_to_meters = 1e-2):
meters_to_shadow = 1/shadow_to_meters
w_intensity = wavefront.get_intensity().flatten()
w_x = wavefront.get_mesh_x().flatten()
w_y = wavefront.get_mesh_y().flatten()
w_phase = wavefront.get_phase()
w_wavelength = wavefront.get_wavelength() # meters
k_modulus = 2 * numpy.pi / w_wavelength # m-1
nrays = w_intensity.size
wf3 = SHADOW3Wavefront(N=nrays)
# positions
wf3.rays[:, 0] = w_x * meters_to_shadow # cm
wf3.rays[:, 2] = w_y * meters_to_shadow # cm
# Lost ray flag
wf3.rays[:, 9] = 1.0
# energy
wf3.rays[:, 10] = k_modulus / meters_to_shadow # cm-1
# Ray index
wf3.rays[:, 11] = numpy.arange(1, nrays+1, 1)
normalization = nrays/numpy.sum(w_intensity) # Shadow-like intensity
# intensity
# TODO: now we suppose fully polarized beam
wf3.rays[:, 6] = numpy.sqrt(w_intensity*normalization)
dx, dy = wavefront.delta()
# The k direction is obtained from the gradient of the phase
kx, kz = numpy.gradient(w_phase, dx, dy, edge_order=2)
nx = kx / k_modulus
nz = kz / k_modulus
ny = numpy.sqrt(1.0 - nx**2 - nz**2)
wf3.rays[:, 3] = nx.flatten()
wf3.rays[:, 4] = ny.flatten()
wf3.rays[:, 5] = nz.flatten()
return wf3
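    # The direction cosines above treat the local wavevector as the gradient
    # of the phase: nx = (dphi/dx) / k, nz = (dphi/dy) / k, with ny chosen so
    # that |n| == 1. E.g. a flat phase (kx == kz == 0 everywhere) maps every
    # ray to (nx, ny, nz) == (0, 1, 0), i.e. straight along the beam axis.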
@classmethod
    def decorateSHADOW3WF(cls, shadow3_beam):
return SHADOW3Wavefront.initialize_from_shadow3_beam(shadow3_beam)
def get_dimension(self):
return WavefrontDimension.TWO
# ------------------------------------------------------
#
# TOOLS
#
# ------------------------------------------------------
@classmethod
def smooth_amplitude(cls, amplitude, pixels_h, pixels_v):
        kern_hanning = signal.hanning(max(5, int(pixels_h/10)))[:, None]
        kern_hanning /= kern_hanning.sum()
        kern_hanning_2 = signal.hanning(max(5, int(pixels_v/10)))[None, :]
        kern_hanning_2 /= kern_hanning_2.sum()  # normalize the vertical kernel by its own sum
return cls.rebin(array=signal.convolve(signal.convolve(amplitude,
kern_hanning),
kern_hanning_2),
new_shape=(pixels_h, pixels_v))
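    # The two 1-D Hanning passes implement a cheap separable 2-D smoothing; as
    # 'full' convolution grows the array by (kernel size - 1) samples per axis,
    # the final rebin() squeezes the result back to (pixels_h, pixels_v).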
@classmethod
def rebin(cls, array=numpy.zeros((100, 100)), new_shape=(100, 100)):
assert len(array.shape) == len(new_shape)
slices = [slice(0, old, float(old) / new) for old, new in zip(array.shape, new_shape)]
coordinates = numpy.mgrid[slices]
        indices = coordinates.astype('i') #truncate to the largest integer index not exceeding each coordinate
return array[tuple(indices)]
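    # Nearest-neighbour rebin example: a (4, 4) array rebinned to (2, 2) uses
    # slice(0, 4, 2.0) per axis, so only rows/columns [0, 2] survive.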
if __name__=="__main__":
def func(x, y):
return x*(1-x)*numpy.cos(4*numpy.pi*x) * numpy.sin(4*numpy.pi*y**2)**2
grid_x, grid_y = numpy.mgrid[0:1:100j, 0:1:100j]
points = numpy.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
print(points.shape, values.shape)
grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic', fill_value=0.0)
import matplotlib.pyplot as plt
plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
plt.show()
|
|
import logging
import datetime
import time
import copy
import decimal
import numpy
import pymongo
from lib import config, util, util_bitcoin
D = decimal.Decimal
def get_market_price(price_data, vol_data):
assert len(price_data) == len(vol_data)
assert len(price_data) <= config.MARKET_PRICE_DERIVE_NUM_POINTS
market_price = numpy.average(price_data, weights=vol_data)
return market_price
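# A worked example of the volume-weighted average above (hypothetical numbers,
# assuming config.MARKET_PRICE_DERIVE_NUM_POINTS >= 3): prices
# [10.0, 11.0, 12.0] with volumes [1.0, 1.0, 2.0] give
# (10*1 + 11*1 + 12*2) / 4 == 11.25 as the market price.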
def get_market_price_summary(asset1, asset2, with_last_trades=0, start_dt=None, end_dt=None):
"""Gets a synthesized trading "market price" for a specified asset pair (if available), as well as additional info.
If no price is available, False is returned.
"""
mongo_db = config.mongo_db
if not end_dt:
end_dt = datetime.datetime.utcnow()
if not start_dt:
start_dt = end_dt - datetime.timedelta(days=10) #default to 10 days in the past
    #look at up to the last config.MARKET_PRICE_DERIVE_NUM_POINTS trades within the past 10 day window
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
if not isinstance(with_last_trades, int) or with_last_trades < 0 or with_last_trades > 30:
raise Exception("Invalid with_last_trades")
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
last_trades = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
'block_time': { "$gte": start_dt, "$lte": end_dt }
},
{'_id': 0, 'block_index': 1, 'block_time': 1, 'unit_price': 1, 'base_quantity_normalized': 1, 'quote_quantity_normalized': 1}
).sort("block_time", pymongo.DESCENDING).limit(max(config.MARKET_PRICE_DERIVE_NUM_POINTS, with_last_trades))
if not last_trades.count():
return None #no suitable trade data to form a market price (return None, NOT False here)
last_trades = list(last_trades)
    last_trades.reverse() #now ordered oldest to newest
market_price = get_market_price(
[last_trades[i]['unit_price'] for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))],
[(last_trades[i]['base_quantity_normalized'] + last_trades[i]['quote_quantity_normalized']) for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))])
result = {
'market_price': float(D(market_price)),
'base_asset': base_asset,
'quote_asset': quote_asset,
}
if with_last_trades:
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
result['last_trades'] = [[
t['block_time'],
t['unit_price'],
t['base_quantity_normalized'],
t['quote_quantity_normalized'],
t['block_index']
] for t in last_trades]
else:
result['last_trades'] = []
return result
def calc_inverse(quantity):
    return float(D(1) / D(quantity))
def calc_price_change(open_price, close_price):
    return float(D(100) * (D(close_price) - D(open_price)) / D(open_price))
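# Worked examples for the two helpers above (pure arithmetic, no market data):
#   calc_inverse(0.25)          -> 4.0    (1 / 0.25)
#   calc_price_change(2.0, 3.0) -> 50.0   (close is 50% above open)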
def get_price_primatives(start_dt=None, end_dt=None):
mps_xcp_btc = get_market_price_summary(config.XCP, config.BTC, start_dt=start_dt, end_dt=end_dt)
xcp_btc_price = mps_xcp_btc['market_price'] if mps_xcp_btc else None # == XCP/BTC
btc_xcp_price = calc_inverse(mps_xcp_btc['market_price']) if mps_xcp_btc else None #BTC/XCP
return mps_xcp_btc, xcp_btc_price, btc_xcp_price
def get_asset_info(asset, at_dt=None):
mongo_db = config.mongo_db
asset_info = mongo_db.tracked_assets.find_one({'asset': asset})
if asset not in (config.XCP, config.BTC) and at_dt and asset_info['_at_block_time'] > at_dt:
#get the asset info at or before the given at_dt datetime
for e in reversed(asset_info['_history']): #newest to oldest
if e['_at_block_time'] <= at_dt:
asset_info = e
break
else: #asset was created AFTER at_dt
asset_info = None
if asset_info is None: return None
assert asset_info['_at_block_time'] <= at_dt
#modify some of the properties of the returned asset_info for BTC and XCP
if asset == config.BTC:
if at_dt:
start_block_index, end_block_index = util.get_block_indexes_for_dates(end_dt=at_dt)
asset_info['total_issued'] = util_bitcoin.get_btc_supply(normalize=False, at_block_index=end_block_index)
asset_info['total_issued_normalized'] = util_bitcoin.normalize_quantity(asset_info['total_issued'])
else:
asset_info['total_issued'] = util_bitcoin.get_btc_supply(normalize=False)
asset_info['total_issued_normalized'] = util_bitcoin.normalize_quantity(asset_info['total_issued'])
elif asset == config.XCP:
#BUG: this does not take end_dt (if specified) into account. however, the deviation won't be too big
# as XCP doesn't deflate quickly at all, and shouldn't matter that much since there weren't any/much trades
# before the end of the burn period (which is what is involved with how we use at_dt with currently)
asset_info['total_issued'] = util.call_jsonrpc_api("get_xcp_supply", abort_on_error=True)['result']
asset_info['total_issued_normalized'] = util_bitcoin.normalize_quantity(asset_info['total_issued'])
if not asset_info:
raise Exception("Invalid asset: %s" % asset)
return asset_info
def get_xcp_btc_price_info(asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price, with_last_trades=0, start_dt=None, end_dt=None):
if asset not in [config.BTC, config.XCP]:
#get price data for both the asset with XCP, as well as BTC
price_summary_in_xcp = get_market_price_summary(asset, config.XCP,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
price_summary_in_btc = get_market_price_summary(asset, config.BTC,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
#aggregated (averaged) price (expressed as XCP) for the asset on both the XCP and BTC markets
        if price_summary_in_xcp: # None means no trade data
price_in_xcp = price_summary_in_xcp['market_price']
if xcp_btc_price:
aggregated_price_in_xcp = float(((D(price_summary_in_xcp['market_price']) + D(xcp_btc_price)) / D(2)))
else: aggregated_price_in_xcp = None
else:
price_in_xcp = None
aggregated_price_in_xcp = None
        if price_summary_in_btc: # None means no trade data
price_in_btc = price_summary_in_btc['market_price']
if btc_xcp_price:
aggregated_price_in_btc = float(((D(price_summary_in_btc['market_price']) + D(btc_xcp_price)) / D(2)))
else: aggregated_price_in_btc = None
else:
aggregated_price_in_btc = None
price_in_btc = None
else:
#here we take the normal XCP/BTC pair, and invert it to BTC/XCP, to get XCP's data in terms of a BTC base
# (this is the only area we do this, as BTC/XCP is NOT standard pair ordering)
price_summary_in_xcp = mps_xcp_btc #might be None
price_summary_in_btc = copy.deepcopy(mps_xcp_btc) if mps_xcp_btc else None #must invert this -- might be None
if price_summary_in_btc:
price_summary_in_btc['market_price'] = calc_inverse(price_summary_in_btc['market_price'])
price_summary_in_btc['base_asset'] = config.BTC
price_summary_in_btc['quote_asset'] = config.XCP
for i in xrange(len(price_summary_in_btc['last_trades'])):
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
price_summary_in_btc['last_trades'][i][1] = calc_inverse(price_summary_in_btc['last_trades'][i][1])
price_summary_in_btc['last_trades'][i][2], price_summary_in_btc['last_trades'][i][3] = \
price_summary_in_btc['last_trades'][i][3], price_summary_in_btc['last_trades'][i][2] #swap
if asset == config.XCP:
price_in_xcp = 1.0
price_in_btc = price_summary_in_btc['market_price'] if price_summary_in_btc else None
aggregated_price_in_xcp = 1.0
aggregated_price_in_btc = btc_xcp_price #might be None
else:
assert asset == config.BTC
price_in_xcp = price_summary_in_xcp['market_price'] if price_summary_in_xcp else None
price_in_btc = 1.0
aggregated_price_in_xcp = xcp_btc_price #might be None
aggregated_price_in_btc = 1.0
return (price_summary_in_xcp, price_summary_in_btc, price_in_xcp, price_in_btc, aggregated_price_in_xcp, aggregated_price_in_btc)
def calc_market_cap(asset_info, price_in_xcp, price_in_btc):
market_cap_in_xcp = float( (D(asset_info['total_issued_normalized']) / D(price_in_xcp))) if price_in_xcp else None
market_cap_in_btc = float( (D(asset_info['total_issued_normalized']) / D(price_in_btc))) if price_in_btc else None
return market_cap_in_xcp, market_cap_in_btc
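# A sketch of the cap arithmetic above (hypothetical figures): price_in_xcp is
# expressed as "units of asset per 1 XCP" (see the comments in
# compile_summary_market_info below), so a total supply of 1000 units priced
# at 2 units/XCP gives a market cap of 1000 / 2 == 500 XCP.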
def compile_summary_market_info(asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price):
"""Returns information related to capitalization, volume, etc for the supplied asset(s)
NOTE: in_btc == base asset is BTC, in_xcp == base asset is XCP
@param assets: A list of one or more assets
"""
asset_info = get_asset_info(asset)
(price_summary_in_xcp, price_summary_in_btc, price_in_xcp, price_in_btc, aggregated_price_in_xcp, aggregated_price_in_btc
) = get_xcp_btc_price_info(asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price, with_last_trades=30)
market_cap_in_xcp, market_cap_in_btc = calc_market_cap(asset_info, price_in_xcp, price_in_btc)
return {
'price_in_{}'.format(config.XCP.lower()): price_in_xcp, #current price of asset vs XCP (e.g. how many units of asset for 1 unit XCP)
'price_in_{}'.format(config.BTC.lower()): price_in_btc, #current price of asset vs BTC (e.g. how many units of asset for 1 unit BTC)
'price_as_{}'.format(config.XCP.lower()): calc_inverse(price_in_xcp) if price_in_xcp else None, #current price of asset AS XCP
'price_as_{}'.format(config.BTC.lower()): calc_inverse(price_in_btc) if price_in_btc else None, #current price of asset AS BTC
'aggregated_price_in_{}'.format(config.XCP.lower()): aggregated_price_in_xcp,
'aggregated_price_in_{}'.format(config.BTC.lower()): aggregated_price_in_btc,
'aggregated_price_as_{}'.format(config.XCP.lower()): calc_inverse(aggregated_price_in_xcp) if aggregated_price_in_xcp else None,
'aggregated_price_as_{}'.format(config.BTC.lower()): calc_inverse(aggregated_price_in_btc) if aggregated_price_in_btc else None,
'total_supply': asset_info['total_issued_normalized'],
'market_cap_in_{}'.format(config.XCP.lower()): market_cap_in_xcp,
'market_cap_in_{}'.format(config.BTC.lower()): market_cap_in_btc,
}
def compile_24h_market_info(asset):
asset_data = {}
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
mongo_db = config.mongo_db
#perform aggregation to get 24h statistics
#TOTAL volume and count across all trades for the asset (on ALL markets, not just XCP and BTC pairings)
_24h_vols = {'vol': 0, 'count': 0}
_24h_vols_as_base = mongo_db.trades.aggregate([
{"$match": {
"base_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
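    # Note: with the pymongo 2.x API this module targets, aggregate() returns
    # the raw command response, a dict shaped like {'ok': 1.0, 'result': [...]};
    # that is why the aggregations here check ['ok'] and unwrap ['result'][0].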
_24h_vols_as_base = {} if not _24h_vols_as_base['ok'] \
or not len(_24h_vols_as_base['result']) else _24h_vols_as_base['result'][0]
_24h_vols_as_quote = mongo_db.trades.aggregate([
{"$match": {
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"quote_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_vols_as_quote = {} if not _24h_vols_as_quote['ok'] \
or not len(_24h_vols_as_quote['result']) else _24h_vols_as_quote['result'][0]
_24h_vols['vol'] = _24h_vols_as_base.get('vol', 0) + _24h_vols_as_quote.get('vol', 0)
_24h_vols['count'] = _24h_vols_as_base.get('count', 0) + _24h_vols_as_quote.get('count', 0)
#XCP market volume with stats
if asset != config.XCP:
_24h_ohlc_in_xcp = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XCP,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_xcp = {} if not _24h_ohlc_in_xcp['ok'] \
or not len(_24h_ohlc_in_xcp['result']) else _24h_ohlc_in_xcp['result'][0]
if _24h_ohlc_in_xcp: del _24h_ohlc_in_xcp['_id']
else:
_24h_ohlc_in_xcp = {}
#BTC market volume with stats
if asset != config.BTC:
_24h_ohlc_in_btc = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.BTC,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_btc = {} if not _24h_ohlc_in_btc['ok'] \
or not len(_24h_ohlc_in_btc['result']) else _24h_ohlc_in_btc['result'][0]
if _24h_ohlc_in_btc: del _24h_ohlc_in_btc['_id']
else:
_24h_ohlc_in_btc = {}
return {
'24h_summary': _24h_vols,
#^ total quantity traded of that asset in all markets in last 24h
        '24h_ohlc_in_{}'.format(config.XCP.lower()): _24h_ohlc_in_xcp,
        #^ OHLC and volume stats for the asset traded against XCP in the last 24h
        '24h_ohlc_in_{}'.format(config.BTC.lower()): _24h_ohlc_in_btc,
        #^ OHLC and volume stats for the asset traded against BTC in the last 24h
'24h_vol_price_change_in_{}'.format(config.XCP.lower()): calc_price_change(_24h_ohlc_in_xcp['open'], _24h_ohlc_in_xcp['close'])
if _24h_ohlc_in_xcp else None,
        #^ price change from 24h ago to now, expressed as a signed percentage (e.g. 54.0 means +54%)
'24h_vol_price_change_in_{}'.format(config.BTC.lower()): calc_price_change(_24h_ohlc_in_btc['open'], _24h_ohlc_in_btc['close'])
if _24h_ohlc_in_btc else None,
}
def compile_7d_market_info(asset):
mongo_db = config.mongo_db
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
#get XCP and BTC market summarized trades over a 7d period (quantize to hour long slots)
_7d_history_in_xcp = None # xcp/asset market (or xcp/btc for xcp or btc)
_7d_history_in_btc = None # btc/asset market (or btc/xcp for xcp or btc)
if asset not in [config.BTC, config.XCP]:
for a in [config.XCP, config.BTC]:
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": a,
"quote_asset": asset,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
if a == config.XCP: _7d_history_in_xcp = _7d_history
else: _7d_history_in_btc = _7d_history
else: #get the XCP/BTC market and invert for BTC/XCP (_7d_history_in_btc)
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XCP,
"quote_asset": config.BTC,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
_7d_history_in_xcp = _7d_history
_7d_history_in_btc = copy.deepcopy(_7d_history_in_xcp)
for i in xrange(len(_7d_history_in_btc)):
_7d_history_in_btc[i]['price'] = calc_inverse(_7d_history_in_btc[i]['price'])
_7d_history_in_btc[i]['vol'] = calc_inverse(_7d_history_in_btc[i]['vol'])
for l in [_7d_history_in_xcp, _7d_history_in_btc]:
for e in l: #convert our _id field out to be an epoch ts (in ms), and delete _id
e['when'] = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000
del e['_id']
return {
'7d_history_in_{}'.format(config.XCP.lower()): [[e['when'], e['price']] for e in _7d_history_in_xcp],
'7d_history_in_{}'.format(config.BTC.lower()): [[e['when'], e['price']] for e in _7d_history_in_btc],
}
def compile_asset_pair_market_info():
"""Compiles the pair-level statistics that show on the View Prices page of counterwallet, for instance"""
#loop through all open orders, and compile a listing of pairs, with a count of open orders for each pair
mongo_db = config.mongo_db
end_dt = datetime.datetime.utcnow()
start_dt = end_dt - datetime.timedelta(days=1)
start_block_index, end_block_index = util.get_block_indexes_for_dates(start_dt=start_dt, end_dt=end_dt)
open_orders = util.call_jsonrpc_api("get_orders",
{ 'filters': [
{'field': 'give_remaining', 'op': '>', 'value': 0},
{'field': 'get_remaining', 'op': '>', 'value': 0},
{'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
{'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
],
'status': 'open',
'show_expired': False,
}, abort_on_error=True)['result']
pair_data = {}
asset_info = {}
    def get_price(base_quantity_normalized, quote_quantity_normalized):
        return float(D(quote_quantity_normalized) / D(base_quantity_normalized))
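    # E.g. an order giving 2.0 units of the base asset for 5.0 units of the
    # quote asset prices the pair at get_price(2.0, 5.0) == 2.5 quote per base.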
#COMPOSE order depth, lowest ask, and highest bid column data
for o in open_orders:
(base_asset, quote_asset) = util.assets_to_asset_pair(o['give_asset'], o['get_asset'])
pair = '%s/%s' % (base_asset, quote_asset)
base_asset_info = asset_info.get(base_asset, mongo_db.tracked_assets.find_one({ 'asset': base_asset }))
if base_asset not in asset_info: asset_info[base_asset] = base_asset_info
quote_asset_info = asset_info.get(quote_asset, mongo_db.tracked_assets.find_one({ 'asset': quote_asset }))
if quote_asset not in asset_info: asset_info[quote_asset] = quote_asset_info
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None,
'completed_trades_count': 0, 'vol_base': 0, 'vol_quote': 0})
        #^ lowest ask = lowest-priced open order selling base; highest bid = highest-priced open order buying base
        #^ we also initialize completed_trades_count, vol_base, vol_quote because every pair inited here may
        # not have corresponding data out of the trades_data_by_pair aggregation below
pair_data[pair]['open_orders_count'] += 1
base_quantity_normalized = util_bitcoin.normalize_quantity(o['give_quantity'] if base_asset == o['give_asset'] else o['get_quantity'], base_asset_info['divisible'])
quote_quantity_normalized = util_bitcoin.normalize_quantity(o['give_quantity'] if quote_asset == o['give_asset'] else o['get_quantity'], quote_asset_info['divisible'])
order_price = get_price(base_quantity_normalized, quote_quantity_normalized)
if base_asset == o['give_asset']: #selling base
if pair_data[pair]['lowest_ask'] is None or order_price < pair_data[pair]['lowest_ask']:
pair_data[pair]['lowest_ask'] = order_price
elif base_asset == o['get_asset']: #buying base
if pair_data[pair]['highest_bid'] is None or order_price > pair_data[pair]['highest_bid']:
pair_data[pair]['highest_bid'] = order_price
#COMPOSE volume data (in XCP and BTC), and % change data
#loop through all trade volume over the past 24h, and match that to the open orders
trades_data_by_pair = mongo_db.trades.aggregate([
{"$match": {
"block_time": {"$gte": start_dt, "$lte": end_dt } }
},
{"$project": {
"base_asset": 1,
"quote_asset": 1,
"base_quantity_normalized": 1, #to derive base volume
"quote_quantity_normalized": 1 #to derive quote volume
}},
{"$group": {
"_id": {"base_asset": "$base_asset", "quote_asset": "$quote_asset"},
"vol_base": {"$sum": "$base_quantity_normalized"},
"vol_quote": {"$sum": "$quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
trades_data_by_pair = [] if not trades_data_by_pair['ok'] else trades_data_by_pair['result']
for e in trades_data_by_pair:
pair = '%s/%s' % (e['_id']['base_asset'], e['_id']['quote_asset'])
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None})
#^ initialize an empty pair in the event there are no open orders for that pair, but there ARE completed trades for it
pair_data[pair]['completed_trades_count'] = e['count']
pair_data[pair]['vol_base'] = e['vol_base']
pair_data[pair]['vol_quote'] = e['vol_quote']
#compose price data, relative to BTC and XCP
mps_xcp_btc, xcp_btc_price, btc_xcp_price = get_price_primatives()
for pair, e in pair_data.iteritems():
base_asset, quote_asset = pair.split('/')
_24h_vol_in_btc = None
_24h_vol_in_xcp = None
#derive asset price data, expressed in BTC and XCP, for the given volumes
if base_asset == config.XCP:
_24h_vol_in_xcp = e['vol_base']
_24h_vol_in_btc = util_bitcoin.round_out(e['vol_base'] * xcp_btc_price) if xcp_btc_price else 0
elif base_asset == config.BTC:
_24h_vol_in_xcp = util_bitcoin.round_out(e['vol_base'] * btc_xcp_price) if btc_xcp_price else 0
_24h_vol_in_btc = e['vol_base']
else: #base is not XCP or BTC
price_summary_in_xcp, price_summary_in_btc, price_in_xcp, price_in_btc, aggregated_price_in_xcp, aggregated_price_in_btc = \
get_xcp_btc_price_info(base_asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if price_in_xcp:
_24h_vol_in_xcp = util_bitcoin.round_out(e['vol_base'] * price_in_xcp)
if price_in_btc:
_24h_vol_in_btc = util_bitcoin.round_out(e['vol_base'] * price_in_btc)
if _24h_vol_in_xcp is None or _24h_vol_in_btc is None:
#the base asset didn't have price data against BTC or XCP, or both...try against the quote asset instead
price_summary_in_xcp, price_summary_in_btc, price_in_xcp, price_in_btc, aggregated_price_in_xcp, aggregated_price_in_btc = \
get_xcp_btc_price_info(quote_asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if _24h_vol_in_xcp is None and price_in_xcp:
_24h_vol_in_xcp = util_bitcoin.round_out(e['vol_quote'] * price_in_xcp)
if _24h_vol_in_btc is None and price_in_btc:
_24h_vol_in_btc = util_bitcoin.round_out(e['vol_quote'] * price_in_btc)
pair_data[pair]['24h_vol_in_{}'.format(config.XCP.lower())] = _24h_vol_in_xcp #might still be None
pair_data[pair]['24h_vol_in_{}'.format(config.BTC.lower())] = _24h_vol_in_btc #might still be None
#get % change stats -- start by getting the first trade directly before the 24h period starts
prev_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
"block_time": {'$lt': start_dt}}).sort('block_time', pymongo.DESCENDING).limit(1)
latest_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset}).sort('block_time', pymongo.DESCENDING).limit(1)
if not prev_trade.count(): #no previous trade before this 24hr period
pair_data[pair]['24h_pct_change'] = None
else:
prev_trade = prev_trade[0]
latest_trade = latest_trade[0]
prev_trade_price = get_price(prev_trade['base_quantity_normalized'], prev_trade['quote_quantity_normalized'])
latest_trade_price = get_price(latest_trade['base_quantity_normalized'], latest_trade['quote_quantity_normalized'])
pair_data[pair]['24h_pct_change'] = ((latest_trade_price - prev_trade_price) / prev_trade_price) * 100
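            # e.g. previous trade at 2.0 and latest at 2.5 gives
            # ((2.5 - 2.0) / 2.0) * 100 == +25.0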
pair_data[pair]['last_updated'] = end_dt
#print "PRODUCED", pair, pair_data[pair]
mongo_db.asset_pair_market_info.update( {'base_asset': base_asset, 'quote_asset': quote_asset}, {"$set": pair_data[pair]}, upsert=True)
#remove any old pairs that were not just updated
mongo_db.asset_pair_market_info.remove({'last_updated': {'$lt': end_dt}})
logging.info("Recomposed 24h trade statistics for %i asset pairs: %s" % (len(pair_data), ', '.join(pair_data.keys())))
def compile_asset_market_info():
"""Run through all assets and compose and store market ranking information."""
mongo_db = config.mongo_db
if not config.CAUGHT_UP:
logging.warn("Not updating asset market info as CAUGHT_UP is false.")
return False
#grab the last block # we processed assets data off of
last_block_assets_compiled = mongo_db.app_config.find_one()['last_block_assets_compiled']
last_block_time_assets_compiled = util.get_block_time(last_block_assets_compiled)
#logging.debug("Comping info for assets traded since block %i" % last_block_assets_compiled)
current_block_index = config.CURRENT_BLOCK_INDEX #store now as it may change as we are compiling asset data :)
current_block_time = util.get_block_time(current_block_index)
if current_block_index == last_block_assets_compiled:
#all caught up -- call again in 10 minutes
return True
mps_xcp_btc, xcp_btc_price, btc_xcp_price = get_price_primatives()
all_traded_assets = list(set(list([config.BTC, config.XCP]) + list(mongo_db.trades.find({}, {'quote_asset': 1, '_id': 0}).distinct('quote_asset'))))
#######################
#get a list of all assets with a trade within the last 24h (not necessarily just against XCP and BTC)
# ^ this is important because compiled market info has a 24h vol parameter that designates total volume for the asset across ALL pairings
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('base_asset'))
))
for asset in assets:
market_info_24h = compile_24h_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_24h})
#for all others (i.e. no trade in the last 24 hours), zero out the 24h trade data
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'24h_summary': {'vol': 0, 'count': 0},
'24h_ohlc_in_{}'.format(config.XCP.lower()): {},
'24h_ohlc_in_{}'.format(config.BTC.lower()): {},
'24h_vol_price_change_in_{}'.format(config.XCP.lower()): None,
'24h_vol_price_change_in_{}'.format(config.BTC.lower()): None,
}}, multi=True)
logging.info("Block: %s -- Calculated 24h stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#get a list of all assets with a trade within the last 7d up against XCP and BTC
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}, 'base_asset': {'$in': [config.XCP, config.BTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}}).distinct('base_asset'))
))
for asset in assets:
market_info_7d = compile_7d_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_7d})
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'7d_history_in_{}'.format(config.XCP.lower()): [],
'7d_history_in_{}'.format(config.BTC.lower()): [],
}}, multi=True)
logging.info("Block: %s -- Calculated 7d stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#update summary market data for assets traded since last_block_assets_compiled
#get assets that were traded since the last check with either BTC or XCP, and update their market summary data
assets = list(set(
list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}, 'base_asset': {'$in': [config.XCP, config.BTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).distinct('base_asset'))
))
#update our storage of the latest market info in mongo
for asset in assets:
logging.info("Block: %s -- Updating asset market info for %s ..." % (current_block_index, asset))
summary_info = compile_summary_market_info(asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price)
mongo_db.asset_market_info.update( {'asset': asset}, {"$set": summary_info}, upsert=True)
#######################
#next, compile market cap historicals (and get the market price data that we can use to update assets with new trades)
    #NOTE: this algorithm still needs to be fleshed out some... I'm not convinced it's laid out/optimized like it should be
#start by getting all trades from when we last compiled this data
trades = mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).sort('block_index', pymongo.ASCENDING)
    trades_by_block = [] #trades grouped by block, in ascending block order (we only want to analyze any given asset once per block)
trades_by_block_mapping = {}
#organize trades by block
for t in trades:
if t['block_index'] in trades_by_block_mapping:
assert trades_by_block_mapping[t['block_index']]['block_index'] == t['block_index']
assert trades_by_block_mapping[t['block_index']]['block_time'] == t['block_time']
trades_by_block_mapping[t['block_index']]['trades'].append(t)
else:
e = {'block_index': t['block_index'], 'block_time': t['block_time'], 'trades': [t,]}
trades_by_block.append(e)
trades_by_block_mapping[t['block_index']] = e
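    # Resulting structure (hypothetical data): trades_by_block is a list like
    #   [{'block_index': 1000, 'block_time': <datetime>, 'trades': [t1, t2]},
    #    {'block_index': 1001, 'block_time': <datetime>, 'trades': [t3]}]
    # in ascending block order, with trades_by_block_mapping keyed by block_index.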
for t_block in trades_by_block:
#reverse the tradelist per block, and ensure that we only process an asset that hasn't already been processed for this block
# (as there could be multiple trades in a single block for any specific asset). we reverse the list because
# we'd rather process a later trade for a given asset, as the market price for that will take into account
# the earlier trades on that same block for that asset, and we don't want/need multiple cap points per block
assets_in_block = {}
mps_xcp_btc, xcp_btc_price, btc_xcp_price = get_price_primatives(end_dt=t_block['block_time'])
for t in reversed(t_block['trades']):
assets = []
if t['base_asset'] not in assets_in_block:
assets.append(t['base_asset'])
assets_in_block[t['base_asset']] = True
if t['quote_asset'] not in assets_in_block:
assets.append(t['quote_asset'])
assets_in_block[t['quote_asset']] = True
            if not assets: continue
for asset in assets:
#recalculate the market cap for the asset this trade is for
asset_info = get_asset_info(asset, at_dt=t['block_time'])
(price_summary_in_xcp, price_summary_in_btc, price_in_xcp, price_in_btc, aggregated_price_in_xcp, aggregated_price_in_btc
) = get_xcp_btc_price_info(asset, mps_xcp_btc, xcp_btc_price, btc_xcp_price, with_last_trades=0, end_dt=t['block_time'])
market_cap_in_xcp, market_cap_in_btc = calc_market_cap(asset_info, price_in_xcp, price_in_btc)
#^ this will get price data from the block time of this trade back the standard number of days and trades
# to determine our standard market price, relative (anchored) to the time of this trade
for market_cap_as in (config.XCP, config.BTC):
market_cap = market_cap_in_xcp if market_cap_as == config.XCP else market_cap_in_btc
#if there is a previously stored market cap for this asset, add a new history point only if the two caps differ
prev_market_cap_history = mongo_db.asset_marketcap_history.find({'market_cap_as': market_cap_as, 'asset': asset,
'block_index': {'$lt': t['block_index']}}).sort('block_index', pymongo.DESCENDING).limit(1)
                    prev_market_cap_history = list(prev_market_cap_history)
                    prev_market_cap_history = prev_market_cap_history[0] if prev_market_cap_history else None
if market_cap and (not prev_market_cap_history or prev_market_cap_history['market_cap'] != market_cap):
mongo_db.asset_marketcap_history.insert({
'block_index': t['block_index'],
'block_time': t['block_time'],
'asset': asset,
'market_cap': market_cap,
'market_cap_as': market_cap_as,
})
logging.info("Block %i -- Calculated market cap history point for %s as %s (mID: %s)" % (t['block_index'], asset, market_cap_as, t['message_index']))
mongo_db.app_config.update({}, {'$set': {'last_block_assets_compiled': current_block_index}})
return True
|
|
import sys
import numpy as np
import os
import itertools
import pickle
import multiprocessing as mp
import timeit
import getopt
#This is created while running Zeisel_wrapper.py
file_list='./SRR_in_3005.txt'
cell_files=sorted(np.loadtxt(file_list,dtype=str))
try:
    opts, args = getopt.getopt(sys.argv[1:], "k:s:r:h:", ["hacked-kallisto-path=", "seed=", "reference-transcriptome=", "mouse-genome-for-hisat="])
except getopt.GetoptError:
print ("getopterrror")
print ('usage is : \n python time_test.py -k hacked-kallisto-path -r mouse-reference-transcriptome -h path-to-file-containing-paths-to-mouse-genome [-s seed]')
sys.exit(1)
kallipso_path=''
ref_transcriptome=''
ref_genome=''
npseed=100
for opt,arg in opts:
if opt in ("-s", "--seed"):
npseed=int(arg)
elif opt in ("-k","--hacked-kallisto-path"):
kallipso_path=arg
elif opt in ("-r","--reference-transcriptome"):
ref_transcriptome=arg
elif opt in ("-h","--mouse-genome-for-hisat"):
ref_genome=arg
if (not kallipso_path) or (not ref_transcriptome) or (not ref_genome):
    print('usage: \n python time_test.py -k hacked-kallisto-path -r mouse-reference-transcriptome -h path-to-file-containing-paths-to-mouse-genome [-s seed]')
sys.exit(1)
test_SRA_dir='./SRA/'
test_read_dir='./reads/'
test_kallisto_dir='./kallisto/'
test_kallipso_dir='./TCC/'
test_bowtie1_dir='./bowtie1/'
test_hisat_dir='./hisat/'
test_wc_dir='./wc/'
kallisto_index='./kallisto_index/'
bowtie_index='./bowtie_index/'
hisat_index='./hisat_index/'
os.system('rm -rf '+test_SRA_dir)
os.system('rm -rf '+test_read_dir)
os.system('rm -rf '+test_kallisto_dir)
os.system('rm -rf '+test_kallipso_dir)
os.system('rm -rf '+test_bowtie1_dir)
os.system('rm -rf '+test_hisat_dir)
os.system('rm -rf '+test_wc_dir)
os.system('rm -rf '+kallisto_index)
os.system('rm -rf '+bowtie_index)
os.system('rm -rf '+hisat_index)
os.system('mkdir -p '+test_SRA_dir)
os.system('mkdir -p '+test_read_dir)
os.system('mkdir -p '+test_kallisto_dir)
os.system('mkdir -p '+test_kallipso_dir)
os.system('mkdir -p '+test_bowtie1_dir)
os.system('mkdir -p '+test_hisat_dir)
os.system('mkdir -p '+test_wc_dir)
os.system('mkdir -p '+kallisto_index)
os.system('mkdir -p '+bowtie_index)
os.system('mkdir -p '+hisat_index)
np.random.seed(seed=npseed)  #use the seed parsed from -s/--seed
files_picked=np.random.choice(cell_files,10,replace=False)
print('Copying over SRA files...')
base_cmd1="wget -O ./SRA/"
base_cmd2="ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByStudy/sra/SRP/SRP045/SRP045452/"
for flnames in files_picked:
cmd=base_cmd1+flnames+".sra "+base_cmd2+flnames+"/"+flnames+'.sra'
os.system(cmd)
print('Converting SRA to fastq.gz files')
for flname in files_picked:
    cmd = 'fastq-dump --gzip ' + './SRA/' + flname + '.sra' + ' -O ' + test_read_dir
os.system(cmd)
print('Building kallisto index...')
kallisto_index_path=kallisto_index+'Zeisel_index.idx'
os.system(kallipso_path+' index -i '+kallisto_index_path+' '+ref_transcriptome)
print('Building hisat index...')
hisat_ip_paths=''
with open('hisat_chr_path_list.txt','r') as f:
hisat_ip_paths=f.readline()
hisat_index_path=hisat_index+'Zeisel_index'
os.system('hisat-build --offrate 5 '+hisat_ip_paths+' '+hisat_index_path)
print('Getting bowtie indices...')
bowtie_index_dir='./bowtie_index/'
os.system('mkdir -p '+bowtie_index_dir)
bowtie_index_path=bowtie_index_dir+'Zeisel_index.all'
os.system('bowtie-build --offrate=5 '+ref_transcriptome+' '+bowtie_index_path)
print('Timing kallisto...')
def run_kallisto():
test_kallisto_dir='./kallisto/'
test_read_dir='./reads/'
transcriptome_path='./kallisto_index/Zeisel_index.idx'
flnames=sorted(os.listdir(test_read_dir))
for fls in flnames:
cellname=fls.split('.')[0]
out_dir=test_kallisto_dir+cellname
read_fl=test_read_dir+fls
cmd="""kallisto quant -i"""+transcriptome_path+ """ -o """ +out_dir +""" --single -l 200 -s 100 """+read_fl
#print cmd
os.system(cmd)
x=timeit.timeit(run_kallisto,number=1)
op_file=test_kallisto_dir+'time.time'
with open(op_file,'w') as f:
f.write(str(x))
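# The same pattern repeats for every tool below: time one full pass with
# timeit, then persist the elapsed seconds to a time.time file. A generic
# helper would look like this minimal sketch (not wired in, so that each
# timing block stays explicit and independently editable):
# def time_and_record(fn, out_dir):
#     elapsed = timeit.timeit(fn, number=1)
#     with open(out_dir + 'time.time', 'w') as f:
#         f.write(str(elapsed))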
print('Timing hacked kallisto...')
# kallipso_path is already a module-level name, so run_kallipso below can read
# it directly; no `global` declaration is needed for read-only access.
def run_kallipso():
test_kallipso_dir='./TCC/'
ref_path='./kallisto_index/Zeisel_index.idx'
test_read_dir='./reads/'
flnames=sorted(os.listdir(test_read_dir))
for flname in flnames:
read_path=test_read_dir+flname
cellname=flname.split('.')[0]
command = kallipso_path+' pseudoalign -i '+ ref_path+ ' -o ' + test_kallipso_dir+cellname+'.counts' + ' ' + read_path
#print command
os.system(command)
x=timeit.timeit(run_kallipso,number=1)
op_file=test_kallipso_dir+'time.time'
with open(op_file,'w') as f:
f.write(str(x))
print('Timing word count...')
def run_word_count():
test_read_dir='./reads/'
flnames=sorted(os.listdir(test_read_dir))
for flname in flnames:
read_path=test_read_dir+flname
os.system('wc '+read_path)
x=timeit.timeit(run_word_count,number=1)
op_file=test_wc_dir+'time.time'
with open(op_file,'w') as f:
f.write(str(x))
print('Timing hisat...')
def run_hisat():
test_hisat_dir='./hisat/'
test_read_dir='./reads/'
transcriptome_path='./hisat_index/Zeisel_index'
flnames=sorted(os.listdir(test_read_dir))
for fls in flnames:
cellname=fls.split('.')[0]
out_sam=test_hisat_dir+cellname+'.sam'
out_bam=test_hisat_dir+cellname+'.bam'
read_fl=test_read_dir+fls
cmd="""hisat -p 1 -x"""+transcriptome_path+ """ -U """ +test_read_dir+fls+" -S "+out_sam
#print cmd
cmd1="samtools view -bS "+out_sam+" > "+out_bam
#print cmd1
os.system(cmd)
os.system(cmd1)
x=timeit.timeit(run_hisat,number=1)
op_file=test_hisat_dir+'time.time'
with open(op_file,'w') as f:
f.write(str(x))
print('Timing bowtie...')
def run_bowtie1():
index = './bowtie_index/Zeisel_index.all'
test_bowtie1_dir='./bowtie1/'
test_read_dir='./reads/'
flnames=sorted(os.listdir(test_read_dir))
for flname in flnames:
out = test_bowtie1_dir + flname.split('.')[0]
os.system('mkdir -p ' + out)
BTcmd = 'gzip -dc '+test_read_dir+flname+' | bowtie -p 1 -aS --offrate 1 '+index+' - | samtools view -Sb - > '+out+'/hits.bam'
#print BTcmd
os.system(BTcmd)
x=timeit.timeit(run_bowtie1,number=1)
op_file=test_bowtie1_dir+'time.time'
with open(op_file,'w') as f:
f.write(str(x))
|
|
from pymeta.builder import TreeBuilder, writePython as writePython_orig
from textwrap import dedent
import unittest
def dd(txt):
return dedent(txt).strip()
def writePython(tree):
return writePython_orig(tree).strip()
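# dd() lets the expected snippets below be written as indented triple-quoted
# strings, and writePython() strips the generated source the same way, so the
# comparisons are insensitive to leading/trailing whitespace.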
class PythonWriterTests(unittest.TestCase):
"""
Tests for generating Python source from an AST.
"""
def setUp(self):
"""
        Create a L{TreeBuilder}.
"""
self.builder = TreeBuilder("BuilderTest")
def test_exactly(self):
"""
Test generation of code for the 'exactly' pattern.
"""
x = self.builder.exactly("x")
self.assertEqual(writePython(x),
dd("""
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
_G_exactly_1
"""))
def test_apply(self):
"""
Test generation of code for rule application.
"""
one = self.builder.expr("1")
x = self.builder.expr("x")
a = self.builder.apply("foo", "main", one, x)
self.assertEqual(writePython(a),
dd("""
_G_python_1, lastError = eval('1', self.globals, _locals), None
self.considerError(lastError)
_G_python_2, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
_G_apply_3, lastError = self._apply("""
"""self.rule_foo, "foo", [_G_python_1, _G_python_2])
self.considerError(lastError)
_G_apply_3
"""))
def test_superApply(self):
"""
Test generation of code for calling the superclass' implementation of
the current rule.
"""
one = self.builder.expr("1")
x = self.builder.expr("x")
a = self.builder.apply("super", "main", one, x)
self.assertEqual(writePython(a),
dd("""
_G_python_1, lastError = eval('1', self.globals, _locals), None
self.considerError(lastError)
_G_python_2, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
_G_apply_3, lastError = self.superApply("main", _G_python_1, _G_python_2)
self.considerError(lastError)
_G_apply_3
"""))
def test_many(self):
"""
Test generation of code for matching zero or more instances of
a pattern.
"""
xs = self.builder.many(self.builder.exactly("x"))
self.assertEqual(writePython(xs),
dd("""
def _G_many_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_G_many_2
"""))
def test_many1(self):
"""
Test generation of code for matching one or more instances of
a pattern.
"""
xs = self.builder.many1(self.builder.exactly("x"))
self.assertEqual(writePython(xs),
dd("""
def _G_many1_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_many1_2, lastError = self.many(_G_many1_1, _G_many1_1())
self.considerError(lastError)
_G_many1_2
"""))
def test_or(self):
"""
Test code generation for a sequence of alternatives.
"""
xy = self.builder._or([self.builder.exactly("x"),
self.builder.exactly("y")])
self.assertEqual(writePython(xy),
dd("""
def _G_or_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('y')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
_G_or_3
"""))
def test_singleOr(self):
"""
Test code generation for a sequence of alternatives.
"""
x1 = self.builder._or([self.builder.exactly("x")])
x = self.builder.exactly("x")
self.assertEqual(writePython(x), writePython(x1))
def test_optional(self):
"""
Test code generation for optional terms.
"""
x = self.builder.optional(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_optional_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_optional_2():
return (None, self.input.nullError())
_G_or_3, lastError = self._or([_G_optional_1, _G_optional_2])
self.considerError(lastError)
_G_or_3
"""))
def test_not(self):
"""
Test code generation for negated terms.
"""
x = self.builder._not(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_not_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_not_2, lastError = self._not(_G_not_1)
self.considerError(lastError)
_G_not_2
"""))
def test_lookahead(self):
"""
Test code generation for lookahead expressions.
"""
x = self.builder.lookahead(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_lookahead_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_lookahead_2, lastError = self.lookahead(_G_lookahead_1)
self.considerError(lastError)
_G_lookahead_2
"""))
def test_sequence(self):
"""
Test generation of code for sequence patterns.
"""
x = self.builder.exactly("x")
y = self.builder.exactly("y")
z = self.builder.sequence([x, y])
self.assertEqual(writePython(z),
dd("""
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
_G_exactly_2, lastError = self.exactly('y')
self.considerError(lastError)
_G_exactly_2
"""))
def test_bind(self):
"""
Test code generation for variable assignment.
"""
x = self.builder.exactly("x")
b = self.builder.bind(x, "var")
self.assertEqual(writePython(b),
dd("""
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
_locals['var'] = _G_exactly_1
_locals['var']
"""))
def test_pred(self):
"""
Test code generation for predicate expressions.
"""
x = self.builder.pred(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_pred_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_pred_2, lastError = self.pred(_G_pred_1)
self.considerError(lastError)
_G_pred_2
"""))
def test_action(self):
"""
Test code generation for semantic actions.
"""
x = self.builder.action("doStuff()")
self.assertEqual(writePython(x),
dd("""
_G_python_1, lastError = eval('doStuff()', self.globals, _locals), None
self.considerError(lastError)
_G_python_1
"""))
def test_expr(self):
"""
Test code generation for semantic predicates.
"""
x = self.builder.expr("returnStuff()")
code = dd(
"""
_G_python_1, lastError = eval('returnStuff()', self.globals, _locals), None
self.considerError(lastError)
_G_python_1
""")
self.assertEqual(writePython(x), code)
def test_listpattern(self):
"""
Test code generation for list patterns.
"""
x = self.builder.listpattern(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_G_listpattern_2
"""))
def test_consumedby(self):
"""
Test code generation for consumed by
"""
x = self.builder.consumedby(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_consumed_by_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_consumed_by_2, lastError = self.consumed_by(_G_consumed_by_1)
self.considerError(lastError)
_G_consumed_by_2
"""))
def test_range(self):
"""
Test code generation for .. operator
"""
x = self.builder.consumedby(self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def _G_consumed_by_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_consumed_by_2, lastError = self.consumed_by(_G_consumed_by_1)
self.considerError(lastError)
_G_consumed_by_2
"""))
def test_interleave(self):
"""
Test code generation for && operator
"""
x = self.builder.interleave([['1', self.builder.exactly("x"), None], ['1', self.builder.exactly("y"), None]])
self.assertEqual(writePython(x),
dd("""
def _G_interleave_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_interleave_2():
_G_exactly_1, lastError = self.exactly('y')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_interleave_3, lastError = self._interleave(_locals, '1', _G_interleave_1, None, '1', _G_interleave_2, None)
self.considerError(lastError)
_G_interleave_3
"""))
def test_rule(self):
"""
Test generation of entire rules.
"""
x = self.builder.rule("foo", self.builder.exactly("x"))
self.assertEqual(writePython(x),
dd("""
def rule_foo(self):
_locals = {'self': self}
self.locals['foo'] = _locals
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
"""))
def test_grammar(self):
"""
Test generation of an entire grammar.
"""
r1 = self.builder.rule("foo", self.builder.exactly("x"))
r2 = self.builder.rule("baz", self.builder.exactly("y"))
x = self.builder.makeGrammar([r1, r2])
self.assertEqual(writePython(x),
dd("""
class BuilderTest(GrammarBase):
globals = globals()
def rule_foo(self):
_locals = {'self': self}
self.locals['foo'] = _locals
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def rule_baz(self):
_locals = {'self': self}
self.locals['baz'] = _locals
_G_exactly_1, lastError = self.exactly('y')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
"""))
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import itertools
import weakref
import six
@six.add_metaclass(abc.ABCMeta)
class Function(object):
"""
Abstract base class for template functions.
"""
def __init__(self, stack, fn_name, args):
"""
Initialise with a Stack, the function name and the arguments.
All functions take the form of a single-item map in JSON::
{ <fn_name> : <args> }
"""
super(Function, self).__init__()
self._stackref = weakref.ref(stack) if stack is not None else None
self.fn_name = fn_name
self.args = args
@property
def stack(self):
ref = self._stackref
if ref is None:
return None
stack = ref()
assert stack is not None, "Need a reference to the Stack object"
return stack
def validate(self):
"""
Validate arguments without resolving the function.
Function subclasses must override this method to validate their
args.
"""
validate(self.args)
@abc.abstractmethod
def result(self):
"""
Return the result of resolving the function.
Function subclasses must override this method to calculate their
results.
"""
return {self.fn_name: self.args}
def dependencies(self, path):
return dependencies(self.args, '.'.join([path, self.fn_name]))
def dep_attrs(self, resource_name):
return dep_attrs(self.args, resource_name)
def __reduce__(self):
"""
Return a representation of the function suitable for pickling.
This allows the copy module (which works by pickling and then
unpickling objects) to copy a template. Functions in the copy will
return to their original (JSON) form (i.e. a single-element map).
"""
return dict, ([(self.fn_name, self.args)],)
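    # For example, copy.deepcopy(fn) round-trips through __reduce__, so the
    # copy comes back as the plain single-element dict {fn_name: args} rather
    # than as a Function instance.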
def __repr__(self):
"""
Return a string representation of the function.
The representation includes the function name, arguments and result
(if available), as well as the name of the function class.
"""
try:
result = repr(self.result())
except (TypeError, ValueError):
result = '???'
fntype = type(self)
classname = '.'.join(filter(None,
(getattr(fntype,
attr,
'') for attr in ('__module__',
'__name__'))))
return '<%s {%s: %r} -> %s>' % (classname,
self.fn_name, self.args,
result)
def __eq__(self, other):
"""Compare the result of this function for equality."""
try:
result = self.result()
if isinstance(other, Function):
return result == other.result()
else:
return result == other
except (TypeError, ValueError):
return NotImplemented
def __ne__(self, other):
"""Compare the result of this function for inequality."""
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __hash__(self):
return id(self)
def resolve(snippet):
while isinstance(snippet, Function):
snippet = snippet.result()
if isinstance(snippet, collections.Mapping):
return dict((k, resolve(v)) for k, v in snippet.items())
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
return [resolve(v) for v in snippet]
return snippet
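# For example, given a (hypothetical) Function subclass whose result() is
# 'abc', resolve({'a': [fn, 'x']}) returns {'a': ['abc', 'x']}: Functions are
# replaced by their results while the surrounding structure is preserved.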
def validate(snippet):
if isinstance(snippet, Function):
snippet.validate()
elif isinstance(snippet, collections.Mapping):
for v in six.itervalues(snippet):
validate(v)
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
for v in snippet:
validate(v)
def dependencies(snippet, path=''):
"""
Return an iterator over Resource dependencies in a template snippet.
The snippet should be already parsed to insert Function objects where
appropriate.
"""
if isinstance(snippet, Function):
return snippet.dependencies(path)
elif isinstance(snippet, collections.Mapping):
def mkpath(key):
return '.'.join([path, six.text_type(key)])
deps = (dependencies(value,
mkpath(key)) for key, value in snippet.items())
return itertools.chain.from_iterable(deps)
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
def mkpath(idx):
return ''.join([path, '[%d]' % idx])
deps = (dependencies(value,
mkpath(i)) for i, value in enumerate(snippet))
return itertools.chain.from_iterable(deps)
else:
return []
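# Paths accumulate as the snippet is traversed: for a Function stored at
# snippet['resources'][2], its dependencies() is invoked with the path
# '.resources[2]' (mapping keys joined with '.', list indices as '[i]').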
def dep_attrs(snippet, resource_name):
"""
Return an iterator over dependent attributes for specified resource_name
in a template snippet.
The snippet should be already parsed to insert Function objects where
appropriate.
"""
if isinstance(snippet, Function):
return snippet.dep_attrs(resource_name)
elif isinstance(snippet, collections.Mapping):
        attrs = (dep_attrs(value, resource_name)
                 for value in six.itervalues(snippet))
return itertools.chain.from_iterable(attrs)
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
attrs = (dep_attrs(value, resource_name) for value in snippet)
return itertools.chain.from_iterable(attrs)
return []
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_lib
from tensorflow.python.util import deprecation
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
      weight associated with each `grid` value.
"""
with ops.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
[loc, scale]):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(loc.dtype.as_numpy_dtype)
probs = probs.astype(loc.dtype.as_numpy_dtype)
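    # Raw Gauss-Hermite weights sum to sqrt(pi) rather than 1, so rescale
    # them into a proper probability vector before converting to a Tensor.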
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., array_ops.newaxis]
+ np.sqrt(2.) * scale[..., array_ops.newaxis] * grid)
return grid, probs
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
      weight associated with each `grid` value.
"""
with ops.name_scope(name, "quadrature_scheme_lognormal_quantiles",
[loc, scale]):
# Create a LogNormal distribution.
dist = transformed_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=loc, scale=scale),
bijector=Exp(),
validate_args=validate_args)
batch_ndims = dist.batch_shape.ndims
if batch_ndims is None:
batch_ndims = array_ops.shape(dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
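    # Worked example with quadrature_size=2: the inner edges are
    # [0.25, 0.5, 0.75], so grid = [(q25 + q50) / 2, (q50 + q75) / 2] where
    # q_p is the LogNormal p-quantile, and probs below is simply [0.5, 0.5].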
# Set shape hints.
grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
parameterized by `loc`, `scale` and the `prob` vector is
`[1. / quadrature_size]*quadrature_size`.
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
based on `LogNormal` quantiles) we can redefine the distribution to be a
parameter-less convex combination of `deg` different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
: d=0, ..., deg-1 }
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
  # Create two batches of PoissonLogNormalQuadratureCompounds, one with
  # prior `loc = 0.` and another with `loc = -0.5`. In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_size=10,
validate_args=True)
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
"""Constructs the PoissonLogNormalQuadratureCompound`.
Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
correspondence with the returned `grid`. (I.e., broadcasting is only
partially supported.)
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
      quadrature_fn: Python callable taking `loc`, `scale`,
        `quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the LogNormal grid and corresponding normalized weight.
        Default value: `quadrature_scheme_lognormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `quadrature_grid` and `quadrature_probs` have different base
`dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
if loc is not None:
loc = ops.convert_to_tensor(loc, name="loc")
if scale is not None:
scale = ops.convert_to_tensor(
scale, dtype=None if loc is None else loc.dtype, name="scale")
self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
loc, scale, quadrature_size, validate_args))
dt = self._quadrature_grid.dtype
if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
"probs dtype ({}).".format(
dt.name, self._quadrature_probs.dtype.name))
self._distribution = poisson_lib.Poisson(
log_rate=self._quadrature_grid,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._loc = loc
self._scale = scale
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dt,
reparameterization_type=distribution_lib.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
"""Distribution which randomly selects a Poisson with quadrature param."""
return self._mixture_distribution
@property
def distribution(self):
"""Base Poisson parameterized by a quadrature grid."""
return self._distribution
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
array_ops.shape(self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
# We need to "sample extra" from the mixture distribution if it doesn't
# already specify a probs vector for each batch coordinate.
# We only support this kind of reduced broadcasting, i.e., there is exactly
# one probs vector for all batch dims or one for each.
ids = self._mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.mixture_distribution.is_scalar_batch(),
[batch_size],
np.int32([]))),
seed=distribution_util.gen_new_seed(
seed, "poisson_lognormal_quadrature_compound"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `quadrature_size` for `batch_size` number of times.
offset = math_ops.range(start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids += offset
rate = array_ops.gather(
array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
rate = array_ops.reshape(
rate, shape=concat_vectors([n], self.batch_shape_tensor()))
return random_ops.random_poisson(
lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _log_prob(self, x):
return math_ops.reduce_logsumexp(
(self.mixture_distribution.logits
+ self.distribution.log_prob(x[..., array_ops.newaxis])),
axis=-1)
def _mean(self):
return math_ops.exp(
math_ops.reduce_logsumexp(
self.mixture_distribution.logits + self.distribution.log_rate,
axis=-1))
def _variance(self):
return math_ops.exp(self._log_variance())
def _stddev(self):
return math_ops.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](distribution)
# V ~ mixture_distribution
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
v = array_ops.stack([
# log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
self.distribution.log_rate,
# log((Mean[d] - Mean)**2)
2. * math_ops.log(
math_ops.abs(self.distribution.mean()
- self._mean()[..., array_ops.newaxis])),
], axis=-1)
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits[..., array_ops.newaxis] + v,
axis=[-2, -1])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
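# Conceptually, concat_vectors([2], [3, 4]) yields the static list [2, 3, 4]
# when every argument has a statically known value; otherwise it falls back
# to a dynamic array_ops.concat along axis 0.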
|
|
from __future__ import absolute_import
import math
import os
import unittest
from pychron.core.geometry.affine import transform_point, itransform_point
from pychron.stage.maps.laser_stage_map import LaserStageMap
class StageMapTestCase(unittest.TestCase):
def setUp(self):
p = 'pychron/stage/tests/data/221-hole.txt'
if not os.path.isfile(p):
base = os.path.dirname(os.path.abspath(__file__))
p = os.path.join(base, 'data', '221-hole.txt')
self.sm = LaserStageMap(file_path=p)
def test_generate_interpolation(self):
sm = self.sm
h1 = sm.get_hole('1')
h3 = sm.get_hole('3')
h5 = sm.get_hole('5')
h1.corrected = True
h1.x_cor = 0
h1.y_cor = 0
h3.corrected = True
h3.x_cor = 2
h3.y_cor = 4
h5.corrected = True
h5.x_cor = 4
h5.y_cor = 8
sm.generate_row_interpolated_corrections(dump_corrections=False)
h2 = sm.get_hole('2')
h4 = sm.get_hole('4')
self.assertTupleEqual((1, 2, 3, 6),
(h2.x_cor, h2.y_cor,
h4.x_cor, h4.y_cor,))
def test_generate_interpolation_no_mid(self):
sm = self.sm
h1 = sm.get_hole('1')
h5 = sm.get_hole('5')
h1.corrected = True
h1.x_cor = 0
h1.y_cor = 0
h5.corrected = True
h5.x_cor = 4
h5.y_cor = 8
sm.generate_row_interpolated_corrections(dump_corrections=False)
h2 = sm.get_hole('2')
h4 = sm.get_hole('4')
self.assertTupleEqual((1, 2, 3, 6),
(h2.x_cor, h2.y_cor,
h4.x_cor, h4.y_cor,))
def test_generate_interpolation_no_end(self):
sm = self.sm
h1 = sm.get_hole('1')
h3 = sm.get_hole('3')
h1.corrected = True
h1.x_cor = 0
h1.y_cor = 0
h3.corrected = True
h3.x_cor = 2
h3.y_cor = 4
sm.generate_row_interpolated_corrections(dump_corrections=False)
h2 = sm.get_hole('2')
h4 = sm.get_hole('4')
self.assertTupleEqual((1, 2, 3, 6),
(h2.x_cor, h2.y_cor,
h4.x_cor, h4.y_cor,))
def test_generate_interpolation_no_start(self):
sm = self.sm
h3 = sm.get_hole('3')
h5 = sm.get_hole('5')
h3.corrected = True
h3.x_cor = 2
h3.y_cor = 4
h5.corrected = True
h5.x_cor = 4
h5.y_cor = 8
sm.generate_row_interpolated_corrections(dump_corrections=False)
h2 = sm.get_hole('2')
h4 = sm.get_hole('4')
self.assertTupleEqual((1, 2, 3, 6),
(h2.x_cor, h2.y_cor,
h4.x_cor, h4.y_cor,))
def test_generate_interpolation_no_points(self):
sm = self.sm
sm.generate_row_interpolated_corrections(dump_corrections=False)
h2 = sm.get_hole('2')
h4 = sm.get_hole('4')
self.assertTupleEqual((0, 0, 0, 0),
(h2.x_cor, h2.y_cor,
h4.x_cor, h4.y_cor,))
def test_row_ends(self):
holes = list(self.sm.row_ends())
hs = [hi.id for hi in holes[:6]]
self.assertListEqual(['1', '5', '6', '14', '15', '25'], hs)
def test_row_ends2(self):
holes = list(self.sm.row_ends(alternate=True))
hs = [hi.id for hi in holes[:6]]
self.assertListEqual(['1', '5', '14', '6', '15', '25'], hs)
def test_circumference_holes(self):
holes = list(self.sm.circumference_holes())
hs = [hi.id for hi in holes[:6]]
self.assertListEqual(['1', '6', '15', '26', '39', '54'], hs)
def test_circumference_holes2(self):
holes = list(self.sm.circumference_holes())
hs = [hi.id for hi in holes[-6:]]
self.assertListEqual(['68', '53', '38', '25', '14', '5'], hs)
def test_mid_holes(self):
holes = list(self.sm.mid_holes())
hs = [hi.id for hi in holes[:6]]
self.assertListEqual(['3', '10', '20', '32', '46', '61'], hs)
class TransformTestCase(unittest.TestCase):
def test_itransform_point_ntran_nrot(self):
cpos = 0, 0
rot = 0
pt = 1, 1
tpt = itransform_point(pt, cpos, rot, 1)
        self.assertAlmostEqual(pt[0], tpt[0])
        self.assertAlmostEqual(pt[1], tpt[1])
def test_itransform_point_nrot(self):
cpos = 1, 0
rot = 0
pt = 2, 1
tpt = itransform_point(pt, cpos, rot, 1)
self.assertTupleEqual((1.0, 1.0), tpt)
def test_itransform_point(self):
cpos = 1, 0
rot = 90
pt = 1, 1
tpt = itransform_point(pt, cpos, rot, 1)
self.assertAlmostEqual(1.0, tpt[0])
self.assertAlmostEqual(0, tpt[1])
def test_transform_point_ntran_nrot(self):
cpos = 0, 0
rot = 0
pt = 1, 1
tpt = transform_point(pt, cpos, rot, 1)
        self.assertAlmostEqual(pt[0], tpt[0])
        self.assertAlmostEqual(pt[1], tpt[1])
def test_transform_point_nrot(self):
cpos = 1, 0
rot = 0
pt = 1, 1
tpt = transform_point(pt, cpos, rot, 1)
self.assertTupleEqual((2.0, 1.0), tpt)
def test_transform_point_ntrans(self):
cpos = 0, 0
rot = 90
pt = 1, 0
tpt = transform_point(pt, cpos, rot, 1)
self.assertAlmostEqual(0.0, tpt[0])
self.assertAlmostEqual(1.0, tpt[1])
def test_transform_point(self):
cpos = 1, 0
rot = 90
pt = 1, 0
tpt = transform_point(pt, cpos, rot, 1)
self.assertAlmostEqual(1.0, tpt[0])
self.assertAlmostEqual(1.0, tpt[1])
def test_transform_point2(self):
cpos = 0, 0
rot = 45
pt = 1, 0
tpt = transform_point(pt, cpos, rot, 1)
r2 = 0.5 ** 0.5
self.assertAlmostEqual(r2, tpt[0])
self.assertAlmostEqual(r2, tpt[1])
def test_transform_point3(self):
cpos = 1.5, -1.5
rot = 45
pt = 1, 0
tpt = transform_point(pt, cpos, rot, 1)
r2 = 0.5 ** 0.5
self.assertAlmostEqual(1.5 + r2, tpt[0])
self.assertAlmostEqual(-1.5 + r2, tpt[1])
def test_transform_point4(self):
cpos = -2.1, -0.3
rot = 1
t = math.radians(rot)
pt = 1, 0
tpt = transform_point(pt, cpos, rot, 1)
x = pt[0] * math.cos(t) - pt[1] * math.sin(t)
y = pt[0] * math.sin(t) + pt[1] * math.cos(t)
self.assertAlmostEqual(-2.1 + x, tpt[0])
self.assertAlmostEqual(-0.3 + y, tpt[1])
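# Taken together, these cases pin down the convention used by transform_point:
# rotate pt about the origin by rot degrees, then translate by cpos (scale 1
# throughout); itransform_point applies the inverse mapping.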
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # to enable import io from stdlib
from collections import defaultdict, deque
import errno
from functools import wraps, partial
import io
import logging
import os
import sys
from threading import Thread, Event, RLock
import time
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty # noqa
import six
from six.moves import range
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut
from cassandra.marshal import int32_pack, header_unpack, v3_header_unpack, int32_unpack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, decode_response,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException)
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
# Cassandra writes the uncompressed message length in big endian order,
# but the lz4 lib requires little endian order, so we wrap these
# functions to handle that
def lz4_compress(byts):
# write length in big-endian instead of little-endian
return int32_pack(len(byts)) + lz4.compress(byts)[4:]
def lz4_decompress(byts):
# flip from big-endian to little-endian
return lz4.decompress(byts[3::-1] + byts[4:])
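    # e.g. a big-endian length prefix b'\x00\x00\x00\x10' becomes the
    # little-endian b'\x10\x00\x00\x00' that the lz4 block format expects.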
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
    def decompress(byts):
        if byts == b'\x00':
            return b''
        return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
class ConnectionException(Exception):
"""
An unrecoverable error was hit when attempting to use a connection,
or the connection was already closed or defunct.
"""
def __init__(self, message, host=None):
Exception.__init__(self, message)
self.host = host
class ConnectionShutdown(ConnectionException):
"""
Raised when a connection has been marked as defunct or has been closed.
"""
pass
class ConnectionBusy(Exception):
"""
An attempt was made to send a message through a :class:`.Connection` that
was already at the max number of in-flight operations.
"""
pass
class ProtocolError(Exception):
"""
Communication did not match the protocol that this driver expects.
"""
pass
def defunct_on_error(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as exc:
self.defunct(exc)
return wrapper
DEFAULT_CQL_VERSION = '3.0.0'
class Connection(object):
in_buffer_size = 4096
out_buffer_size = 4096
cql_version = None
protocol_version = 2
keyspace = None
compression = True
compressor = None
decompressor = None
ssl_options = None
last_error = None
# The current number of operations that are in flight. More precisely,
# the number of request IDs that are currently in use.
in_flight = 0
# A set of available request IDs. When using the v3 protocol or higher,
# this will not initially include all request IDs in order to save memory,
# but the set will grow if it is exhausted.
request_ids = None
# Tracks the highest used request ID in order to help with growing the
# request_ids set
highest_request_id = 0
is_defunct = False
is_closed = False
lock = None
user_type_map = None
msg_received = False
is_control_connection = False
    _iobuf = None
    # Number of bytes required in _iobuf before the current message can be
    # processed; 0 means a complete header has not yet been read.
    _total_reqd_bytes = 0
def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
ssl_options=None, sockopts=None, compression=True,
cql_version=None, protocol_version=2, is_control_connection=False,
user_type_map=None):
self.host = host
self.port = port
self.authenticator = authenticator
self.ssl_options = ssl_options
self.sockopts = sockopts
self.compression = compression
self.cql_version = cql_version
self.protocol_version = protocol_version
self.is_control_connection = is_control_connection
self.user_type_map = user_type_map
        self._push_watchers = defaultdict(set)
        self._callbacks = {}
        self.connected_event = Event()
        self._iobuf = io.BytesIO()
if protocol_version >= 3:
self._header_unpack = v3_header_unpack
self._header_length = 5
self.max_request_id = (2 ** 15) - 1
# Don't fill the deque with 2**15 items right away. Start with 300 and add
# more if needed.
self.request_ids = deque(range(300))
self.highest_request_id = 299
else:
self._header_unpack = header_unpack
self._header_length = 4
self.max_request_id = (2 ** 7) - 1
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
# 0 8 16 24 32 40
# +---------+---------+---------+---------+---------+
# | version | flags | stream | opcode |
# +---------+---------+---------+---------+---------+
# | length |
# +---------+---------+---------+---------+
# | |
# . ... body ... .
# . .
# . .
# +----------------------------------------
self._full_header_length = self._header_length + 4
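        # e.g. protocol v1/v2: 4-byte header + 4-byte length = 8 bytes precede
        # the body; v3+: 5-byte header (2-byte stream id) + 4-byte length = 9.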
self.lock = RLock()
@classmethod
    def initialize_reactor(cls):
"""
Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.
"""
pass
@classmethod
    def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
from the parent process.
"""
pass
@classmethod
def factory(cls, host, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
conn = cls(host, *args, **kwargs)
conn.connected_event.wait(timeout)
if conn.last_error:
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
def close(self):
raise NotImplementedError()
def defunct(self, exc):
with self.lock:
if self.is_defunct or self.is_closed:
return
self.is_defunct = True
log.debug("Defuncting connection (%s) to %s:",
id(self), self.host, exc_info=exc)
self.last_error = exc
self.close()
self.error_all_callbacks(exc)
self.connected_event.set()
return exc
def error_all_callbacks(self, exc):
with self.lock:
callbacks = self._callbacks
self._callbacks = {}
new_exc = ConnectionShutdown(str(exc))
for cb in callbacks.values():
try:
cb(new_exc)
except Exception:
log.warning("Ignoring unhandled exception while erroring callbacks for a "
"failed connection (%s) to host %s:",
id(self), self.host, exc_info=True)
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except IndexError:
self.highest_request_id += 1
# in_flight checks should guarantee this
assert self.highest_request_id <= self.max_request_id
return self.highest_request_id
def handle_pushed(self, response):
log.debug("Message pushed from server: %r", response)
for cb in self._push_watchers.get(response.event_type, []):
try:
cb(response.event_args)
except Exception:
log.exception("Pushed event handler errored, ignoring:")
def send_msg(self, msg, request_id, cb):
if self.is_defunct:
raise ConnectionShutdown("Connection to %s is defunct" % self.host)
elif self.is_closed:
raise ConnectionShutdown("Connection to %s is closed" % self.host)
self._callbacks[request_id] = cb
self.push(msg.to_binary(request_id, self.protocol_version, compression=self.compressor))
return request_id
def wait_for_response(self, msg, timeout=None):
return self.wait_for_responses(msg, timeout=timeout)[0]
def wait_for_responses(self, *msgs, **kwargs):
"""
Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.
"""
if self.is_closed or self.is_defunct:
raise ConnectionShutdown("Connection %s is already closed" % (self, ))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
# busy wait for sufficient space on the connection
messages_sent = 0
while True:
needed = len(msgs) - messages_sent
with self.lock:
available = min(needed, self.max_request_id - self.in_flight)
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for i, request_id in enumerate(request_ids):
self.send_msg(msgs[messages_sent + i],
request_id,
partial(waiter.got_response, index=messages_sent + i))
messages_sent += available
if messages_sent == len(msgs):
break
else:
if timeout is not None:
timeout -= 0.01
if timeout <= 0.0:
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
def register_watcher(self, event_type, callback):
raise NotImplementedError()
def register_watchers(self, type_callback_dict):
raise NotImplementedError()
def control_conn_disposed(self):
self.is_control_connection = False
self._push_watchers = {}
def process_io_buffer(self):
while True:
pos = self._iobuf.tell()
if pos < self._full_header_length or (self._total_reqd_bytes > 0 and pos < self._total_reqd_bytes):
# we don't have a complete header yet or we
# already saw a header, but we don't have a
# complete message yet
return
else:
# have enough for header, read body len from header
self._iobuf.seek(self._header_length)
body_len = int32_unpack(self._iobuf.read(4))
# seek to end to get length of current buffer
self._iobuf.seek(0, os.SEEK_END)
pos = self._iobuf.tell()
if pos >= body_len + self._full_header_length:
# read message header and body
self._iobuf.seek(0)
msg = self._iobuf.read(self._full_header_length + body_len)
# leave leftover in current buffer
leftover = self._iobuf.read()
self._iobuf = io.BytesIO()
self._iobuf.write(leftover)
self._total_reqd_bytes = 0
self.process_msg(msg, body_len)
else:
self._total_reqd_bytes = body_len + self._full_header_length
return
@defunct_on_error
def process_msg(self, msg, body_len):
version, flags, stream_id, opcode = self._header_unpack(msg[:self._header_length])
if stream_id < 0:
callback = None
else:
callback = self._callbacks.pop(stream_id, None)
with self.lock:
self.request_ids.append(stream_id)
self.msg_received = True
body = None
try:
# check that the protocol version is supported
given_version = version & PROTOCOL_VERSION_MASK
if given_version != self.protocol_version:
msg = "Server protocol version (%d) does not match the specified driver protocol version (%d). " +\
"Consider setting Cluster.protocol_version to %d."
raise ProtocolError(msg % (given_version, self.protocol_version, given_version))
# check that the header direction is correct
if version & HEADER_DIRECTION_MASK != HEADER_DIRECTION_TO_CLIENT:
raise ProtocolError(
"Header direction in response is incorrect; opcode %04x, stream id %r"
% (opcode, stream_id))
if body_len > 0:
body = msg[self._full_header_length:]
elif body_len == 0:
body = six.binary_type()
else:
raise ProtocolError("Got negative body length: %r" % body_len)
response = decode_response(given_version, self.user_type_map, stream_id,
flags, opcode, body, self.decompressor)
except Exception as exc:
log.exception("Error decoding response from Cassandra. "
"opcode: %04x; message contents: %r", opcode, msg)
if callback is not None:
callback(exc)
self.defunct(exc)
return
try:
if stream_id >= 0:
if isinstance(response, ProtocolException):
log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg())
self.defunct(response)
if callback is not None:
callback(response)
else:
self.handle_pushed(response)
except Exception:
log.exception("Callback handler errored, ignoring:")
@defunct_on_error
def _send_options_message(self):
if self.cql_version is None and (not self.compression or not locally_supported_compressions):
log.debug("Not sending options message for new connection(%s) to %s "
"because compression is disabled and a cql version was not "
"specified", id(self), self.host)
self._compressor = None
self.cql_version = DEFAULT_CQL_VERSION
self._send_startup_message()
else:
log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host)
self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response)
@defunct_on_error
def _handle_options_response(self, options_response):
if self.is_defunct:
return
if not isinstance(options_response, SupportedMessage):
if isinstance(options_response, ConnectionException):
raise options_response
else:
log.error("Did not get expected SupportedMessage response; "
"instead, got: %s", options_response)
raise ConnectionException("Did not get expected SupportedMessage "
"response; instead, got: %s"
% (options_response,))
log.debug("Received options response on new connection (%s) from %s",
id(self), self.host)
supported_cql_versions = options_response.cql_versions
remote_supported_compressions = options_response.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in supported_cql_versions:
raise ProtocolError(
"cql_version %r is not supported by remote (w/ native "
"protocol). Supported versions: %r"
% (self.cql_version, supported_cql_versions))
else:
self.cql_version = supported_cql_versions[0]
self._compressor = None
compression_type = None
if self.compression:
overlap = (set(locally_supported_compressions.keys()) &
set(remote_supported_compressions))
if len(overlap) == 0:
log.debug("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r",
locally_supported_compressions.keys(),
remote_supported_compressions)
else:
compression_type = None
if isinstance(self.compression, six.string_types):
# the user picked a specific compression type ('snappy' or 'lz4')
if self.compression not in remote_supported_compressions:
raise ProtocolError(
"The requested compression type (%s) is not supported by the Cassandra server at %s"
% (self.compression, self.host))
compression_type = self.compression
else:
# our locally supported compressions are ordered to prefer
# lz4, if available
for k in locally_supported_compressions.keys():
if k in overlap:
compression_type = k
break
# set the decompressor here, but set the compressor only after
# a successful Ready message
self._compressor, self.decompressor = \
locally_supported_compressions[compression_type]
self._send_startup_message(compression_type)
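# The negotiation above, restated as an isolated sketch (names are
# hypothetical): honor an explicit user choice if the server supports it,
# otherwise take the first locally preferred algorithm both sides share.
#
#   def choose_compression(requested, local_ordered, remote):
#       if isinstance(requested, str):
#           if requested not in remote:
#               raise ValueError("server does not support %s" % requested)
#           return requested
#       for name in local_ordered:  # ordered to prefer lz4
#           if name in remote:
#               return name
#       return None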
@defunct_on_error
def _send_startup_message(self, compression=None):
log.debug("Sending StartupMessage on %s", self)
opts = {}
if compression:
opts['COMPRESSION'] = compression
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response)
log.debug("Sent StartupMessage on %s", self)
@defunct_on_error
def _handle_startup_response(self, startup_response, did_authenticate=False):
if self.is_defunct:
return
if isinstance(startup_response, ReadyMessage):
log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(startup_response, AuthenticateMessage):
log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.authenticator)
if self.authenticator is None:
raise AuthenticationFailed('Remote end requires authentication.')
self.authenticator_class = startup_response.authenticator
if isinstance(self.authenticator, dict):
log.debug("Sending credentials-based auth response on %s", self)
cm = CredentialsMessage(creds=self.authenticator)
callback = partial(self._handle_startup_response, did_authenticate=True)
self.send_msg(cm, self.get_request_id(), cb=callback)
else:
log.debug("Sending SASL-based auth response on %s", self)
initial_response = self.authenticator.initial_response()
initial_response = "" if initial_response is None else initial_response
self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response)
elif isinstance(startup_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.summary_msg())
if did_authenticate:
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, startup_response.summary_msg()))
else:
raise ConnectionException(
"Failed to initialize new connection to %s: %s"
% (self.host, startup_response.summary_msg()))
elif isinstance(startup_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the startup handshake", (self.host))
raise startup_response
else:
msg = "Unexpected response during Connection setup: %r"
log.error(msg, startup_response)
raise ProtocolError(msg % (startup_response,))
@defunct_on_error
def _handle_auth_response(self, auth_response):
if self.is_defunct:
return
if isinstance(auth_response, AuthSuccessMessage):
log.debug("Connection %s successfully authenticated", self)
self.authenticator.on_authentication_success(auth_response.token)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(auth_response, AuthChallengeMessage):
response = self.authenticator.evaluate_challenge(auth_response.challenge)
msg = AuthResponseMessage("" if response is None else response)
log.debug("Responding to auth challenge on %s", self)
self.send_msg(msg, self.get_request_id(), self._handle_auth_response)
elif isinstance(auth_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, auth_response.summary_msg())
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, auth_response.summary_msg()))
elif isinstance(auth_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the authentication process", self.host)
raise auth_response
else:
msg = "Unexpected response during Connection authentication to %s: %r"
log.error(msg, self.host, auth_response)
raise ProtocolError(msg % (self.host, auth_response))
def set_keyspace_blocking(self, keyspace):
if not keyspace or keyspace == self.keyspace:
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
try:
result = self.wait_for_response(query)
except InvalidRequestException as ire:
# the keyspace probably doesn't exist
raise ire.to_exception()
except Exception as exc:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (exc,), self.host)
self.defunct(conn_exc)
raise conn_exc
if isinstance(result, ResultMessage):
self.keyspace = keyspace
else:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)
self.defunct(conn_exc)
raise conn_exc
def set_keyspace_async(self, keyspace, callback):
"""
Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
"""
if not keyspace or keyspace == self.keyspace:
callback(self, None)
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)))
request_id = None
# we use a busy wait on the lock here because:
# - we'll only spin if the connection is at max capacity, which is very
# unlikely for a set_keyspace call
# - it allows us to avoid signaling a condition every time a request completes
while True:
with self.lock:
if self.in_flight < self.max_request_id:
request_id = self.get_request_id()
self.in_flight += 1
break
time.sleep(0.001)
self.send_msg(query, request_id, process_result)
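# Example (hypothetical callback): switching keyspaces without blocking the
# event loop thread, as the docstring above recommends.
#
#   def on_keyspace_set(connection, error):
#       if error is not None:
#           log.error("failed to set keyspace: %r", error)
#
#   conn.set_keyspace_async("my_keyspace", on_keyspace_set)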
@property
def is_idle(self):
return not self.msg_received
def reset_idle(self):
self.msg_received = False
def __str__(self):
status = ""
if self.is_defunct:
status = " (defunct)"
elif self.is_closed:
status = " (closed)"
return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status)
__repr__ = __str__
class ResponseWaiter(object):
def __init__(self, connection, num_responses, fail_on_error):
self.connection = connection
self.pending = num_responses
self.fail_on_error = fail_on_error
self.error = None
self.responses = [None] * num_responses
self.event = Event()
def got_response(self, response, index):
with self.connection.lock:
self.connection.in_flight -= 1
if isinstance(response, Exception):
if hasattr(response, 'to_exception'):
response = response.to_exception()
if self.fail_on_error:
self.error = response
self.event.set()
else:
self.responses[index] = (False, response)
else:
if not self.fail_on_error:
self.responses[index] = (True, response)
else:
self.responses[index] = response
self.pending -= 1
if not self.pending:
self.event.set()
def deliver(self, timeout=None):
"""
If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.
"""
self.event.wait(timeout)
if self.error:
raise self.error
elif not self.event.is_set():
raise OperationTimedOut()
else:
return self.responses
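# Usage sketch (connection and messages are assumptions; callers are also
# expected to reserve in_flight slots, as wait_for_responses does): with the
# default fail_on_error=True, deliver() returns plain responses or raises the
# first failure; with fail_on_error=False it returns (success, response) pairs.
#
#   waiter = ResponseWaiter(conn, 2, fail_on_error=False)
#   conn.send_msg(msg1, conn.get_request_id(),
#                 partial(waiter.got_response, index=0))
#   conn.send_msg(msg2, conn.get_request_id(),
#                 partial(waiter.got_response, index=1))
#   for success, response in waiter.deliver(timeout=5.0):
#       if not success:
#           log.warning("request failed: %r", response)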
class HeartbeatFuture(object):
def __init__(self, connection, owner):
self._exception = None
self._event = Event()
self.connection = connection
self.owner = owner
log.debug("Sending options message heartbeat on idle connection (%s) %s",
id(connection), connection.host)
with connection.lock:
if connection.in_flight < connection.max_request_id:
connection.in_flight += 1
connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
else:
self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold")
self._event.set()
def wait(self, timeout):
self._event.wait(timeout)
if self._event.is_set():
if self._exception:
raise self._exception
else:
raise OperationTimedOut()
def _options_callback(self, response):
if not isinstance(response, SupportedMessage):
if isinstance(response, ConnectionException):
self._exception = response
else:
self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s"
% (response,))
log.debug("Received options response on connection (%s) from %s",
id(self.connection), self.connection.host)
self._event.set()
class ConnectionHeartbeat(Thread):
def __init__(self, interval_sec, get_connection_holders):
Thread.__init__(self, name="Connection heartbeat")
self._interval = interval_sec
self._get_connection_holders = get_connection_holders
self._shutdown_event = Event()
self.daemon = True
self.start()
class ShutdownException(Exception):
pass
def run(self):
self._shutdown_event.wait(self._interval)
while not self._shutdown_event.is_set():
start_time = time.time()
futures = []
failed_connections = []
try:
for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]:
for connection in connections:
self._raise_if_stopped()
if not (connection.is_defunct or connection.is_closed):
if connection.is_idle:
try:
futures.append(HeartbeatFuture(connection, owner))
except Exception:
log.warning("Failed sending heartbeat message on connection (%s) to %s",
id(connection), connection.host, exc_info=True)
failed_connections.append((connection, owner))
else:
connection.reset_idle()
else:
# make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()
for f in futures:
self._raise_if_stopped()
connection = f.connection
try:
f.wait(self._interval)
# TODO: move this, along with connection locks in pool, down into Connection
with connection.lock:
connection.in_flight -= 1
connection.reset_idle()
except Exception:
log.warning("Heartbeat failed for connection (%s) to %s",
id(connection), connection.host, exc_info=True)
failed_connections.append((f.connection, f.owner))
for connection, owner in failed_connections:
self._raise_if_stopped()
connection.defunct(Exception('Connection heartbeat failure'))
owner.return_connection(connection)
except self.ShutdownException:
pass
except Exception:
log.error("Failed connection heartbeat", exc_info=True)
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
def stop(self):
self._shutdown_event.set()
self.join()
def _raise_if_stopped(self):
if self._shutdown_event.is_set():
raise self.ShutdownException()
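# Illustrative wiring (owner type and names are hypothetical): a pool owner
# starts one heartbeat thread with its interval and a callable returning the
# current connection holders, and stops it on shutdown.
#
#   heartbeat = ConnectionHeartbeat(30.0, cluster.get_connection_holders)
#   # ... run normally; idle connections get an OPTIONS probe each cycle
#   heartbeat.stop()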
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
from pyxb.utils import unicode, xmlre
import re
import unittest
class TestXMLRE (unittest.TestCase):
def assertMatches(self, xml_pattern, value):
'''Helper function to assert a value matches an XSD regexp pattern.'''
py_pattern = xmlre.XMLToPython(xml_pattern)
compiled = re.compile(py_pattern)
mo = compiled.match(value)
self.assertTrue(mo is not None, 'XML re %r Python %r should match %r' % (xml_pattern, py_pattern, value))
def assertNoMatch(self, xml_pattern, value):
'''Helper function to assert a value does not match an XSD regexp
pattern.'''
py_pattern = xmlre.XMLToPython(xml_pattern)
compiled = re.compile(py_pattern)
mo = compiled.match(value)
self.assertTrue(mo is None, 'XML re %r Python %r should not match %r' % (xml_pattern, py_pattern, value))
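# For reference, the conversion these helpers exercise can be used directly;
# XMLToPython anchors the XSD pattern, so re.match behaves like whole-string
# XSD matching (a small sketch, not part of the test suite):
#
#   py_pattern = xmlre.XMLToPython(r'\d{3}')
#   assert re.match(py_pattern, '123') is not None
#   assert re.match(py_pattern, '1234') is None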
def testRangeErrors (self):
self.assertTrue(xmlre.MaybeMatchCharacterClass('', 1) is None)
def testWildcardEscape (self):
(charset, position) = xmlre.MaybeMatchCharacterClass('.', 0)
self.assertEqual(charset, unicode.WildcardEsc)
self.assertEqual(position, 1)
def testSingleCharEscapes (self):
# 17 chars recognized as escapes
self.assertEqual(len(unicode.SingleCharEsc), 17)
(charset, position) = xmlre.MaybeMatchCharacterClass(r'\t', 0)
self.assertEqual(charset.asTuples(), [ (9, 9) ])
self.assertEqual(2, position)
(charset, position) = xmlre.MaybeMatchCharacterClass(r'\?', 0)
self.assertEqual(charset.asTuples(), [ (ord('?'), ord('?')) ])
self.assertEqual(2, position)
(charset, position) = xmlre.MaybeMatchCharacterClass(r'\\', 0)
self.assertEqual(charset.asTuples(), [ (ord('\\'), ord('\\')) ])
self.assertEqual(2, position)
def testMultiCharEscapes (self):
# 5*2 chars recognized as escapes
self.assertEqual(len(unicode.MultiCharEsc), 10)
(charset, position) = xmlre.MaybeMatchCharacterClass(r'\s', 0)
self.assertEqual(charset.asTuples(), [ (9, 10), (13, 13), (32, 32) ])
self.assertEqual(2, position)
def testMatchCharProperty (self):
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassEsc, r"\pL", 0)
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassEsc, r"\p{L", 0)
text = r"\p{L}"
(charset, position) = xmlre._MatchCharClassEsc(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, unicode.PropertyMap['L'])
text = "\p{IsCyrillic}"
(charset, position) = xmlre._MatchCharClassEsc(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, unicode.BlockMap['Cyrillic'])
def testCharProperty (self):
text = r'\p{D}'
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, text, 0)
text = r'\P{D}'
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, text, 0)
text = r'\p{N}'
(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, unicode.PropertyMap['N'])
text = r'\P{N}'
(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset.negate(), unicode.PropertyMap['N'])
text = r'\p{Sm}'
(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, unicode.PropertyMap['Sm'])
def testCharBlock (self):
text = r'\p{IsArrows}'
(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, unicode.BlockMap['Arrows'])
text = r'\P{IsArrows}'
(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset.negate(), unicode.BlockMap['Arrows'])
text = r'\p{IsWelsh}'
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, text, 0)
text = r'\P{IsWelsh}'
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, text, 0)
def testCharGroup (self):
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, '[]', 0)
self.assertRaises(xmlre.RegularExpressionError, xmlre.MaybeMatchCharacterClass, '[A--]', 0)
text = r'[A-Z]'
#(charset, position) = xmlre.MaybeMatchCharacterClass(text, 0)
#self.assertEqual(position, len(text))
#self.assertEqual(charset, unicode.CodePointSet((ord('A'), ord('Z'))))
def testCharOrSCE (self):
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassEsc, '[', 0)
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassEsc, ']', 0)
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassEsc, '-', 0)
(charset, position) = xmlre._MatchCharClassEsc(r'\t', 0)
self.assertEqual(2, position)
self.assertEqual(unicode.CodePointSet("\t"), charset)
def testMatchPosCharGroup (self):
text = 'A]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 1)
self.assertEqual(charset, unicode.CodePointSet(ord('A')))
text = r'\n]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 2)
self.assertEqual(charset, unicode.CodePointSet(10))
text = r'-]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 1)
self.assertEqual(charset, unicode.CodePointSet(ord('-')))
text = 'A-Z]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 3)
self.assertEqual(charset, unicode.CodePointSet((ord('A'), ord('Z'))))
text = r'\t-\r]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 5)
self.assertEqual(charset, unicode.CodePointSet((9, 13)))
text = r'\t-A]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 4)
self.assertEqual(charset, unicode.CodePointSet((9, ord('A'))))
text = r'Z-\]]'
(charset, has_sub, position) = xmlre._MatchPosCharGroup(text, 0)
self.assertEqual(position, 4)
self.assertEqual(charset, unicode.CodePointSet((ord('Z'), ord(']'))))
text = 'Z-A]'
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchPosCharGroup, text, 0)
def testMatchCharClassExpr (self):
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassExpr, 'missing open', 0)
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassExpr, '[missing close', 0)
first_five = unicode.CodePointSet( (ord('A'), ord('E')) )
text = r'[ABCDE]'
(charset, position) = xmlre._MatchCharClassExpr(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, first_five)
text = r'[^ABCDE]'
(charset, position) = xmlre._MatchCharClassExpr(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset.negate(), first_five)
text = r'[A-Z-[GHI]]'
expected = unicode.CodePointSet( (ord('A'), ord('Z')) )
expected.subtract( (ord('G'), ord('I') ))
(charset, position) = xmlre._MatchCharClassExpr(text, 0)
self.assertEqual(position, len(text))
self.assertEqual(charset, expected)
text = r'[\p{L}-\p{Lo}]'
self.assertRaises(xmlre.RegularExpressionError, xmlre._MatchCharClassExpr, text, 0)
text = r'[\p{L}-[\p{Lo}]]'
(charset, position) = xmlre._MatchCharClassExpr(text, 0)
expected = unicode.CodePointSet(unicode.PropertyMap['L'])
expected.subtract(unicode.PropertyMap['Lo'])
self.assertEqual(position, len(text))
self.assertEqual(charset, expected)
def testXMLToPython (self):
self.assertEqual(r'^(123)$', xmlre.XMLToPython('123'))
# Note that single-char escapes in the expression are
# converted to character classes.
self.assertEqual(r'^(Why[ ]not[?])$', xmlre.XMLToPython(r'Why[ ]not\?'))
def testRegularExpressions (self):
text = r'[\i-[:]][\c-[:]]*'
compiled_re = re.compile(xmlre.XMLToPython(text))
self.assertTrue(compiled_re.match('identifier'))
self.assertFalse(compiled_re.match('0bad'))
self.assertFalse(compiled_re.match(' spaceBad'))
self.assertFalse(compiled_re.match('qname:bad'))
text = '\\i\\c*'
text_py = xmlre.XMLToPython(text)
compiled_re = re.compile(text_py)
self.assertTrue(compiled_re.match('identifier'))
self.assertTrue(compiled_re.match('_underscore'))
def testTrivialLiteral(self):
# Simplest sanity check for assertMatches / assertNoMatch
self.assertMatches("hello", "hello")
self.assertNoMatch("hello", "hhello")
self.assertNoMatch("hello", "helloo")
self.assertNoMatch("hello", "goodbye")
def testConvertingRangesToPythonWithDash(self):
# It's really easy to convert this RE into "foo[&-X]bar", if
# sorting characters in ASCII order without special-casing "-"
self.assertNoMatch("foo[-&X]bar", "fooWbar")
self.assertMatches("foo[-&X]bar", "foo-bar")
self.assertMatches("foo[-&X]bar", "foo&bar")
self.assertMatches("foo[-&X]bar", "fooXbar")
def testConvertingRangesToPythonWithCaret(self):
# It's really easy to convert this RE into "foo[^z]bar", if
# sorting characters in ASCII order without special-casing "^"
self.assertNoMatch("foo[z^]bar", "fooWbar")
self.assertMatches("foo[z^]bar", "foozbar")
self.assertMatches("foo[z^]bar", "foo^bar")
def testConvertingRangesToPythonWithBackslash(self):
# It's really easy to convert this RE into "foo[A\n]bar", if
# you forget to special-case r"\"
self.assertNoMatch("foo[A\\\\n]bar", "fooWbar")
self.assertNoMatch("foo[A\\\\n]bar", "foo\nbar")
self.assertMatches("foo[A\\\\n]bar", "fooAbar")
self.assertMatches("foo[A\\\\n]bar", "foo\\bar")
self.assertMatches("foo[A\\\\n]bar", "foonbar")
def testCnUnicodeClass(self):
# The Cn class is basically "everything that is not included in the
# Unicode character database". So it requires special handling when
# you parse the Unicode character database. It is really easy to
# miss this and leave the Cn class empty.
self.assertNoMatch("foo\\p{Cn}bar", "fooWbar")
self.assertMatches("foo\\p{Cn}bar", "foo\ufffebar")
self.assertMatches("foo\\P{Cn}bar", "fooWbar")
self.assertNoMatch("foo\\P{Cn}bar", "foo\ufffebar")
def testCnUnicodeClassInC(self):
# If the Cn class is wrong (see above), then C will probably be wrong
# too.
self.assertNoMatch("foo\\p{C}bar", "fooWbar")
self.assertMatches("foo\\p{C}bar", "foo\ufffebar")
self.assertMatches("foo\\P{C}bar", "fooWbar")
self.assertNoMatch("foo\\P{C}bar", "foo\ufffebar")
def testMultiCharEscape_s(self):
self.assertNoMatch("foo\\sbar", "fooWbar")
self.assertMatches("foo\\sbar", "foo bar")
def testMultiCharEscape_S(self):
self.assertMatches("foo\\Sbar", "fooWbar")
self.assertNoMatch("foo\\Sbar", "foo bar")
def testMultiCharEscape_i(self):
self.assertNoMatch("foo\\ibar", "foo bar")
self.assertMatches("foo\\ibar", "fooWbar")
self.assertMatches("foo\\ibar", "foo:bar")
self.assertMatches("foo\\ibar", "foo_bar")
self.assertMatches("foo\\ibar", "foo\u0D0Cbar")
self.assertNoMatch("foo\\ibar", "foo-bar")
self.assertNoMatch("foo\\ibar", "foo.bar")
self.assertNoMatch("foo\\ibar", "foo\u203Fbar")
self.assertNoMatch("foo\\ibar", "foo\u3005bar")
def testMultiCharEscape_I(self):
self.assertMatches("foo\\Ibar", "foo bar")
self.assertNoMatch("foo\\Ibar", "fooWbar")
self.assertNoMatch("foo\\Ibar", "foo:bar")
self.assertNoMatch("foo\\Ibar", "foo_bar")
self.assertNoMatch("foo\\Ibar", "foo\u0D0Cbar")
self.assertMatches("foo\\Ibar", "foo-bar")
self.assertMatches("foo\\Ibar", "foo.bar")
self.assertMatches("foo\\Ibar", "foo\u203Fbar")
self.assertMatches("foo\\Ibar", "foo\u3005bar")
def testMultiCharEscape_c(self):
self.assertNoMatch("foo\\cbar", "foo bar")
self.assertMatches("foo\\cbar", "fooWbar")
self.assertMatches("foo\\cbar", "foo:bar")
self.assertMatches("foo\\cbar", "foo_bar")
self.assertMatches("foo\\cbar", "foo\u0D0Cbar")
self.assertMatches("foo\\cbar", "foo-bar")
self.assertMatches("foo\\cbar", "foo.bar")
self.assertNoMatch("foo\\cbar", "foo\u203Fbar")
self.assertMatches("foo\\cbar", "foo\u3005bar")
def testMultiCharEscape_C(self):
self.assertMatches("foo\\Cbar", "foo bar")
self.assertNoMatch("foo\\Cbar", "fooWbar")
self.assertNoMatch("foo\\Cbar", "foo:bar")
self.assertNoMatch("foo\\Cbar", "foo_bar")
self.assertNoMatch("foo\\Cbar", "foo\u0D0Cbar")
self.assertNoMatch("foo\\Cbar", "foo-bar")
self.assertNoMatch("foo\\Cbar", "foo.bar")
self.assertMatches("foo\\Cbar", "foo\u203Fbar")
self.assertNoMatch("foo\\Cbar", "foo\u3005bar")
def testMultiCharEscape_d(self):
self.assertNoMatch("foo\\dbar", "foo bar")
self.assertNoMatch("foo\\dbar", "foozbar")
self.assertMatches("foo\\dbar", "foo5bar")
self.assertMatches("foo\\dbar", "foo\u0669bar")
def testMultiCharEscape_D(self):
self.assertMatches("foo\\Dbar", "foo bar")
self.assertMatches("foo\\Dbar", "foozbar")
self.assertNoMatch("foo\\Dbar", "foo5bar")
self.assertNoMatch("foo\\Dbar", "foo\u0669bar")
def testMultiCharEscape_w(self):
self.assertNoMatch("foo\\wbar", "foo bar")
self.assertNoMatch("foo\\wbar", "foo&bar")
self.assertMatches("foo\\wbar", "fooWbar")
self.assertMatches("[\\w]*", "fooWboar")
def testMultiCharEscape_W(self):
self.assertMatches("foo\\Wbar", "foo bar")
self.assertMatches("foo\\Wbar", "foo&bar")
self.assertNoMatch("foo\\Wbar", "fooWbar")
def testUnicodeClass(self):
self.assertMatches("\\p{L}*", "hello")
self.assertNoMatch("\\p{L}*", "hell7")
def testQuotedOpenBrace(self):
self.assertMatches("foo\\[bar", "foo[bar")
self.assertNoMatch("foo\\[bar", "foo\\[bar")
self.assertNoMatch("foo\\[bar", "foob")
def testQuotedCloseBrace(self):
self.assertMatches("foo\\]bar", "foo]bar")
self.assertNoMatch("foo\\]bar", "foo\\]bar")
self.assertNoMatch("foo\\]bar", "foob")
def testQuotedAndUnquotedCloseBrace(self):
self.assertMatches("foo[b\\]c]ar", "foobar")
self.assertMatches("foo[b\\]c]ar", "foo]ar")
self.assertMatches("foo[b\\]c]ar", "foocar")
self.assertNoMatch("foo[b\\]c]ar", "fooar")
def testUnquotedAndQuotedCloseBrace(self):
self.assertMatches("foo[zb]c\\]ar", "foobc]ar")
self.assertMatches("foo[zb]c\\]ar", "foozc]ar")
self.assertNoMatch("foo[zb]c\\]ar", "foozar")
def testQuotedOpenCloseBraces(self):
self.assertMatches("foo\\[bar\\]", "foo[bar]")
self.assertNoMatch("foo\\[bar\\]", "foo\\[bar]")
self.assertNoMatch("foo\\[bar\\]", "foobar")
def testQuotedAndUnquotedOpenBrace(self):
self.assertMatches("foo\\[b[az]r", "foo[bar")
self.assertMatches("foo\\[b[az]r", "foo[bzr")
self.assertNoMatch("foo\\[b[az]r", "foobr")
def testUnquotedAndQuotedOpenBrace(self):
self.assertMatches("foo[b\\[az]r", "foobr")
self.assertMatches("foo[b\\[az]r", "foo[r")
self.assertNoMatch("foo[b\\[az]r", "foobar")
def testFoo(self):
self.assertMatches("foo\\\\[bc\\]a]r", "foo\\br")
self.assertNoMatch("foo\\\\[bc\\]a]r", "foo\\bar")
self.assertNoMatch("foo\\\\[bc\\]a]r", "foobar")
def testDashStartRangeWithRange(self):
# Spec says: The - character is a valid character range only at the
# beginning or end of a positive character group.
self.assertMatches("foo[-a-z]bar", "fooabar")
self.assertMatches("foo[-a-z]bar", "foo-bar")
self.assertMatches("foo[-a-z]bar", "foonbar")
self.assertMatches("foo[-a-z]bar", "foozbar")
self.assertNoMatch("foo[-a-z]bar", "fooWbar")
def testDashStartRangeOneLetter(self):
self.assertMatches("foo[-a]bar", "fooabar")
self.assertMatches("foo[-a]bar", "foo-bar")
self.assertNoMatch("foo[-a]bar", "fooWbar")
def testDashStartRangeSeveralLetters(self):
self.assertMatches("foo[-abc]bar", "fooabar")
self.assertMatches("foo[-abc]bar", "foobbar")
self.assertMatches("foo[-abc]bar", "foocbar")
self.assertMatches("foo[-abc]bar", "foo-bar")
self.assertNoMatch("foo[-abc]bar", "fooWbar")
def testDashOnlyRange(self):
self.assertMatches("foo[-]bar", "foo-bar")
self.assertNoMatch("foo[-a-z]bar", "fooWbar")
def testDashEndRange(self):
self.assertMatches("foo[a-z-]bar", "fooabar")
self.assertMatches("foo[a-z-]bar", "foo-bar")
self.assertMatches("foo[a-z-]bar", "foonbar")
self.assertMatches("foo[a-z-]bar", "foozbar")
self.assertNoMatch("foo[a-z-]bar", "fooWbar")
def testDashEndRangeOneLetter(self):
self.assertMatches("foo[a-]bar", "fooabar")
self.assertMatches("foo[a-]bar", "foo-bar")
self.assertNoMatch("foo[a-]bar", "fooWbar")
def testDashEndRangeSeveralLetters(self):
self.assertMatches("foo[abc-]bar", "fooabar")
self.assertMatches("foo[abc-]bar", "foobbar")
self.assertMatches("foo[abc-]bar", "foocbar")
self.assertMatches("foo[abc-]bar", "foo-bar")
self.assertNoMatch("foo[abc-]bar", "fooWbar")
def testDashEndRangeWithSub(self):
self.assertMatches("foo[a-z--[q]]bar", "fooabar")
self.assertMatches("foo[a-z--[q]]bar", "foo-bar")
self.assertMatches("foo[a-z--[q]]bar", "foonbar")
self.assertMatches("foo[a-z--[q]]bar", "foozbar")
self.assertNoMatch("foo[a-z--[q]]bar", "fooWbar")
self.assertNoMatch("foo[a-z--[q]]bar", "fooqbar")
def testDashEndRangeOneLetterWithSub(self):
self.assertMatches("foo[a--[q]]bar", "fooabar")
self.assertMatches("foo[a--[q]]bar", "foo-bar")
self.assertNoMatch("foo[a--[q]]bar", "fooWbar")
self.assertMatches("foo[a--[a]]bar", "foo-bar")
self.assertNoMatch("foo[a--[a]]bar", "fooabar")
self.assertNoMatch("foo[a--[a]]bar", "fooWbar")
def testDashEndRangeSeveralLettersWithSub(self):
self.assertMatches("foo[abc--[b]]bar", "fooabar")
self.assertMatches("foo[abc--[b]]bar", "foocbar")
self.assertMatches("foo[abc--[b]]bar", "foo-bar")
self.assertNoMatch("foo[abc--[b]]bar", "foobbar")
self.assertNoMatch("foo[abc--[b]]bar", "fooWbar")
def testCaret(self):
self.assertMatches("foo^bar", "foo^bar")
self.assertNoMatch("foo^bar", "foobar")
self.assertNoMatch("foo^bar", "barfoo")
def testCaretStart(self):
self.assertMatches("^foobar", "^foobar")
self.assertNoMatch("^foobar", "foobar")
def testDollar(self):
self.assertMatches("foo$bar", "foo$bar")
self.assertNoMatch("foo$bar", "foobar")
self.assertNoMatch("foo$bar", "barfoo")
def testDollarEnd(self):
self.assertMatches("foobar$", "foobar$")
self.assertNoMatch("foobar$", "foobar")
def testCaretInRangeSub(self):
self.assertMatches("foo[a^-[a]]bar", "foo^bar")
self.assertNoMatch("foo[a^-[a]]bar", "fooabar")
self.assertNoMatch("foo[a^-[a]]bar", "foobar")
def testCaretInRange(self):
self.assertMatches("foo[a^]bar", "foo^bar")
self.assertMatches("foo[a^]bar", "fooabar")
self.assertNoMatch("foo[a^]bar", "foobar")
def testSingleCharRange(self):
self.assertMatches("foo[b]ar", "foobar")
def testQuotedSingleChar(self):
self.assertMatches("foo\\\\bar", "foo\\bar")
def testAlternation(self):
self.assertMatches("[0-9]{3}|", "123");
self.assertMatches("[0-9]{3}|", "");
self.assertNoMatch("[0-9]{3}|", "12");
self.assertNoMatch("[0-9]{3}|", "1234");
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for :class:`ironic.conductor.task_manager`."""
import eventlet
from eventlet import greenpool
import mock
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import fsm
from ironic.common import states
from ironic.conductor import task_manager
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.db import base as tests_db_base
from ironic.tests.objects import utils as obj_utils
@mock.patch.object(objects.Node, 'get')
@mock.patch.object(objects.Node, 'release')
@mock.patch.object(objects.Node, 'reserve')
@mock.patch.object(driver_factory, 'get_driver')
@mock.patch.object(objects.Port, 'list_by_node_id')
class TaskManagerTestCase(tests_db_base.DbTestCase):
def setUp(self):
super(TaskManagerTestCase, self).setUp()
self.host = 'test-host'
self.config(host=self.host)
self.config(node_locked_retry_attempts=1, group='conductor')
self.config(node_locked_retry_interval=0, group='conductor')
self.node = obj_utils.create_test_node(self.context)
def test_excl_lock(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_driver_mock.return_value, task.driver)
self.assertFalse(task.shared)
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with(self.node.driver)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
self.assertFalse(node_get_mock.called)
def test_excl_lock_with_driver(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id',
driver_name='fake-driver') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_driver_mock.return_value, task.driver)
self.assertFalse(task.shared)
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with('fake-driver')
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
self.assertFalse(node_get_mock.called)
def test_excl_nested_acquire(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
node2 = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake')
reserve_mock.return_value = self.node
get_ports_mock.return_value = mock.sentinel.ports1
get_driver_mock.return_value = mock.sentinel.driver1
with task_manager.TaskManager(self.context, 'node-id1') as task:
reserve_mock.return_value = node2
get_ports_mock.return_value = mock.sentinel.ports2
get_driver_mock.return_value = mock.sentinel.driver2
with task_manager.TaskManager(self.context, 'node-id2') as task2:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(mock.sentinel.ports1, task.ports)
self.assertEqual(mock.sentinel.driver1, task.driver)
self.assertFalse(task.shared)
self.assertEqual(self.context, task2.context)
self.assertEqual(node2, task2.node)
self.assertEqual(mock.sentinel.ports2, task2.ports)
self.assertEqual(mock.sentinel.driver2, task2.driver)
self.assertFalse(task2.shared)
self.assertEqual([mock.call(self.context, self.host, 'node-id1'),
mock.call(self.context, self.host, 'node-id2')],
reserve_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.id),
mock.call(self.context, node2.id)],
get_ports_mock.call_args_list)
self.assertEqual([mock.call(self.node.driver),
mock.call(node2.driver)],
get_driver_mock.call_args_list)
# release should be in reverse order
self.assertEqual([mock.call(self.context, self.host, node2.id),
mock.call(self.context, self.host, self.node.id)],
release_mock.call_args_list)
self.assertFalse(node_get_mock.called)
def test_excl_lock_exception_then_lock(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
retry_attempts = 3
self.config(node_locked_retry_attempts=retry_attempts,
group='conductor')
# Fail on the first lock attempt, succeed on the second.
reserve_mock.side_effect = [exception.NodeLocked(node='foo',
host='foo'),
self.node]
with task_manager.TaskManager(self.context, 'fake-node-id') as task:
self.assertFalse(task.shared)
reserve_mock.assert_called_with(self.context, self.host, 'fake-node-id')
self.assertEqual(2, reserve_mock.call_count)
def test_excl_lock_reserve_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
retry_attempts = 3
self.config(node_locked_retry_attempts=retry_attempts,
group='conductor')
reserve_mock.side_effect = exception.NodeLocked(node='foo',
host='foo')
self.assertRaises(exception.NodeLocked,
task_manager.TaskManager,
self.context,
'fake-node-id')
reserve_mock.assert_called_with(self.context, self.host,
'fake-node-id')
self.assertEqual(retry_attempts, reserve_mock.call_count)
self.assertFalse(get_ports_mock.called)
self.assertFalse(get_driver_mock.called)
self.assertFalse(release_mock.called)
self.assertFalse(node_get_mock.called)
def test_excl_lock_get_ports_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(get_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
self.assertFalse(node_get_mock.called)
def test_excl_lock_get_driver_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_driver_mock.side_effect = exception.DriverNotFound(
driver_name='foo')
self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with(self.node.driver)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
self.assertFalse(node_get_mock.called)
def test_shared_lock(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id',
shared=True) as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_driver_mock.return_value, task.driver)
self.assertTrue(task.shared)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with(self.node.driver)
def test_shared_lock_with_driver(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
node_get_mock.return_value = self.node
with task_manager.TaskManager(self.context,
'fake-node-id',
shared=True,
driver_name='fake-driver') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_driver_mock.return_value, task.driver)
self.assertTrue(task.shared)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with('fake-driver')
def test_shared_lock_node_get_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
node_get_mock.side_effect = exception.NodeNotFound(node='foo')
self.assertRaises(exception.NodeNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
self.assertFalse(get_ports_mock.called)
self.assertFalse(get_driver_mock.called)
def test_shared_lock_get_ports_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(get_driver_mock.called)
def test_shared_lock_get_driver_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_driver_mock.side_effect = exception.DriverNotFound(
driver_name='foo')
self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_driver_mock.assert_called_once_with(self.node.driver)
def test_spawn_after(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock, node_get_mock):
thread_mock = mock.Mock(spec_set=['link', 'cancel'])
spawn_mock = mock.Mock(return_value=thread_mock)
task_release_mock = mock.Mock()
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
thread_mock.link.assert_called_once_with(
task._thread_release_resources)
self.assertFalse(thread_mock.cancel.called)
# Since we mocked link(), we're testing that __exit__ didn't
# release resources pending the finishing of the background
# thread
self.assertFalse(task_release_mock.called)
def test_spawn_after_exception_while_yielded(self, get_ports_mock,
get_driver_mock,
reserve_mock,
release_mock,
node_get_mock):
spawn_mock = mock.Mock()
task_release_mock = mock.Mock()
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
raise exception.IronicException('foo')
self.assertRaises(exception.IronicException, _test_it)
self.assertFalse(spawn_mock.called)
task_release_mock.assert_called_once_with()
def test_spawn_after_spawn_fails(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
spawn_mock = mock.Mock(side_effect=exception.IronicException('foo'))
task_release_mock = mock.Mock()
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
task_release_mock.assert_called_once_with()
def test_spawn_after_link_fails(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
thread_mock = mock.Mock(spec_set=['link', 'cancel'])
thread_mock.link.side_effect = exception.IronicException('foo')
spawn_mock = mock.Mock(return_value=thread_mock)
task_release_mock = mock.Mock()
thr_release_mock = mock.Mock(spec_set=[])
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task._thread_release_resources = thr_release_mock
task.release_resources = task_release_mock
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
thread_mock.link.assert_called_once_with(thr_release_mock)
thread_mock.cancel.assert_called_once_with()
task_release_mock.assert_called_once_with()
def test_spawn_after_on_error_hook(self, get_ports_mock, get_driver_mock,
reserve_mock, release_mock,
node_get_mock):
expected_exception = exception.IronicException('foo')
spawn_mock = mock.Mock(side_effect=expected_exception)
task_release_mock = mock.Mock()
on_error_handler = mock.Mock()
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.set_spawn_error_hook(on_error_handler, 'fake-argument')
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
task_release_mock.assert_called_once_with()
on_error_handler.assert_called_once_with(expected_exception,
'fake-argument')
def test_spawn_after_on_error_hook_exception(self, get_ports_mock,
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
expected_exception = exception.IronicException('foo')
spawn_mock = mock.Mock(side_effect=expected_exception)
task_release_mock = mock.Mock()
# Raise an exception within the on_error handler
on_error_handler = mock.Mock(side_effect=Exception('unexpected'))
on_error_handler.__name__ = 'foo_method'
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.set_spawn_error_hook(on_error_handler, 'fake-argument')
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
# Make sure the original exception is the one raised
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
task_release_mock.assert_called_once_with()
on_error_handler.assert_called_once_with(expected_exception,
'fake-argument')
@mock.patch.object(states.machine, 'copy')
def test_init_prepares_fsm(self, copy_mock, get_ports_mock,
get_driver_mock, reserve_mock, release_mock,
node_get_mock):
m = mock.Mock(spec=fsm.FSM)
reserve_mock.return_value = self.node
copy_mock.return_value = m
t = task_manager.TaskManager('fake', 'fake')
copy_mock.assert_called_once_with()
self.assertIs(m, t.fsm)
m.initialize.assert_called_once_with(self.node.provision_state)
class TaskManagerStateModelTestCases(tests_base.TestCase):
def setUp(self):
super(TaskManagerStateModelTestCases, self).setUp()
self.fsm = mock.Mock(spec=fsm.FSM)
self.node = mock.Mock(spec=objects.Node)
self.task = mock.Mock(spec=task_manager.TaskManager)
self.task.fsm = self.fsm
self.task.node = self.node
def test_release_clears_resources(self):
t = self.task
t.release_resources = task_manager.TaskManager.release_resources
t.driver = mock.Mock()
t.ports = mock.Mock()
t.shared = True
t.release_resources(t)
self.assertIsNone(t.node)
self.assertIsNone(t.driver)
self.assertIsNone(t.ports)
self.assertIsNone(t.fsm)
def test_process_event_fsm_raises(self):
self.task.process_event = task_manager.TaskManager.process_event
self.fsm.process_event.side_effect = exception.InvalidState('test')
self.assertRaises(
exception.InvalidState,
self.task.process_event,
self.task, 'fake')
self.assertEqual(0, self.task.spawn_after.call_count)
self.assertFalse(self.task.node.save.called)
def test_process_event_sets_callback(self):
cb = mock.Mock()
arg = mock.Mock()
kwarg = mock.Mock()
self.task.process_event = task_manager.TaskManager.process_event
self.task.process_event(self.task, 'fake',
callback=cb, call_args=[arg], call_kwargs={'mock': kwarg})
self.fsm.process_event.assert_called_once_with('fake')
self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
self.assertEqual(1, self.task.node.save.call_count)
self.assertIsNone(self.node.last_error)
def test_process_event_sets_callback_and_error_handler(self):
arg = mock.Mock()
cb = mock.Mock()
er = mock.Mock()
kwarg = mock.Mock()
provision_state = 'provision_state'
target_provision_state = 'target'
self.node.provision_state = provision_state
self.node.target_provision_state = target_provision_state
self.task.process_event = task_manager.TaskManager.process_event
self.task.process_event(self.task, 'fake',
callback=cb, call_args=[arg], call_kwargs={'mock': kwarg},
err_handler=er)
self.task.set_spawn_error_hook.assert_called_once_with(er,
self.node, provision_state, target_provision_state)
self.fsm.process_event.assert_called_once_with('fake')
self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
self.assertEqual(1, self.task.node.save.call_count)
self.assertIsNone(self.node.last_error)
self.assertNotEqual(provision_state, self.node.provision_state)
self.assertNotEqual(target_provision_state,
self.node.target_provision_state)
@task_manager.require_exclusive_lock
def _req_excl_lock_method(*args, **kwargs):
return (args, kwargs)
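# For context, require_exclusive_lock roughly behaves like the sketch below.
# This is an assumption reconstructed from the tests that follow, not the
# actual implementation: it locates the TaskManager among the positional
# arguments and rejects tasks holding only a shared lock.
#
#   def require_exclusive_lock_sketch(fn):
#       def wrapper(*args, **kwargs):
#           task = next(a for a in args
#                       if isinstance(a, task_manager.TaskManager))
#           if task.shared:
#               raise exception.ExclusiveLockRequired()
#           return fn(*args, **kwargs)
#       return wrapper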
class ExclusiveLockDecoratorTestCase(tests_base.TestCase):
def setUp(self):
super(ExclusiveLockDecoratorTestCase, self).setUp()
self.task = mock.Mock(spec=task_manager.TaskManager)
self.args_task_first = (self.task, 1, 2)
self.args_task_second = (1, self.task, 2)
self.kwargs = dict(cat='meow', dog='wuff')
def test_with_excl_lock_task_first_arg(self):
self.task.shared = False
(args, kwargs) = _req_excl_lock_method(*self.args_task_first,
**self.kwargs)
self.assertEqual(self.args_task_first, args)
self.assertEqual(self.kwargs, kwargs)
def test_with_excl_lock_task_second_arg(self):
self.task.shared = False
(args, kwargs) = _req_excl_lock_method(*self.args_task_second,
**self.kwargs)
self.assertEqual(self.args_task_second, args)
self.assertEqual(self.kwargs, kwargs)
def test_with_shared_lock_task_first_arg(self):
self.task.shared = True
self.assertRaises(exception.ExclusiveLockRequired,
_req_excl_lock_method,
*self.args_task_first,
**self.kwargs)
def test_with_shared_lock_task_second_arg(self):
self.task.shared = True
self.assertRaises(exception.ExclusiveLockRequired,
_req_excl_lock_method,
*self.args_task_second,
**self.kwargs)
class TaskManagerGreenThreadTestCase(tests_base.TestCase):
"""Class to assert our assumptions about greenthread behavior."""
def test_gt_link_callback_added_during_execution(self):
pool = greenpool.GreenPool()
q1 = eventlet.Queue()
q2 = eventlet.Queue()
def func():
q1.put(None)
q2.get()
link_callback = mock.Mock()
thread = pool.spawn(func)
q1.get()
thread.link(link_callback)
q2.put(None)
pool.waitall()
link_callback.assert_called_once_with(thread)
def test_gt_link_callback_added_after_execution(self):
pool = greenpool.GreenPool()
link_callback = mock.Mock()
thread = pool.spawn(lambda: None)
pool.waitall()
thread.link(link_callback)
link_callback.assert_called_once_with(thread)
def test_gt_link_callback_exception_inside_thread(self):
pool = greenpool.GreenPool()
q1 = eventlet.Queue()
q2 = eventlet.Queue()
def func():
q1.put(None)
q2.get()
raise Exception()
link_callback = mock.Mock()
thread = pool.spawn(func)
q1.get()
thread.link(link_callback)
q2.put(None)
pool.waitall()
link_callback.assert_called_once_with(thread)
def test_gt_link_callback_added_after_exception_inside_thread(self):
pool = greenpool.GreenPool()
def func():
raise Exception()
link_callback = mock.Mock()
thread = pool.spawn(func)
pool.waitall()
thread.link(link_callback)
link_callback.assert_called_once_with(thread)
def test_gt_cancel_doesnt_run_thread(self):
pool = greenpool.GreenPool()
func = mock.Mock()
thread = pool.spawn(func)
thread.link(lambda t: None)
thread.cancel()
pool.waitall()
self.assertFalse(func.called)
|
|
#!/usr/bin/env python
"""The MySQL database methods for flow handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import MySQLdb
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_server.databases import db
from grr_response_server.databases import db_utils
from grr_response_server.databases import mysql_utils
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
_HUNT_COLUMNS_SELECT = ", ".join((
"UNIX_TIMESTAMP(create_timestamp)",
"UNIX_TIMESTAMP(last_update_timestamp)",
"creator",
"duration_micros",
"client_rate",
"client_limit",
"hunt_state",
"hunt_state_comment",
"UNIX_TIMESTAMP(init_start_time)",
"UNIX_TIMESTAMP(last_start_time)",
"num_clients_at_start_time",
"description",
"hunt",
))
_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS = (
"plugin_name",
"plugin_args",
"plugin_state",
)
class MySQLDBHuntMixin(object):
"""MySQLDB mixin for flow handling."""
@mysql_utils.WithTransaction()
def WriteHuntObject(self, hunt_obj, cursor=None):
"""Writes a hunt object to the database."""
query = """
INSERT INTO hunts (hunt_id, creator, description, duration_micros,
hunt_state,
client_rate, client_limit,
hunt)
VALUES (%(hunt_id)s, %(creator)s, %(description)s, %(duration_micros)s,
%(hunt_state)s,
%(client_rate)s, %(client_limit)s,
%(hunt)s)
"""
args = {
"hunt_id": db_utils.HuntIDToInt(hunt_obj.hunt_id),
"creator": hunt_obj.creator,
"description": hunt_obj.description,
"duration_micros": hunt_obj.duration.microseconds,
"hunt_state": int(rdf_hunt_objects.Hunt.HuntState.PAUSED),
"client_rate": hunt_obj.client_rate,
"client_limit": hunt_obj.client_limit,
"hunt": hunt_obj.SerializeToBytes(),
}
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError as error:
raise db.DuplicatedHuntError(hunt_id=hunt_obj.hunt_id, cause=error)
@mysql_utils.WithTransaction()
def UpdateHuntObject(self,
hunt_id,
duration=None,
client_rate=None,
client_limit=None,
hunt_state=None,
hunt_state_comment=None,
start_time=None,
num_clients_at_start_time=None,
cursor=None):
"""Updates the hunt object by applying the update function."""
vals = []
args = {}
if duration is not None:
vals.append("duration_micros = %(duration_micros)s")
args["duration_micros"] = duration.microseconds
if client_rate is not None:
vals.append("client_rate = %(client_rate)s")
args["client_rate"] = client_rate
if client_limit is not None:
vals.append("client_limit = %(client_limit)s")
args["client_limit"] = client_limit
if hunt_state is not None:
vals.append("hunt_state = %(hunt_state)s")
args["hunt_state"] = int(hunt_state)
if hunt_state_comment is not None:
vals.append("hunt_state_comment = %(hunt_state_comment)s")
args["hunt_state_comment"] = hunt_state_comment
if start_time is not None:
vals.append("""
init_start_time = IFNULL(init_start_time, FROM_UNIXTIME(%(start_time)s))
""")
vals.append("""
last_start_time = FROM_UNIXTIME(%(start_time)s)
""")
args["start_time"] = mysql_utils.RDFDatetimeToTimestamp(start_time)
if num_clients_at_start_time is not None:
vals.append("num_clients_at_start_time = %(num_clients_at_start_time)s")
args["num_clients_at_start_time"] = num_clients_at_start_time
vals.append("last_update_timestamp = NOW(6)")
query = """
UPDATE hunts
SET {updates}
WHERE hunt_id = %(hunt_id)s
""".format(updates=", ".join(vals))
args["hunt_id"] = db_utils.HuntIDToInt(hunt_id)
rows_modified = cursor.execute(query, args)
if rows_modified == 0:
raise db.UnknownHuntError(hunt_id)
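  # Illustrative sketch (not part of the original code): a hypothetical call
  # UpdateHuntObject("ABCDEF12", client_rate=0.5, hunt_state_comment="paused")
  # would issue roughly the following statement:
  #
  #   UPDATE hunts
  #   SET client_rate = %(client_rate)s,
  #       hunt_state_comment = %(hunt_state_comment)s,
  #       last_update_timestamp = NOW(6)
  #   WHERE hunt_id = %(hunt_id)s
  #
  # with the two values and the integer form of the hunt ID passed via args.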
@mysql_utils.WithTransaction()
def DeleteHuntObject(self, hunt_id, cursor=None):
"""Deletes a given hunt object."""
query = "DELETE FROM hunts WHERE hunt_id = %s"
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
rows_deleted = cursor.execute(query, [hunt_id_int])
if rows_deleted == 0:
raise db.UnknownHuntError(hunt_id)
query = "DELETE FROM hunt_output_plugins_states WHERE hunt_id = %s"
cursor.execute(query, [hunt_id_int])
def _HuntObjectFromRow(self, row):
"""Generates a flow object from a database row."""
(
create_time,
last_update_time,
creator,
duration_micros,
client_rate,
client_limit,
hunt_state,
hunt_state_comment,
init_start_time,
last_start_time,
num_clients_at_start_time,
description,
body,
) = row
hunt_obj = rdf_hunt_objects.Hunt.FromSerializedBytes(body)
hunt_obj.duration = rdfvalue.Duration.From(duration_micros,
rdfvalue.MICROSECONDS)
hunt_obj.create_time = mysql_utils.TimestampToRDFDatetime(create_time)
hunt_obj.last_update_time = mysql_utils.TimestampToRDFDatetime(
last_update_time)
    # Checks below are needed for hunts that were written to the database
    # before the respective fields became part of the DB schema.
if creator is not None:
hunt_obj.creator = creator
if client_rate is not None:
hunt_obj.client_rate = client_rate
if client_limit is not None:
hunt_obj.client_limit = client_limit
if hunt_state is not None:
hunt_obj.hunt_state = hunt_state
if hunt_state_comment is not None:
hunt_obj.hunt_state_comment = hunt_state_comment
if init_start_time is not None:
hunt_obj.init_start_time = mysql_utils.TimestampToRDFDatetime(
init_start_time)
if last_start_time is not None:
hunt_obj.last_start_time = mysql_utils.TimestampToRDFDatetime(
last_start_time)
if num_clients_at_start_time is not None:
hunt_obj.num_clients_at_start_time = num_clients_at_start_time
if description is not None:
hunt_obj.description = description
return hunt_obj
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntObject(self, hunt_id, cursor=None):
"""Reads a hunt object from the database."""
query = ("SELECT {columns} "
"FROM hunts WHERE hunt_id = %s".format(
columns=_HUNT_COLUMNS_SELECT))
nr_results = cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
if nr_results == 0:
raise db.UnknownHuntError(hunt_id)
return self._HuntObjectFromRow(cursor.fetchone())
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None,
cursor=None):
"""Reads multiple hunt objects from the database."""
query = "SELECT {columns} FROM hunts ".format(columns=_HUNT_COLUMNS_SELECT)
args = []
components = []
if with_creator is not None:
components.append("creator = %s ")
args.append(with_creator)
if created_after is not None:
components.append("create_timestamp > FROM_UNIXTIME(%s) ")
args.append(mysql_utils.RDFDatetimeToTimestamp(created_after))
if with_description_match is not None:
components.append("description LIKE %s")
args.append("%" + with_description_match + "%")
if components:
query += "WHERE " + " AND ".join(components)
query += " ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
return [self._HuntObjectFromRow(row) for row in cursor.fetchall()]
@mysql_utils.WithTransaction(readonly=True)
def ListHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None,
cursor=None):
"""Reads metadata for hunt objects from the database."""
query = """
SELECT
hunt_id,
UNIX_TIMESTAMP(create_timestamp),
UNIX_TIMESTAMP(last_update_timestamp),
creator,
duration_micros,
client_rate,
client_limit,
hunt_state,
hunt_state_comment,
UNIX_TIMESTAMP(init_start_time),
UNIX_TIMESTAMP(last_start_time),
description
FROM hunts """
args = []
components = []
if with_creator is not None:
components.append("creator = %s ")
args.append(with_creator)
if created_after is not None:
components.append("create_timestamp > FROM_UNIXTIME(%s) ")
args.append(mysql_utils.RDFDatetimeToTimestamp(created_after))
if with_description_match is not None:
components.append("description LIKE %s")
args.append("%" + with_description_match + "%")
if components:
query += "WHERE " + " AND ".join(components)
query += " ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
result = []
for row in cursor.fetchall():
(hunt_id, create_timestamp, last_update_timestamp, creator,
duration_micros, client_rate, client_limit, hunt_state,
hunt_state_comment, init_start_time, last_start_time, description) = row
result.append(
rdf_hunt_objects.HuntMetadata(
hunt_id=db_utils.IntToHuntID(hunt_id),
description=description or None,
create_time=mysql_utils.TimestampToRDFDatetime(create_timestamp),
creator=creator,
duration=rdfvalue.Duration.From(duration_micros,
rdfvalue.MICROSECONDS),
client_rate=client_rate,
client_limit=client_limit,
hunt_state=hunt_state,
hunt_state_comment=hunt_state_comment or None,
last_update_time=mysql_utils.TimestampToRDFDatetime(
last_update_timestamp),
init_start_time=mysql_utils.TimestampToRDFDatetime(
init_start_time),
last_start_time=mysql_utils.TimestampToRDFDatetime(
last_start_time)))
return result
def _HuntOutputPluginStateFromRow(self, row):
"""Builds OutputPluginState object from a DB row."""
plugin_name, plugin_args_bytes, plugin_state_bytes = row
plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=plugin_name)
if plugin_args_bytes is not None:
plugin_args_cls = plugin_descriptor.GetPluginArgsClass()
# If plugin_args_cls is None, we have no clue what class plugin args
# should be and therefore no way to deserialize it. This can happen if
# a plugin got renamed or removed, for example. In this case we
# still want to get plugin's definition and state back and not fail hard,
# so that all other plugins can be read.
if plugin_args_cls is not None:
plugin_descriptor.plugin_args = plugin_args_cls.FromSerializedBytes(
plugin_args_bytes)
plugin_state = rdf_protodict.AttributedDict.FromSerializedBytes(
plugin_state_bytes)
return rdf_flow_runner.OutputPluginState(
plugin_descriptor=plugin_descriptor, plugin_state=plugin_state)
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntOutputPluginsStates(self, hunt_id, cursor=None):
"""Reads all hunt output plugins states of a given hunt."""
columns = ", ".join(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS)
query = ("SELECT {columns} FROM hunt_output_plugins_states "
"WHERE hunt_id = %s".format(columns=columns))
rows_returned = cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
if rows_returned > 0:
states = []
for row in cursor.fetchall():
states.append(self._HuntOutputPluginStateFromRow(row))
return states
query = "SELECT hunt_id FROM hunts WHERE hunt_id = %s"
rows_returned = cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
if rows_returned == 0:
raise db.UnknownHuntError(hunt_id)
return []
@mysql_utils.WithTransaction()
def WriteHuntOutputPluginsStates(self, hunt_id, states, cursor=None):
"""Writes hunt output plugin states for a given hunt."""
columns = ", ".join(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS)
placeholders = mysql_utils.Placeholders(
2 + len(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS))
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
for index, state in enumerate(states):
query = ("INSERT INTO hunt_output_plugins_states "
"(hunt_id, plugin_id, {columns}) "
"VALUES {placeholders}".format(
columns=columns, placeholders=placeholders))
args = [hunt_id_int, index, state.plugin_descriptor.plugin_name]
if state.plugin_descriptor.plugin_args is None:
args.append(None)
else:
args.append(state.plugin_descriptor.plugin_args.SerializeToBytes())
args.append(state.plugin_state.SerializeToBytes())
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError as e:
raise db.UnknownHuntError(hunt_id=hunt_id, cause=e)
@mysql_utils.WithTransaction()
def UpdateHuntOutputPluginState(self,
hunt_id,
state_index,
update_fn,
cursor=None):
"""Updates hunt output plugin state for a given output plugin."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = "SELECT hunt_id FROM hunts WHERE hunt_id = %s"
rows_returned = cursor.execute(query, [hunt_id_int])
if rows_returned == 0:
raise db.UnknownHuntError(hunt_id)
columns = ", ".join(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS)
query = ("SELECT {columns} FROM hunt_output_plugins_states "
"WHERE hunt_id = %s AND plugin_id = %s".format(columns=columns))
rows_returned = cursor.execute(query, [hunt_id_int, state_index])
if rows_returned == 0:
raise db.UnknownHuntOutputPluginStateError(hunt_id, state_index)
state = self._HuntOutputPluginStateFromRow(cursor.fetchone())
modified_plugin_state = update_fn(state.plugin_state)
query = ("UPDATE hunt_output_plugins_states "
"SET plugin_state = %s "
"WHERE hunt_id = %s AND plugin_id = %s")
args = [modified_plugin_state.SerializeToBytes(), hunt_id_int, state_index]
cursor.execute(query, args)
return state
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntLogEntries(self,
hunt_id,
offset,
count,
with_substring=None,
cursor=None):
"""Reads hunt log entries of a given hunt using given query options."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT client_id, flow_id, message, UNIX_TIMESTAMP(timestamp) "
"FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id ")
args = [hunt_id_int]
if with_substring is not None:
query += "AND message LIKE %s "
args.append("%" + db_utils.EscapeWildcards(with_substring) + "%")
query += "ORDER BY timestamp ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
flow_log_entries = []
for client_id_int, flow_id_int, message, timestamp in cursor.fetchall():
flow_log_entries.append(
rdf_flow_objects.FlowLogEntry(
client_id=db_utils.IntToClientID(client_id_int),
flow_id=db_utils.IntToFlowID(flow_id_int),
hunt_id=hunt_id,
message=message,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
return flow_log_entries
@mysql_utils.WithTransaction(readonly=True)
def CountHuntLogEntries(self, hunt_id, cursor=None):
"""Returns number of hunt log entries of a given hunt."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT COUNT(*) FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id")
cursor.execute(query, [hunt_id_int])
return cursor.fetchone()[0]
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntResults(self,
hunt_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None,
with_timestamp=None,
cursor=None):
"""Reads hunt results of a given hunt using given query options."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT client_id, flow_id, hunt_id, payload, type, "
"UNIX_TIMESTAMP(timestamp), tag "
"FROM flow_results "
"FORCE INDEX(flow_results_hunt_id_flow_id_timestamp) "
"WHERE hunt_id = %s ")
args = [hunt_id_int]
if with_tag:
query += "AND tag = %s "
args.append(with_tag)
if with_type:
query += "AND type = %s "
args.append(with_type)
if with_substring:
query += "AND payload LIKE %s "
args.append("%" + db_utils.EscapeWildcards(with_substring) + "%")
if with_timestamp:
query += "AND timestamp = FROM_UNIXTIME(%s) "
args.append(mysql_utils.RDFDatetimeToTimestamp(with_timestamp))
query += "ORDER BY timestamp ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
ret = []
for (
client_id_int,
flow_id_int,
hunt_id_int,
serialized_payload,
payload_type,
timestamp,
tag,
) in cursor.fetchall():
if payload_type in rdfvalue.RDFValue.classes:
payload = rdfvalue.RDFValue.classes[payload_type].FromSerializedBytes(
serialized_payload)
else:
payload = rdf_objects.SerializedValueOfUnrecognizedType(
type_name=payload_type, value=serialized_payload)
result = rdf_flow_objects.FlowResult(
client_id=db_utils.IntToClientID(client_id_int),
flow_id=db_utils.IntToFlowID(flow_id_int),
hunt_id=hunt_id,
payload=payload,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp))
if tag is not None:
result.tag = tag
ret.append(result)
return ret
@mysql_utils.WithTransaction(readonly=True)
def CountHuntResults(self,
hunt_id,
with_tag=None,
with_type=None,
cursor=None):
"""Counts hunt results of a given hunt using given query options."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = "SELECT COUNT(*) FROM flow_results WHERE hunt_id = %s "
args = [hunt_id_int]
if with_tag is not None:
query += "AND tag = %s "
args.append(with_tag)
if with_type is not None:
query += "AND type = %s "
args.append(with_type)
cursor.execute(query, args)
return cursor.fetchone()[0]
@mysql_utils.WithTransaction(readonly=True)
def CountHuntResultsByType(self, hunt_id, cursor=None):
"""Counts number of hunts results per type."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT type, COUNT(*) FROM flow_results "
"WHERE hunt_id = %s GROUP BY type")
cursor.execute(query, [hunt_id_int])
return dict(cursor.fetchall())
def _HuntFlowCondition(self, condition):
"""Builds an SQL condition matching db.HuntFlowsCondition."""
if condition == db.HuntFlowsCondition.UNSET:
return "", []
elif condition == db.HuntFlowsCondition.FAILED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.ERROR)])
elif condition == db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.FINISHED)])
elif condition == db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY:
return ("AND (flow_state = %s OR flow_state = %s) ", [
int(rdf_flow_objects.Flow.FlowState.FINISHED),
int(rdf_flow_objects.Flow.FlowState.ERROR)
])
elif condition == db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.RUNNING)])
elif condition == db.HuntFlowsCondition.CRASHED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.CRASHED)])
else:
raise ValueError("Invalid condition value: %r" % condition)
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntFlows(self,
hunt_id,
offset,
count,
filter_condition=db.HuntFlowsCondition.UNSET,
cursor=None):
"""Reads hunt flows matching given conditins."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT {columns} FROM flows "
"FORCE INDEX(flows_by_hunt) "
"WHERE parent_hunt_id = %s AND parent_flow_id IS NULL "
"{filter_condition} "
"ORDER BY last_update ASC "
"LIMIT %s OFFSET %s")
filter_query, extra_args = self._HuntFlowCondition(filter_condition)
query = query.format(
columns=self.FLOW_DB_FIELDS, filter_condition=filter_query)
args = [hunt_id_int] + extra_args + [count, offset]
cursor.execute(query, args)
return [self._FlowObjectFromRow(row) for row in cursor.fetchall()]
@mysql_utils.WithTransaction(readonly=True)
def CountHuntFlows(self,
hunt_id,
filter_condition=db.HuntFlowsCondition.UNSET,
cursor=None):
"""Counts hunt flows matching given conditions."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT COUNT(*) FROM flows "
"FORCE INDEX(flows_by_hunt) "
"WHERE parent_hunt_id = %s AND parent_flow_id IS NULL "
"{filter_condition}")
filter_query, extra_args = self._HuntFlowCondition(filter_condition)
args = [hunt_id_int] + extra_args
query = query.format(filter_condition=filter_query)
cursor.execute(query, args)
return cursor.fetchone()[0]
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntCounters(self, hunt_id, cursor=None):
"""Reads hunt counters."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT flow_state, COUNT(*) "
"FROM flows "
"FORCE INDEX(flows_by_hunt) "
"WHERE parent_hunt_id = %s AND parent_flow_id IS NULL "
"GROUP BY flow_state")
cursor.execute(query, [hunt_id_int])
counts_by_state = dict(cursor.fetchall())
num_successful_clients = counts_by_state.get(
int(rdf_flow_objects.Flow.FlowState.FINISHED), 0)
num_failed_clients = counts_by_state.get(
int(rdf_flow_objects.Flow.FlowState.ERROR), 0)
num_crashed_clients = counts_by_state.get(
int(rdf_flow_objects.Flow.FlowState.CRASHED), 0)
num_clients = sum(counts_by_state.values())
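    # The query below cross-joins two single-row subqueries: "counters" counts
    # flows that sent at least one reply, while "resources" sums CPU, network
    # and reply counters over all of the hunt's top-level flows.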
query = """
SELECT * FROM
(
SELECT COUNT(client_id)
FROM flows
FORCE INDEX(flows_by_hunt)
WHERE parent_hunt_id = %s AND parent_flow_id IS NULL AND
num_replies_sent > 0) counters,
(
SELECT SUM(user_cpu_time_used_micros + system_cpu_time_used_micros),
SUM(network_bytes_sent),
SUM(num_replies_sent)
FROM flows
FORCE INDEX(flows_by_hunt)
WHERE parent_hunt_id = %s AND parent_flow_id IS NULL) resources
"""
cursor.execute(query, [hunt_id_int, hunt_id_int])
(
num_clients_with_results,
total_cpu_seconds,
total_network_bytes_sent,
num_results,
) = cursor.fetchone()
return db.HuntCounters(
num_clients=num_clients,
num_successful_clients=num_successful_clients,
num_failed_clients=num_failed_clients,
num_clients_with_results=num_clients_with_results,
num_crashed_clients=num_crashed_clients,
num_results=int(num_results or 0),
total_cpu_seconds=db_utils.MicrosToSeconds(int(total_cpu_seconds or 0)),
total_network_bytes_sent=int(total_network_bytes_sent or 0))
def _BinsToQuery(self, bins, column_name):
"""Builds an SQL query part to fetch counts corresponding to given bins."""
result = []
# With the current StatsHistogram implementation the last bin simply
# takes all the values that are greater than range_max_value of
# the one-before-the-last bin. range_max_value of the last bin
# is thus effectively ignored.
for prev_b, next_b in zip([0] + bins[:-1], bins[:-1] + [None]):
query = "COUNT(CASE WHEN %s >= %f" % (column_name, prev_b)
if next_b is not None:
query += " AND %s < %f" % (column_name, next_b)
query += " THEN 1 END)"
result.append(query)
return ", ".join(result)
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntClientResourcesStats(self, hunt_id, cursor=None):
"""Read/calculate hunt client resources stats."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = """
SELECT
COUNT(*),
SUM(user_cpu_time_used_micros),
STDDEV_POP(user_cpu_time_used_micros),
SUM(system_cpu_time_used_micros),
STDDEV_POP(system_cpu_time_used_micros),
SUM(network_bytes_sent),
STDDEV_POP(network_bytes_sent),
"""
scaled_bins = [
int(1000000 * b) for b in rdf_stats.ClientResourcesStats.CPU_STATS_BINS
]
query += self._BinsToQuery(scaled_bins, "(user_cpu_time_used_micros)")
query += ","
query += self._BinsToQuery(scaled_bins, "(system_cpu_time_used_micros)")
query += ","
query += self._BinsToQuery(
rdf_stats.ClientResourcesStats.NETWORK_STATS_BINS, "network_bytes_sent")
query += " FROM flows "
query += "FORCE INDEX(flows_by_hunt) "
query += "WHERE parent_hunt_id = %s AND parent_flow_id IS NULL"
cursor.execute(query, [hunt_id_int])
response = cursor.fetchone()
(count, user_sum, user_stddev, system_sum, system_stddev, network_sum,
network_stddev) = response[:7]
stats = rdf_stats.ClientResourcesStats(
user_cpu_stats=rdf_stats.RunningStats(
num=count,
sum=db_utils.MicrosToSeconds(int(user_sum or 0)),
stddev=int(user_stddev or 0) / 1e6,
),
system_cpu_stats=rdf_stats.RunningStats(
num=count,
sum=db_utils.MicrosToSeconds(int(system_sum or 0)),
stddev=int(system_stddev or 0) / 1e6,
),
network_bytes_sent_stats=rdf_stats.RunningStats(
num=count,
sum=float(network_sum or 0),
stddev=float(network_stddev or 0),
),
)
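    # Columns 7 onwards of the response hold the histogram counters emitted by
    # _BinsToQuery, laid out as three consecutive groups: user CPU bins,
    # system CPU bins, then network-bytes bins.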
offset = 7
stats.user_cpu_stats.histogram = rdf_stats.StatsHistogram()
for b_num, b_max_value in zip(
response[offset:], rdf_stats.ClientResourcesStats.CPU_STATS_BINS):
stats.user_cpu_stats.histogram.bins.append(
rdf_stats.StatsHistogramBin(range_max_value=b_max_value, num=b_num))
offset += len(rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
stats.system_cpu_stats.histogram = rdf_stats.StatsHistogram()
for b_num, b_max_value in zip(
response[offset:], rdf_stats.ClientResourcesStats.CPU_STATS_BINS):
stats.system_cpu_stats.histogram.bins.append(
rdf_stats.StatsHistogramBin(range_max_value=b_max_value, num=b_num))
offset += len(rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
stats.network_bytes_sent_stats.histogram = rdf_stats.StatsHistogram()
for b_num, b_max_value in zip(
response[offset:], rdf_stats.ClientResourcesStats.NETWORK_STATS_BINS):
stats.network_bytes_sent_stats.histogram.bins.append(
rdf_stats.StatsHistogramBin(range_max_value=b_max_value, num=b_num))
query = """
SELECT
client_id, flow_id, user_cpu_time_used_micros,
system_cpu_time_used_micros, network_bytes_sent
FROM flows
FORCE INDEX(flows_by_hunt)
WHERE parent_hunt_id = %s AND parent_flow_id IS NULL AND
(user_cpu_time_used_micros > 0 OR
system_cpu_time_used_micros > 0 OR
network_bytes_sent > 0)
ORDER BY (user_cpu_time_used_micros + system_cpu_time_used_micros) DESC
LIMIT 10
"""
cursor.execute(query, [hunt_id_int])
for cid, fid, ucpu, scpu, nbs in cursor.fetchall():
client_id = db_utils.IntToClientID(cid)
flow_id = db_utils.IntToFlowID(fid)
stats.worst_performers.append(
rdf_client_stats.ClientResources(
client_id=client_id,
session_id=rdfvalue.RDFURN(client_id).Add(flow_id),
cpu_usage=rdf_client_stats.CpuSeconds(
user_cpu_time=db_utils.MicrosToSeconds(ucpu),
system_cpu_time=db_utils.MicrosToSeconds(scpu),
),
network_bytes_sent=nbs))
return stats
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntFlowsStatesAndTimestamps(self, hunt_id, cursor=None):
"""Reads hunt flows states and timestamps."""
query = """
SELECT
flow_state, UNIX_TIMESTAMP(timestamp), UNIX_TIMESTAMP(last_update)
FROM flows
FORCE INDEX(flows_by_hunt)
WHERE parent_hunt_id = %s AND parent_flow_id IS NULL
"""
cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
result = []
for fs, ct, lup in cursor.fetchall():
result.append(
db.FlowStateAndTimestamps(
flow_state=rdf_flow_objects.Flow.FlowState.FromInt(fs),
create_time=mysql_utils.TimestampToRDFDatetime(ct),
last_update_time=mysql_utils.TimestampToRDFDatetime(lup)))
return result
@mysql_utils.WithTransaction(readonly=True)
def ReadHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
offset,
count,
with_type=None,
cursor=None):
"""Reads hunt output plugin log entries."""
query = ("SELECT client_id, flow_id, log_entry_type, message, "
"UNIX_TIMESTAMP(timestamp) "
"FROM flow_output_plugin_log_entries "
"FORCE INDEX (flow_output_plugin_log_entries_by_hunt) "
"WHERE hunt_id = %s AND output_plugin_id = %s ")
args = [
db_utils.HuntIDToInt(hunt_id),
db_utils.OutputPluginIDToInt(output_plugin_id)
]
if with_type is not None:
query += "AND log_entry_type = %s "
args.append(int(with_type))
query += "ORDER BY log_id ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
ret = []
for (client_id_int, flow_id_int, log_entry_type, message,
timestamp) in cursor.fetchall():
ret.append(
rdf_flow_objects.FlowOutputPluginLogEntry(
hunt_id=hunt_id,
client_id=db_utils.IntToClientID(client_id_int),
flow_id=db_utils.IntToFlowID(flow_id_int),
output_plugin_id=output_plugin_id,
log_entry_type=log_entry_type,
message=message,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
return ret
@mysql_utils.WithTransaction(readonly=True)
def CountHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
with_type=None,
cursor=None):
"""Counts hunt output plugin log entries."""
query = ("SELECT COUNT(*) "
"FROM flow_output_plugin_log_entries "
"FORCE INDEX (flow_output_plugin_log_entries_by_hunt) "
"WHERE hunt_id = %s AND output_plugin_id = %s ")
args = [
db_utils.HuntIDToInt(hunt_id),
db_utils.OutputPluginIDToInt(output_plugin_id)
]
if with_type is not None:
query += "AND log_entry_type = %s"
args.append(int(with_type))
cursor.execute(query, args)
return cursor.fetchone()[0]
#
# discord.py documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 21 05:43:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import logging
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'builder',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinxcontrib_trio',
'details',
'exception_hierarchy',
'attributetable',
'resourcelinks',
'nitpick_file_ignorer',
]
autodoc_member_order = 'bysource'
autodoc_typehints = 'none'
# maybe consider this?
# napoleon_attr_annotations = False
extlinks = {
'issue': ('https://github.com/Rapptz/discord.py/issues/%s', 'GH-'),
}
# Links used for cross-referencing stuff in other documentation
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
'aio': ('https://docs.aiohttp.org/en/stable/', None),
'req': ('https://docs.python-requests.org/en/latest/', None)
}
rst_prolog = """
.. |coro| replace:: This function is a |coroutine_link|_.
.. |maybecoro| replace:: This function *could be a* |coroutine_link|_.
.. |coroutine_link| replace:: *coroutine*
.. _coroutine_link: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'discord.py'
copyright = '2015-present, Rapptz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
with open('../discord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
# This assumes a tag is available for final releases
branch = 'master' if version.endswith('a') else 'v' + version
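# Illustrative examples (assuming tags exist for final releases): a version
# such as '2.0.0a' maps to branch 'master', while '1.7.3' maps to 'v1.7.3'.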
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Nitpicky mode options
nitpick_ignore_files = [
"migrating_to_async",
"migrating",
"whats_new",
]
# Ignore warnings about inconsistent order and/or count of references in
# translated messages: different languages can legitimately use a different
# word order, so these warnings are just noise here.
def _i18n_warning_filter(record: logging.LogRecord) -> bool:
return not record.msg.startswith(
(
'inconsistent references in translated message',
'inconsistent term references in translated message',
)
)
_i18n_logger = logging.getLogger('sphinx')
_i18n_logger.addFilter(_i18n_warning_filter)
# -- Options for HTML output ----------------------------------------------
html_experimental_html5_writer = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_context = {
'discord_invite': 'https://discord.gg/r3sSKJJ',
'discord_extensions': [
('discord.ext.commands', 'ext/commands'),
('discord.ext.tasks', 'ext/tasks'),
],
}
resource_links = {
'discord': 'https://discord.gg/r3sSKJJ',
'issues': 'https://github.com/Rapptz/discord.py/issues',
'discussions': 'https://github.com/Rapptz/discord.py/discussions',
'examples': f'https://github.com/Rapptz/discord.py/tree/{branch}/examples',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/discord_py_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = '_static/scorer.js'
html_js_files = [
'custom.js',
'settings.js',
'copy.js',
'sidebar.js'
]
# Output file base name for HTML help builder.
htmlhelp_basename = 'discord.pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'discord.py.tex', 'discord.py Documentation',
'Rapptz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'discord.py', 'discord.py Documentation',
['Rapptz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'discord.py', 'discord.py Documentation',
'Rapptz', 'discord.py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
if app.config.language == 'ja':
app.config.intersphinx_mapping['py'] = ('https://docs.python.org/ja/3', None)
app.config.html_context['discord_invite'] = 'https://discord.gg/nXzj3dg'
app.config.resource_links['discord'] = 'https://discord.gg/nXzj3dg'
import discord
from discord.ext import commands
import json
from sqlalchemy import create_engine, and_
from sqlalchemy.orm import sessionmaker
from db import Movelist, Base, Database_Location, Country
from contextlib import contextmanager
import datetime
description = '''Bot for receiving diplomacy commands'''
bot = commands.Bot(command_prefix='!', description=description)
userlist = []
gm = None
deadline = None
def user_is_gm(user):
with open('config.json') as data_file:
data = json.load(data_file)
gm_id = data["gm"]
return user.id == gm_id
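# The config.json file is assumed (based on the keys read here and in main())
# to look roughly like: {"gm": "<discord user id>", "token": "<bot token>"}.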
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
engine = create_engine(Database_Location)
    Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
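# Minimal usage sketch (illustrative values, not part of the original code):
#
#   with session_scope() as session:
#       session.add(Movelist(country='France', playername='alice',
#                            discord_id='1234567890', moveset=None))
#
# The context manager commits on success, rolls back on any exception, and
# always closes the session.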
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
engine = create_engine(Database_Location)
Base.metadata.create_all(engine)
print('Database created')
print('------')
populate_user_list()
print('User list created')
set_gm()
print('Set GM')
global deadline
deadline = None
def populate_user_list():
"""Populate list of users from players"""
with session_scope() as session:
for server in bot.servers: #NOTE: If the bot is in more than one server this will need something smarter
for row in session.query(Movelist):
member = discord.utils.find(lambda m: m.id == row.discord_id, server.members)
userlist.append(member)
def set_gm():
"""Find and set gm"""
with session_scope() as session:
for server in bot.servers:
for row in session.query(Movelist):
member = discord.utils.find(lambda m: m.id == row.discord_id, server.members)
if user_is_gm(member):
global gm
gm = member
return
@bot.command(pass_context=True)
async def moves(ctx):
"""Send your moves to the bot"""
if ctx.message.channel.is_private:
with session_scope() as session:
row = session.query(Movelist).filter(Movelist.discord_id == ctx.message.author.id).one()
if not row.eliminated:
                row.moveset = ctx.message.content[7:]  # Strip the leading '!moves ' prefix
await bot.say('Moves received! If you wish to change them, please resubmit them in their entirety')
#If all players have submitted moves, tell the GM
                if session.query(Movelist).filter(and_(Movelist.moveset.is_(None), Movelist.eliminated.is_(False))).count() == 0:
await bot.send_message(gm, 'All moves have been submitted')
else:
await bot.say('You have been eliminated so moves have not been recorded')
else:
await bot.say('You can only send moves in private!')
@bot.command(pass_context=True)
async def add(ctx):
"""Country Name
Add a player to the game as country"""
command, country, player = ctx.message.content.split(" ")
if country in Country.__members__:
member = discord.utils.find(lambda m: m.name == player, ctx.message.channel.server.members)
        if member is not None:
with session_scope() as session:
if session.query(Movelist).filter(Movelist.country == country).one_or_none() is None:
new_movelist = Movelist(country=country,
playername=player,
discord_id=member.id,
moveset=None)
session.add(new_movelist)
userlist.append(member)
await bot.say('Player added')
else:
await bot.say('That country has already been allocated')
else:
await bot.say('Invalid Player')
else:
await bot.say('Invalid Country')
@bot.command()
async def submitted():
"""Find out how many people have submitted moves"""
with session_scope() as session:
        total = session.query(Movelist).filter(Movelist.eliminated.is_(False)).count()
        submitted = session.query(Movelist).filter(and_(Movelist.moveset.isnot(None), Movelist.eliminated.is_(False))).count()
await bot.say(str(submitted) + "/" + str(total) + " players have submitted")
@bot.command(pass_context=True)
async def eliminate(ctx):
"""GM Only: Eliminate a country"""
if ctx.message.author == gm:
with session_scope() as session:
command, country = ctx.message.content.split(" ")
row = session.query(Movelist).filter(Movelist.country == country).one_or_none()
if row is None:
await bot.say("Invalid Country")
else:
row.eliminated = True
await bot.say('Country Eliminated')
else:
await bot.say('Only the GM can eliminate players!')
@bot.command(pass_context=True)
async def reset(ctx):
"""GM Only: Reset moves for a new turn"""
if ctx.message.author == gm:
with session_scope() as session:
for row in session.query(Movelist):
row.moveset = None
await bot.say('Moves reset')
else:
await bot.say('Only the GM can reset moves')
@bot.command(pass_context=True)
async def allmoves(ctx):
"""GM Only: Get the moves"""
if ctx.message.author == gm:
with session_scope() as session:
for country, moves in session.query(Movelist.country, Movelist.moveset).all():
msg = '```md\r __{0}__ \r {1}```'.format(country, moves)
await bot.send_message(ctx.message.author, msg)
else:
await bot.say('Only the GM can get the moves')
@bot.command(pass_context=True)
async def getmoves(ctx):
"""GM Only: Get the moves of a specific country"""
if ctx.message.author == gm:
command, target_country = ctx.message.content.split(" ")
if target_country in Country.__members__:
with session_scope() as session:
for country, moves in session.query(Movelist.country, Movelist.moveset).filter(Movelist.country==target_country):
msg = '```md\r __{0}__ \r {1}```'.format(country, moves)
await bot.send_message(ctx.message.author, msg)
else:
await bot.say('Invalid country name')
else:
await bot.say('Only the GM can get the moves')
@bot.command()
async def players():
"""Print the playerlist"""
with session_scope() as session:
playerlist = '`Playerlist`\r'
for country, playername, eliminated in session.query(Movelist.country, Movelist.playername, Movelist.eliminated).all():
player = '{0} - {1}'.format(country, playername)
if eliminated:
player = '~~{0}~~'.format(player)
playerlist += player + '\r'
await bot.say(playerlist)
@bot.command(pass_context=True)
async def setdeadline(ctx):
"""GM Only: Set the deadline in UTC time
    Format is %d %B %Y %H:%M (e.g. 07 May 2018 23:59)
Pass with no parameters to cancel deadline"""
if ctx.message.author == gm:
global deadline
msg = ctx.message.content.split(" ")
if len(msg) == 1:
# No parameters, cancel deadline
deadline = None
await bot.say('Deadline cancelled')
else:
input_deadline = ''.join(msg[1::])
try:
naive_time = datetime.datetime.strptime(input_deadline,'%d%B%Y%H:%M')
aware_time = naive_time.replace(tzinfo=datetime.timezone.utc)
if aware_time < datetime.datetime.now(datetime.timezone.utc):
await bot.say('ERROR: Deadline is in the past')
else:
deadline = aware_time
await bot.say('Deadline set')
except ValueError:
await bot.say('Invalid deadline')
else:
await bot.say('Only the GM can set the deadline')
@bot.command(name='deadline')
async def get_deadline():
    """Get the deadline"""
    if deadline is None:
await bot.say('Deadline not set')
else:
time_difference_seconds = (deadline - datetime.datetime.now(datetime.timezone.utc)).total_seconds()
h = int(time_difference_seconds//3600)
m = int((time_difference_seconds%3600)//60)
await bot.say('Deadline is at {0} UTC ({1} hours, {2} minutes)'.format(deadline.strftime('%d %B %H:%M'), h, m))
def main():
with open('config.json') as data_file:
data = json.load(data_file)
bot.run(data["token"])
if __name__ == "__main__":
main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Aqueous Helgeson Equation of State with Bromley activity model
This file implements an aqueous equation of state (EOS) named the
Helgeson EOS with Bromley activity model. It is specifically
the version of the that EOS that is described in Jager et. al. (2003),
which is linked in the readme.md file. The file consists of a single
class, 'HegBromEos' that will take as arguments a list of components
and a pressure and a temperature. Pressure and temperature can be
modified after an instance of HegBromEos is created; however, the number
of components and actual component list cannot. The method 'calc' is the
main calculation of the class, which uses other methods to determine
the partial fugacity of each component given mole fractions, pressure,
and temperature.
Functions
----------
pure_water_vol_intgrt :
Calculates integrated change in the volume of pure water from P_0 to P
at fixed T.
pure_water_vol :
Calculates volume of pure water at P and T.
dielectric_const :
Calculates dielectric constant of pure water at P and T.
molality :
Calculates molality of each solute in the aqueous phase.
solute_vol_integrated:
Calculates volume of each component as a solute.
"""
import numpy as np
# Constants for EOS
R = 8.3144621 # Gas constant in J/mol-K
T_0 = 298.15 # Reference temperature in K
P_0 = 1 # Reference pressure in bar
# Constants for solute model
theta = 228.0
psi = 2600
s10 = 243.9576
s11 = -0.7520846
s12 = 6.60648e-4
s20 = 0.039037
s21 = -2.12309e-4
s22 = 3.18021e-7
s30 = -1.0126e-5
s31 = 6.04961e-8
s32 = -9.3334e-11
# Constants produced by symbolic integration for the h_ast function
f1 = 73786976294838206464
f2 = 8151985141053725
f3 = 3249460376862603
f4 = 9444732965739290427392
f5 = 4722366482869645213696
f6 = 1043454098054876800
# Constants for pure water
gw_pure = -237129
hw_pure = -285830
cp_a0 = R * 8.712
cp_a1 = R * 1e-2 * 0.125
cp_a2 = R * 1e-5 * -0.018
cp_a3 = 0
a10 = 31.1251
a11 = -1.14154e-1
a12 = 3.10034e-4
a13 = -2.48318e-7
a20 = -2.46176e-2
a21 = 2.15663e-4
a22 = -6.48160e-7
a23 = 6.47521e-10
a30 = 8.69425e-6
a31 = -7.96939e-8
a32 = 2.45391e-10
a33 = -2.51773e-13
a40 = -6.03348e-10
a41 = 5.57791e-12
a42 = -1.72577e-14
a43 = 1.77978e-17
"""The above global variables may be used throughout the calculation."""
def pure_water_vol_intgrt(T, P):
"""Volume of pure water integrate wrt pressure.
Parameters
----------
T : float
Temperature in Kelvin.
P : float
Pressure in bar.
Returns
----------
v_w : float
Volume of water integrated in cm^3 - bar.
"""
v_w = ((a10 * P + a20 * P ** 2 / 2 + a30 * P ** 3 / 3 + a40 * P ** 4 / 4)
+ (a11 * P + a21 * P ** 2 / 2 + a31 * P ** 3 / 3 + a41 * P ** 4 / 4) * T
+ (a12 * P + a22 * P ** 2 / 2 + a32 * P ** 3 / 3 + a42 * P ** 4 / 4) * T ** 2
+ (a13 * P + a23 * P ** 2 / 2 + a33 * P ** 3 / 3 + a43 * P ** 4 / 4) * T ** 3)
return v_w
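# Note: pure_water_vol_intgrt above is the antiderivative with respect to P of
# pure_water_vol below, so differentiating its expression in P recovers the
# volume polynomial.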
def pure_water_vol(T, P):
"""Volume of pure water.
Parameters
----------
T : float
Temperature in Kelvin.
P : float
Pressure in bar.
Returns
----------
v_w : float
Volume of water in cm^3.
"""
v_w = ((a10 + a20 * P + a30 * P ** 2 + a40 * P ** 3)
+ (a11 + a21 * P + a31 * P ** 2 + a41 * P ** 3) * T
+ (a12 + a22 * P + a32 * P ** 2 + a42 * P ** 3) * T ** 2
+ (a13 + a23 * P + a33 * P ** 2 + a43 * P ** 3) * T ** 3)
return v_w
def dielectric_const(T, P):
"""Dielectric constant of pure water.
Parameters
----------
T : float
Temperature in Kelvin.
P : float
Pressure in bar.
Returns
----------
eps : float
        Dielectric constant of water (no units).
"""
eps = ((s10 + s20 * P + s30 * P ** 2)
+ (s11 + s21 * P + s31 * P ** 2) * T
+ (s12 + s22 * P + s32 * P ** 2) * T ** 2)
return eps
def molality(xc, xw):
"""Calculates molality of each solute in the aqueous phase.
Parameters
----------
xc : float
Mole fraction of component.
xw : float
Mole fraction water.
Returns
----------
float
Molality of component in mol[component] / kg[water].
"""
return xc / (xw * 0.018015)
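# Worked example (illustrative numbers): for x_CO2 = 0.01 and x_w = 0.99,
# molality(0.01, 0.99) = 0.01 / (0.99 * 0.018015), approximately 0.56 mol/kg.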
def solute_vol_integrated(comp, T, P):
"""Volume of solute integrated wrt pressure.
Parameters
----------
comp : object
Instance of Component class for each component
T : float
Temperature at initialization in Kelvin.
P : float
Pressure at initialization in bar.
Returns
----------
v_ast_P : float
Volume of solute integrated wrt pressure in cm^3 - bar
"""
omega = comp.AqHB['omega_born']
v1 = comp.AqHB['v']['v1']
v2 = comp.AqHB['v']['v2']
v3 = comp.AqHB['v']['v3']
v4 = comp.AqHB['v']['v4']
tau = ((5.0 / 6.0) * T - theta) / (1.0 + np.exp((T - 273.15) / 5.0))
v_ast_P = (
(v1 * P + v2 * np.log(psi + P)
+ (v3 * P + v4 * np.log(psi + P)) * (1.0 / (T - theta - tau))
+ omega / dielectric_const(T, P)) / (R * T)
)
return v_ast_P
class HegBromEos(object):
"""The main class for this EOS that perform various calculations.
Methods
----------
make_constant_mats :
Performs calculations that only depend on pressure and temperature.
fugacity :
Calculates fugacity of each component in the aqueous phase.
calc:
Main calculation for aqueous phase EOS.
"""
def __init__(self, comps, T, P):
"""Aqueous EOS object for fugacity calculations.
Parameters
----------
comps : list
List of components as 'Component' objects created with
'component_properties.py'.
T : float
Temperature at initialization in Kelvin.
P : float
Pressure at initialization in bar.
Attributes
----------
water_ind : int
            Index of the water component in all lists.
comps : list
List of 'Component' classes passed into 'HegBromEos'.
comp_names : list
List of components names.
num_comps : int
Number of components.
T : float
Temperature at initialization in Kelvin.
P : float
Pressure at initialization in bar.
g_io_vec : numpy array
Pre-allocated array for gibbs energy of each component
in ideal gas state.
molality_vec : numpy array
Pre-allocated array for molality of each component.
activity_vec : numpy array
Pre-allocated array for activity of each component
in Bromley activity model.
gamma_p1_vec : numpy array
Pre-allocated array for gamma_{p1} variable of each
component in Bromley activity model.
mu_ik_RT_vec : numpy array
Pre-allocated array chemical potential of each component.
"""
try:
self.water_ind = [ii for ii, x in enumerate(comps)
if x.compname == 'h2o'][0]
        except IndexError:
raise RuntimeError(
"""Aqueous EOS requires water to be present!
\nPlease provide water in your component list.""")
self.comps = comps
self.comp_names = [x.compname for x in comps]
self.num_comps = len(comps)
self.T = T
self.P = P
self.g_io_vec = np.zeros(self.num_comps)
self.molality_vec = np.zeros(self.num_comps)
self.activity_vec = np.zeros(self.num_comps)
self.gamma_p1_vec = np.zeros(self.num_comps)
self.mu_ik_rt_cons = np.zeros(self.num_comps)
self.make_constant_mats(comps, T, P)
def make_constant_mats(self, comps, T, P):
"""Portion of calculation that only depends on P and T.
Parameters
----------
comps : list
List of components as 'Component' objects created with
'component_properties.py'.
T : float
Temperature in Kelvin.
P : float
Pressure in bar.
Notes
----------
Calculation assumes that pressure and temperature won't change
upon successive iteration of EOS. Instead, the calculation will
adjust molar fractions of each component at a fixed T and P.
        However, if T and P do change, it will recalculate these
constants.
"""
self.T = T
self.P = P
for ii, comp in enumerate(comps):
self.g_io_vec[ii] = comp.gibbs_ideal(T, P)
if comp.compname != 'h2o':
c1 = comp.AqHB['cp']['c1']
c2 = comp.AqHB['cp']['c2']
omega = comp.AqHB['omega_born']
h_io_ast = comp.h_io_ast
# Output of symbolic integration.
h_ast = (np.log(T) * (f1 * c1 + f2 * omega) / (f1 * R)
- (np.log(T_0) * (f1 * c1 + f2 * omega)) / (f1 * R)
- (f3 * T * omega) / (f4 * R) + (f3 * T_0 * omega) / (f4 * R)
- (f5 * T_0 * c2
- T_0 * (f4 * c2 + f4 * T_0 * h_io_ast - f4 * T_0 ** 2 * c1
- f6 * T_0 ** 2 * omega + f3 * T_0 ** 3 * omega)
) / (f4 * R * T_0 ** 3)
+ (f5 * T_0 * c2
- T * (f4 * c2 + f4 * T_0 * h_io_ast - f4 * T_0 ** 2 * c1
- f6 * T_0 ** 2 * omega + f3 * T_0 ** 3 * omega)
) / (f4 * R * T ** 2 * T_0))
self.mu_ik_rt_cons[ii] = (
comp.g_io_ast / (R * T_0) - h_ast
+ solute_vol_integrated(comp, T, P)
- solute_vol_integrated(comp, T, P_0)
)
if comp.compname == 'co2':
self.gamma_p1_vec[ii] = (0.107 - 4.5e-4 * T)
else:
self.mu_ik_rt_cons[ii] = (
gw_pure / (R * T_0)
- (12 * T * hw_pure - 12 * T_0 * hw_pure + 12 * T_0 ** 2 * cp_a0
+ 6 * T_0 ** 3 * cp_a1 + 4 * T_0 ** 4 * cp_a2 + 3 * T_0 ** 5 * cp_a3
- 12 * T * T_0 * cp_a0 - 12 * T * T_0 ** 2 * cp_a1
+ 6 * T ** 2 * T_0 * cp_a1 - 6 * T * T_0 ** 3 * cp_a2
+ 2 * T ** 3 * T_0 * cp_a2 - 4 * T * T_0 ** 4 * cp_a3
+ T ** 4 * T_0 * cp_a3 + 12 * T * T_0 * cp_a0 * np.log(T)
- 12 * T * T_0 * cp_a0 * np.log(T_0)) / (12 * R * T * T_0)
+ (pure_water_vol_intgrt(T, P)
- pure_water_vol_intgrt(T, P_0)) * 1e-1 / (R * T)
)
def fugacity(self, comps, x):
"""Fugacity of each component in aqueous phase for molar fractions 'x'.
Parameters
----------
comps : list
List of components as 'Component' classes.
x : list, numpy array
            Molar fractions of each component, indexed in the same order
as comps.
Returns
----------
fug : numpy array
Fugacity of each component in aqueous phase.
"""
xw = x[self.water_ind]
for ii, comp in enumerate(comps):
if comp.compname != 'h2o':
self.molality_vec[ii] = molality(x[ii], xw)
self.activity_vec[ii] = (
np.log(self.molality_vec[ii])
+ 2.0 * self.molality_vec[ii] * self.gamma_p1_vec[ii]
)
self.activity_vec[self.water_ind] = (
np.sum(-0.018015 * (self.molality_vec ** 2 * self.gamma_p1_vec
+ self.molality_vec))
)
mu_ik_RT = self.mu_ik_rt_cons + self.activity_vec
fug = np.exp(mu_ik_RT - self.g_io_vec)
return fug
def calc(self, comps, T, P, x):
"""Main calculation for the EOS which returns array of fugacities
Parameters
----------
comps : list
List of components as 'Component' classes.
T : float
Temperature in Kelvin.
P : float
Pressure in bar.
x : list, numpy array
            Molar fractions of each component, indexed in the same order
as comps.
Returns
----------
fug : numpy array
Fugacity of each component in aqueous phase.
"""
if len(x) != len(comps):
if len(x) > len(comps):
raise RuntimeError("""Length of mole fraction vector 'x'
exceeds number of components!""")
elif not x:
raise RuntimeError("Mole fraction vector 'x' is empty!")
else:
raise RuntimeError("""Mole fraction vector 'x' contains less
values than component length!""")
if comps != self.comps:
print("""Warning: Action not supported.
\n Number of_components have changed.
\n Please create a new fugacity object.""")
return None
else:
# Re-calculate constants if pressure or temperature changes.
if self.T != T or self.P != P:
self.make_constant_mats(comps, T, P)
fug = self.fugacity(comps, x)
return fug
from __future__ import absolute_import
import mock
import pytest
import six
from sentry.integrations.example.integration import ExampleIntegration
from sentry.models import (
GroupAssignee, Activity, Integration, GroupLink, ExternalIssue,
OrganizationIntegration, sync_group_assignee_inbound
)
from sentry.testutils import TestCase
class GroupAssigneeTestCase(TestCase):
def test_constraints(self):
# Can't both be assigned
with pytest.raises(AssertionError):
GroupAssignee.objects.create(
group=self.group,
project=self.group.project,
user=self.user,
team=self.team,
)
# Can't have nobody assigned
with pytest.raises(AssertionError):
GroupAssignee.objects.create(
group=self.group,
project=self.group.project,
user=None,
team=None,
)
def test_assign_user(self):
GroupAssignee.objects.assign(self.group, self.user)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user=self.user,
team__isnull=True,
).exists()
activity = Activity.objects.get(
project=self.group.project,
group=self.group,
type=Activity.ASSIGNED,
)
assert activity.data['assignee'] == six.text_type(self.user.id)
assert activity.data['assigneeEmail'] == self.user.email
assert activity.data['assigneeType'] == 'user'
def test_assign_team(self):
GroupAssignee.objects.assign(self.group, self.team)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
team=self.team,
user__isnull=True,
).exists()
activity = Activity.objects.get(
project=self.group.project,
group=self.group,
type=Activity.ASSIGNED,
)
assert activity.data['assignee'] == six.text_type(self.team.id)
assert activity.data['assigneeEmail'] is None
assert activity.data['assigneeType'] == 'team'
def test_reassign_user_to_team(self):
GroupAssignee.objects.assign(self.group, self.user)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user=self.user,
team__isnull=True,
).exists()
GroupAssignee.objects.assign(self.group, self.team)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
team=self.team,
user__isnull=True,
).exists()
activity = list(Activity.objects.filter(
project=self.group.project,
group=self.group,
type=Activity.ASSIGNED,
).order_by('id'))
assert activity[0].data['assignee'] == six.text_type(self.user.id)
assert activity[0].data['assigneeEmail'] == self.user.email
assert activity[0].data['assigneeType'] == 'user'
assert activity[1].data['assignee'] == six.text_type(self.team.id)
assert activity[1].data['assigneeEmail'] is None
assert activity[1].data['assigneeType'] == 'team'
@mock.patch.object(ExampleIntegration, 'sync_assignee_outbound')
def test_assignee_sync_outbound_assign(self, mock_sync_assignee_outbound):
group = self.group
integration = Integration.objects.create(
provider='example',
external_id='123456',
)
integration.add_organization(group.organization.id)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id,
integration_id=integration.id,
key='APP-123',
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
with self.feature('organizations:internal-catchall'):
with self.tasks():
GroupAssignee.objects.assign(self.group, self.user)
mock_sync_assignee_outbound.assert_called_with(
external_issue, self.user, assign=True)
assert GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user=self.user,
team__isnull=True,
).exists()
activity = Activity.objects.get(
project=self.group.project,
group=self.group,
type=Activity.ASSIGNED,
)
assert activity.data['assignee'] == six.text_type(self.user.id)
assert activity.data['assigneeEmail'] == self.user.email
assert activity.data['assigneeType'] == 'user'
@mock.patch.object(ExampleIntegration, 'sync_assignee_outbound')
def test_assignee_sync_outbound_unassign(self, mock_sync_assignee_outbound):
group = self.group
integration = Integration.objects.create(
provider='example',
external_id='123456',
)
integration.add_organization(group.organization.id)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id,
integration_id=integration.id,
key='APP-123',
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
GroupAssignee.objects.assign(self.group, self.user)
with self.feature('organizations:internal-catchall'):
with self.tasks():
GroupAssignee.objects.deassign(self.group)
mock_sync_assignee_outbound.assert_called_with(external_issue, None, assign=False)
assert not GroupAssignee.objects.filter(
project=self.group.project,
group=self.group,
user=self.user,
team__isnull=True,
).exists()
assert Activity.objects.filter(
project=self.group.project,
group=self.group,
type=Activity.UNASSIGNED,
).exists()
def test_assignee_sync_inbound_assign(self):
group = self.group
user_no_access = self.create_user()
user_w_access = self.user
integration = Integration.objects.create(
provider='example',
external_id='123456',
)
integration.add_organization(group.organization.id)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id,
integration_id=integration.id,
key='APP-123',
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
# no permissions
groups_updated = sync_group_assignee_inbound(
integration, user_no_access.email, 'APP-123'
)
assert not groups_updated
# w permissions
groups_updated = sync_group_assignee_inbound(
integration, user_w_access.email, 'APP-123'
)
assert groups_updated[0] == group
assert GroupAssignee.objects.filter(
project=group.project,
group=group,
user=user_w_access,
team__isnull=True,
).exists()
def test_assignee_sync_inbound_deassign(self):
group = self.group
integration = Integration.objects.create(
provider='example',
external_id='123456',
)
integration.add_organization(group.organization.id)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id,
integration_id=integration.id,
key='APP-123',
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
GroupAssignee.objects.assign(group, self.user)
groups_updated = sync_group_assignee_inbound(
integration, self.user.email, 'APP-123', assign=False,
)
assert groups_updated[0] == group
assert not GroupAssignee.objects.filter(
project=group.project,
group=group,
user=self.user,
team__isnull=True,
).exists()
|
|
import math
import os
import pickle
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as p9
import typer
from pedroai.io import read_json, write_json
from pedroai.plot import theme_pedroai
from rich.console import Console
from rich.progress import track
from qanta.datasets.quiz_bowl import QuizBowlDataset
from qanta.guesser.abstract import AbstractGuesser
from qanta.reporting.curve_score import CurveScore
from qanta.util.constants import QANTA_MAPPED_DATASET_PATH
console = Console()
app = typer.Typer()
@app.command()
def export(output_file: str, fold: str = "buzztest"):
fold = "buzztest"
guesses_dir = AbstractGuesser.output_path("qanta.guesser.rnn", "RnnGuesser", 0, "")
guesses_dir = AbstractGuesser.guess_path(guesses_dir, fold, "char")
with open(guesses_dir, "rb") as f:
guesses = pickle.load(f)
guesses = guesses.groupby("qanta_id")
questions = QuizBowlDataset(buzzer_train=True).questions_by_fold()
questions = {q.qanta_id: q for q in questions[fold]}
buzzers = {}
for name in ["RNNBuzzer", "ThresholdBuzzer", "MLPBuzzer"]:
model_dir = f"output/buzzer/{name}"
buzzes_dir = os.path.join(model_dir, "{}_buzzes.pkl".format(fold))
with open(buzzes_dir, "rb") as f:
buzzers[name] = pickle.load(f)
qid_to_buzzes = defaultdict(dict)
for name, buzzes in track(buzzers.items()):
for qid, (char_indices, scores) in buzzes.items():
gs = (
guesses.get_group(qid)
.groupby("char_index")
.aggregate(lambda x: x.head(1))
.to_dict()["guess"]
)
question = questions[qid]
q_len = len(question.text)
buzz_oracle_position = -1
buzz_model_position = -1
oracle_guess = None
buzz_guess = None
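            # Walk the candidate buzz positions: the oracle buzzes at the
            # earliest position where the top guess is already correct,
            # while the model buzzes at the earliest position where its
            # buzz score beats its wait score (reading scores[i][1] as the
            # buzz score and scores[i][0] as the wait score).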
for i, char_index in enumerate(char_indices):
buzz_oracle = gs[char_index] == question.page
if buzz_oracle:
if buzz_oracle_position == -1 or char_index <= buzz_oracle_position:
oracle_guess = question.page
buzz_oracle_position = char_index
if scores[i][1] > scores[i][0]:
if buzz_model_position == -1 or char_index < buzz_model_position:
buzz_guess = gs[char_index]
buzz_model_position = char_index
qid_to_buzzes[qid][name] = {
"oracle": buzz_oracle_position,
"oracle_fraction": buzz_oracle_position / q_len
if buzz_oracle_position != -1
else -1,
"position": buzz_model_position,
"position_fraction": buzz_model_position / q_len
if buzz_model_position != -1
else -1,
"q_len": q_len,
"oracle_guess": oracle_guess,
"buzz_guess": buzz_guess,
"answer": question.page,
"impossible": oracle_guess is None,
}
write_json(output_file, qid_to_buzzes)
HUMAN = r"\tdiamond{{{}}}"
RNN = r"\tcircle{{{}}}"
MLP = r"\tsquare{{{}}}"
THRESHOLD = r"\ttriangle{{{}}}"
CORRECT = "cgreen"
WRONG = "cred"
NAME_TO_SYMBOL = {
"human": HUMAN,
"oracle": "*",
"RNNBuzzer": RNN,
"MLPBuzzer": MLP,
"ThresholdBuzzer": THRESHOLD,
}
TEMPLATE = r"""
%s\\
\textbf{Answer:} \underline{%s}
"""
@app.command()
def latex(qid: int, buzz_file: str, output_file: str):
questions = {
q["qanta_id"]: q for q in read_json(QANTA_MAPPED_DATASET_PATH)["questions"]
}
buzzes = read_json(buzz_file)
proto_df = pd.read_hdf("data/external/datasets/protobowl/protobowl-042818.log.h5")
computer_buzzes = buzzes[str(qid)]
proto_id = questions[qid]["proto_id"]
human_buzzes = proto_df[proto_df.qid == proto_id]
answer = questions[qid]["page"]
question_buzzes = [
{
"name": "oracle",
"fraction": computer_buzzes["RNNBuzzer"]["oracle_fraction"],
"answer": answer,
"guess": computer_buzzes["RNNBuzzer"]["oracle_guess"],
"correct": answer == computer_buzzes["RNNBuzzer"]["oracle_guess"],
}
]
for name, computer_buzz in buzzes[str(qid)].items():
question_buzzes.append(
{
"name": name,
"fraction": computer_buzz["position_fraction"],
"answer": answer,
"guess": computer_buzz["buzz_guess"],
"correct": computer_buzz["answer"] == computer_buzz["buzz_guess"],
}
)
for row in human_buzzes.itertuples():
question_buzzes.append(
{
"name": "human",
"fraction": row.buzzing_position,
"answer": answer,
"guess": row.guess,
"correct": row.result,
}
)
tex_df = pd.DataFrame(question_buzzes)
text = questions[qid]["text"]
q_len = len(text)
tex_df["char"] = tex_df["fraction"].map(lambda f: math.floor(f * q_len))
char_to_symbols = {}
for row in tex_df.itertuples():
shape = NAME_TO_SYMBOL[row.name]
position = row.char
if row.correct:
color = CORRECT
else:
color = WRONG
colored_shape = shape.format(color)
if position in char_to_symbols:
char_to_symbols[position].append(colored_shape)
else:
char_to_symbols[position] = [colored_shape]
characters = list(text)
out_chars = []
for idx in range(len(characters) - 1, -1, -1):
out_chars.append(characters[idx])
if idx in char_to_symbols:
for symbol in char_to_symbols[idx]:
out_chars.append(symbol)
out_chars.reverse()
out_text = "".join(out_chars)
tex_out = TEMPLATE % (out_text, answer.replace("_", " "))
console.log(buzzes[str(qid)])
console.log(
human_buzzes.drop(columns=["date", "qid"]).sort_values("buzzing_position")
)
with open(output_file, "w") as f:
f.write(tex_out)
@app.command()
def plot_empirical_buzz():
proto_df = pd.read_hdf("data/external/datasets/protobowl/protobowl-042818.log.h5")
dataset = read_json(QANTA_MAPPED_DATASET_PATH)
questions = {q["qanta_id"]: q for q in dataset["questions"]}
folds = {
q["proto_id"]: q["fold"]
for q in questions.values()
if q["proto_id"] is not None
}
proto_df["fold"] = proto_df["qid"].map(lambda x: folds[x] if x in folds else None)
proto_df["n"] = 1
buzztest_df = proto_df[proto_df.fold == "buzztest"]
play_counts = (
buzztest_df.groupby("qid")
.count()
.reset_index()
.sort_values("fold", ascending=False)
)
qid_to_counts = {r.qid: r.n for r in play_counts.itertuples()}
popular_questions = play_counts.qid.tolist()
curve = CurveScore()
x = np.linspace(0, 1, 100)
y = [curve.get_weight(n) for n in x]
curve_df = pd.DataFrame({"buzzing_position": x, "result": y})
curve_df["qid"] = "Expected Wins Curve Score"
curve_df["source"] = "Curve Score | Average"
proto_ids = popular_questions[:10]
frames = []
for proto_id in proto_ids:
plays = buzztest_df[buzztest_df.qid == proto_id].sort_values("buzzing_position")
plays = plays[plays.result != "prompt"]
plays["result"] = plays["result"].astype(int)
frames.append(plays)
sample_df = pd.concat(frames)
rows = []
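    # For each question, walk its plays in buzzing-position order while
    # keeping a running count of opponents who answered correctly; the
    # empirical probability of winning at a position is one minus that
    # running fraction (p_win = 1 - p_opp_correct below).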
for qid, group_df in sample_df.groupby("qid"):
n_opp_correct = 0
n_opp_total = 0
n = qid_to_counts[qid]
rows.append(
{
"buzzing_position": 0,
"n_opp_correct": 0,
"n_opp_total": 1,
"qid": f"Question with {n} Plays",
"source": "Single Question",
"n_plays": n,
}
)
for r in group_df.itertuples():
if r.result == 1:
n_opp_correct += 1
n_opp_total += 1
rows.append(
{
"buzzing_position": r.buzzing_position,
"n_opp_correct": n_opp_correct,
"n_opp_total": n_opp_total,
"qid": f"Question with {n} Plays",
"source": "Single Question",
"n_plays": n,
}
)
n_opp_correct = 0
n_opp_total = 0
for r in sample_df.sort_values("buzzing_position").itertuples():
if r.result == 1:
n_opp_correct += 1
n_opp_total += 1
rows.append(
{
"buzzing_position": r.buzzing_position,
"n_opp_correct": n_opp_correct,
"n_opp_total": n_opp_total,
"qid": "Average of Most Played",
"source": "Curve Score | Average",
}
)
df = pd.DataFrame(rows)
df["p_opp_correct"] = df["n_opp_correct"] / df["n_opp_total"]
df["p_win"] = 1 - df["p_opp_correct"]
df["result"] = df["p_win"]
def order(c):
if c.startswith("Expected"):
return -1000
elif c.startswith("Average"):
return -999
elif c.startswith("Question with"):
return -int(c.split()[2])
else:
return 1000
categories = list(set(df.qid.tolist()) | set(curve_df.qid.tolist()))
categories = sorted(categories, key=order)
categories = pd.CategoricalDtype(categories, ordered=True)
df["qid"] = df["qid"].astype(categories)
cmap = plt.get_cmap("tab20")
colors = [matplotlib.colors.to_hex(c) for c in cmap.colors]
filter_df = df[df.n_opp_total > 4]
chart = (
p9.ggplot(filter_df, p9.aes(x="buzzing_position", y="result", color="qid"),)
+ p9.geom_line(
p9.aes(linetype="source"),
data=filter_df[filter_df.source.map(lambda s: s.startswith("Curve"))],
size=2,
)
+ p9.geom_line(
p9.aes(linetype="source"),
data=filter_df[filter_df.source.map(lambda s: not s.startswith("Curve"))],
size=0.5,
)
+ p9.geom_line(
p9.aes(x="buzzing_position", y="result", linetype="source"),
data=curve_df,
size=2,
)
+ p9.labs(
x="Position in Question (%)",
y="Empirical Probability of Winning",
linetype="Data Type",
color="Data Source",
)
+ p9.guides(size=False)
+ p9.scale_color_manual(values=colors)
+ theme_pedroai()
+ p9.theme(legend_position="right")
)
chart.save("output/empirical_buzz.pdf")
if __name__ == "__main__":
app()
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import threading
import time
from six.moves import queue as Queue
from rally.common import logging
from rally.common import utils
from rally import consts
from rally.task import runner
LOG = logging.getLogger(__name__)
def _worker_process(queue, iteration_gen, timeout, rps, times,
max_concurrent, context, cls, method_name,
args, aborted, info):
"""Start scenario within threads.
    Spawn N threads per second. Each thread runs the scenario once and
    appends the result to the queue. At most max_concurrent threads will
    be run concurrently.
:param queue: queue object to append results
:param iteration_gen: next iteration number generator
:param timeout: operation's timeout
:param rps: number of scenario iterations to be run per one second
:param times: total number of scenario iterations to be run
:param max_concurrent: maximum worker concurrency
:param context: scenario context object
:param cls: scenario class
:param method_name: scenario method name
:param args: scenario args
:param aborted: multiprocessing.Event that aborts load generation if
the flag is set
    :param info: info about the total process count and a counter of
                 started processes
"""
pool = collections.deque()
sleep = 1.0 / rps
runner._log_worker_info(times=times, rps=rps, timeout=timeout,
cls=cls, method_name=method_name, args=args)
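    # Stagger each worker's start uniformly within one inter-request
    # interval so that the aggregate request rate over all processes
    # approximates the configured rps from the first iteration onwards.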
time.sleep(
(sleep * info["processes_counter"]) / info["processes_to_start"])
start = time.time()
timeout_queue = Queue.Queue()
if timeout:
collector_thr_by_timeout = threading.Thread(
target=utils.timeout_thread,
args=(timeout_queue, )
)
collector_thr_by_timeout.start()
i = 0
while i < times and not aborted.is_set():
scenario_context = runner._get_scenario_context(context)
scenario_args = (next(iteration_gen), cls, method_name,
scenario_context, args)
worker_args = (queue, scenario_args)
thread = threading.Thread(target=runner._worker_thread,
args=worker_args)
i += 1
thread.start()
if timeout:
timeout_queue.put((thread.ident, time.time() + timeout))
pool.append(thread)
time_gap = time.time() - start
real_rps = i / time_gap if time_gap else "Infinity"
LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
(i, real_rps, rps))
# try to join latest thread(s) until it finished, or until time to
# start new thread (if we have concurrent slots available)
while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
if pool:
pool[0].join(0.001)
                if not pool[0].is_alive():
pool.popleft()
else:
time.sleep(0.001)
while pool:
thr = pool.popleft()
thr.join()
if timeout:
timeout_queue.put((None, None,))
collector_thr_by_timeout.join()
@runner.configure(name="rps")
class RPSScenarioRunner(runner.ScenarioRunner):
"""Scenario runner that does the job with specified frequency.
    Every single benchmark scenario iteration is executed with a specified
    frequency (runs per second) in a pool of processes. The scenario is
    launched a fixed total number of times (specified in the config).
    An example of an rps scenario is booting one VM per second. This
    execution type is thus very helpful for understanding the maximal load
    that a certain cloud can handle.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string"
},
"times": {
"type": "integer",
"minimum": 1
},
"rps": {
"type": "number",
"exclusiveMinimum": True,
"minimum": 0
},
"timeout": {
"type": "number",
},
"max_concurrency": {
"type": "integer",
"minimum": 1
},
"max_cpu_count": {
"type": "integer",
"minimum": 1
}
},
"additionalProperties": False
}
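    # An illustrative runner config accepted by this schema (values are
    # made up for the example):
    #     {"type": "rps", "times": 100, "rps": 10.5,
    #      "timeout": 30, "max_concurrency": 20, "max_cpu_count": 4}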
def _run_scenario(self, cls, method_name, context, args):
"""Runs the specified benchmark scenario with given arguments.
        Every single benchmark scenario iteration is executed with a
        specified frequency (runs per second) in a pool of processes. The
        scenario is launched a fixed total number of times (specified in
        the config).
:param cls: The Scenario class where the scenario is implemented
:param method_name: Name of the method that implements the scenario
:param context: Benchmark context that contains users, admin & other
information, that was created before benchmark started.
:param args: Arguments to call the scenario method with
        :returns: List of results for each single scenario iteration,
                  where each result is a dictionary
"""
times = self.config["times"]
timeout = self.config.get("timeout", 0) # 0 means no timeout
iteration_gen = utils.RAMInt()
cpu_count = multiprocessing.cpu_count()
max_cpu_used = min(cpu_count,
self.config.get("max_cpu_count", cpu_count))
processes_to_start = min(max_cpu_used, times,
self.config.get("max_concurrency", times))
rps_per_worker = float(self.config["rps"]) / processes_to_start
times_per_worker, times_overhead = divmod(times, processes_to_start)
# Determine concurrency per worker
concurrency_per_worker, concurrency_overhead = divmod(
self.config.get("max_concurrency", times), processes_to_start)
self._log_debug_info(times=times, timeout=timeout,
max_cpu_used=max_cpu_used,
processes_to_start=processes_to_start,
rps_per_worker=rps_per_worker,
times_per_worker=times_per_worker,
times_overhead=times_overhead,
concurrency_per_worker=concurrency_per_worker,
concurrency_overhead=concurrency_overhead)
result_queue = multiprocessing.Queue()
def worker_args_gen(times_overhead, concurrency_overhead):
"""Generate arguments for process worker.
            Remaining threads from the per-process division are distributed
            across the process workers one extra thread each, until the
            remainder reaches zero. The same logic applies to the
            concurrency overhead.
:param times_overhead: remaining number of threads to be
distributed to workers
:param concurrency_overhead: remaining number of maximum
concurrent threads to be distributed
to workers
"""
while True:
yield (result_queue, iteration_gen, timeout, rps_per_worker,
times_per_worker + (times_overhead and 1),
concurrency_per_worker + (concurrency_overhead and 1),
context, cls, method_name, args, self.aborted)
if times_overhead:
times_overhead -= 1
if concurrency_overhead:
concurrency_overhead -= 1
process_pool = self._create_process_pool(
processes_to_start, _worker_process,
worker_args_gen(times_overhead, concurrency_overhead))
self._join_processes(process_pool, result_queue)
|
|
# Copyright (c) 2013, RedJack, LLC.
# All rights reserved.
#
# Please see the COPYING file in this distribution for license details.
from __future__ import print_function
import argparse
import codecs
import os.path
from pkg_resources import resource_listdir, resource_string
import re
import select
import sys
EXIT_SUCCESS = 0
EXIT_ERROR = 1
EXIT_ARGUMENT_ERROR = 2
LICENSES = []
for f in sorted(resource_listdir(__name__, '.')):
match = re.match(r'template-([A-Za-z0-9_]+).txt', f)
if match:
LICENSES.append(match.groups()[0])
# To extend language formatting support with a new language, add an item to
# the LANGS dict:
#     'language_suffix': 'comment_name'
# where 'language_suffix' is the file suffix of your language and
# 'comment_name' is one of the comment types supported and listed in
# LANG_CMT:
#     text : no comment
#     c    : /* * */
#     unix : #
#     lua  : --- --
# If you want to add a new comment type, just add an item to LANG_CMT:
#     'comment_name': [u'string', u'string', u'string']
# where the first string opens the multiline comment, the second string
# prefixes every line of the license, and the last string closes the
# multiline comment; then associate your language and source file suffix
# with your new comment type as explained above.
# EXAMPLE:
#     LANG_CMT = {'c': [u'/*', u'*', u'*/']}
#     LANGS = {'cpp': 'c'}
# (for more examples see the LANG_CMT and LANGS dicts below)
# NOTE: unicode (u) in comment strings is required.
# FROM: <https://github.com/licenses/lice/blob/1723d6c1950ed4de2bc5e011c2f51abb4c601f9f/lice/core.py> # noqa
LANGS = {'txt': 'text', 'h': 'c', 'hpp': 'c', 'c': 'c', 'cc': 'c', 'cpp': 'c',
'py': 'unix', 'pl': 'perl', 'sh': 'unix', 'lua': 'lua', 'rb': 'ruby',
'js': 'c', 'java': 'java', 'f': 'fortran', 'f90': 'fortran90',
'erl': 'erlang', 'html': 'html', 'css': 'c', 'less': 'c', 'm': 'c',
'styl': 'c'}
LANG_CMT = {'text': [u'', u'', u''], 'c': [u'/*', u' *', u' */'],
'unix': [u'', u'#', u''], 'lua': [u'--[[', u'', u'--]]'],
'java': [u'/**', u' *', u' */'], 'perl': [u'=item', u'', u'=cut'],
'ruby': [u'=begin', u'', u'=end'], 'fortran': [u'C', u'C', u'C'],
'fortran90': [u'!*', u'!*', u'!*'], 'erlang': [u'%%', u'%', u'%%'],
'html': [u'<!--', u'', u'-->']}
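# For example, to register Rust sources (a hypothetical extension, not part
# of the shipped mapping) you could reuse the C comment style:
#     LANGS['rs'] = 'c'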
def warn(msg):
print('WARNING: ' + msg, file=sys.stderr)
def error(msg, status=EXIT_ERROR):
print('ERROR: ' + msg, file=sys.stderr)
die(status)
def die(status=EXIT_ERROR):
sys.exit(status)
def generate_license_header(template, context):
"""
Generate a license header by extracting variables from the template
and replacing them with corresponding context values.
"""
out = template[:]
for key in extract_vars(template):
try:
out = out.replace('{{ %s }}' % key, context[key])
except KeyError:
error('missing "%s" in context!' % key, EXIT_ARGUMENT_ERROR)
return out
def format_license_header(header, lang):
"""
Format a license header for the specified language.
"""
first, most, last = LANG_CMT[lang]
lines = header.splitlines()
out = [first] + [most + u' ' + line for line in lines] + [last]
out = [line.rstrip() for line in out]
return '\n'.join(out)
def write_license_header(file, header):
"""
Write a formatted license header to the top of an open file object, after
an optional #! and optional encoding declaration.
"""
def is_encoding_line(line):
# <http://www.python.org/dev/peps/pep-0263/>
ENCODING = ('coding=', '-*- coding:')
return any([e in line for e in ENCODING])
input = file.readlines()
output = []
if input:
if input[0].startswith('#!'): # shebang
output.append(input.pop(0))
        if input and is_encoding_line(input[0]):  # encoding
output.append(input.pop(0))
output = u''.join(output) + header.strip() + u'\n' + u''.join(input)
file.seek(0)
file.truncate()
file.write(output)
def load_template(license):
"""
Load license template from the package.
"""
return resource_string(__name__,
'template-%s.txt' % license).decode('utf-8')
def extract_vars(template):
"""
Extract variables from template. Variables are enclosed in double curly
braces.
"""
return sorted(set(re.findall(r'\{\{ (?P<key>\w+) \}\}', template)))
def get_lang(path):
"""
Get the "language" of a path. Currently this is determined by its
extension.
"""
root, ext = os.path.splitext(path)
if ext[0] == '.':
ext = ext[1:]
return LANGS[ext]
def parse_context(vars):
"""
Parse a list of KEY=VALUE pairs into a dictionary.
"""
if vars is None:
vars = []
context = {}
for v in vars:
        key, value = v.split('=', 1)
if key in context:
error('"%s" specified multiple times!' % key, EXIT_ARGUMENT_ERROR)
context[key] = value
return context
def main(args=sys.argv[1:], stdin=sys.stdin):
parser = argparse.ArgumentParser(
description='Add license headers to files passed in on stdin')
parser.add_argument(
'license', metavar='LICENSE', choices=LICENSES,
help='the license to add, one of %s' % ', '.join(LICENSES))
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--vars', dest='list_vars', action='store_true',
help='list template variables for specified license')
group.add_argument(
'--context', metavar='KEY=VALUE', nargs='*',
help='KEY=VALUE formatted variables to generate the license')
args = parser.parse_args(args)
license = args.license
template = load_template(license)
if args.list_vars:
print('The %s license template contains the following variables:' %
license)
vars = extract_vars(template)
for var in vars:
print('\t' + var)
die(EXIT_SUCCESS)
context = parse_context(args.context)
header = generate_license_header(template, context)
if stdin.isatty() and not select.select([stdin], [], [], 0.0)[0]:
# If there's no data on stdin, we error and die.
# <http://stackoverflow.com/a/3763257/609144>
error('No paths on stdin!')
paths = [line.strip() for line in stdin.readlines() if line]
if not any(paths):
error('No paths on stdin!')
for p in paths:
try:
lang = get_lang(p)
except (KeyError, IndexError):
warn('could not determine filetype for %s. skipping...' % p)
continue
formatted = format_license_header(header, lang)
with codecs.open(p, 'r+', encoding='utf-8') as f:
write_license_header(f, formatted)
if __name__ == '__main__':
main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Glance exception subclasses"""
import six
import six.moves.urllib.parse as urlparse
from glance import i18n
_ = i18n._
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class RedirectException(Exception):
def __init__(self, url):
self.url = urlparse.urlparse(url)
class GlanceException(Exception):
"""
Base Glance Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred")
def __init__(self, message=None, *args, **kwargs):
if not message:
message = self.message
try:
if kwargs:
message = message % kwargs
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
pass
self.msg = message
super(GlanceException, self).__init__(message)
def __unicode__(self):
# NOTE(flwang): By default, self.msg is an instance of Message, which
# can't be converted by str(). Based on the definition of
# __unicode__, it should return unicode always.
return six.text_type(self.msg)
class MissingCredentialError(GlanceException):
message = _("Missing required credential: %(required)s")
class BadAuthStrategy(GlanceException):
message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
"received \"%(received)s\"")
class NotFound(GlanceException):
message = _("An object with the specified identifier was not found.")
class BadStoreUri(GlanceException):
message = _("The Store URI was malformed.")
class Duplicate(GlanceException):
message = _("An object with the same identifier already exists.")
class Conflict(GlanceException):
message = _("An object with the same identifier is currently being "
"operated on.")
class StorageQuotaFull(GlanceException):
message = _("The size of the data %(image_size)s will exceed the limit. "
"%(remaining)s bytes remaining.")
class AuthBadRequest(GlanceException):
message = _("Connect error/bad request to Auth service at URL %(url)s.")
class AuthUrlNotFound(GlanceException):
message = _("Auth service at URL %(url)s not found.")
class AuthorizationFailure(GlanceException):
message = _("Authorization failed.")
class NotAuthenticated(GlanceException):
message = _("You are not authenticated.")
class UploadException(GlanceException):
message = _('Image upload problem: %s')
class Forbidden(GlanceException):
message = _("You are not authorized to complete this action.")
class ForbiddenPublicImage(Forbidden):
message = _("You are not authorized to complete this action.")
class ProtectedImageDelete(Forbidden):
message = _("Image %(image_id)s is protected and cannot be deleted.")
class ProtectedMetadefNamespaceDelete(Forbidden):
message = _("Metadata definition namespace %(namespace)s is protected"
" and cannot be deleted.")
class ProtectedMetadefNamespacePropDelete(Forbidden):
message = _("Metadata definition property %(property_name)s is protected"
" and cannot be deleted.")
class ProtectedMetadefObjectDelete(Forbidden):
message = _("Metadata definition object %(object_name)s is protected"
" and cannot be deleted.")
class ProtectedMetadefResourceTypeAssociationDelete(Forbidden):
message = _("Metadata definition resource-type-association"
" %(resource_type)s is protected and cannot be deleted.")
class ProtectedMetadefResourceTypeSystemDelete(Forbidden):
message = _("Metadata definition resource-type %(resource_type_name)s is"
" a seeded-system type and cannot be deleted.")
class ProtectedMetadefTagDelete(Forbidden):
message = _("Metadata definition tag %(tag_name)s is protected"
" and cannot be deleted.")
class Invalid(GlanceException):
message = _("Data supplied was not valid.")
class InvalidSortKey(Invalid):
message = _("Sort key supplied was not valid.")
class InvalidSortDir(Invalid):
message = _("Sort direction supplied was not valid.")
class InvalidPropertyProtectionConfiguration(Invalid):
message = _("Invalid configuration in property protection file.")
class InvalidSwiftStoreConfiguration(Invalid):
message = _("Invalid configuration in glance-swift conf file.")
class InvalidFilterRangeValue(Invalid):
message = _("Unable to filter using the specified range.")
class InvalidOptionValue(Invalid):
message = _("Invalid value for option %(option)s: %(value)s")
class ReadonlyProperty(Forbidden):
message = _("Attribute '%(property)s' is read-only.")
class ReservedProperty(Forbidden):
message = _("Attribute '%(property)s' is reserved.")
class AuthorizationRedirect(GlanceException):
message = _("Redirecting to %(uri)s for authorization.")
class ClientConnectionError(GlanceException):
message = _("There was an error connecting to a server")
class ClientConfigurationError(GlanceException):
message = _("There was an error configuring the client.")
class MultipleChoices(GlanceException):
message = _("The request returned a 302 Multiple Choices. This generally "
"means that you have not included a version indicator in a "
"request URI.\n\nThe body of response returned:\n%(body)s")
class LimitExceeded(GlanceException):
message = _("The request returned a 413 Request Entity Too Large. This "
"generally means that rate limiting or a quota threshold was "
"breached.\n\nThe response body:\n%(body)s")
def __init__(self, *args, **kwargs):
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
super(LimitExceeded, self).__init__(*args, **kwargs)
class ServiceUnavailable(GlanceException):
message = _("The request returned 503 Service Unavailable. This "
"generally occurs on service overload or other transient "
"outage.")
def __init__(self, *args, **kwargs):
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
super(ServiceUnavailable, self).__init__(*args, **kwargs)
class ServerError(GlanceException):
message = _("The request returned 500 Internal Server Error.")
class UnexpectedStatus(GlanceException):
message = _("The request returned an unexpected status: %(status)s."
"\n\nThe response body:\n%(body)s")
class InvalidContentType(GlanceException):
message = _("Invalid content type %(content_type)s")
class BadRegistryConnectionConfiguration(GlanceException):
message = _("Registry was not configured correctly on API server. "
"Reason: %(reason)s")
class BadDriverConfiguration(GlanceException):
message = _("Driver %(driver_name)s could not be configured correctly. "
"Reason: %(reason)s")
class MaxRedirectsExceeded(GlanceException):
message = _("Maximum redirects (%(redirects)s) was exceeded.")
class InvalidRedirect(GlanceException):
message = _("Received invalid HTTP redirect.")
class NoServiceEndpoint(GlanceException):
message = _("Response from Keystone does not contain a Glance endpoint.")
class RegionAmbiguity(GlanceException):
message = _("Multiple 'image' service matches for region %(region)s. This "
"generally means that a region is required and you have not "
"supplied one.")
class WorkerCreationFailure(GlanceException):
message = _("Server worker creation failed: %(reason)s.")
class SchemaLoadError(GlanceException):
message = _("Unable to load schema: %(reason)s")
class InvalidObject(GlanceException):
message = _("Provided object does not match schema "
"'%(schema)s': %(reason)s")
class UnsupportedHeaderFeature(GlanceException):
message = _("Provided header feature is unsupported: %(feature)s")
class InUseByStore(GlanceException):
message = _("The image cannot be deleted because it is in use through "
"the backend store outside of Glance.")
class ImageSizeLimitExceeded(GlanceException):
message = _("The provided image is too large.")
class ImageMemberLimitExceeded(LimitExceeded):
message = _("The limit has been exceeded on the number of allowed image "
"members for this image. Attempted: %(attempted)s, "
"Maximum: %(maximum)s")
class ImagePropertyLimitExceeded(LimitExceeded):
message = _("The limit has been exceeded on the number of allowed image "
"properties. Attempted: %(attempted)s, Maximum: %(maximum)s")
class ImageTagLimitExceeded(LimitExceeded):
message = _("The limit has been exceeded on the number of allowed image "
"tags. Attempted: %(attempted)s, Maximum: %(maximum)s")
class ImageLocationLimitExceeded(LimitExceeded):
message = _("The limit has been exceeded on the number of allowed image "
"locations. Attempted: %(attempted)s, Maximum: %(maximum)s")
class SIGHUPInterrupt(GlanceException):
message = _("System SIGHUP signal received.")
class RPCError(GlanceException):
message = _("%(cls)s exception was raised in the last rpc call: %(val)s")
class TaskException(GlanceException):
message = _("An unknown task exception occurred")
class BadTaskConfiguration(GlanceException):
message = _("Task was not configured properly")
class TaskNotFound(TaskException, NotFound):
message = _("Task with the given id %(task_id)s was not found")
class InvalidTaskStatus(TaskException, Invalid):
message = _("Provided status of task is unsupported: %(status)s")
class InvalidTaskType(TaskException, Invalid):
message = _("Provided type of task is unsupported: %(type)s")
class InvalidTaskStatusTransition(TaskException, Invalid):
message = _("Status transition from %(cur_status)s to"
" %(new_status)s is not allowed")
class DuplicateLocation(Duplicate):
message = _("The location %(location)s already exists")
class ImageDataNotFound(NotFound):
message = _("No image data could be found")
class InvalidParameterValue(Invalid):
message = _("Invalid value '%(value)s' for parameter '%(param)s': "
"%(extra_msg)s")
class InvalidImageStatusTransition(Invalid):
message = _("Image status transition from %(cur_status)s to"
" %(new_status)s is not allowed")
class MetadefDuplicateNamespace(Duplicate):
message = _("The metadata definition namespace=%(namespace_name)s"
" already exists.")
class MetadefDuplicateObject(Duplicate):
message = _("A metadata definition object with name=%(object_name)s"
" already exists in namespace=%(namespace_name)s.")
class MetadefDuplicateProperty(Duplicate):
message = _("A metadata definition property with name=%(property_name)s"
" already exists in namespace=%(namespace_name)s.")
class MetadefDuplicateResourceType(Duplicate):
message = _("A metadata definition resource-type with"
" name=%(resource_type_name)s already exists.")
class MetadefDuplicateResourceTypeAssociation(Duplicate):
message = _("The metadata definition resource-type association of"
" resource-type=%(resource_type_name)s to"
" namespace=%(namespace_name)s"
" already exists.")
class MetadefDuplicateTag(Duplicate):
message = _("A metadata tag with name=%(name)s"
" already exists in namespace=%(namespace_name)s.")
class MetadefForbidden(Forbidden):
message = _("You are not authorized to complete this action.")
class MetadefIntegrityError(Forbidden):
message = _("The metadata definition %(record_type)s with"
" name=%(record_name)s not deleted."
" Other records still refer to it.")
class MetadefNamespaceNotFound(NotFound):
message = _("Metadata definition namespace=%(namespace_name)s"
"was not found.")
class MetadefObjectNotFound(NotFound):
message = _("The metadata definition object with"
" name=%(object_name)s was not found in"
" namespace=%(namespace_name)s.")
class MetadefPropertyNotFound(NotFound):
message = _("The metadata definition property with"
" name=%(property_name)s was not found in"
" namespace=%(namespace_name)s.")
class MetadefResourceTypeNotFound(NotFound):
message = _("The metadata definition resource-type with"
" name=%(resource_type_name)s, was not found.")
class MetadefResourceTypeAssociationNotFound(NotFound):
message = _("The metadata definition resource-type association of"
" resource-type=%(resource_type_name)s to"
" namespace=%(namespace_name)s,"
" was not found.")
class MetadefTagNotFound(NotFound):
message = _("The metadata definition tag with"
" name=%(name)s was not found in"
" namespace=%(namespace_name)s.")
class InvalidVersion(Invalid):
message = _("Version is invalid: %(reason)s")
|
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.types.model_property."""
from __future__ import annotations
import pickle
from core.jobs.types import model_property
from core.platform import models
from core.tests import test_utils
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
datastore_services = models.Registry.import_datastore_services()
class SubclassOfBaseModel(base_models.BaseModel):
"""Subclass of BaseModel with a StringProperty named 'value'."""
value = datastore_services.StringProperty()
class SubclassOfNdbModel(datastore_services.Model):
"""Subclass of NDB Model with a StringProperty named 'value'."""
value = datastore_services.StringProperty()
class RepeatedValueModel(base_models.BaseModel):
"""Subclass of BaseModel with a repeated StringProperty named 'values'."""
values = datastore_services.StringProperty(repeated=True)
class ModelPropertyTests(test_utils.TestBase):
def setUp(self):
self.id_property = model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.id)
self.ndb_property = model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.value)
self.ndb_repeated_property = model_property.ModelProperty(
RepeatedValueModel, RepeatedValueModel.values)
def test_init_with_id_property(self):
# Does not raise.
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.id)
def test_init_with_ndb_property(self):
# Does not raise.
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.value)
def test_init_with_ndb_repeated_property(self):
# Does not raise.
model_property.ModelProperty(
RepeatedValueModel, RepeatedValueModel.values)
def test_init_raises_type_error_when_model_is_not_a_class(self):
model = SubclassOfBaseModel()
with self.assertRaisesRegexp(TypeError, 'not a model class'):
model_property.ModelProperty(model, SubclassOfBaseModel.value)
def test_init_raises_type_error_when_model_is_unrelated_to_base_model(self):
with self.assertRaisesRegexp(TypeError, 'not a subclass of BaseModel'):
model_property.ModelProperty(
SubclassOfNdbModel, SubclassOfNdbModel.value)
def test_init_raises_type_error_when_property_is_not_an_ndb_property(self):
model = SubclassOfBaseModel(value='123')
with self.assertRaisesRegexp(TypeError, 'not an NDB Property'):
model_property.ModelProperty(SubclassOfBaseModel, model.value)
def test_init_raises_value_error_when_property_is_not_in_model(self):
with self.assertRaisesRegexp(ValueError, 'not a property of'):
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfNdbModel.value)
def test_model_kind_of_id_property(self):
self.assertEqual(self.id_property.model_kind, 'SubclassOfBaseModel')
def test_model_kind_of_ndb_property(self):
self.assertEqual(self.ndb_property.model_kind, 'SubclassOfBaseModel')
def test_model_kind_of_ndb_repeated_property(self):
self.assertEqual(
self.ndb_repeated_property.model_kind, 'RepeatedValueModel')
def test_property_name_of_id_property(self):
self.assertEqual(self.id_property.property_name, 'id')
def test_property_name_of_ndb_property(self):
self.assertEqual(self.ndb_property.property_name, 'value')
def test_property_name_of_ndb_repeated_property(self):
self.assertEqual(self.ndb_repeated_property.property_name, 'values')
def test_str_of_id_property(self):
self.assertEqual(str(self.id_property), 'SubclassOfBaseModel.id')
def test_str_of_ndb_property(self):
self.assertEqual(str(self.ndb_property), 'SubclassOfBaseModel.value')
def test_str_of_ndb_repeated_property(self):
self.assertEqual(
str(self.ndb_repeated_property), 'RepeatedValueModel.values')
def test_repr_of_id_property(self):
self.assertEqual(
repr(self.id_property),
'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.id)')
def test_repr_of_ndb_property(self):
self.assertEqual(
repr(self.ndb_property),
'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.value)')
def test_repr_of_ndb_repeated_property(self):
self.assertEqual(
repr(self.ndb_repeated_property),
'ModelProperty(RepeatedValueModel, RepeatedValueModel.values)')
def test_equality(self):
self.assertNotEqual(self.id_property, self.ndb_property)
self.assertNotEqual(self.ndb_property, self.ndb_repeated_property)
self.assertNotEqual(self.ndb_repeated_property, self.id_property)
self.assertEqual(
self.id_property,
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.id))
self.assertEqual(
self.ndb_property,
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.value))
self.assertEqual(
self.ndb_repeated_property,
model_property.ModelProperty(
RepeatedValueModel, RepeatedValueModel.values))
def test_hash_of_id_property(self):
id_property_set = {
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.id),
}
self.assertIn(self.id_property, id_property_set)
self.assertNotIn(self.ndb_property, id_property_set)
self.assertNotIn(self.ndb_repeated_property, id_property_set)
def test_hash_of_ndb_property(self):
ndb_property_set = {
model_property.ModelProperty(
SubclassOfBaseModel, SubclassOfBaseModel.value),
}
self.assertIn(self.ndb_property, ndb_property_set)
self.assertNotIn(self.id_property, ndb_property_set)
self.assertNotIn(self.ndb_repeated_property, ndb_property_set)
def test_hash_of_ndb_repeated_property(self):
ndb_repeated_property_set = {
model_property.ModelProperty(
RepeatedValueModel, RepeatedValueModel.values),
}
self.assertIn(self.ndb_repeated_property, ndb_repeated_property_set)
self.assertNotIn(self.id_property, ndb_repeated_property_set)
self.assertNotIn(self.ndb_property, ndb_repeated_property_set)
def test_yield_value_from_id_property(self):
model = SubclassOfBaseModel(id='123')
self.assertEqual(
list(self.id_property.yield_value_from_model(model)), ['123'])
def test_yield_value_from_ndb_property(self):
model = SubclassOfBaseModel(value='abc')
self.assertEqual(
list(self.ndb_property.yield_value_from_model(model)), ['abc'])
def test_yield_value_from_ndb_repeated_property(self):
model = RepeatedValueModel(values=['123', '456', '789'])
self.assertEqual(
list(self.ndb_repeated_property.yield_value_from_model(model)),
['123', '456', '789'])
def test_yield_value_from_model_raises_type_error_if_not_right_kind(self):
model = RepeatedValueModel(values=['123', '456', '789'])
self.assertRaisesRegexp(
TypeError, 'not an instance of SubclassOfBaseModel',
lambda: list(self.ndb_property.yield_value_from_model(model)))
def test_pickle_id_property(self):
pickle_value = pickle.loads(pickle.dumps(self.id_property))
self.assertEqual(self.id_property, pickle_value)
self.assertIn(pickle_value, {self.id_property})
def test_pickle_ndb_property(self):
pickle_value = pickle.loads(pickle.dumps(self.ndb_property))
self.assertEqual(self.ndb_property, pickle_value)
self.assertIn(pickle_value, {self.ndb_property})
def test_pickle_ndb_repeated_property(self):
pickle_value = pickle.loads(pickle.dumps(self.ndb_repeated_property))
self.assertEqual(self.ndb_repeated_property, pickle_value)
self.assertIn(pickle_value, {self.ndb_repeated_property})
|
|
"""
Visualization functions related to the sensors.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import numpy as np
import nexa.loading as load
sns.set(style='white')
def visualize_STDM_seaborn(nexa_object, ax=None):
    """
    Plot the spatio-temporal distance matrix (STDM) as a seaborn heatmap.
    Renamed from visualize_STDM so it is not silently shadowed by the
    matplotlib-based visualize_STDM defined later in this module.
    """
to_plot = nexa_object.STDM
if ax is None:
fig, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(to_plot, mask=None, cmap=cmap,
vmax=1.0, vmin=-1.0,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
    ax.set_title('Spatio Temporal Distance Matrix (Distances)')
if ax is None:
return fig
else:
return ax
def visualize_SLM(nexa_object, cmap='coolwarm', inter='none',
origin='upper', fontsize=16, aspect='auto',
colorbar=True, ax=None, symmetry=True):
"""
Document
"""
SLM = nexa_object.SLM
to_plot = SLM
# First the parameters
to_plot_title = 'Sensor Lagged Matrix'
cmap = cmap
inter = inter
origin = origin
fontsize = fontsize # The fontsize
xlabel = 'Time Windows'
ylabel = 'Lagged Sensors'
if ax is None:
fig_size = (16, 12)
axes_position = [0.1, 0.1, 0.8, 0.8]
fig = plt.figure(figsize=fig_size)
ax = fig.add_axes(axes_position)
if symmetry:
# We create symmetric vmin and vmax
max_value = np.abs(np.max(to_plot))
min_value = np.abs(np.min(to_plot))
vmax = np.max((max_value, min_value))
vmin = -vmax
im = ax.imshow(to_plot, interpolation=inter, vmin=vmin,
vmax=vmax, cmap=cmap, origin=origin,
aspect=aspect)
else:
im = ax.imshow(to_plot, interpolation=inter, cmap=cmap,
origin=origin, aspect=aspect)
fig = im.get_figure()
    # Set the labels and titles
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(to_plot_title)
# Change the font sizes
axes = fig.get_axes()
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
    # Colorbar (this makes the axes display properly)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax)
cbar.solids.set_edgecolor('face')
return im
def visualize_STDM(nexa_object, cmap='coolwarm', inter='none',
origin='upper', fontsize=16, aspect='auto',
colorbar=True):
"""
Document
"""
Nlags = nexa_object.Nlags
Nsensors = nexa_object.sensors.Nsensors
STDM = nexa_object.STDM
to_plot = STDM
# First the parameters
to_plot_title = 'Spatio Temporal Distance Matrix'
cmap = cmap
inter = inter
origin = origin
fontsize = fontsize # The fontsize
fig_size = (16, 12)
axes_position = [0.1, 0.1, 0.8, 0.8]
xlabel = 'Time lags * Sensors'
ylabel = xlabel
fig = plt.figure(figsize=fig_size)
ax = fig.add_axes(axes_position)
im = plt.imshow(to_plot, interpolation=inter, cmap=cmap,
origin=origin, aspect=aspect)
    # Set the labels and titles
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(to_plot_title)
    # Set the tick names for x
# x_labels = np.arange(Nseries * Nseries + 1)
# ax.xaxis.set_major_formatter(plt.FixedFormatter(x_labels))
# ax.xaxis.set_major_locator(plt.MultipleLocator(1))
# Change the font sizes
axes = fig.get_axes()
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
    # Colorbar (this makes the axes display properly)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax)
cbar.solids.set_edgecolor('face')
return fig
def visualize_SLM_hdf5(database, run_name, cmap='coolwarm', inter='none',
origin='upper', fontsize=16, aspect='auto',
colorbar=True, ax=None, symmetry=True):
"""
This visualizes the SLM for a particular database
of a hdf5 storage and a particular run.
"""
SLM = load.get_SLM_hdf5(database, run_name)
to_plot = SLM
# First the parameters
to_plot_title = 'Sensor Lagged Matrix'
cmap = cmap
inter = inter
origin = origin
fontsize = fontsize # The fontsize
xlabel = 'Time Windows'
ylabel = 'Lagged Sensors'
if ax is None:
fig_size = (16, 12)
axes_position = [0.1, 0.1, 0.8, 0.8]
fig = plt.figure(figsize=fig_size)
ax = fig.add_axes(axes_position)
if symmetry:
# We create symmetric vmin and vmax
max_value = np.abs(np.max(to_plot))
min_value = np.abs(np.min(to_plot))
vmax = np.max((max_value, min_value))
vmin = -vmax
im = ax.imshow(to_plot, interpolation=inter, vmin=vmin,
vmax=vmax, cmap=cmap, origin=origin,
aspect=aspect)
else:
im = ax.imshow(to_plot, interpolation=inter, cmap=cmap,
origin=origin, aspect=aspect)
fig = im.get_figure()
    # Set the labels and titles
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(to_plot_title)
# Change the font sizes
axes = fig.get_axes()
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
    # Colorbar (this makes the axes display properly)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax)
cbar.solids.set_edgecolor('face')
return im
def visualize_STDM_hdf5(database, run_name, nexa_arrangement,
ax=None):
"""
Routine which plots the STDM using seaborn
and extracting this from a hdf5 representation
"""
sns.set(font_scale=2)
to_plot = load.get_STDM_hdf5(database, run_name, nexa_arrangement)
if ax is None:
fig, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
ax = sns.heatmap(to_plot, mask=None, cmap=cmap,
vmax=1.0, vmin=-1.0,
square=True, xticklabels=False, yticklabels=False,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
ax.set_title('Spatio Temporal Distance Matrix')
return ax
|
|
"""
Tests for various Pyflakes behavior.
"""
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skip, skipIf
class Test(TestCase):
def test_duplicateArgs(self):
self.flakes('def fu(bar, bar): pass', m.DuplicateArgument)
def test_localReferencedBeforeAssignment(self):
self.flakes('''
a = 1
def f():
a; a=1
f()
''', m.UndefinedLocal, m.UnusedVariable)
@skipIf(version_info >= (3,),
'in Python 3 list comprehensions execute in a separate scope')
def test_redefinedInListComp(self):
"""
Test that shadowing a variable in a list comprehension raises
a warning.
"""
self.flakes('''
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
class A:
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
def f():
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
[1 for a, b in [(1, 2)]]
[1 for a, b in [(1, 2)]]
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
[1 for a, b in [(1, 2)]]
''')
def test_redefinedInGenerator(self):
"""
Test that reusing a variable in a generator does not raise
a warning.
"""
self.flakes('''
a = 1
(1 for a, b in [(1, 2)])
''')
self.flakes('''
class A:
a = 1
list(1 for a, b in [(1, 2)])
''')
self.flakes('''
def f():
a = 1
(1 for a, b in [(1, 2)])
''', m.UnusedVariable)
self.flakes('''
(1 for a, b in [(1, 2)])
(1 for a, b in [(1, 2)])
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
(1 for a, b in [(1, 2)])
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInSetComprehension(self):
"""
Test that reusing a variable in a set comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1 for a, b in [(1, 2)]}
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1 for a, b in [(1, 2)]}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInDictComprehension(self):
"""
Test that reusing a variable in a dict comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1: 42 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1: 42 for a, b in [(1, 2)]}
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1: 42 for a, b in [(1, 2)]}
''')
def test_redefinedFunction(self):
"""
Test that shadowing a function definition with another one raises a
warning.
"""
self.flakes('''
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedClassFunction(self):
"""
Test that shadowing a function definition in a class suite with another
one raises a warning.
"""
self.flakes('''
class A:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseFunction(self):
"""
Test that shadowing a function definition twice in an if
and else block does not raise a warning.
"""
self.flakes('''
if True:
def a(): pass
else:
def a(): pass
''')
def test_redefinedIfFunction(self):
"""
Test that shadowing a function definition within an if block
raises a warning.
"""
self.flakes('''
if True:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedTryExceptFunction(self):
"""
Test that shadowing a function definition twice in try
and except block does not raise a warning.
"""
self.flakes('''
try:
def a(): pass
except:
def a(): pass
''')
def test_redefinedTryFunction(self):
"""
Test that shadowing a function definition within a try block
raises a warning.
"""
self.flakes('''
try:
def a(): pass
def a(): pass
except:
pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an if and else block does not raise a warning.
"""
self.flakes('''
if False:
a = 1
else:
[a for a in '12']
''')
@skipIf(version_info >= (3,),
'in Python 3 list comprehensions execute in a separate scope')
def test_redefinedElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an else (or if) block raises a warning.
"""
self.flakes('''
if False:
pass
else:
a = 1
[a for a in '12']
''', m.RedefinedInListComp)
def test_functionDecorator(self):
"""
Test that shadowing a function definition with a decorated version of
that function does not raise a warning.
"""
self.flakes('''
from somewhere import somedecorator
def a(): pass
a = somedecorator(a)
''')
def test_classFunctionDecorator(self):
"""
Test that shadowing a function definition in a class suite with a
decorated version of that function does not raise a warning.
"""
self.flakes('''
class A:
def a(): pass
a = classmethod(a)
''')
@skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_modernProperty(self):
self.flakes("""
class A:
@property
def t(self):
pass
@t.setter
def t(self, value):
pass
@t.deleter
def t(self):
pass
""")
def test_unaryPlus(self):
"""Don't die on unary +."""
self.flakes('+1')
def test_undefinedBaseClass(self):
"""
If a name in the base list of a class definition is undefined, a
warning is emitted.
"""
self.flakes('''
class foo(foo):
pass
''', m.UndefinedName)
def test_classNameUndefinedInClassBody(self):
"""
If a class name is used in the body of that class's definition and
the name is not already defined, a warning is emitted.
"""
self.flakes('''
class foo:
foo
''', m.UndefinedName)
def test_classNameDefinedPreviously(self):
"""
If a class name is used in the body of that class's definition and
the name was previously defined in some other way, no warning is
emitted.
"""
self.flakes('''
foo = None
class foo:
foo
''')
def test_classRedefinition(self):
"""
If a class is defined twice in the same module, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_functionRedefinedAsClass(self):
"""
If a function is redefined as a class, a warning is emitted.
"""
self.flakes('''
def Foo():
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_classRedefinedAsFunction(self):
"""
If a class is redefined as a function, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
def Foo():
pass
''', m.RedefinedWhileUnused)
def test_classWithReturn(self):
"""
If a return is used inside a class, a warning is emitted.
"""
self.flakes('''
class Foo(object):
return
''', m.ReturnOutsideFunction)
def test_moduleWithReturn(self):
"""
If a return is used at the module level, a warning is emitted.
"""
self.flakes('''
return
''', m.ReturnOutsideFunction)
def test_classWithYield(self):
"""
If a yield is used inside a class, a warning is emitted.
"""
self.flakes('''
class Foo(object):
yield
''', m.YieldOutsideFunction)
def test_moduleWithYield(self):
"""
If a yield is used at the module level, a warning is emitted.
"""
self.flakes('''
yield
''', m.YieldOutsideFunction)
@skipIf(version_info < (3, 3), "Python >= 3.3 only")
def test_classWithYieldFrom(self):
"""
If a yield from is used inside a class, a warning is emitted.
"""
self.flakes('''
class Foo(object):
yield from range(10)
''', m.YieldOutsideFunction)
@skipIf(version_info < (3, 3), "Python >= 3.3 only")
def test_moduleWithYieldFrom(self):
"""
If a yield from is used at the module level, a warning is emitted.
"""
self.flakes('''
yield from range(10)
''', m.YieldOutsideFunction)
def test_continueOutsideLoop(self):
self.flakes('''
continue
''', m.ContinueOutsideLoop)
self.flakes('''
def f():
continue
''', m.ContinueOutsideLoop)
self.flakes('''
while True:
pass
else:
continue
''', m.ContinueOutsideLoop)
self.flakes('''
while True:
pass
else:
if 1:
if 2:
continue
''', m.ContinueOutsideLoop)
self.flakes('''
while True:
def f():
continue
''', m.ContinueOutsideLoop)
self.flakes('''
while True:
class A:
continue
''', m.ContinueOutsideLoop)
def test_continueInsideLoop(self):
self.flakes('''
while True:
continue
''')
self.flakes('''
for i in range(10):
continue
''')
self.flakes('''
while True:
if 1:
continue
''')
self.flakes('''
for i in range(10):
if 1:
continue
''')
self.flakes('''
while True:
while True:
pass
else:
continue
else:
pass
''')
self.flakes('''
while True:
try:
pass
finally:
while True:
continue
''')
def test_continueInFinally(self):
# 'continue' inside 'finally' is a special syntax error
self.flakes('''
while True:
try:
pass
finally:
continue
''', m.ContinueInFinally)
self.flakes('''
while True:
try:
pass
finally:
if 1:
if 2:
continue
''', m.ContinueInFinally)
# Even when not in a loop, this is the error Python gives
self.flakes('''
try:
pass
finally:
continue
''', m.ContinueInFinally)
def test_breakOutsideLoop(self):
self.flakes('''
break
''', m.BreakOutsideLoop)
self.flakes('''
def f():
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
pass
else:
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
pass
else:
if 1:
if 2:
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
def f():
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
class A:
break
''', m.BreakOutsideLoop)
self.flakes('''
try:
pass
finally:
break
''', m.BreakOutsideLoop)
def test_breakInsideLoop(self):
self.flakes('''
while True:
break
''')
self.flakes('''
for i in range(10):
break
''')
self.flakes('''
while True:
if 1:
break
''')
self.flakes('''
for i in range(10):
if 1:
break
''')
self.flakes('''
while True:
while True:
pass
else:
break
else:
pass
''')
self.flakes('''
while True:
try:
pass
finally:
while True:
break
''')
self.flakes('''
while True:
try:
pass
finally:
break
''')
self.flakes('''
while True:
try:
pass
finally:
if 1:
if 2:
break
''')
def test_defaultExceptLast(self):
"""
A default except block should be last.
YES:
try:
...
except Exception:
...
except:
...
NO:
try:
...
except:
...
except Exception:
...
"""
self.flakes('''
try:
pass
except ValueError:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
except:
pass
''')
self.flakes('''
try:
pass
except:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
else:
pass
''')
self.flakes('''
try:
pass
except:
pass
else:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
except:
pass
else:
pass
''')
def test_defaultExceptNotLast(self):
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
else:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
finally:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
@skipIf(version_info < (3,), "Python 3 only")
def test_starredAssignmentNoError(self):
"""
Python 3 extended iterable unpacking
"""
self.flakes('''
a, *b = range(10)
''')
self.flakes('''
*a, b = range(10)
''')
self.flakes('''
a, *b, c = range(10)
''')
self.flakes('''
(a, *b) = range(10)
''')
self.flakes('''
(*a, b) = range(10)
''')
self.flakes('''
(a, *b, c) = range(10)
''')
self.flakes('''
[a, *b] = range(10)
''')
self.flakes('''
[*a, b] = range(10)
''')
self.flakes('''
[a, *b, c] = range(10)
''')
# Taken from test_unpack_ex.py in the cPython source
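        # Note: "-" binds tighter than "<<", so 1 << 8 - 1 parses as
        # 1 << 7 == 128 names before the starred target (accepted), whereas
        # the error cases below use 1 << 8 == 256 names and more.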
s = ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest = range(1<<8)"
self.flakes(s)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest) = range(1<<8)"
self.flakes(s)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest] = range(1<<8)"
self.flakes(s)
@skipIf(version_info < (3, ), "Python 3 only")
def test_starredAssignmentErrors(self):
"""
SyntaxErrors (not encoded in the ast) surrounding Python 3 extended
iterable unpacking
"""
# Taken from test_unpack_ex.py in the cPython source
s = ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest) = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest] = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest) = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest] = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
# No way we can actually test this!
# s = "*rest, " + ", ".join("a%d" % i for i in range(1<<24)) + \
# ", *rest = range(1<<24 + 1)"
# self.flakes(s, m.TooManyExpressionsInStarredAssignment)
self.flakes('''
a, *b, *c = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
a, *b, c, *d = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
*a, *b, *c = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(a, *b, *c) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(a, *b, c, *d) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(*a, *b, *c) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[a, *b, *c] = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[a, *b, c, *d] = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[*a, *b, *c] = range(10)
''', m.TwoStarredExpressions)
@skip("todo: Too hard to make this warn but other cases stay silent")
def test_doubleAssignment(self):
"""
If a variable is re-assigned to without being used, no warning is
emitted.
"""
self.flakes('''
x = 10
x = 20
''', m.RedefinedWhileUnused)
def test_doubleAssignmentConditionally(self):
"""
If a variable is re-assigned within a conditional, no warning is
emitted.
"""
self.flakes('''
x = 10
if True:
x = 20
''')
def test_doubleAssignmentWithUse(self):
"""
If a variable is re-assigned to after being used, no warning is
emitted.
"""
self.flakes('''
x = 10
y = x * 2
x = 20
''')
def test_comparison(self):
"""
If a defined name is used on either side of any of the six comparison
operators, no warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x < y
x <= y
x == y
x != y
x >= y
x > y
''')
def test_identity(self):
"""
If a defined name is used on either side of an identity test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x is y
x is not y
''')
def test_containment(self):
"""
If a defined name is used on either side of a containment test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x in y
x not in y
''')
def test_loopControl(self):
"""
break and continue statements are supported.
"""
self.flakes('''
for x in [1, 2]:
break
''')
self.flakes('''
for x in [1, 2]:
continue
''')
def test_ellipsis(self):
"""
Ellipsis in a slice is supported.
"""
self.flakes('''
[1, 2][...]
''')
def test_extendedSlice(self):
"""
Extended slices are supported.
"""
self.flakes('''
x = 3
[1, 2][x,:]
''')
def test_varAugmentedAssignment(self):
"""
Augmented assignment of a variable is supported.
We don't care about var refs.
"""
self.flakes('''
foo = 0
foo += 1
''')
def test_attrAugmentedAssignment(self):
"""
Augmented assignment of attributes is supported.
We don't care about attr refs.
"""
self.flakes('''
foo = None
foo.bar += foo.baz
''')
def test_globalDeclaredInDifferentScope(self):
"""
A 'global' can be declared in one scope and reused in another.
"""
self.flakes('''
def f(): global foo
def g(): foo = 'anything'; foo.is_used()
''')
class TestUnusedAssignment(TestCase):
"""
Tests for warning about unused assignments.
"""
def test_unusedVariable(self):
"""
Warn when a variable in a function is assigned a value that's never
used.
"""
self.flakes('''
def a():
b = 1
''', m.UnusedVariable)
def test_unusedVariableAsLocals(self):
"""
        When locals() is used in a function, it is perfectly valid to have
        unused variables.
"""
self.flakes('''
def a():
b = 1
return locals()
''')
def test_unusedVariableNoLocals(self):
"""
        Using locals() in the wrong scope should not matter.
"""
self.flakes('''
def a():
locals()
def a():
b = 1
return
''', m.UnusedVariable)
@skip("todo: Difficult because it does't apply in the context of a loop")
def test_unusedReassignedVariable(self):
"""
Shadowing a used variable can still raise an UnusedVariable warning.
"""
self.flakes('''
def a():
b = 1
b.foo()
b = 2
''', m.UnusedVariable)
def test_variableUsedInLoop(self):
"""
Shadowing a used variable cannot raise an UnusedVariable warning in the
context of a loop.
"""
self.flakes('''
def a():
b = True
while b:
b = False
''')
def test_assignToGlobal(self):
"""
Assigning to a global and then not using that global is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = 0
def a():
global b
b = 1
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_assignToNonlocal(self):
"""
Assigning to a nonlocal and then not using that binding is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = b'0'
def a():
nonlocal b
b = b'1'
''')
def test_assignToMember(self):
"""
Assigning to a member of another object and then not using that member
variable is perfectly acceptable. Do not mistake it for an unused
local variable.
"""
# XXX: Adding this test didn't generate a failure. Maybe not
# necessary?
self.flakes('''
class b:
pass
def a():
b.foo = 1
''')
def test_assignInForLoop(self):
"""
Don't warn when a variable in a for loop is assigned to but not used.
"""
self.flakes('''
def f():
for i in range(10):
pass
''')
def test_assignInListComprehension(self):
"""
Don't warn when a variable in a list comprehension is
assigned to but not used.
"""
self.flakes('''
def f():
[None for i in range(10)]
''')
def test_generatorExpression(self):
"""
Don't warn when a variable in a generator expression is
assigned to but not used.
"""
self.flakes('''
def f():
(None for i in range(10))
''')
def test_assignmentInsideLoop(self):
"""
Don't warn when a variable assignment occurs lexically after its use.
"""
self.flakes('''
def f():
x = None
for i in range(10):
if i > 2:
return x
x = i * 2
''')
def test_tupleUnpacking(self):
"""
Don't warn when a variable included in tuple unpacking is unused. It's
very common for variables in a tuple unpacking assignment to be unused
        in good Python code, so a warning would only create false positives.
"""
self.flakes('''
def f(tup):
(x, y) = tup
''')
self.flakes('''
def f():
(x, y) = 1, 2
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(x, y) = coords = 1, 2
if x > 1:
print(coords)
''')
self.flakes('''
def f():
(x, y) = coords = 1, 2
''', m.UnusedVariable)
self.flakes('''
def f():
coords = (x, y) = 1, 2
''', m.UnusedVariable)
def test_listUnpacking(self):
"""
Don't warn when a variable included in list unpacking is unused.
"""
self.flakes('''
def f(tup):
[x, y] = tup
''')
self.flakes('''
def f():
[x, y] = [1, 2]
''', m.UnusedVariable, m.UnusedVariable)
def test_closedOver(self):
"""
Don't warn when the assignment is used in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
return foo
return bar
''')
def test_doubleClosedOver(self):
"""
Don't warn when the assignment is used in an inner function, even if
that inner function itself is in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
def baz():
return foo
return bar
''')
def test_tracebackhideSpecialVariable(self):
"""
Do not warn about unused local variable __tracebackhide__, which is
a special variable for py.test.
"""
self.flakes("""
def helper():
__tracebackhide__ = True
""")
def test_ifexp(self):
"""
Test C{foo if bar else baz} statements.
"""
self.flakes("a = 'moo' if True else 'oink'")
self.flakes("a = foo if True else 'oink'", m.UndefinedName)
self.flakes("a = 'moo' if True else bar", m.UndefinedName)
def test_withStatementNoNames(self):
"""
        No warnings are emitted for using a name defined beforehand inside or
        after a nameless C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
bar = None
with open("foo"):
bar
bar
''')
def test_withStatementSingleName(self):
"""
No warnings are emitted for using a name defined by a C{with} statement
within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
bar
bar
''')
def test_withStatementAttributeName(self):
"""
No warnings are emitted for using an attribute as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo.bar:
pass
''')
def test_withStatementSubscript(self):
"""
No warnings are emitted for using a subscript as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[0]:
pass
''')
def test_withStatementSubscriptUndefined(self):
"""
An undefined name warning is emitted if the subscript used as the
target of a C{with} statement is not defined.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[bar]:
pass
''', m.UndefinedName)
def test_withStatementTupleNames(self):
"""
No warnings are emitted for using any of the tuple of names defined by
a C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as (bar, baz):
bar, baz
bar, baz
''')
def test_withStatementListNames(self):
"""
No warnings are emitted for using any of the list of names defined by a
C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as [bar, baz]:
bar, baz
bar, baz
''')
def test_withStatementComplicatedTarget(self):
"""
If the target of a C{with} statement uses any or all of the valid forms
for that part of the grammar (See
U{http://docs.python.org/reference/compound_stmts.html#the-with-statement}),
the names involved are checked both for definedness and any bindings
created are respected in the suite of the statement and afterwards.
"""
self.flakes('''
from __future__ import with_statement
c = d = e = g = h = i = None
with open('foo') as [(a, b), c[d], e.f, g[h:i]]:
a, b, c, d, e, g, h, i
a, b, c, d, e, g, h, i
''')
def test_withStatementSingleNameUndefined(self):
"""
An undefined name warning is emitted if the name first defined by a
C{with} statement is used before the C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
bar
with open('foo') as bar:
pass
''', m.UndefinedName)
def test_withStatementTupleNamesUndefined(self):
"""
        An undefined name warning is emitted if a name first defined by the
tuple-unpacking form of the C{with} statement is used before the
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
baz
with open('foo') as (bar, baz):
pass
''', m.UndefinedName)
def test_withStatementSingleNameRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by the name defined by a C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as bar:
pass
''', m.RedefinedWhileUnused)
def test_withStatementTupleNamesRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by one of the names defined by the tuple-unpacking form of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as (bar, baz):
pass
''', m.RedefinedWhileUnused)
def test_withStatementUndefinedInside(self):
"""
An undefined name warning is emitted if a name is used inside the
body of a C{with} statement without first being bound.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz
''', m.UndefinedName)
def test_withStatementNameDefinedInBody(self):
"""
A name defined in the body of a C{with} statement can be used after
the body ends without warning.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz = 10
baz
''')
def test_withStatementUndefinedInExpression(self):
"""
An undefined name warning is emitted if a name in the I{test}
expression of a C{with} statement is undefined.
"""
self.flakes('''
from __future__ import with_statement
with bar as baz:
pass
''', m.UndefinedName)
self.flakes('''
from __future__ import with_statement
with bar as bar:
pass
''', m.UndefinedName)
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_dictComprehension(self):
"""
Dict comprehensions are properly handled.
"""
self.flakes('''
a = {1: x for x in range(10)}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_setComprehensionAndLiteral(self):
"""
Set comprehensions are properly handled.
"""
self.flakes('''
a = {1, 2, 3}
b = {x for x in range(10)}
''')
def test_exceptionUsedInExcept(self):
as_exc = ', ' if version_info < (2, 6) else ' as '
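        # Python < 2.6 spelled the binding as "except Exception, e";
        # 2.6 and later use "except Exception as e".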
self.flakes('''
try: pass
except Exception%se: e
''' % as_exc)
self.flakes('''
def download_review():
try: pass
except Exception%se: e
''' % as_exc)
def test_exceptWithoutNameInFunction(self):
"""
Don't issue false warning when an unnamed exception is used.
Previously, there would be a false warning, but only when the
try..except was in a function
"""
self.flakes('''
import tokenize
def foo():
try: pass
except tokenize.TokenError: pass
''')
def test_exceptWithoutNameInFunctionTuple(self):
"""
Don't issue false warning when an unnamed exception is used.
This example catches a tuple of exception types.
"""
self.flakes('''
import tokenize
def foo():
try: pass
except (tokenize.TokenError, IndentationError): pass
''')
def test_augmentedAssignmentImportedFunctionCall(self):
"""
Consider a function that is called on the right part of an
augassign operation to be used.
"""
self.flakes('''
from foo import bar
baz = 0
baz += bar()
''')
def test_assert_without_message(self):
"""An assert without a message is not an error."""
self.flakes('''
a = 1
assert a
''')
def test_assert_with_message(self):
"""An assert with a message is not an error."""
self.flakes('''
a = 1
assert a, 'x'
''')
def test_assert_tuple(self):
"""An assert of a non-empty tuple is always True."""
self.flakes('''
assert (False, 'x')
assert (False, )
''', m.AssertTuple, m.AssertTuple)
def test_assert_tuple_empty(self):
"""An assert of an empty tuple is always False."""
self.flakes('''
assert ()
''')
def test_assert_static(self):
"""An assert of a static value is not an error."""
self.flakes('''
assert True
assert 1
''')
@skipIf(version_info < (3, 3), 'new in Python 3.3')
def test_yieldFromUndefined(self):
"""
Test C{yield from} statement
"""
self.flakes('''
def bar():
yield from foo()
''', m.UndefinedName)
@skipIf(version_info < (3, 6), 'new in Python 3.6')
def test_f_string(self):
"""Test PEP 498 f-strings are treated as a usage."""
self.flakes('''
baz = 0
print(f'\x7b4*baz\N{RIGHT CURLY BRACKET}')
''')
class TestAsyncStatements(TestCase):
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncDef(self):
self.flakes('''
async def bar():
return 42
''')
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncDefAwait(self):
self.flakes('''
async def read_data(db):
await db.fetch('SELECT ...')
''')
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncDefUndefined(self):
self.flakes('''
async def bar():
return foo()
''', m.UndefinedName)
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncFor(self):
self.flakes('''
async def read_data(db):
output = []
async for row in db.cursor():
output.append(row)
return output
''')
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncWith(self):
self.flakes('''
async def commit(session, data):
async with session.transaction():
await session.update(data)
''')
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_asyncWithItem(self):
self.flakes('''
async def commit(session, data):
async with session.transaction() as trans:
await trans.begin()
...
await trans.end()
''')
@skipIf(version_info < (3, 5), 'new in Python 3.5')
def test_matmul(self):
self.flakes('''
def foo(a, b):
return a @ b
''')
|
|
import os
import types
import sys
import codecs
import tempfile
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import re
from tkinter import *
from tkinter.simpledialog import SimpleDialog
from idlelib.configHandler import idleConf
from codecs import BOM_UTF8
# Try setting the locale, so that we can find out
# what encoding to use
try:
import locale
locale.setlocale(locale.LC_CTYPE, "")
except (ImportError, locale.Error):
pass
# Encoding for file names
filesystemencoding = sys.getfilesystemencoding() ### currently unused
locale_encoding = 'ascii'
if sys.platform == 'win32':
# On Windows, we could use "mbcs". However, to give the user
# a portable encoding name, we need to find the code page
try:
locale_encoding = locale.getdefaultlocale()[1]
codecs.lookup(locale_encoding)
except LookupError:
pass
else:
try:
# Different things can fail here: the locale module may not be
# loaded, it may not offer nl_langinfo, or CODESET, or the
# resulting codeset may be unknown to Python. We ignore all
# these problems, falling back to ASCII
locale_encoding = locale.nl_langinfo(locale.CODESET)
        if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (NameError, AttributeError, LookupError):
# Try getdefaultlocale: it parses environment variables,
# which may give a clue. Unfortunately, getdefaultlocale has
# bugs that can cause ValueError.
try:
locale_encoding = locale.getdefaultlocale()[1]
            if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (ValueError, LookupError):
pass
locale_encoding = locale_encoding.lower()
encoding = locale_encoding ### KBK 07Sep07 This is used all over IDLE, check!
### 'encoding' is used below in encode(), check!
coding_re = re.compile(r"coding[:=]\s*([-\w_.]+)")
class EncodingMessage(SimpleDialog):
"Inform user that an encoding declaration is needed."
def __init__(self, master, enc):
self.should_edit = False
self.root = top = Toplevel(master)
top.bind("<Return>", self.return_event)
top.bind("<Escape>", self.do_ok)
top.protocol("WM_DELETE_WINDOW", self.wm_delete_window)
top.wm_title("I/O Warning")
top.wm_iconname("I/O Warning")
self.top = top
l1 = Label(top,
text="Non-ASCII found, yet no encoding declared. Add a line like")
l1.pack(side=TOP, anchor=W)
l2 = Entry(top, font="courier")
l2.insert(0, "# -*- coding: %s -*-" % enc)
# For some reason, the text is not selectable anymore if the
# widget is disabled.
# l2['state'] = DISABLED
l2.pack(side=TOP, anchor = W, fill=X)
l3 = Label(top, text="to your file\n"
"Choose OK to save this file as %s\n"
"Edit your general options to silence this warning" % enc)
l3.pack(side=TOP, anchor = W)
buttons = Frame(top)
buttons.pack(side=TOP, fill=X)
# Both return and cancel mean the same thing: do nothing
self.default = self.cancel = 0
b1 = Button(buttons, text="Ok", default="active",
command=self.do_ok)
b1.pack(side=LEFT, fill=BOTH, expand=1)
b2 = Button(buttons, text="Edit my file",
command=self.do_edit)
b2.pack(side=LEFT, fill=BOTH, expand=1)
self._set_transient(master)
def do_ok(self):
self.done(0)
def do_edit(self):
self.done(1)
def coding_spec(data):
"""Return the encoding declaration according to PEP 263.
When checking encoded data, only the first two lines should be passed
in to avoid a UnicodeDecodeError if the rest of the data is not unicode.
The first two lines would contain the encoding specification.
Raise a LookupError if the encoding is declared but unknown.
"""
if isinstance(data, bytes):
try:
lines = data.decode('utf-8')
except UnicodeDecodeError:
return None
else:
lines = data
# consider only the first two lines
if '\n' in lines:
lst = lines.split('\n')[:2]
elif '\r' in lines:
lst = lines.split('\r')[:2]
    else:
        lst = [lines]
    text = '\n'.join(lst)
    match = coding_re.search(text)
if not match:
return None
name = match.group(1)
try:
codecs.lookup(name)
except LookupError:
# The standard encoding error does not indicate the encoding
raise LookupError("Unknown encoding: "+name)
return name
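# A minimal sketch of coding_spec() behaviour (illustrative inputs, not part
# of the original module):
#
#     coding_spec("# -*- coding: latin-1 -*-\n")      # -> 'latin-1'
#     coding_spec("print('no cookie here')\n")        # -> None
#     coding_spec("# coding: no-such-codec\n")        # raises LookupError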
class IOBinding:
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
self.__id_save = self.text.bind("<<save-window>>", self.save)
self.__id_saveas = self.text.bind("<<save-window-as-file>>",
self.save_as)
self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
self.save_a_copy)
self.fileencoding = None
self.__id_print = self.text.bind("<<print-window>>", self.print_window)
def close(self):
# Undo command bindings
self.text.unbind("<<open-window-from-file>>", self.__id_open)
self.text.unbind("<<save-window>>", self.__id_save)
self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
self.text.unbind("<<print-window>>", self.__id_print)
# Break cycles
self.editwin = None
self.text = None
self.filename_change_hook = None
def get_saved(self):
return self.editwin.get_saved()
def set_saved(self, flag):
self.editwin.set_saved(flag)
def reset_undo(self):
self.editwin.reset_undo()
filename_change_hook = None
def set_filename_change_hook(self, hook):
self.filename_change_hook = hook
filename = None
dirname = None
def set_filename(self, filename):
if filename and os.path.isdir(filename):
self.filename = None
self.dirname = filename
else:
self.filename = filename
self.dirname = None
self.set_saved(1)
if self.filename_change_hook:
self.filename_change_hook()
def open(self, event=None, editFile=None):
if self.editwin.flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
# If the current window has no filename and hasn't been
# modified, we replace its contents (no loss). Otherwise
# we open a new window. But we won't replace the
# shell window (which has an interp(reter) attribute), which
# gets set to "not modified" at every new prompt.
try:
interp = self.editwin.interp
except AttributeError:
interp = None
if not self.filename and self.get_saved() and not interp:
self.editwin.flist.open(filename, self.loadfile)
else:
self.editwin.flist.open(filename)
else:
self.text.focus_set()
return "break"
#
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
if reply == "cancel":
self.text.focus_set()
return "break"
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
self.loadfile(filename)
else:
self.text.focus_set()
return "break"
eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
eol_re = re.compile(eol)
eol_convention = os.linesep # default
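    # For example (illustrative only): eol_re.sub(r"\n", "a\r\nb\rc") yields
    # "a\nb\nc", and the first match ("\r\n" here) is remembered as the
    # file's original end-of-line convention.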
def loadfile(self, filename):
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
f = open(filename,'rb')
two_lines = f.readline() + f.readline()
f.seek(0)
bytes = f.read()
f.close()
except IOError as msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
chars = self._decode(two_lines, bytes)
if chars is None:
tkMessageBox.showerror("Decoding Error",
"File %s\nFailed to Decode" % filename,
parent=self.text)
return False
# We now convert all end-of-lines to '\n's
firsteol = self.eol_re.search(chars)
if firsteol:
self.eol_convention = firsteol.group(0)
chars = self.eol_re.sub(r"\n", chars)
self.text.delete("1.0", "end")
self.set_filename(None)
self.text.insert("1.0", chars)
self.reset_undo()
self.set_filename(filename)
self.text.mark_set("insert", "1.0")
self.text.see("insert")
self.updaterecentfileslist(filename)
return True
def _decode(self, two_lines, bytes):
"Create a Unicode string."
chars = None
# Check presence of a UTF-8 signature first
if bytes.startswith(BOM_UTF8):
try:
chars = bytes[3:].decode("utf-8")
except UnicodeDecodeError:
# has UTF-8 signature, but fails to decode...
return None
else:
# Indicates that this file originally had a BOM
self.fileencoding = 'BOM'
return chars
# Next look for coding specification
try:
enc = coding_spec(two_lines)
except LookupError as name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
"installation. The file may not display correctly" % name,
master = self.text)
enc = None
except UnicodeDecodeError:
return None
if enc:
try:
chars = str(bytes, enc)
self.fileencoding = enc
return chars
except UnicodeDecodeError:
pass
# Try ascii:
try:
chars = str(bytes, 'ascii')
self.fileencoding = None
return chars
except UnicodeDecodeError:
pass
# Try utf-8:
try:
chars = str(bytes, 'utf-8')
self.fileencoding = 'utf-8'
return chars
except UnicodeDecodeError:
pass
# Finally, try the locale's encoding. This is deprecated;
# the user should declare a non-ASCII encoding
try:
chars = str(bytes, locale_encoding)
self.fileencoding = locale_encoding
except UnicodeDecodeError:
pass
return chars # None on failure
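    # Decode order used above: UTF-8 BOM, then a PEP 263 coding cookie, then
    # ASCII, then UTF-8, and finally the locale encoding as a deprecated last
    # resort.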
def maybesave(self):
if self.get_saved():
return "yes"
message = "Do you want to save %s before closing?" % (
self.filename or "this untitled document")
m = tkMessageBox.Message(
title="Save On Close",
message=message,
icon=tkMessageBox.QUESTION,
type=tkMessageBox.YESNOCANCEL,
master=self.text)
reply = m.show()
if reply == "yes":
self.save(None)
if not self.get_saved():
reply = "cancel"
self.text.focus_set()
return reply
def save(self, event):
if not self.filename:
self.save_as(event)
else:
if self.writefile(self.filename):
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError: # may be a PyShell
pass
self.text.focus_set()
return "break"
def save_as(self, event):
filename = self.asksavefile()
if filename:
if self.writefile(filename):
self.set_filename(filename)
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError:
pass
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def save_a_copy(self, event):
filename = self.asksavefile()
if filename:
self.writefile(filename)
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def writefile(self, filename):
self.fixlastline()
text = self.text.get("1.0", "end-1c")
if self.eol_convention != "\n":
text = text.replace("\n", self.eol_convention)
chars = self.encode(text)
try:
f = open(filename, "wb")
f.write(chars)
f.flush()
f.close()
return True
except IOError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
def encode(self, chars):
if isinstance(chars, bytes):
# This is either plain ASCII, or Tk was returning mixed-encoding
# text to us. Don't try to guess further.
return chars
# See whether there is anything non-ASCII in it.
# If not, no need to figure out the encoding.
try:
return chars.encode('ascii')
except UnicodeError:
pass
# Check if there is an encoding declared
try:
# a string, let coding_spec slice it to the first two lines
enc = coding_spec(chars)
failed = None
except LookupError as msg:
failed = msg
enc = None
if enc:
try:
return chars.encode(enc)
except UnicodeError:
failed = "Invalid encoding '%s'" % enc
if failed:
tkMessageBox.showerror(
"I/O Error",
"%s.\nSaving as UTF-8" % failed,
master = self.text)
# If there was a UTF-8 signature, use that. This should not fail
if self.fileencoding == 'BOM' or failed:
return BOM_UTF8 + chars.encode("utf-8")
# Try the original file encoding next, if any
if self.fileencoding:
try:
return chars.encode(self.fileencoding)
except UnicodeError:
tkMessageBox.showerror(
"I/O Error",
"Cannot save this as '%s' anymore. Saving as UTF-8" \
% self.fileencoding,
master = self.text)
return BOM_UTF8 + chars.encode("utf-8")
# Nothing was declared, and we had not determined an encoding
# on loading. Recommend an encoding line.
config_encoding = idleConf.GetOption("main","EditorWindow",
"encoding")
if config_encoding == 'utf-8':
# User has requested that we save files as UTF-8
return BOM_UTF8 + chars.encode("utf-8")
ask_user = True
try:
chars = chars.encode(encoding)
enc = encoding
if config_encoding == 'locale':
ask_user = False
except UnicodeError:
chars = BOM_UTF8 + chars.encode("utf-8")
enc = "utf-8"
if not ask_user:
return chars
dialog = EncodingMessage(self.editwin.top, enc)
dialog.go()
if dialog.num == 1:
# User asked us to edit the file
encline = "# -*- coding: %s -*-\n" % enc
firstline = self.text.get("1.0", "2.0")
if firstline.startswith("#!"):
# Insert encoding after #! line
self.text.insert("2.0", encline)
else:
self.text.insert("1.0", encline)
return self.encode(self.text.get("1.0", "end-1c"))
return chars
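    # Encoding order used above: pure ASCII as-is, then a declared coding
    # cookie, then the original file encoding (or a UTF-8 BOM), then the
    # configured or locale encoding, asking the user when in doubt.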
def fixlastline(self):
c = self.text.get("end-2c")
if c != '\n':
self.text.insert("end-1c", "\n")
def print_window(self, event):
m = tkMessageBox.Message(
title="Print",
message="Print to Default Printer",
icon=tkMessageBox.QUESTION,
type=tkMessageBox.OKCANCEL,
default=tkMessageBox.OK,
master=self.text)
reply = m.show()
if reply != tkMessageBox.OK:
self.text.focus_set()
return "break"
tempfilename = None
saved = self.get_saved()
if saved:
filename = self.filename
# shell undo is reset after every prompt, looks saved, probably isn't
if not saved or filename is None:
(tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
filename = tempfilename
os.close(tfd)
if not self.writefile(tempfilename):
os.unlink(tempfilename)
return "break"
platform=os.name
printPlatform=1
if platform == 'posix': #posix platform
command = idleConf.GetOption('main','General',
'print-command-posix')
command = command + " 2>&1"
elif platform == 'nt': #win32 platform
command = idleConf.GetOption('main','General','print-command-win')
else: #no printing for this platform
printPlatform=0
if printPlatform: #we can try to print for this platform
command = command % filename
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
status = pipe.close()
if status:
output = "Printing failed (exit status 0x%x)\n" % \
status + output
if output:
output = "Printing command: %s\n" % repr(command) + output
tkMessageBox.showerror("Print status", output, master=self.text)
else: #no printing for this platform
message="Printing is not enabled for this platform: %s" % platform
tkMessageBox.showinfo("Print status", message, master=self.text)
if tempfilename:
os.unlink(tempfilename)
return "break"
opendialog = None
savedialog = None
filetypes = [
("Python and text files", "*.py *.pyw *.txt", "TEXT"),
("All text files", "*", "TEXT"),
("All files", "*"),
]
def askopenfile(self):
dir, base = self.defaultfilename("open")
if not self.opendialog:
self.opendialog = tkFileDialog.Open(master=self.text,
filetypes=self.filetypes)
filename = self.opendialog.show(initialdir=dir, initialfile=base)
return filename
def defaultfilename(self, mode="open"):
if self.filename:
return os.path.split(self.filename)
elif self.dirname:
return self.dirname, ""
else:
try:
pwd = os.getcwd()
except os.error:
pwd = ""
return pwd, ""
def asksavefile(self):
dir, base = self.defaultfilename("save")
if not self.savedialog:
self.savedialog = tkFileDialog.SaveAs(master=self.text,
filetypes=self.filetypes)
filename = self.savedialog.show(initialdir=dir, initialfile=base)
return filename
def updaterecentfileslist(self,filename):
"Update recent file list on all editor windows"
if self.editwin.flist:
self.editwin.update_recent_files_list(filename)
def test():
root = Tk()
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
self.text.bind("<Alt-s>", self.save_as)
self.text.bind("<Alt-z>", self.save_a_copy)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
def open(self, event):
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
def save_as(self, event):
self.text.event_generate("<<save-window-as-file>>")
def save_a_copy(self, event):
self.text.event_generate("<<save-copy-of-window-as-file>>")
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
root.mainloop()
if __name__ == "__main__":
test()
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.interfaces.spm.base import SPMCommandInputSpec, SPMCommand, Info, scans_for_fnames, scans_for_fname
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces.base import (TraitedSpec, BaseInterface,
BaseInterfaceInputSpec, isdefined,
OutputMultiPath, InputMultiPath)
from nipype.interfaces.base import File, traits
from nipype.utils.filemanip import split_filename, fname_presuffix, filename_to_list,list_to_filename
import os
import numpy as np
class Analyze2niiInputSpec(SPMCommandInputSpec):
analyze_file = File(exists=True, mandatory=True)
class Analyze2niiOutputSpec(TraitedSpec):
nifti_file = File(exists=True)
class Analyze2nii(SPMCommand):
input_spec = Analyze2niiInputSpec
output_spec = Analyze2niiOutputSpec
def _make_matlab_command(self, _):
script = "V = spm_vol('%s');\n"%self.inputs.analyze_file
_, name,_ = split_filename(self.inputs.analyze_file)
self.output_name = os.path.join(os.getcwd(), name + ".nii")
script += "[Y, XYZ] = spm_read_vols(V);\n"
script += "V.fname = '%s';\n"%self.output_name
script += "spm_write_vol(V, Y);\n"
return script
def _list_outputs(self):
outputs = self._outputs().get()
outputs['nifti_file'] = self.output_name
return outputs
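# Illustrative usage of Analyze2nii (assumed filename; needs a working
# SPM/MATLAB setup, analogous to the doctest examples further below):
#
#     >>> import nipype.interfaces.spm.utils as spmu
#     >>> a2n = spmu.Analyze2nii()
#     >>> a2n.inputs.analyze_file = 'image.hdr'
#     >>> a2n.run() # doctest: +SKIP
#
# The converted volume is written as image.nii in the current working
# directory.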
class CalcCoregAffineInputSpec(SPMCommandInputSpec):
target = File( exists = True, mandatory = True,
desc = 'target for generating affine transform')
moving = File( exists = True, mandatory = True, copyfile=False,
desc = 'volume transform can be applied to register with target')
mat = File( desc = 'Filename used to store affine matrix')
invmat = File( desc = 'Filename used to store inverse affine matrix')
class CalcCoregAffineOutputSpec(TraitedSpec):
mat = File(exists = True, desc = 'Matlab file holding transform')
invmat = File( desc = 'Matlab file holding inverse transform')
class CalcCoregAffine(SPMCommand):
""" Uses SPM (spm_coreg) to calculate the transform mapping
    moving to target. Saves the transform in mat (a matlab binary file)
    and also saves the inverse transform.
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> coreg = spmu.CalcCoregAffine(matlab_cmd='matlab-spm8')
>>> coreg.inputs.target = 'structural.nii'
>>> coreg.inputs.moving = 'functional.nii'
>>> coreg.inputs.mat = 'func_to_struct.mat'
>>> coreg.run() # doctest: +SKIP
.. note::
        * the output file mat is saved as a matlab binary file
        * calculating the transforms does NOT change either input image;
          it does not **move** the moving image, only calculates the transform
          that can be used to move it
"""
input_spec = CalcCoregAffineInputSpec
output_spec = CalcCoregAffineOutputSpec
def _make_inv_file(self):
""" makes filename to hold inverse transform if not specified"""
invmat = fname_presuffix(self.inputs.mat, prefix = 'inverse_')
return invmat
def _make_mat_file(self):
""" makes name for matfile if doesn exist"""
pth, mv, _ = split_filename(self.inputs.moving)
_, tgt, _ = split_filename(self.inputs.target)
mat = os.path.join(pth, '%s_to_%s.mat'%(mv,tgt))
return mat
def _make_matlab_command(self, _):
"""checks for SPM, generates script"""
if not isdefined(self.inputs.mat):
self.inputs.mat = self._make_mat_file()
if not isdefined(self.inputs.invmat):
self.inputs.invmat = self._make_inv_file()
script = """
target = '%s';
moving = '%s';
targetv = spm_vol(target);
movingv = spm_vol(moving);
x = spm_coreg(targetv, movingv);
M = spm_matrix(x);
save('%s' , 'M' );
M = inv(M);
save('%s','M')
"""%(self.inputs.target,
self.inputs.moving,
self.inputs.mat,
self.inputs.invmat)
return script
def _list_outputs(self):
outputs = self._outputs().get()
outputs['mat'] = os.path.abspath(self.inputs.mat)
outputs['invmat'] = os.path.abspath(self.inputs.invmat)
return outputs
class ApplyTransformInputSpec(SPMCommandInputSpec):
in_file = File( exists = True, mandatory = True, copyfile=True,
desc='file to apply transform to, (only updates header)')
mat = File( exists = True, mandatory = True,
desc='file holding transform to apply')
out_file = File(desc="output file name for transformed data",
genfile=True)
class ApplyTransformOutputSpec(TraitedSpec):
out_file = File(exists = True, desc = 'Transformed image file')
class ApplyTransform(SPMCommand):
""" Uses SPM to apply transform stored in a .mat file to given file
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> applymat = spmu.ApplyTransform()
>>> applymat.inputs.in_file = 'functional.nii'
>>> applymat.inputs.mat = 'func_to_struct.mat'
>>> applymat.run() # doctest: +SKIP
"""
input_spec = ApplyTransformInputSpec
output_spec = ApplyTransformOutputSpec
def _make_matlab_command(self, _):
"""checks for SPM, generates script"""
outputs = self._list_outputs()
self.inputs.out_file = outputs['out_file']
script = """
infile = '%s';
        outfile = '%s';
transform = load('%s');
V = spm_vol(infile);
X = spm_read_vols(V);
[p n e v] = spm_fileparts(V.fname);
V.mat = transform.M * V.mat;
V.fname = fullfile(outfile);
spm_write_vol(V,X);
"""%(self.inputs.in_file,
self.inputs.out_file,
self.inputs.mat)
#img_space = spm_get_space(infile);
#spm_get_space(infile, transform.M * img_space);
return script
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self._gen_outfilename())
else:
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_trans.nii'
class ResliceInputSpec(SPMCommandInputSpec):
in_file = File( exists = True, mandatory=True,
desc='file to apply transform to, (only updates header)')
space_defining = File ( exists = True, mandatory = True,
desc = 'Volume defining space to slice in_file into')
interp = traits.Range(low = 0, high = 7, usedefault = True,
                          desc='degree of b-spline used for interpolation; '
                               '0 is nearest neighbor (default)')
out_file = File(desc = 'Optional file to save resliced volume')
class ResliceOutputSpec(TraitedSpec):
out_file = File( exists = True, desc = 'resliced volume')
class Reslice(SPMCommand):
""" uses spm_reslice to resample in_file into space of space_defining"""
input_spec = ResliceInputSpec
output_spec = ResliceOutputSpec
def _make_matlab_command(self, _):
""" generates script"""
if not isdefined(self.inputs.out_file):
self.inputs.out_file = fname_presuffix(self.inputs.in_file,
prefix = 'r')
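        # Assumed spm_reslice flag semantics (per SPM's documentation):
        # mean=0 writes no mean image, which=1 reslices all volumes except
        # the first (the space-defining image), mask=0 disables masking, and
        # interp is the b-spline degree requested above.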
script = """
flags.mean = 0;
flags.which = 1;
flags.mask = 0;
flags.interp = %d;
infiles = strvcat(\'%s\', \'%s\');
invols = spm_vol(infiles);
spm_reslice(invols, flags);
"""%(self.inputs.interp,
self.inputs.space_defining,
self.inputs.in_file)
return script
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class ApplyInverseDeformationInput(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True), mandatory=True, field='fnames',
desc='Files on which deformation is applied')
target = File(
exists=True,
field='comp{1}.inv.space',
desc='File defining target space')
deformation = File(
exists=True,
field='comp{1}.inv.comp{1}.sn2def.matname',
desc='SN SPM deformation file',
xor=['deformation_field'])
deformation_field = File(
exists=True,
field='comp{1}.inv.comp{1}.def',
desc='SN SPM deformation file',
xor=['deformation'])
interpolation = traits.Range(
low=0, high=7, field='interp',
desc='degree of b-spline used for interpolation')
bounding_box = traits.List(
traits.Float(),
field='comp{1}.inv.comp{1}.sn2def.bb',
minlen=6, maxlen=6,
desc='6-element list (opt)')
voxel_sizes = traits.List(
traits.Float(),
field='comp{1}.inv.comp{1}.sn2def.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
class ApplyInverseDeformationOutput(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc='Transformed files')
class ApplyInverseDeformation(SPMCommand):
""" Uses spm to apply inverse deformation stored in a .mat file or a
deformation field to a given file
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> inv = spmu.ApplyInverseDeformation()
>>> inv.inputs.in_files = 'functional.nii'
>>> inv.inputs.deformation = 'struct_to_func.mat'
>>> inv.inputs.target = 'structural.nii'
>>> inv.run() # doctest: +SKIP
"""
input_spec = ApplyInverseDeformationInput
output_spec = ApplyInverseDeformationOutput
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'target':
return scans_for_fname(filename_to_list(val))
if opt == 'deformation':
return np.array([list_to_filename(val)], dtype=object)
if opt == 'deformation_field':
return np.array([list_to_filename(val)], dtype=object)
return val
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['out_files'].append(os.path.realpath('w%s' % fname))
return outputs
class ResliceToReferenceInput(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True), mandatory=True, field='fnames',
desc='Files on which deformation is applied')
target = File(
exists=True,
field='comp{1}.id.space',
desc='File defining target space')
interpolation = traits.Range(
low=0, high=7, field='interp',
desc='degree of b-spline used for interpolation')
bounding_box = traits.List(
traits.Float(),
field='comp{2}.idbbvox.bb',
minlen=6, maxlen=6,
desc='6-element list (opt)')
voxel_sizes = traits.List(
traits.Float(),
field='comp{2}.idbbvox.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
class ResliceToReferenceOutput(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc='Transformed files')
class ResliceToReference(SPMCommand):
""" Uses spm to reslice a volume to a target image space or to a provided voxel size and bounding box
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> r2ref = spmu.ResliceToReference()
>>> r2ref.inputs.in_files = 'functional.nii'
>>> r2ref.inputs.target = 'structural.nii'
>>> r2ref.run() # doctest: +SKIP
"""
input_spec = ResliceToReferenceInput
output_spec = ResliceToReferenceOutput
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'target':
return scans_for_fname(filename_to_list(val))
return val
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['out_files'].append(os.path.realpath('w%s' % fname))
return outputs
class DicomImportInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True),
mandatory=True,
field='data',
desc='dicom files to be converted')
output_dir_struct = traits.Enum(
'flat', 'series', 'patname', 'patid_date', 'patid', 'date_time',
field='root',
usedefault=True,
desc='directory structure for the output.')
output_dir = traits.Str('./converted_dicom',
field='outdir',
usedefault=True,
desc='output directory.')
format = traits.Enum(
'nii', 'img',
field='convopts.format',
usedefault=True,
desc='output format.')
    icedims = traits.Bool(False,
                          field='convopts.icedims',
                          usedefault=True,
                          desc='If image sorting fails, one can try using '
                               'the additional SIEMENS ICEDims information '
                               'to create unique filenames. Use this only if '
                               'there would be multiple volumes with exactly '
                               'the same file names.')
class DicomImportOutputSpec(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc='converted files')
class DicomImport(SPMCommand):
""" Uses spm to convert DICOM files to nii or img+hdr.
Examples
--------
>>> import nipype.interfaces.spm.utils as spmu
>>> di = spmu.DicomImport()
>>> di.inputs.in_files = ['functional_1.dcm', 'functional_2.dcm']
>>> di.run() # doctest: +SKIP
"""
input_spec = DicomImportInputSpec
output_spec = DicomImportOutputSpec
_jobtype = 'util'
_jobname = 'dicom'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return np.array(val, dtype=object)
        if opt == 'output_dir':
            return np.array([val], dtype=object)
if opt == 'icedims':
if val:
return 1
return 0
return super(DicomImport, self)._format_arg(opt, spec, val)
def _run_interface(self, runtime):
od = os.path.abspath(self.inputs.output_dir)
if not os.path.isdir(od):
os.mkdir(od)
return super(DicomImport, self)._run_interface(runtime)
def _list_outputs(self):
from glob import glob
outputs = self._outputs().get()
od = os.path.abspath(self.inputs.output_dir)
outputs['out_files'] = glob(os.path.join(od, '*'))
return outputs
|
|
from __future__ import division
import base64
import hmac
import re
import requests
import os.path
from binascii import unhexlify
from hashlib import sha256
from io import BytesIO, IOBase
from math import ceil
from threading import Thread, Timer
from time import time
from .stream import Stream
from .wrappers import StreamIOIterWrapper
from ..buffers import RingBuffer
from ..cache import Cache
from ..compat import urljoin, urlparse, bytes, queue, range, is_py33
from ..compat import parse_qsl
from ..exceptions import StreamError
from ..utils import absolute_url, urlget, res_xml
from ..utils import swfdecompress
from ..packages.flashmedia import F4V, F4VError, FLVError
from ..packages.flashmedia.box import Box
from ..packages.flashmedia.tag import (AudioData, AACAudioData, VideoData,
AVCVideoData, VideoCommandFrame,
ScriptData, Header, Tag,
TAG_TYPE_SCRIPT, TAG_TYPE_AUDIO,
TAG_TYPE_VIDEO)
# Akamai HD player verification key
# Use unhexlify() rather than bytes.fromhex() for compatibility with Python
# versions before 3. However, in Python 3.2 (not 3.3+), unhexlify only
# accepts a byte string.
AKAMAIHD_PV_KEY = unhexlify(
b"BD938D5EE6D9F42016F9C56577B6FDCF415FE4B184932B785AB32BCADC9BB592")
AAC_SEQUENCE_HEADER = 0x00
AVC_SEQUENCE_HEADER = 0x00
AVC_SEQUENCE_END = 0x02
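# These constants mirror the FLV packet-type values: for AAC audio and AVC
# video tags, a type byte of 0 marks a sequence header (codec configuration),
# and for AVC a type byte of 2 marks the end of the sequence.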
# Some streams hosted by Akamai seem to require a hdcore parameter
# to function properly.
HDCORE_VERSION = "3.1.0"
class HDSStreamFiller(Thread):
def __init__(self, stream):
Thread.__init__(self)
self.daemon = True
self.error = None
self.running = False
self.stream = stream
self.queue = queue.Queue(maxsize=5)
self.avc_header_written = False
self.aac_header_written = False
self.timestamps = {
TAG_TYPE_AUDIO: None,
TAG_TYPE_VIDEO: None,
TAG_TYPE_SCRIPT: None
}
self.create_tag_buffer(8182 * 8)
def create_tag_buffer(self, size):
if is_py33:
self.tag_buffer = memoryview(bytearray(size))
else:
self.tag_buffer = bytearray(size)
def download_fragment(self, segment, fragment):
url = self.stream.fragment_url(segment, fragment)
self.stream.logger.debug("[Fragment {0}-{1}] Opening URL: {2}",
segment, fragment, url)
retries = 3
res = None
while retries > 0 and self.running:
try:
res = urlget(url, stream=True, exception=IOError,
session=self.stream.rsession, timeout=10)
break
except IOError as err:
self.stream.logger.error("[Fragment {0}-{1}] Failed to open: {2}",
segment, fragment, str(err))
retries -= 1
if not res:
return
size = int(res.headers.get("content-length", "0"))
size = size * self.stream.buffer_fragments
if size > self.stream.buffer.buffer_size:
self.stream.buffer.resize(size)
fd = StreamIOIterWrapper(res.iter_content(8192))
return self.convert_fragment(segment, fragment, fd)
def convert_fragment(self, segment, fragment, fd):
mdat = None
try:
f4v = F4V(fd, raw_payload=True)
# Fast forward to mdat box
for box in f4v:
if box.type == "mdat":
mdat = box.payload.data
break
except F4VError as err:
self.stream.logger.error("[Fragment {0}-{1}] Failed to deserialize: {2}",
segment, fragment, str(err))
return
if not mdat:
self.stream.logger.error("[Fragment {0}-{1}] No mdat box found",
segment, fragment)
return
self.stream.logger.debug(("[Fragment {0}-{1}] Extracting FLV tags from"
" MDAT box"), segment, fragment)
mdat_size = len(mdat)
if mdat_size > len(self.tag_buffer):
self.create_tag_buffer(mdat_size)
self.mdat_offset = 0
self.tag_offset = 0
while self.running and self.mdat_offset < mdat_size:
try:
self.extract_flv_tag(mdat)
except (FLVError, IOError) as err:
self.stream.logger.error(("Failed to extract FLV tag from MDAT"
" box: {0}").format(str(err)))
break
self.stream.buffer.write(self.tag_buffer[:self.tag_offset])
return True
def extract_flv_tag(self, mdat):
tag, self.mdat_offset = Tag.deserialize_from(mdat, self.mdat_offset)
if tag.filter:
self.stop()
self.error = IOError("Tag has filter flag set, probably encrypted")
raise self.error
if isinstance(tag.data, AudioData):
if isinstance(tag.data.data, AACAudioData):
if tag.data.data.type == AAC_SEQUENCE_HEADER:
if self.aac_header_written:
return
self.aac_header_written = True
else:
if not self.aac_header_written:
self.stream.logger.debug("Skipping AAC data before header")
return
if isinstance(tag.data, VideoData):
if isinstance(tag.data.data, AVCVideoData):
if tag.data.data.type == AVC_SEQUENCE_HEADER:
if self.avc_header_written:
return
self.avc_header_written = True
else:
if not self.avc_header_written:
self.stream.logger.debug("Skipping AVC data before header")
return
elif isinstance(tag.data.data, VideoCommandFrame):
self.stream.logger.debug("Skipping video command frame")
return
if tag.type in self.timestamps:
if self.timestamps[tag.type] is None:
self.timestamps[tag.type] = tag.timestamp
else:
tag.timestamp = max(0, tag.timestamp - self.timestamps[tag.type])
self.tag_offset = tag.serialize_into(self.tag_buffer, self.tag_offset)
def run(self):
self.stream.logger.debug("Starting buffer filler thread")
while self.running:
try:
segment, fragment, fragment_duration = self.queue.get(True, 5)
except queue.Empty:
continue
# Make sure timestamps don't get out of sync when
# a fragment is missing or failed to download.
if not self.download_fragment(segment, fragment):
for key, value in self.timestamps.items():
if value is not None:
self.timestamps[key] += fragment_duration
else:
self.timestamps[key] = fragment_duration
if fragment == self.stream.end_fragment:
break
self.stop()
self.stream.logger.debug("Buffer filler thread completed")
def start(self):
self.running = True
return Thread.start(self)
def stop(self):
self.running = False
self.stream.buffer.close()
if self.stream.bootstrap_timer:
self.stream.bootstrap_timer.cancel()
class HDSStreamIO(IOBase):
FragmentURL = "{url}{identifier}{quality}Seg{segment}-Frag{fragment}"
def __init__(self, session, baseurl, url, bootstrap, metadata=None,
timeout=60, rsession=None):
self.buffer = None
self.buffer_time = session.options.get("hds-live-edge")
self.buffer_fragments = int(session.options.get("hds-fragment-buffer"))
self.baseurl = baseurl
self.bootstrap = bootstrap
self.logger = session.logger.new_module("stream.hds")
self.metadata = metadata
self.session = session
self.timeout = timeout
self.url = url
if rsession:
self.rsession = rsession
else:
self.rsession = requests.session()
def open(self):
self.current_segment = -1
self.current_fragment = -1
self.first_fragment = 1
self.last_fragment = -1
self.end_fragment = None
self.bootstrap_timer = None
self.bootstrap_minimal_reload_time = 2.0
self.bootstrap_reload_time = self.bootstrap_minimal_reload_time
self.bootstrap_reload_timestamp = 0
self.invalid_fragments = set()
self.buffer = RingBuffer()
self.header_written = False
self.filler = HDSStreamFiller(self)
self.filler.start()
try:
self.update_bootstrap(silent=False, fillqueue=True)
except StreamError:
self.close()
raise
return self
def close(self):
self.filler.stop()
if self.filler.is_alive():
self.filler.join()
def read(self, size=-1):
if not self.buffer:
return b""
if self.filler.error:
raise self.filler.error
return self.buffer.read(size, block=self.filler.is_alive(),
timeout=self.timeout)
def fragment_url(self, segment, fragment):
url = absolute_url(self.baseurl, self.url)
return self.FragmentURL.format(url=url, identifier="",
quality="", segment=segment,
fragment=fragment)
def update_bootstrap(self, silent=True, fillqueue=False):
if not self.filler.running:
return
if self.end_fragment and self.current_fragment > self.end_fragment:
return
# Wait until buffer has room before requesting a new bootstrap
self.buffer.wait_free()
elapsed = time() - self.bootstrap_reload_timestamp
if elapsed > self.bootstrap_reload_time:
try:
self._update_bootstrap()
except IOError as err:
self.bootstrap_reload_time = self.bootstrap_minimal_reload_time
if silent:
self.logger.error("Failed to update bootstrap: {0}",
str(err))
else:
raise StreamError(str(err))
if not self.header_written:
flvheader = Header(has_video=True, has_audio=True)
self.buffer.write(flvheader.serialize())
if self.metadata:
# Remove duration from metadata when it's a livestream
# since it will just confuse players anyway.
if self.live and "duration" in self.metadata.value:
del self.metadata.value["duration"]
tag = Tag(TAG_TYPE_SCRIPT, timestamp=0, data=self.metadata)
self.buffer.write(tag.serialize())
self.header_written = True
if self.bootstrap_changed:
self._queue_fragments(fillqueue)
if self.bootstrap_timer:
self.bootstrap_timer.cancel()
self.bootstrap_timer = Timer(1, self.update_bootstrap)
self.bootstrap_timer.daemon = True
self.bootstrap_timer.start()
def _update_bootstrap(self):
self.logger.debug("Updating bootstrap")
if isinstance(self.bootstrap, Box):
bootstrap = self.bootstrap
else:
bootstrap = self._fetch_bootstrap(self.bootstrap)
self.live = bootstrap.payload.live
self.profile = bootstrap.payload.profile
self.timestamp = bootstrap.payload.current_media_time
self.identifier = bootstrap.payload.movie_identifier
self.time_scale = bootstrap.payload.time_scale
self.segmentruntable = bootstrap.payload.segment_run_table_entries[0]
self.fragmentruntable = bootstrap.payload.fragment_run_table_entries[0]
self.first_fragment, last_fragment = self._fragment_count()
fragment_duration = self._fragment_duration(last_fragment)
if last_fragment != self.last_fragment:
self.bootstrap_changed = True
self.last_fragment = last_fragment
else:
self.bootstrap_changed = False
if self.current_fragment < 0:
if self.live:
current_fragment = last_fragment
# Starting a few fragments before the last one makes us less likely
# to hit the live edge; the default buffer is 10 sec.
fragment_buffer = int(ceil(self.buffer_time / fragment_duration))
current_fragment = max(self.first_fragment, current_fragment - (fragment_buffer - 1))
self.logger.debug("Live edge buffer {0} sec is {1} fragments",
self.buffer_time, fragment_buffer)
else:
current_fragment = self.first_fragment
self.current_fragment = current_fragment
self.logger.debug("Current timestamp: {0}", self.timestamp / self.time_scale)
self.logger.debug("Current segment: {0}", self.current_segment)
self.logger.debug("Current fragment: {0}", self.current_fragment)
self.logger.debug("First fragment: {0}", self.first_fragment)
self.logger.debug("Last fragment: {0}", self.last_fragment)
self.logger.debug("End fragment: {0}", self.end_fragment)
self.bootstrap_reload_timestamp = time()
self.bootstrap_reload_time = fragment_duration
if self.live and not self.bootstrap_changed:
self.logger.debug("Bootstrap not changed, shortening timer")
self.bootstrap_reload_time /= 2
if self.bootstrap_reload_time < self.bootstrap_minimal_reload_time:
self.bootstrap_reload_time = self.bootstrap_minimal_reload_time
def _queue_fragments(self, fillqueue=False):
for i, fragment in enumerate(range(self.current_fragment, self.last_fragment + 1)):
if not self.filler.running or (fillqueue and i == self.filler.queue.maxsize):
break
if fragment in self.invalid_fragments:
continue
self.current_fragment = fragment + 1
self.current_segment = self._segment_from_fragment(fragment)
fragment_duration = int(self._fragment_duration(fragment) * 1000)
entry = (self.current_segment, fragment, fragment_duration)
self.logger.debug("[Fragment {0}-{1}] Adding to queue",
entry[0], entry[1])
while self.filler.running:
try:
self.filler.queue.put(entry, True, 5)
break
except queue.Full:
continue
self.bootstrap_changed = self.current_fragment != self.last_fragment
def _fetch_bootstrap(self, url):
res = urlget(url, session=self.rsession, exception=IOError)
return Box.deserialize(BytesIO(res.content))
def _segment_from_fragment(self, fragment):
table = self.segmentruntable.payload.segment_run_entry_table
for segment, start, end in self._iterate_segments(table):
if fragment >= (start + 1) and fragment <= (end + 1):
break
else:
segment = 1
return segment
def _iterate_segments(self, table):
# If the first segment in the table starts at the beginning we can go from there,
# otherwise we start from the end and use the total fragment count to figure
# out where the last segment ends.
if table[0].first_segment == 1:
prev_frag = self.first_fragment - 1
for segmentrun in table:
start = prev_frag + 1
end = prev_frag + segmentrun.fragments_per_segment
yield segmentrun.first_segment, start, end
prev_frag = end
else:
prev_frag = self.last_fragment + 1
for segmentrun in reversed(table):
start = prev_frag - segmentrun.fragments_per_segment
end = prev_frag - 1
yield segmentrun.first_segment, start, end
prev_frag = start
def _debug_fragment_table(self):
fragmentruntable = self.fragmentruntable.payload.fragment_run_entry_table
for i, fragmentrun in enumerate(fragmentruntable):
print(fragmentrun.first_fragment, fragmentrun.first_fragment_timestamp,
fragmentrun.fragment_duration, fragmentrun.discontinuity_indicator)
def _fragment_count(self):
table = self.fragmentruntable.payload.fragment_run_entry_table
first_fragment, end_fragment = None, None
for i, fragmentrun in enumerate(table):
if fragmentrun.discontinuity_indicator is not None:
if fragmentrun.discontinuity_indicator == 0:
break
elif fragmentrun.discontinuity_indicator > 0:
continue
if first_fragment is None:
first_fragment = fragmentrun.first_fragment
end_fragment = fragmentrun.first_fragment
fragment_duration = fragmentrun.first_fragment_timestamp + fragmentrun.fragment_duration
if self.timestamp > fragment_duration:
offset = (self.timestamp - fragment_duration) / fragmentrun.fragment_duration
end_fragment += int(offset)
if first_fragment is None:
first_fragment = 1
if end_fragment is None:
end_fragment = 1
return first_fragment, end_fragment
def _fragment_duration(self, fragment):
fragment_duration = 0
table = self.fragmentruntable.payload.fragment_run_entry_table
time_scale = self.fragmentruntable.payload.time_scale
for i, fragmentrun in enumerate(table):
if fragmentrun.discontinuity_indicator is not None:
self.invalid_fragments.add(fragmentrun.first_fragment)
# Check for the last fragment of the stream
if fragmentrun.discontinuity_indicator == 0:
if i > 0:
prev = table[i-1]
self.end_fragment = prev.first_fragment
break
elif fragmentrun.discontinuity_indicator > 0:
continue
if fragment >= fragmentrun.first_fragment:
fragment_duration = fragmentrun.fragment_duration / time_scale
return fragment_duration
class HDSStream(Stream):
"""
Implements the Adobe HTTP Dynamic Streaming protocol
*Attributes:*
- :attr:`baseurl` Base URL
- :attr:`url` Base path of the stream, joined with the base URL when fetching fragments
- :attr:`bootstrap` Either a URL pointing to the bootstrap or a bootstrap :class:`Box` object
used for initial information about the stream
- :attr:`metadata` Either `None` or a :class:`ScriptData` object that contains metadata about
the stream, such as height, width and bitrate
"""
__shortname__ = "hds"
def __init__(self, session, baseurl, url, bootstrap, metadata=None,
timeout=60, rsession=None):
Stream.__init__(self, session)
self.baseurl = baseurl
self.url = url
self.bootstrap = bootstrap
self.metadata = metadata
self.timeout = timeout
self.rsession = rsession
def __repr__(self):
return ("<HDSStream({0!r}, {1!r}, {2!r},"
" metadata={3!r}, timeout={4!r})>").format(self.baseurl,
self.url,
self.bootstrap,
self.metadata,
self.timeout)
def __json__(self):
if isinstance(self.bootstrap, Box):
bootstrap = base64.b64encode(self.bootstrap.serialize())
else:
bootstrap = self.bootstrap
if isinstance(self.metadata, ScriptData):
metadata = self.metadata.__dict__
else:
metadata = self.metadata
return dict(type=HDSStream.shortname(), baseurl=self.baseurl,
url=self.url, bootstrap=bootstrap, metadata=metadata)
def open(self):
fd = HDSStreamIO(self.session, self.baseurl, self.url, self.bootstrap,
self.metadata, self.timeout, self.rsession)
return fd.open()
@classmethod
def parse_manifest(cls, session, url, timeout=60, rsession=None,
pvswf=None):
"""Parses a HDS manifest and returns its substreams.
:param url: The URL to the manifest.
:param timeout: How long to wait for data to be returned from
from the stream before raising an error.
:param rsession: requests session used for the streams.
:param pvswf: URL of player SWF for Akamai HD player verification.
"""
if not rsession:
rsession = requests.session()
if "akamaihd" in url:
rsession.params["hdcore"] = HDCORE_VERSION
res = urlget(url, exception=IOError, session=rsession)
manifest = res_xml(res, "manifest XML", ignore_ns=True,
exception=IOError)
parsed = urlparse(url)
baseurl = manifest.findtext("baseURL")
baseheight = manifest.findtext("height")
bootstraps = {}
streams = {}
if not baseurl:
baseurl = urljoin(url, os.path.dirname(parsed.path)) + "/"
for bootstrap in manifest.findall("bootstrapInfo"):
name = bootstrap.attrib.get("id") or "_global"
url = bootstrap.attrib.get("url")
if url:
box = absolute_url(baseurl, url)
else:
data = base64.b64decode(bytes(bootstrap.text, "utf8"))
box = Box.deserialize(BytesIO(data))
bootstraps[name] = box
pvtoken = manifest.findtext("pv-2.0")
if pvtoken:
if not pvswf:
raise IOError("This manifest requires the 'pvswf' parameter "
"to verify the SWF")
params = cls._pv_params(pvswf, pvtoken)
rsession.params.update(params)
for media in manifest.findall("media"):
url = media.attrib.get("url")
bootstrapid = media.attrib.get("bootstrapInfoId", "_global")
href = media.attrib.get("href")
if url and bootstrapid:
bootstrap = bootstraps.get(bootstrapid)
if not bootstrap:
continue
bitrate = media.attrib.get("bitrate")
streamid = media.attrib.get("streamId")
height = media.attrib.get("height")
if height:
quality = height + "p"
elif bitrate:
quality = bitrate + "k"
elif streamid:
quality = streamid
elif baseheight:
quality = baseheight + "p"
else:
quality = "live"
metadata = media.findtext("metadata")
if metadata:
metadata = base64.b64decode(bytes(metadata, "utf8"))
metadata = ScriptData.deserialize(BytesIO(metadata))
else:
metadata = None
stream = HDSStream(session, baseurl, url, bootstrap,
metadata=metadata, timeout=timeout,
rsession=rsession)
streams[quality] = stream
elif href:
url = absolute_url(baseurl, href)
child_streams = cls.parse_manifest(session, url,
timeout=timeout,
rsession=rsession)
for name, stream in child_streams.items():
# Override stream name if bitrate is available in parent
# manifest but not the child one.
bitrate = media.attrib.get("bitrate")
if bitrate and not re.match(r"^(\d+)k$", name):
name = bitrate + "k"
streams[name] = stream
return streams
@classmethod
def _pv_params(cls, pvswf, pv):
"""Returns any parameters needed for Akamai HD player verification.
Algorithm originally documented by KSV, source:
http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
"""
(data, hdntl) = pv.split(";")
cache = Cache(filename="stream.json")
key = "akamaihd-player:" + pvswf
cached = cache.get(key)
headers = dict()
if cached:
headers["If-Modified-Since"] = cached["modified"]
swf = urlget(pvswf, headers=headers)
if cached and swf.status_code == 304: # Server says not modified
hash = cached["hash"]
else:
# Calculate SHA-256 hash of the uncompressed SWF file, base-64
# encoded
hash = sha256()
hash.update(swfdecompress(swf.content))
hash = base64.b64encode(hash.digest()).decode("ascii")
modified = swf.headers.get("Last-Modified", "")
# Only save in cache if a valid date is given
if len(modified) < 40:
cache.set(key, dict(hash=hash, modified=modified))
msg = "st=0~exp=9999999999~acl=*~data={0}!{1}".format(data, hash)
auth = hmac.new(AKAMAIHD_PV_KEY, msg.encode("ascii"), sha256)
pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())
# The "hdntl" parameter can be accepted as a cookie or passed in the
# query string, but the "pvtoken" parameter can only be in the query
# string
params = [("pvtoken", pvtoken)]
params.extend(parse_qsl(hdntl, keep_blank_values=True))
return params
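# Minimal usage sketch (not part of the original module): parse a manifest and
# dump one substream to a file. The `session` argument is assumed to be the
# same session object HDSStreamIO expects, providing `options` and a `logger`
# factory; it is not defined here.
def _example_dump_stream(session, manifest_url, outpath):
    streams = HDSStream.parse_manifest(session, manifest_url)
    if not streams:
        raise IOError("No substreams found in manifest")
    # Quality names depend on the manifest (e.g. "720p", "1500k" or "live"),
    # so just take the first available substream for this sketch.
    fd = next(iter(streams.values())).open()
    try:
        with open(outpath, "wb") as out:
            while True:
                # read() yields the remuxed FLV byte stream
                data = fd.read(8192)
                if not data:
                    break
                out.write(data)
    finally:
        fd.close()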
|
|
from tcunittest import TeamcityTestRunner, TeamcityTestResult
from tcmessages import TeamcityServiceMessages
import sys
from pycharm_run_utils import adjust_django_sys_path
from django.test.utils import get_runner
adjust_django_sys_path()
from django.conf import settings
def _is_nosetest(settings):
"""
Checks if Django is configured to work with nosetest
:param settings: django settings
:return: True if Django should work with the NoseTest runner or one of its
inheritors
"""
try:
runner = get_runner(settings)
from django_nose import NoseTestSuiteRunner
if issubclass(runner, NoseTestSuiteRunner):
return True
except (AttributeError, ImportError):
pass
return False
from django.test.testcases import TestCase
from django import VERSION
if _is_nosetest(settings):
from nose_utils import TeamcityNoseRunner
# See: https://docs.djangoproject.com/en/1.8/releases/1.7/#django-utils-unittest
# django.utils.unittest provided uniform access to the unittest2 library on all Python versions.
# Since unittest2 became the standard library's unittest module in Python 2.7,
# and Django 1.7 drops support for older Python versions, this module isn't useful anymore.
# It has been deprecated. Use unittest instead.
if VERSION[1] >= 7:
import unittest
else:
from django.utils import unittest
def get_test_suite_runner():
if hasattr(settings, "TEST_RUNNER"):
from django.test.utils import get_runner
class TempSettings:
TEST_RUNNER = settings.TEST_RUNNER
return get_runner(TempSettings)
try:
if VERSION[1] >= 6:
from django.test.runner import DiscoverRunner as DjangoSuiteRunner
else:
from django.test.simple import DjangoTestSuiteRunner as DjangoSuiteRunner
from inspect import isfunction
SUITE_RUNNER = get_test_suite_runner()
if isfunction(SUITE_RUNNER):
import sys
sys.stderr.write(
"WARNING: TEST_RUNNER variable is ignored. PyCharm test runner supports "
"only class-like TEST_RUNNER valiables. Use Tools->run manage.py tasks.\n")
SUITE_RUNNER = None
BaseSuiteRunner = SUITE_RUNNER or DjangoSuiteRunner
class BaseRunner(TeamcityTestRunner, BaseSuiteRunner):
def __init__(self, stream=sys.stdout, **options):
TeamcityTestRunner.__init__(self, stream)
BaseSuiteRunner.__init__(self, **options)
except ImportError:
# for Django <= 1.1 compatibility
class BaseRunner(TeamcityTestRunner):
def __init__(self, stream=sys.stdout, **options):
TeamcityTestRunner.__init__(self, stream)
def strclass(cls):
if not cls.__name__:
return cls.__module__
return "%s.%s" % (cls.__module__, cls.__name__)
class DjangoTeamcityTestResult(TeamcityTestResult):
def __init__(self, *args, **kwargs):
super(DjangoTeamcityTestResult, self).__init__(**kwargs)
def _getSuite(self, test):
if hasattr(test, "suite"):
suite = strclass(test.suite)
suite_location = test.suite.location
location = test.suite.abs_location
if hasattr(test, "lineno"):
location = location + ":" + str(test.lineno)
else:
location = location + ":" + str(test.test.lineno)
else:
suite = strclass(test.__class__)
suite_location = "django_testid://" + suite
location = "django_testid://" + str(test.id())
return (suite, location, suite_location)
class DjangoTeamcityTestRunner(BaseRunner):
def __init__(self, stream=sys.stdout, **options):
super(DjangoTeamcityTestRunner, self).__init__(stream, **options)
self.options = options
def _makeResult(self, **kwargs):
return DjangoTeamcityTestResult(self.stream, **kwargs)
def build_suite(self, *args, **kwargs):
EXCLUDED_APPS = getattr(settings, 'TEST_EXCLUDE', [])
suite = super(DjangoTeamcityTestRunner, self).build_suite(*args, **kwargs)
if not args[0] and not getattr(settings, 'RUN_ALL_TESTS', False):
tests = []
for case in suite:
pkg = case.__class__.__module__.split('.')[0]
if pkg not in EXCLUDED_APPS:
tests.append(case)
suite._tests = tests
return suite
def run_suite(self, suite, **kwargs):
if _is_nosetest(settings):
from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin
from django_nose.runner import _get_plugins_from_settings
from nose.config import Config
import nose
result_plugin = ResultPlugin()
plugins_to_add = [DjangoSetUpPlugin(self), result_plugin]
config = Config(plugins=nose.core.DefaultPluginManager())
config.plugins.addPlugins(extraplugins=plugins_to_add)
for plugin in _get_plugins_from_settings():
plugins_to_add.append(plugin)
nose.core.TestProgram(argv=suite, exit=False, addplugins=plugins_to_add,
testRunner=TeamcityNoseRunner(config=config))
return result_plugin.result
else:
self.options.update(kwargs)
return TeamcityTestRunner.run(self, suite, **self.options)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
if _is_nosetest(settings):
return super(DjangoTeamcityTestRunner, self).run_tests(test_labels, extra_tests)
return super(DjangoTeamcityTestRunner, self).run_tests(test_labels, extra_tests, **kwargs)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
classes is a sequence of types
All tests of type classes[0] are placed first, then tests of type classes[1], etc.
Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count + 1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i + 1])
return bins[0]
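# Sketch of the reordering semantics (case names are illustrative):
#
#   suite = unittest.TestSuite([plain_case, django_case])
#   ordered = reorder_suite(suite, (TestCase,))
#   # django_case (a TestCase) now runs before plain_case; relative order
#   # within each bin is preserved.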
def run_the_old_way(extra_tests, kwargs, test_labels, verbosity):
from django.test.simple import build_suite, build_test, get_app, get_apps, \
setup_test_environment, teardown_test_environment
setup_test_environment()
settings.DEBUG = False
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, (TestCase,))
old_name = settings.DATABASE_NAME
from django.db import connection
connection.creation.create_test_db(verbosity, autoclobber=False)
result = DjangoTeamcityTestRunner().run(suite, **kwargs)
connection.creation.destroy_test_db(old_name, verbosity)
teardown_test_environment()
return len(result.failures) + len(result.errors)
def run_tests(test_labels, verbosity=1, interactive=False, extra_tests=[],
**kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
options = {
'verbosity': verbosity,
'interactive': interactive
}
options.update(kwargs)
TeamcityServiceMessages(sys.stdout).testMatrixEntered()
if VERSION[1] > 1:
return DjangoTeamcityTestRunner(**options).run_tests(test_labels,
extra_tests=extra_tests, **options)
return run_the_old_way(extra_tests, options, test_labels, verbosity)
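# Usage sketch (label names are illustrative):
#
#   run_tests(['app.TestClass.test_method'])  # a single test method
#   run_tests(['app.TestClass'])              # all methods of one class
#   run_tests(['app'])                        # doctests and unittests of an app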
|
|
"""This pipeline is intended to make the classification of ALL modality
features."""
from __future__ import division
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from protoclass.data_management import GTModality
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# T2W
t2w_features = ['dct-t2w', 'edge-t2w/kirsch', 'edge-t2w/laplacian',
'edge-t2w/prewitt', 'edge-t2w/scharr', 'edge-t2w/sobel',
'gabor-t2w', 'harlick-t2w', 'ise-t2w', 'lbp-t2w', 'lbp-t2w',
'phase-congruency-t2w']
t2w_ext_features = ['_dct_t2w.npy', '_edge_t2w.npy', '_edge_t2w.npy',
'_edge_t2w.npy', '_edge_t2w.npy', '_edge_t2w.npy',
'_gabor_t2w.npy', '_haralick_t2w.npy', '_ise_t2w.npy',
'_lbp_8_1_t2w.npy', '_lbp_16_2_t2w.npy',
'_phase_congruency_t2w.npy']
# ADC
adc_features = ['dct-adc', 'edge-adc/kirsch', 'edge-adc/laplacian',
'edge-adc/prewitt', 'edge-adc/scharr', 'edge-adc/sobel',
'gabor-adc', 'harlick-adc', 'ise-adc', 'lbp-adc', 'lbp-adc',
'phase-congruency-adc']
adc_ext_features = ['_dct_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
'_edge_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
'_gabor_adc.npy', '_haralick_adc.npy', '_ise_adc.npy',
'_lbp_8_1_adc.npy', '_lbp_16_2_adc.npy',
'_phase_congruency_adc.npy']
# MRSI
mrsi_features = ['mrsi-spectra']
mrsi_ext_features = ['_spectra_mrsi.npy']
# DCE
dce_features = ['ese-dce']
dce_ext_features = ['_ese__dce.npy']
# Spatial information
spatial_features = ['spatial-position-euclidean', 'spatial-dist-center',
'spatial-dist-contour']
spatial_ext_features = ['_spe.npy', '_spe.npy',
'_spe.npy']
# Define the paths of the ground-truth for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
# Sort the list of patients
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
# Append the GT data - note that we need a list of GT paths
path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data_t2w = []
data_adc = []
data_dce = []
data_mrsi = []
data_spatial = []
label = []
for idx_pat in range(len(id_patient_list)):
print 'Read patient {}'.format(id_patient_list[idx_pat])
# Create the corresponding ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt,
path_patients_list_gt[idx_pat])
print 'Read the GT data for the current patient ...'
# Get the indices of the prostate ROI
roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
# Get the label of the gt only for the prostate ROI
gt_pz = gt_mod.extract_gt_data('pz', output_type='data')
gt_pz = gt_pz[roi_prostate]
# Read the T2W information
patient_data_t2w = []
for idx_feat in range(len(t2w_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
t2w_ext_features[idx_feat])
path_data = os.path.join(path_features, t2w_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Promote one-dimensional data to a column vector
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data_t2w.append(single_feature_data)
# Concatenate the data
patient_data_t2w = np.concatenate(patient_data_t2w, axis=1)
data_t2w.append(patient_data_t2w)
# Read the ADC information
patient_data_adc = []
for idx_feat in range(len(adc_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
adc_ext_features[idx_feat])
path_data = os.path.join(path_features, adc_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Promote one-dimensional data to a column vector
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data_adc.append(single_feature_data)
# Concatenate the data
patient_data_adc = np.concatenate(patient_data_adc, axis=1)
data_adc.append(patient_data_adc)
# Read the MRSI information
patient_data_mrsi = []
for idx_feat in range(len(mrsi_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
mrsi_ext_features[idx_feat])
path_data = os.path.join(path_features, mrsi_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Promote one-dimensional data to a column vector
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data_mrsi.append(single_feature_data)
# Concatenate the data
patient_data_mrsi = np.concatenate(patient_data_mrsi, axis=1)
data_mrsi.append(patient_data_mrsi)
# Read the DCE information
patient_data_dce = []
for idx_feat in range(len(dce_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
dce_ext_features[idx_feat])
path_data = os.path.join(path_features, dce_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Promote one-dimensional data to a column vector
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data_dce.append(single_feature_data)
# Concatenate the data
patient_data_dce = np.concatenate(patient_data_dce, axis=1)
data_dce.append(patient_data_dce)
# Read the SPATIAL information
patient_data_spatial = []
for idx_feat in range(len(spatial_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
spatial_ext_features[idx_feat])
path_data = os.path.join(path_features, spatial_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Promote one-dimensional data to a column vector
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data_spatial.append(single_feature_data)
# Add the information about the pz
patient_data_spatial.append(np.atleast_2d(gt_pz).T)
# Concatenate the data
patient_data_spatial = np.concatenate(patient_data_spatial, axis=1)
data_spatial.append(patient_data_spatial)
# Extract the corresponding ground-truth for the testing data
# Get the index corresponding to the ground-truth
gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
label.append(gt_cap[roi_prostate])
print 'Data and label extracted for the current patient ...'
# Create a list concatenating all the data
data = [data_t2w, data_adc, data_mrsi, data_dce, data_spatial]
result_cv = []
# Go for LOPO cross-validation
for idx_lopo_cv in range(len(id_patient_list)):
# Display some information about the LOPO-CV
print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
# We will need a training and a validation set for the meta-classifier
# Create a vector with all the patients
idx_patient = range(len(id_patient_list))
idx_patient.remove(idx_lopo_cv)
idx_patient = np.roll(idx_patient, idx_lopo_cv)
# Use 60 percent for training and 40 percent for validation
idx_split = int(0.6 * (len(id_patient_list) - 1))
idx_patient_training = idx_patient[:idx_split]
idx_patient_validation = idx_patient[idx_split:]
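# Worked example: with 5 patients and idx_lopo_cv == 1, idx_patient is
# [0, 2, 3, 4]; rolled by 1 it becomes [4, 0, 2, 3]. idx_split is
# int(0.6 * 4) == 2, so patients [4, 0] train the first layer and
# [2, 3] are held out to fit the meta-classifier.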
# Create an empty list for the ensemble of RF
rf_ensemble = []
# Get the label
training_label = [arr for idx_arr, arr in enumerate(label)
if idx_arr in idx_patient_training]
training_label = np.ravel(label_binarize(
np.hstack(training_label).astype(int), [0, 255]))
# Build the training set and train one random forest per modality
for mod_data in range(len(data) - 1):
# Get the training data
# Create the training data and label
training_data_mod = [arr for idx_arr, arr in enumerate(data[mod_data])
if idx_arr in idx_patient_training]
# Get the spatial information
training_data_spa = [arr for idx_arr, arr in enumerate(data[-1])
if idx_arr in idx_patient_training]
# Concatenate the data
training_data_mod = np.vstack(training_data_mod)
training_data_spa = np.vstack(training_data_spa)
# Concatenate spatial information and modality information
training_data = np.hstack((training_data_mod, training_data_spa))
# Create the current RF
crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
crf.fit(training_data, training_label)
# Add the classifier
rf_ensemble.append(crf)
# Get the labels
validation_label = [arr for idx_arr, arr in enumerate(label)
if idx_arr in idx_patient_validation]
validation_label = np.ravel(label_binarize(
np.hstack(validation_label).astype(int), [0, 255]))
# We need to create the meta classifier
rf_data_answer = []
for mod_data in range(len(data) - 1):
# Create the validation data and label
validation_data_mod = [arr for idx_arr, arr
in enumerate(data[mod_data])
if idx_arr in idx_patient_validation]
# Get the spatial information
validation_data_spa = [arr for idx_arr, arr in enumerate(data[-1])
if idx_arr in idx_patient_validation]
# Concatenate the data
validation_data_mod = np.vstack(validation_data_mod)
validation_data_spa = np.vstack(validation_data_spa)
# Concatenate spatial information and modality information
validation_data = np.hstack((validation_data_mod, validation_data_spa))
# Get the validation through the already trained forest
pred_proba = rf_ensemble[mod_data].predict_proba(validation_data)
# Select only the column corresponding to the positive class
pos_class_arg = np.ravel(np.argwhere(
rf_ensemble[mod_data].classes_ == 1))[0]
rf_data_answer.append(pred_proba[:, pos_class_arg])
# For now we will train a classifier using the previously extracted
# probabilities
rf_data_answer = np.vstack(rf_data_answer).T
# Create the meta-classifier
cgb = AdaBoostClassifier()
cgb.fit(rf_data_answer, validation_label)
testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
# Go for the testing now
testing_inter = []
for mod_data in range(len(data) - 1):
testing_data_mod = data[mod_data][idx_lopo_cv]
testing_data_spa = data[-1][idx_lopo_cv]
testing_data = np.hstack((testing_data_mod, testing_data_spa))
# Get the probability of the first layer
pred_proba = rf_ensemble[mod_data].predict_proba(testing_data)
# Select only the column corresponding to the positive class
pos_class_arg = np.ravel(np.argwhere(
rf_ensemble[mod_data].classes_ == 1))[0]
testing_inter.append(pred_proba[:, pos_class_arg])
# Make the classification with the second layer
testing_inter = np.vstack(testing_inter).T
pred_prob = cgb.predict_proba(testing_inter)
result_cv.append([pred_prob, cgb.classes_])
# Save the information
path_store = '/data/prostate/results/mp-mri-prostate/exp-2/stacking-adaboost'
if not os.path.exists(path_store):
os.makedirs(path_store)
joblib.dump(result_cv, os.path.join(path_store,
'results.pkl'))
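# The stored LOPO-CV results can be reloaded later for evaluation (sketch):
#
#   result_cv = joblib.load(os.path.join(path_store, 'results.pkl'))
#   pred_prob, classes = result_cv[0]  # probabilities and class order of fold 1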
|
|
#!/usr/bin/env python2.5
import sys
import unittest
from mappinglib import *
def lst_dump(lst, fp=sys.stdout):
its = [(' FORWARD ', lst_iter(lst_head(lst))),
(' REVERSE ', lst_iter(lst_tail(lst), reverse=True))]
high = idx = 0
for title, it in its:
print >>fp, title.center(70, '-')
for idx, node in enumerate(it):
print >>fp, ' %-5d I=%-8x P=%-8x N=%-8x %.20r' %\
(abs(high-idx),
id(node) if node else 0,
id(node[0]) if node[0] else 0,
id(node[2]) if node[2] else 0,
node[1])
print >>fp, '-' * 70
print >>fp
high = max(high, idx)
def lst_check(lst):
fwd_len = len(list(lst_iter(lst_head(lst))))
rev_len = len(list(lst_iter(lst_tail(lst), reverse=True)))
assert fwd_len == rev_len, \
'Forward iter yields %d items, reverse yields %d'\
% (fwd_len, rev_len)
if fwd_len in (0, 1):
assert lst[0] == lst[1], \
'%d-sized list yet head != tail' % (fwd_len,)
else:
assert lst[0] != lst[1], \
'%d-sized list yet head == tail' % (fwd_len,)
fwd_nodes = list(lst_iter(lst_head(lst)))
rev_nodes = list(lst_iter(lst_tail(lst), reverse=True))
fwd_vals = [n[1] for n in fwd_nodes]
rev_vals = [n[1] for n in reversed(rev_nodes)]
assert all(x is y
for (x, y) in zip(fwd_nodes, reversed(rev_nodes))),\
'Forward and reverse walk do not match'
class ListTestCase(unittest.TestCase):
def setUp(self):
self.lst = lst_new()
def test_append_one(self):
node = lst_append(self.lst, 'value')
self.assert_(lst_head(self.lst) is node)
self.assert_(lst_tail(self.lst) is node)
self.assert_(lst_value(lst_head(self.lst)) == 'value')
self.assert_(lst_value(lst_tail(self.lst)) == 'value')
lst_check(self.lst)
lst_unlink(self.lst, node)
self.assert_(lst_head(self.lst) is None)
self.assert_(lst_tail(self.lst) is None)
lst_check(self.lst)
def test_append_two(self):
node1 = lst_append(self.lst, 'value1')
node2 = lst_append(self.lst, 'value2')
lst_check(self.lst)
self.assert_(lst_head(self.lst) is node1)
self.assert_(lst_next(lst_head(self.lst)) is node2)
self.assert_(lst_next(lst_next(lst_head(self.lst))) is None)
self.assert_(lst_tail(self.lst) is node2)
self.assert_(lst_prev(lst_tail(self.lst)) is node1)
self.assert_(lst_prev(lst_prev(lst_tail(self.lst))) is None)
self.assert_(lst_value(lst_head(self.lst)) == 'value1')
self.assert_(lst_value(lst_next(lst_head(self.lst))) == 'value2')
self.assert_(lst_value(lst_next(lst_next(lst_head(self.lst)))) is None)
def test_empty(self):
self.assert_(lst_head(self.lst) is None)
self.assert_(lst_tail(self.lst) is None)
self.assert_(lst_value(lst_head(self.lst)) is None)
self.assert_(lst_value(lst_tail(self.lst)) is None)
def test_prepend_one(self):
node = lst_prepend(self.lst, 'value')
self.assert_(lst_head(self.lst) is node)
self.assert_(lst_tail(self.lst) is node)
self.assert_(lst_value(lst_head(self.lst)) == 'value')
self.assert_(lst_value(lst_tail(self.lst)) == 'value')
lst_check(self.lst)
def test_prepend_two(self):
node2 = lst_prepend(self.lst, 'value2')
node1 = lst_prepend(self.lst, 'value1')
lst_check(self.lst)
self.assert_(lst_head(self.lst) is node1)
self.assert_(lst_next(lst_head(self.lst)) is node2)
self.assert_(lst_next(lst_next(lst_head(self.lst))) is None)
self.assert_(lst_tail(self.lst) is node2)
self.assert_(lst_prev(lst_tail(self.lst)) is node1)
self.assert_(lst_prev(lst_prev(lst_tail(self.lst))) is None)
self.assert_(lst_value(lst_head(self.lst)) == 'value1')
self.assert_(lst_value(lst_next(lst_head(self.lst))) == 'value2')
self.assert_(lst_value(lst_next(lst_next(lst_head(self.lst)))) is None)
def test_prepend_append(self):
node1 = lst_prepend(self.lst, 'value1')
node2 = lst_append(self.lst, 'value2')
self.assert_(lst_head(self.lst) is node1)
self.assert_(lst_next(lst_head(self.lst)) is node2)
self.assert_(lst_tail(self.lst) is node2)
self.assert_(lst_prev(lst_tail(self.lst)) is node1)
def test_unlink_one(self):
node = lst_prepend(self.lst, 'value')
lst_unlink(self.lst, node)
self.assert_(lst_head(self.lst) is None)
self.assert_(lst_tail(self.lst) is None)
def test_unlink_two(self):
node1 = lst_append(self.lst, 'value1')
node2 = lst_append(self.lst, 'value2')
lst_check(self.lst)
lst_unlink(self.lst, node2)
self.assert_(lst_tail(self.lst) is node1)
self.assert_(lst_head(self.lst) is node1)
lst_check(self.lst)
lst_unlink(self.lst, node1)
self.assert_(lst_tail(self.lst) is None)
self.assert_(lst_head(self.lst) is None)
lst_check(self.lst)
def test_unlink_three(self):
lst_append(self.lst, 'value1')
lst_check(self.lst)
lst_append(self.lst, 'value2')
lst_check(self.lst)
lst_append(self.lst, 'value3')
lst_check(self.lst)
lst_unlink(self.lst, lst_tail(self.lst))
lst_check(self.lst)
class LruCacheTestCase(unittest.TestCase):
def test_one(self):
cache = LruCache(1)
self.assert_(cache.get('key1') is None)
cache.put('key1', 'value1')
self.assert_(cache.get('key1') == 'value1')
cache.put('key2', 'value2')
self.assert_(cache.get('key1') is None)
self.assert_(cache.get('key2') == 'value2')
def test_lru(self):
cache = LruCache(3)
cache.put('key1', 'value1')
cache.put('key2', 'value2')
cache.put('key3', 'value3')
cache.put('key4', 'value4')
self.assert_(cache.get('key1') is None)
self.assert_(cache.get('key2') == 'value2')
self.assert_(cache.get('key3') == 'value3')
self.assert_(cache.get('key4') == 'value4')
# This time, change the order we check in.
cache.put('key5', 'value5')
self.assert_(cache.get('key5') == 'value5')
self.assert_(cache.get('key4') == 'value4')
self.assert_(cache.get('key3') == 'value3')
self.assert_(cache.get('key2') is None)
self.assert_(cache.get('key1') is None)
# key5 is the LRU after previous gets.
cache.put('key6', 'value6')
self.assert_(cache.get('key6') == 'value6')
self.assert_(cache.get('key5') is None)
self.assert_(cache.get('key4') == 'value4')
self.assert_(cache.get('key3') == 'value3')
self.assert_(cache.get('key2') is None)
self.assert_(cache.get('key1') is None)
if __name__ == '__main__':
unittest.main()
|
|
"""Bayesian Generalized Linear Model implementation.
Implementation of Bayesian GLMs using a mixture of Gaussians posterior
approximation with the reparameterization trick and variational inference. See
[1]_ for the posterior mixture idea, and [2]_ for the inference scheme.
.. [1] Gershman, S., Hoffman, M., & Blei, D. "Nonparametric variational
inference". Proceedings of the international conference on machine learning.
2012.
.. [2] Kingma, D. P., & Welling, M. "Auto-encoding variational Bayes".
Proceedings of the 2nd International Conference on Learning Representations
(ICLR). 2014.
"""
from __future__ import division
import numpy as np
import logging
from itertools import chain
from multiprocessing import Pool
from scipy.stats.distributions import gamma, norm
from scipy.optimize import brentq
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.utils import check_random_state
from .utils import atleast_list, issequence
from .mathfun.special import logsumexp
from .basis_functions import LinearBasis, apply_grad
from .likelihoods import Gaussian
from .optimize import sgd, structured_sgd, logtrick_sgd, Adam
from .btypes import Bound, Positive, Parameter
# Set up logging
log = logging.getLogger(__name__)
# Module settings
WGTRND = norm() # Sampling distribution over mixture weights
COVRND = gamma(a=2, scale=0.5) # Sampling distribution over mixture covariance
LOGITER = 500 # Number of SGD iterations between logging ELBO and hypers
class GeneralizedLinearModel(BaseEstimator, RegressorMixin):
r"""
Bayesian Generalized linear model (GLM).
This provides a scikit-learn compatible interface for the glm module.
Parameters
----------
likelihood : Object
A likelihood object, see the likelihoods module.
basis : Basis
A basis object, see the basis_functions module.
K : int, optional
Number of diagonal Gaussian components to use to approximate the
posterior distribution.
maxiter : int, optional
Maximum number of iterations of stochastic gradients to run.
batch_size : int, optional
number of observations to use per SGD batch.
updater : SGDUpdater, optional
The SGD learning rate updating algorithm to use, by default this is
Adam. See revrand.optimize.sgd for different options.
nsamples : int, optional
Number of samples for sampling the expected likelihood and expected
likelihood gradients
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
random_state : None, int or RandomState, optional
random seed
Notes
-----
This approximates the posterior distribution over the weights with a
mixture of Gaussians:
.. math ::
\mathbf{w} \sim \frac{1}{K} \sum^K_{k=1}
\mathcal{N}(\mathbf{m_k}, \boldsymbol{\Psi}_k)
where,
.. math ::
\boldsymbol{\Psi}_k = \text{diag}([\Psi_{k,1}, \ldots,
\Psi_{k,D}]).
This is so arbitrary likelihoods can be used with this algorithm, while
still maintaining flexible and tractable non-Gaussian posteriors.
Additionally, this has the benefit of a reduced number of parameters to
optimise (compared with full covariance Gaussians).
The main differences between this implementation and the GLM in [1]_ are:
- We use diagonal mixtures, as opposed to isotropic.
- We use auto encoding variational Bayes (AEVB) inference [2]_ with
stochastic gradients.
This uses the Python logging module to display the learning status. To view
these messages, put something like,
.. code ::
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
in your calling code.
"""
def __init__(self,
likelihood=Gaussian(),
basis=LinearBasis(),
K=10,
maxiter=3000,
batch_size=10,
updater=None,
nsamples=50,
nstarts=500,
random_state=None
):
"""See class docstring."""
self.likelihood = likelihood
self.basis = basis
self.K = K
self.maxiter = maxiter
self.batch_size = batch_size
self.updater = updater
self.nsamples = nsamples
self.nstarts = nstarts
self.random_state = random_state # For clone compatibility
self.random_ = check_random_state(self.random_state)
def fit(self, X, y, likelihood_args=()):
r"""
Learn the parameters of a Bayesian generalized linear model (GLM).
Parameters
----------
X : ndarray
(N, d) array input dataset (N samples, d dimensions).
y : ndarray
(N,) array targets (N samples)
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
"""
X, y = check_X_y(X, y)
# Batch magnification factor
N, _ = X.shape
self.B_ = X.shape[0] / self.batch_size
self.D_ = self.basis.get_dim(X)
# Pack data
likelihood_args = _reshape_likelihood_args(likelihood_args, N)
data = (X, y) + likelihood_args
# Pack params
params = [Parameter(WGTRND, Bound(), shape=(self.D_, self.K)),
Parameter(COVRND, Positive(), shape=(self.D_, self.K)),
self.basis.regularizer,
self.likelihood.params,
self.basis.params]
log.info("Optimising parameters...")
self.__it = -self.nstarts # Keeping track of iterations for logging
nsgd = structured_sgd(logtrick_sgd(sgd))
res = nsgd(self._elbo,
params,
data,
eval_obj=True,
maxiter=self.maxiter,
updater=self.updater,
batch_size=self.batch_size,
random_state=self.random_,
nstarts=self.nstarts
)
# Unpack params
(self.weights_,
self.covariance_,
self.regularizer_,
self.like_hypers_,
self.basis_hypers_
) = res.x
log.info("Finished! reg = {}, likelihood_hypers = {}, "
"basis_hypers = {}, message: {}."
.format(self.regularizer_,
self.like_hypers_,
self.basis_hypers_,
res.message))
return self
def _elbo(self, m, C, reg, lpars, bpars, X, y, *largs):
# Full evidence lower bound objective with AEVB
# Shapes
D, K = m.shape
# Make sure hypers and args can be unpacked into callables
largs = tuple(chain(atleast_list(lpars), largs))
# Basis function
Phi = self.basis.transform(X, *atleast_list(bpars)) # M x D
# Get regularizer
L, slices = self.basis.regularizer_diagonal(X, *atleast_list(reg))
iL = 1. / L[:, np.newaxis]
# Posterior entropy lower bound terms
logNkl = _qmatrix(m, C)
logzk = logsumexp(logNkl, axis=0)
# Preallocate variational parameter gradients and ELL
dm = np.empty_like(m)
dC = np.empty_like(C)
Ell = np.empty(K, dtype=float)
# Zero starts for sums over posterior mixtures
dlpars = [np.zeros_like(p) for p in atleast_list(lpars)]
EdPhi = np.zeros_like(Phi)
# Log status, only do this occasionally to save cpu
dolog = (self.__it % LOGITER == 0) or (self.__it == self.maxiter - 1)
# Only calculate ELBO when sampling parameters (__it < 0) or logging
calc_ll = dolog or (self.__it < 0)
# Loop over the posterior mixture components, accumulating gradients and ELL
for k in range(K):
# Sample expected likelihood and gradients
Edmk, EdCk, EdPhik, Edlpars, Ell[k] = \
self._reparam_k(m[:, k], C[:, k], y, Phi, largs, calc_ll)
EdPhi += EdPhik / K
# Weight factors for each component in the gradients
Nkl_zk = np.exp(logNkl[:, k] - logzk[k])
Nkl_zl = np.exp(logNkl[:, k] - logzk)
alpha = (Nkl_zk + Nkl_zl)
# Posterior mean and covariance gradients
mkmj = m[:, k][:, np.newaxis] - m
iCkCj = 1. / (C[:, k][:, np.newaxis] + C)
dm[:, k] = (self.B_ * Edmk - m[:, k] / L
+ (iCkCj * mkmj).dot(alpha)) / K
dC[:, k] = (self.B_ * EdCk - 1. / L
+ (iCkCj - (mkmj * iCkCj)**2).dot(alpha)) / (2 * K)
# Likelihood parameter gradients
for i, Edlpar in enumerate(Edlpars):
dlpars[i] -= Edlpar / K
# Regularizer gradient
def dreg(s):
dL = 0.5 * (((m[s]**2 + C[s]) * iL[s]**2).sum() / K - iL[s].sum())
return -dL
dL = list(map(dreg, slices)) if issequence(slices) else dreg(slices)
# Basis function parameter gradients
dtheta = lambda dPhi: -(EdPhi * dPhi).sum()
dbpars = apply_grad(dtheta, self.basis.grad(X, *atleast_list(bpars)))
# Approximate evidence lower bound
ELBO = -np.inf
if calc_ll:
ELBO = (Ell.sum() * self.B_
- 0.5 * D * K * np.log(2 * np.pi)
- 0.5 * K * np.log(L).sum()
- 0.5 * ((m**2 + C) * iL).sum()
- logzk.sum() + np.log(K)) / K
if dolog:
rs_mes = "Random starts: " if self.__it < 0 else ""
log.info("{}Iter {}: ELBO = {}, reg = {}, like_hypers = {}, "
"basis_hypers = {}"
.format(rs_mes, self.__it, ELBO, reg, lpars, bpars))
self.__it += 1
return -ELBO, [-dm, -dC, dL, dlpars, dbpars]
def _reparam_k(self, mk, Ck, y, Phi, largs, calc_ll=True):
# AEVB's reparameterisation trick
# Sample the latent function and its derivative
e = self.random_.randn(self.nsamples, len(mk))
Sk = np.sqrt(Ck)
ws = mk + Sk * e # L x D
fs = ws.dot(Phi.T) # L x M
dfs = self.likelihood.df(y, fs, *largs) # L x M
# Expected gradients
Edws = dfs.dot(Phi) # L x D, dweight samples
Edm = Edws.sum(axis=0) / self.nsamples # D
EdC = (Edws * e / Sk).sum(axis=0) / self.nsamples # D
EdPhi = dfs.T.dot(ws) / self.nsamples # M x D
# Structured likelihood parameter gradients
Edlpars = atleast_list(self.likelihood.dp(y, fs, *largs))
for i, Edlpar in enumerate(Edlpars):
Edlpars[i] = Edlpar.sum() / self.nsamples
# Expected ll
Ell = np.inf
if calc_ll:
Ell = self.likelihood.loglike(y, fs, *largs).sum() / self.nsamples
return Edm, EdC, EdPhi, Edlpars, Ell
def predict(self, X, nsamples=200, likelihood_args=()):
"""
Predict target values from Bayesian generalized linear regression.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d dimensions).
nsamples : int, optional
Number of samples for sampling the expected target values from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
"""
Ey, _ = self.predict_moments(X, nsamples, likelihood_args)
return Ey
def predict_moments(self, X, nsamples=200, likelihood_args=()):
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``fit``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
"""
# Get latent function samples
N = X.shape[0]
ys = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples though likelihood expected value
Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ys[:, i] = self.likelihood.Ey(f, *Eyargs)
# Average transformed samples (MC integration)
Ey = ys.mean(axis=1)
Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1)
return Ey, Vy
def predict_logpdf(self, X, y, nsamples=200, likelihood_args=()):
r"""
Predictive log-probability density function of a Bayesian GLM.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, D dimensions).
y : float or ndarray
The test observations of shape (N*,) to evaluate under,
:math:`\log p(y^* |\mathbf{x}^*, \mathbf{X}, y)`.
nsamples : int, optional
Number of samples for sampling the log predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N*.
Returns
-------
logp : ndarray
The log probability of y* given X* of shape (N*,).
logp_min : ndarray
The minimum sampled values of the predicted log probability (same
shape as p)
logp_max : ndarray
The maximum sampled values of the predicted log probability (same
shape as p)
"""
X, y = check_X_y(X, y)
# Get latent function samples
N = X.shape[0]
ps = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples though likelihood pdf
llargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ps[:, i] = self.likelihood.loglike(y, f, *llargs)
# Average transformed samples (MC integration)
logp = ps.mean(axis=1)
logp_min = ps.min(axis=1)
logp_max = ps.max(axis=1)
return logp, logp_min, logp_max
def predict_cdf(self, X, quantile, nsamples=200, likelihood_args=()):
r"""
Predictive cumulative density function of a Bayesian GLM.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, D dimensions).
quantile : float
The predictive probability, :math:`p(y^* \leq \text{quantile} |
\mathbf{x}^*, \mathbf{X}, y)`.
nsamples : int, optional
Number of samples for sampling the predictive CDF.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N*.
Returns
-------
p : ndarray
The probability of y* <= quantile for the query inputs, X* of shape
(N*,).
p_min : ndarray
The minimum sampled values of the predicted probability (same shape
as p)
p_max : ndarray
The maximum sampled values of the predicted probability (same shape
as p)
"""
# Get latent function samples
N = X.shape[0]
ps = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples though likelihood cdf
cdfarg = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ps[:, i] = self.likelihood.cdf(quantile, f, *cdfarg)
# Average transformed samples (MC integration)
p = ps.mean(axis=1)
p_min = ps.min(axis=1)
p_max = ps.max(axis=1)
return p, p_min, p_max
def predict_interval(self, X, percentile, nsamples=200, likelihood_args=(),
multiproc=True):
"""
Predictive percentile interval (upper and lower quantiles).
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, D dimensions).
percentile : float
The percentile confidence interval (e.g. 95%) to return.
nsamples : int, optional
Number of samples for sampling the predictive percentiles.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N*.
multiproc : bool, optional
Use multiprocessing to parallelise this prediction computation.
Returns
-------
ql : ndarray
The lower end point of the interval with shape (N*,)
qu : ndarray
The upper end point of the interval with shape (N*,)
"""
N = X.shape[0]
# Generate latent function samples per observation (n in N)
fsamples = self._sample_func(X, nsamples, genaxis=0)
# Make sure likelihood_args is consistent with work
if len(likelihood_args) > 0:
likelihood_args = _reshape_likelihood_args(likelihood_args, N)
# Now create work for distributed workers
like_hypers = atleast_list(self.like_hypers_)
work = ((f[0], self.likelihood, like_hypers, f[1:], percentile)
for f in zip(fsamples, *likelihood_args))
# Distribute sampling and rootfinding
if multiproc:
pool = Pool()
res = pool.map(_star_rootfinding, work)
pool.close()
pool.join()
else:
res = [_rootfinding(*w) for w in work]
# Get results of work
ql, qu = zip(*res)
return np.array(ql), np.array(qu)
def _sample_func(self, X, nsamples, genaxis=1):
"""
Generate samples from the posterior latent function mixtures of the GLM
for query inputs, X*.
Parameters
----------
X : ndarray
            (N*, D) array query input dataset (N* samples, D dimensions).
nsamples : int
Number of samples for sampling the latent function.
genaxis : int
Axis to return samples from, i.e.
- ``genaxis=1`` will give you one sample at a time of f for ALL
observations (so it will iterate over nsamples).
- ``genaxis=0`` will give you all samples of f for ONE
observation at a time (so it will iterate through X*, row by
row)
Yields
------
        fsamples : ndarray
            of shape (N*,) if ``genaxis=1``, with each call being a sample
            from the mixture of latent functions over all N*. Or of shape
            (nsamples,) if ``genaxis=0``, with each call being all of the
            samples for one observation, n in N*.
"""
check_is_fitted(self, ['weights_', 'covariance_', 'basis_hypers_',
'like_hypers_', 'regularizer_'])
X = check_array(X)
D, K = self.weights_.shape
# Generate weight samples from all mixture components
k = self.random_.randint(0, K, size=(nsamples,))
w = self.weights_[:, k] + self.random_.randn(D, nsamples) \
* np.sqrt(self.covariance_[:, k])
# Do this here for *massive* speed improvements
Phi = self.basis.transform(X, *atleast_list(self.basis_hypers_))
# Now generate latent functions samples either colwise or rowwise
if genaxis == 1:
fs = (Phi.dot(ws) for ws in w.T)
elif genaxis == 0:
fs = (phi_n.dot(w) for phi_n in Phi)
else:
raise ValueError("Invalid axis to generate samples from")
return fs
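    # For example (hypothetical shapes): with 3 query rows and nsamples=2,
    # ``genaxis=1`` yields two samples of shape (3,), while ``genaxis=0``
    # yields three arrays of shape (2,), one per observation.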
def __repr__(self):
"""Representation."""
return "{}(likelihood={}, basis={}, K={}, maxiter={}, batch_size={},"\
"updater={}, nsamples={}, nstarts={}, random_state={})".format(
self.__class__.__name__,
self.likelihood,
self.basis,
self.K,
self.maxiter,
self.batch_size,
self.updater,
self.nsamples,
self.nstarts,
self.random_state
)
# For GB/AU spelling
class GeneralisedLinearModel(GeneralizedLinearModel):
pass
#
# Internal Module Utilities
#
def _reshape_likelihood_args(likelihood_args, N):
reshape_args = []
for l in likelihood_args:
if np.isscalar(l):
l = l * np.ones(N)
if (np.shape(l)[0] != N) and (len(l) != 0):
raise ValueError("Likelihood arguments not a compatible shape!")
reshape_args.append(l)
return tuple(reshape_args)
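# For example, _reshape_likelihood_args((2.0,), 3) returns
# (array([2., 2., 2.]),), broadcasting the scalar argument over N* = 3.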
# For python 2.7 compatibility instead of pool.starmap
def _star_rootfinding(args):
return _rootfinding(*args)
def _rootfinding(fn, likelihood, likelihood_hypers, likelihood_args,
percentile):
# CDF minus percentile for quantile root finding
predCDF = lambda q, fs, percent: \
(likelihood.cdf(q, fs, *chain(likelihood_hypers,
likelihood_args))).mean() - percent
    # Convert the percentile into lower/upper tail probabilities and get
    # (conservative) bounds for brentq
lpercent = (1 - percentile) / 2
upercent = 1 - lpercent
Eyn = likelihood.Ey(fn, *chain(likelihood_hypers, likelihood_args)).mean()
lb, ub = -1000 * max(Eyn, 1), 1000 * max(Eyn, 1)
# Do the root finding optimisation for upper and lower quantiles
try:
qln = brentq(predCDF, a=lb, b=ub, args=(fn, lpercent))
except ValueError:
qln = np.nan
try:
qun = brentq(predCDF, a=lb, b=ub, args=(fn, upercent))
except ValueError:
qun = np.nan
return qln, qun
def _dgausll(x, mean, dcov):
# This is faster than calling scipy.stats.norm.logpdf
D = len(x)
return - 0.5 * (D * np.log(2 * np.pi) + np.log(dcov).sum()
+ ((x - mean)**2 / dcov).sum())
def _qmatrix(m, C):
K = m.shape[1]
logq = [[_dgausll(m[:, i], m[:, j], C[:, i] + C[:, j])
for i in range(K)]
for j in range(K)]
return np.array(logq)
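# A minimal sanity-check sketch for _dgausll (not part of the module API):
# the diagonal-covariance Gaussian log-likelihood above should agree with
# summing independent scipy.stats.norm log-pdfs, e.g.
#
#   from scipy.stats import norm
#   x, mean, dcov = np.ones(3), np.zeros(3), np.array([1., 2., 3.])
#   assert np.isclose(_dgausll(x, mean, dcov),
#                     norm.logpdf(x, mean, np.sqrt(dcov)).sum())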
|
|
# -*- coding: utf-8 -*-
"""
Evalutaion for energy regression models
Take a model that predicts energy of events and do the evaluation for that, both
in the form of a 2d histogramm (mc energy vs reco energy),
and as a 1d histogram (mc_energy vs mean absolute error).
Looks for saved arr_energ_corrects to load, or generate new one.
Will print statistics from that array like median, variance, ...
Generates the 2d and the 1d plots and saves them.
Can also compare multiple 1d plots instead.
If apply precuts is selected, the dataset will be xzt_precuts instead of xzt,
and _precut will be added to the filename and the saved arr_energy_correct.
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from get_dataset_info import get_dataset_info
from util.evaluation_utilities import setup_and_make_energy_arr_energy_correct, calculate_2d_hist_data, make_2d_hist_plot, calculate_energy_mae_plot_data, make_energy_mae_plot, make_energy_evaluation_statistics, make_energy_mae_plot_errorbars, make_energy_mae_plot_mean_only
from util.saved_setups_for_plot_statistics import get_path_best_epoch
def parse_input():
    parser = argparse.ArgumentParser(description='Take a model that predicts energy of events and do the evaluation for that, either in the form of a 2d histogram (mc energy vs reco energy), or as a 1d histogram (mc_energy vs mean absolute error).')
parser.add_argument('model', type=str, help='Name of a model .h5 file, or a tag for a saved setup. (see this file for tags for sets and saved_setups for single epochs)')
parser.add_argument('-p','--apply_precuts', help="Change to dataset xzt_precut", action='store_true')
args = parser.parse_args()
params = vars(args)
return params
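# Example invocation (hypothetical script name; tags are defined in
# get_saved_plots_info below):
#   python evaluate_energy.py energy_12_enc --apply_precuts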
def get_saved_plots_info(tag, apply_precuts=False):
    #Info about plots that have been generated for the thesis is listed here.
dataset_tag="xzt"
zero_center=True
energy_bins_2d=np.arange(3,101,1)
energy_bins_1d=20
home_path="/home/woody/capn/mppi013h/Km3-Autoencoder/"
is_a_set=False
#For sets: Which type of plot to generate
which_plot="mean"
    #Should track and shower be separated for the 2d hist plot
    seperate_track_shower=True
#Path of where to save the plots. The histogram and the MRE plot will get
#different endings appended. None for auto generate.
save_as_base=None
#------------------------------Special single files---------------------
if tag == "energy_12_enc":
model_path = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken12_epoch48.h5"
dataset_tag="xzt"
elif tag == "energy_15_enc_sim":
model_path = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energynodrop_epoch67.h5"
dataset_tag="xzt"
seperate_track_shower=False
energy_bins_2d=np.arange(3,20,0.5)
save_as_base = home_path+"results/plots/energy_evaluation/broken15_on_normal"
elif tag == "energy_15_enc_meas":
model_path = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energynodrop_epoch67.h5"
dataset_tag="xzt_broken15"
seperate_track_shower=False
energy_bins_2d=np.arange(3,20,0.5)
save_as_base = home_path+"results/plots/energy_evaluation/broken15_on_broken15"
elif tag == "energy_15_unf_sim":
model_path = "vgg_5_2000/trained_vgg_5_2000_supervised_energy_epoch17.h5"
dataset_tag="xzt"
seperate_track_shower=False
energy_bins_2d=np.arange(3,20,0.5)
save_as_base = home_path+"results/plots/energy_evaluation/broken15_unf_on_normal"
elif tag == "energy_15_unf_meas":
model_path = "vgg_5_2000/trained_vgg_5_2000_supervised_energy_epoch17.h5"
dataset_tag="xzt_broken15"
seperate_track_shower=False
energy_bins_2d=np.arange(3,20,0.5)
save_as_base = home_path+"results/plots/energy_evaluation/broken15_unf_on_broken15"
#------------------------------Sets for mae comparison---------------------
elif tag == "2000":
tags = ["2000_unf_E", "2000_unf_mse_E"]
label_array = ["With MAE", "With MSE"]
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
is_a_set=True
elif tag == "bottleneck":
tags = ["2000_unf_E", "200_linear_E"]
label_array = ["Unfrozen 2000", "Encoder 200"]
which_plot="mean"
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
is_a_set=True
#-----------------------Bottleneck----------------------
elif tag == "compare_2000":
#title_of_plot='Performance comparison of the 1920 encoder network and the supervised one'
tags = ["2000_unf_E",
"vgg_3_2000_E_nodrop"]
label_array = ["Unfrozen", "Encoder"]
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
is_a_set=True
elif tag == "compare_600":
tags = ["vgg_5_600_picture_E_nodrop",
"vgg_5_600_morefilter_E_nodrop"]
label_array = ["Picture", "More filter"]
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
is_a_set=True
#title_of_plot='Accuracy of encoders with bottleneck 600'
elif tag=="compare_200":
tags = ["vgg_5_200_E_nodrop",
"vgg_5_200_dense_E_nodrop"]
label_array=["Standard", "Dense"]
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
#title_of_plot='Accuracy of encoders with bottleneck 200'
is_a_set=True
elif tag == "compare_best":
#title_of_plot='Performance comparison of the 200 dense encoder network and the supervised one'
tags = ["2000_unf_E",
"vgg_5_200_dense_E_nodrop"]
label_array = ["Supervised", "Model 200-dense"]
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
is_a_set=True
    #--------------------- 200 size variation --------------------
elif tag=="compare_200_bigger":
tags = ["vgg_5_200_E_nodrop",
"vgg_5_200_large_E_nodrop",
"vgg_5_200_deep_E_nodrop"]
label_array=["Standard", "Wider", "Deeper"]
which_plot="mean"
is_a_set=True
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
elif tag=="compare_200_smaller":
tags = ["vgg_5_200_E_nodrop",
"vgg_5_200_small_E_nodrop",
"vgg_5_200_shallow_E_nodrop"]
label_array=["Standard", "Smaller", "Shallower"]
which_plot="mean"
is_a_set=True
save_plot_as = home_path+"results/plots/energy_evaluation/mae_compare_set_"+tag+"_"+which_plot+".pdf"
    #The rest of evaluation.py should also go in here, e.g. 64,...
#-------------------------------------------------------
else:
try:
#Read in the saved name from saved_setups_for_plot_statistics
model_path = get_path_best_epoch(tag, full_path=False)
        except NameError:
            print("Input is not a known tag. Opening as model instead.")
            if type(tag) != int:
                #Fall through to the standard model-path handling below
                model_path = tag
            else:
                raise ValueError("Unknown tag: "+str(tag))
if is_a_set:
return [tags, label_array, which_plot], save_plot_as, seperate_track_shower
else:
print("Working on model", model_path)
#Where to save the plots to
if save_as_base is None:
save_as_base = home_path+"results/plots/energy_evaluation/"+model_path.split("trained_")[1][:-3]
if apply_precuts:
save_as_base+="_precut"
dataset_tag="xzt_precut"
model_path=home_path+"models/"+model_path
return ([model_path, dataset_tag, zero_center, energy_bins_2d, energy_bins_1d],
save_as_base, seperate_track_shower)
def get_dump_name_arr(model_path, dataset_tag):
#Returns the name and path of the energy correct array dump file
modelname = model_path.split("trained_")[1][:-3]
dump_path="/home/woody/capn/mppi013h/Km3-Autoencoder/results/data/"
name_of_arr = dump_path + "energy_" + modelname + "_" + dataset_tag + "_arr_correct.npy"
return name_of_arr
def make_or_load_hist_data(model_path, dataset_tag, zero_center, energy_bins_2d, energy_bins_1d, samples=None, include_mae_single=False):
#Compares the predicted energy and the mc energy of many events in a 2d histogram
#This function outputs a np array with the 2d hist data,
#either by loading a saved arr_energy_correct, or by generating a new one
#Also outputs the 1d histogram of mc energy over mae.
#name of the files that the hist data will get dumped to (or loaded from)
name_of_arr = get_dump_name_arr(model_path, dataset_tag)
    if os.path.isfile(name_of_arr):
print("Loading existing file of correct array", name_of_arr)
arr_energy_correct = np.load(name_of_arr)
#Print infos about the evaluation performance like Median, Variance,...
__ = make_energy_evaluation_statistics(arr_energy_correct)
else:
print("No saved correct array for this model found. New one will be generated.\nGenerating energy array...")
dataset_info_dict = get_dataset_info(dataset_tag)
arr_energy_correct = setup_and_make_energy_arr_energy_correct(model_path, dataset_info_dict, zero_center, samples)
print("Saving as", name_of_arr)
np.save(name_of_arr, arr_energy_correct)
print("Generating 2d histogram...")
hist_data_2d = calculate_2d_hist_data(arr_energy_correct, energy_bins_2d)
print("Done.")
print("Generating mae histogramm...")
energy_mae_plot_data = calculate_energy_mae_plot_data(arr_energy_correct, energy_bins_1d,
include_single=include_mae_single)
print("Done.")
return (hist_data_2d, energy_mae_plot_data)
def save_and_show_plots(tag, apply_precuts=False, show_plot=True, samples=None):
    #Main function. Generate or load the data for the plots, and make them.
(input_for_make_hist_data, save_as_base,
seperate_track_shower) = get_saved_plots_info(tag, apply_precuts)
#Can also compare multiple already generated mae plots
if len(input_for_make_hist_data)==3:
#Is a set: Compare existing mae plots
save_plot_as = save_as_base
fig_compare = compare_plots(*input_for_make_hist_data)
        if save_plot_as is not None:
            print("Saving plot as", save_plot_as)
            fig_compare.savefig(save_plot_as)
            print("Done")
        if show_plot:
            plt.show()
else:
#Do the standard energy evaluation, i.e. calculate the energy array,
#save it and make the plots
save_as_2d = save_as_base+"_2dhist_plot.pdf"
save_as_1d = save_as_base+"_mae_plot.pdf"
hist_data_2d, energy_mae_plot_data = make_or_load_hist_data(*input_for_make_hist_data, samples=samples)
print("Generating hist2d plot...")
fig_hist2d = make_2d_hist_plot(hist_data_2d, seperate_track_shower=seperate_track_shower)
        if show_plot:
            plt.show()
        if save_as_2d is not None:
            print("Saving plot as", save_as_2d)
            fig_hist2d.savefig(save_as_2d)
print("Done.")
print("Generating mae plot...")
fig_mae = make_energy_mae_plot_mean_only([energy_mae_plot_data,])
        if show_plot:
            plt.show()
        if save_as_1d is not None:
            print("Saving plot as", save_as_1d)
            fig_mae.savefig(save_as_1d)
print("Done.")
def compare_plots(tags, label_array, which_plot, apply_precuts=False):
"""
Plot several saved mae data files and plot them in a single figure.
"""
mae_plot_data_list = []
print("Loading the saved files of the following models:")
for tag in tags:
(input_for_make_hist_data, save_as_base,
seperate_track_shower) = get_saved_plots_info(tag, apply_precuts)
hist_data_2d, mae_plot_data = make_or_load_hist_data(*input_for_make_hist_data)
mae_plot_data_list.append(mae_plot_data)
print("Done. Generating plot...")
if which_plot=="mean_variance":
fig_mae = make_energy_mae_plot(mae_plot_data_list, label_list=label_array)
elif which_plot=="mean":
fig_mae = make_energy_mae_plot_mean_only(mae_plot_data_list, label_list=label_array)
return fig_mae
if __name__=="__main__":
params = parse_input()
tag = params["model"]
#Should precuts be applied to the data; if so, the plot will be saved
#with a "_precut" added to the file name
apply_precuts = params["apply_precuts"]
#only go through parts of the file (for testing)
samples=None
if tag=="all_energy":
print("Making evaluation of all best models...")
plot_tag=101
while True:
save_and_show_plots(plot_tag, apply_precuts, show_plot=False)
plot_tag+=1
else:
save_and_show_plots(tag, apply_precuts)
|
|
import inspect
import random
from myriad.world.base import Tile
class GeographicalFeature(Tile):
desc = ""
allowedTransitions = []
allowedTransforms = {
"increased temperature": [],
"decreased temperature": [],
}
canHaveTown = True
isPassable = True
    # human adult walking speed of 5 km/hr (83 m/min) on flat, unencumbered
    # ground is taken as the unit speed.
traversalMultiplier = 1.0
class Terrain(GeographicalFeature):
pass
class WaterBody(GeographicalFeature):
canHaveTown = False
    # comparing 5 km/hr (83 m/min) average walking speed to an average,
    # casual swimming speed of 3 km/hr (50 m/min)
traversalMultiplier = 0.6
class Plains(Terrain):
desc = "You are surrounded by grassy plains."
pervasiveness = 0.8
class Savanna(Plains):
desc = ("You are surrounded by grassland mixed with undergrowth and "
"occasional trees.")
class Woodlands(Savanna):
desc = "You are in an area of scattered woods and occasional clearings."
traversalMultiplier = 0.6
class Forest(Woodlands):
desc = "You are surrounded by trees."
traversalMultiplier = 0.4
class Jungle(Forest):
desc = "Wild vegetation lays before you, daunting in its thickness."
traversalMultiplier = 0.1
class SandyGround(Plains):
desc = "You are surrounded by sandy ground."
traversalMultiplier = 0.6
pervasiveness = 0.3
class RockyGround(Plains):
desc = "You are surrounded by rocky ground."
traversalMultiplier = 0.8
pervasiveness = 0.3
class Hills(RockyGround):
desc = "You have entered a hilly area."
traversalMultiplier = 0.6
pervasiveness = 0.5
class Cliffs(Hills):
desc = "You face a wall of stone, a cliff too hight to pass unaided."
isPassable = False
pervasiveness = 0.3
class Caves(Hills):
desc = "You are in an area with caves in the hills."
traversalMultiplier = 0.5
pervasiveness = 0.2
class Mountains(Hills):
desc = "You are in the mountains"
traversalMultiplier = 0.4
pervasiveness = 0.4
class AlpineTreeline(Mountains):
desc = ("You are in the mountains, below the treeline. You see the "
"occasional Krummholz formation.")
class HighPeaks(Mountains):
desc = ("The wind is blowing hard, and you have a difficult time "
"breathing. You are very high up in the mountains, among "
"the highest peaks.")
traversalMultiplier = 0.1
isPassable = False
pervasiveness = 0.2
class HighPlateau(Mountains):
desc = ("The wind is blowing hard, and you have a difficult time "
"breathing. You are very high up in the mountains, on a "
"large, moderately flat plateau. It is a forbidding environment.")
traversalMultiplier = 0.7
pervasiveness = 0.1
class Valley(Plains):
desc = "Nestled between the slopes, you are standing in a valley."
pervasiveness = 0.3
class Ravine(RockyGround):
desc = ("You've managed to get yourself into a ravine. Let's see if you "
"can get yourself out.")
traversalMultiplier = 0.6
isPassable = True
pervasiveness = 0.3
class Canyon(Ravine):
desc = ("You are now in a canyon. A most unenviable position for a "
"traveller to be in.")
isPassable = False
pervasiveness = 0.3
class Desert(SandyGround):
desc = ("You have entered the desert. You wonder how long your water "
"will last if you have to keep this up...")
traversalMultiplier = 0.4
isPassable = False
pervasiveness = 0.7
class Buttes(Desert, Hills):
desc = "Several stunning buttes are within view."
pervasiveness = 0.1
class Tundra(Desert):
desc = "As far as you can see in that direction is a frozen wasteland."
class Shoreline(RockyGround):
desc = ("You are at the water's edge. The rocky shore might be "
"uncomfortable in barefeet.")
pervasiveness = 0.4
class Beach(SandyGround):
desc = ("You are on a beach. This would be a lovely place for a vacation. "
"If you knew what vacations were.")
pervasiveness = 0.3
class Stream(WaterBody):
desc = "You are up to your needs in a stream."
pervasiveness = 0.9
class River(Stream):
desc = "For some reason, you've decided to take a swim in a river."
isPassable = False
class Lake(WaterBody):
desc = "You are currently in a lake."
isPassable = False
pervasiveness = 0.8
class Ocean(WaterBody):
desc = "You have entered the ocean."
isPassable = False
pervasiveness = 0.9
# for procedural generation of tile layouts, valid transitions from one tile
# type to another have to be defined.
transitions = {
Plains: [Plains, SandyGround, RockyGround, Hills, Valley, Desert, Beach,
Canyon, River, Lake, Ocean, Jungle],
Woodlands: [Plains, SandyGround, RockyGround, Hills, Valley, Beach,
Woodlands, Forest, Jungle],
Hills: [Plains, SandyGround, RockyGround, Hills, Mountains, Canyon, River,
Lake],
Mountains: [Hills, Mountains, HighPlateau, HighPeaks, Valley,
AlpineTreeline],
AlpineTreeline: [Mountains, HighPlateau, HighPeaks],
HighPlateau: [Mountains, HighPlateau, HighPeaks, AlpineTreeline],
Valley: [Plains, SandyGround, RockyGround, Hills, Mountains, River, Lake],
Ravine: [Plains, SandyGround, RockyGround, Hills, Ravine, Canyon, River],
Desert: [Plains, SandyGround, RockyGround, Ravine, Desert, Beach, Canyon,
River, Lake, Ocean],
Stream: [Shoreline, River, Stream, Lake],
Lake: [Plains, SandyGround, RockyGround, Hills, Valley, Desert, Beach,
River, Lake],
Ocean: [Shoreline, River, Beach],
Shoreline: [Ocean, Lake, River, Stream, Shoreline, Beach],
}
transitions[Lake] = transitions[Stream]
transitions[River] = transitions[Stream] + [Beach]
transitions[SandyGround] = transitions[Plains]
transitions[Savanna] = transitions[Plains] + [Savanna, Woodlands]
transitions[Forest] = transitions[Woodlands]
transitions[Jungle] = transitions[Woodlands]
transitions[RockyGround] = transitions[Plains]
transitions[HighPeaks] = transitions[HighPlateau]
transitions[Canyon] = transitions[Ravine]
transitions[Buttes] = transitions[Desert]
transitions[Beach] = transitions[Plains]
# some terrain types require particular neighbors
requires = {
# each valley tile should be connected to at least two mountains (on each
# side)
Valley: [],
    # each river tile should be connected directly to two other river tiles.
    # A river should be *very* pervasive, but only in distinct directions
    # (usually only two neighbors will have river tiles... sometimes a river
    # will branch, in which case there might be three neighbors, non-touching
    # in that case). Also, where rivers meet oceans, an adjoining tile should
    # be another river tile, to help comprise a river delta.
    River: [],
    # each mountain should have a very high likelihood of having hills as
    # neighbors
    Mountains: [],
    # a beach must have water on at least one side
    Beach: [],
}
# the transformation of one terrain type into another would be something that
# occurred with a permanent change (local or global) in temperature (e.g., a
# river turning into a dry ravine).
transforms = {}
def getTileClasses():
from myriad.world import terrain
classes = inspect.getmembers(terrain, inspect.isclass)
terrainClasses = [klass for name, klass in classes
if issubclass(klass, terrain.Terrain)
and klass is not terrain.Terrain]
waterClasses = [klass for name, klass in classes
if issubclass(klass, terrain.WaterBody)
and klass is not terrain.WaterBody]
return terrainClasses + waterClasses
def getRandomTileClass():
# XXX this needs to use the session randomizer... need to move this
return random.choice(getTileClasses())
def getPassableRandomTileClass():
tile = getRandomTileClass()
while not tile.isPassable:
tile = getRandomTileClass()
return tile
def getRandomTileTransitionClass(tile, neighborTiles):
# build sets of all valid transitions for each neighbor, then to find a
# transition that's valid for them all simultaneously, do an intersection
if not neighborTiles:
intersections = transitions[tile]
    elif len(neighborTiles) == 1:
        intersections = transitions[neighborTiles[0]]
    else:
        intersections = []
        # iterate pairwise; use a separate name so the tile parameter is
        # not clobbered by the loop variable
        for index, neighbor in enumerate(neighborTiles):
            if index == len(neighborTiles) - 1:
                break
            thisTransition = transitions[neighbor]
            nextTransition = transitions[neighborTiles[index + 1]]
            intersections.extend(
                list(set(thisTransition).intersection(nextTransition)))
# remove redundancies
intersections = list(set(intersections))
    # the more pervasive a tile tends to be, the more likely the same tile
    # will continue being used; if the same tile is not continued, randomly
    # select a valid transition tile from the intersections
# XXX this needs to use the session randomizer... need to move this
reuseCheck = random.random()
if tile in intersections and reuseCheck < tile.pervasiveness:
return tile
# XXX this needs to use the session randomizer... need to move this
return random.choice(intersections)
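# A minimal usage sketch (not part of the module): given two known
# neighbors, pick a tile class that is a valid transition for both.
#
#   neighbors = [Plains, Hills]
#   nextTile = getRandomTileTransitionClass(Plains, neighbors)
#   assert nextTile in set(transitions[Plains]) & set(transitions[Hills])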
|
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import itertools
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
tfb = tfp.bijectors
@test_util.test_all_tf_execution_regimes
class OrderedLogisticTest(test_util.TestCase):
def _random_cutpoints(self, shape):
return self._ordered.forward(self._rng.randn(*shape))
def _random_location(self, shape):
return self._rng.randn(*shape)
def setUp(self):
self._ordered = tfb.Ascending()
self._rng = test_util.test_np_rng()
super(OrderedLogisticTest, self).setUp()
@parameterized.parameters(
itertools.product(['cutpoints', 'loc', 'both'], [[], [1], [1, 2, 3]])
)
def testBatchShapes(self, test, batch_shape):
if test == 'cutpoints':
cutpoints = self._random_cutpoints(batch_shape + [2])
loc = tf.constant(0., dtype=tf.float64)
elif test == 'loc':
cutpoints = tf.constant([1., 2.], dtype=tf.float64)
loc = self._random_location(batch_shape)
elif test == 'both':
cutpoints = self._random_cutpoints(batch_shape + [2])
loc = self._random_location(batch_shape)
dist = tfd.OrderedLogistic(cutpoints=cutpoints, loc=loc)
self.assertAllEqual(dist.batch_shape, batch_shape)
self.assertAllEqual(
self.evaluate(dist.batch_shape_tensor()), batch_shape)
self.assertAllEqual(dist.event_shape, [])
self.assertAllEqual(self.evaluate(dist.event_shape_tensor()), [])
categorical_probs = dist.categorical_probs()
categorical_probs_shape = tf.shape(categorical_probs)
self.assertAllEqual(
self.evaluate(categorical_probs_shape), batch_shape + [3])
sample = dist.sample(seed=test_util.test_seed())
sample_shape = tf.shape(sample)
self.assertAllEqual(self.evaluate(sample_shape), batch_shape)
prob_sample_shape = tf.shape(dist.prob(sample))
survival_prob_sample_shape = tf.shape(dist.survival_function(sample))
self.assertAllEqual(self.evaluate(prob_sample_shape), batch_shape)
self.assertAllEqual(self.evaluate(survival_prob_sample_shape), batch_shape)
n = [4, 5]
sample_n = dist.sample(n, seed=test_util.test_seed())
sample_n_shape = tf.shape(sample_n)
self.assertAllEqual(self.evaluate(sample_n_shape), n + batch_shape)
prob_sample_n_shape = tf.shape(dist.prob(sample_n))
survival_prob_sample_n_shape = tf.shape(dist.survival_function(sample_n))
self.assertAllEqual(self.evaluate(prob_sample_n_shape), n + batch_shape)
self.assertAllEqual(
self.evaluate(survival_prob_sample_n_shape), n + batch_shape)
def testProbs(self):
# survival functions
# P(Y > 0) = sigmoid(1) = 0.7310586
# P(Y > 1) = sigmoid(0) = 0.5
# P(Y > 2) = sigmoid(-1) = 0.26894143
# probs
# P(Y = 0) = 1. - sigmoid(1) = 0.2689414
# P(Y = 1) = sigmoid(1) - sigmoid(0) = 0.2310586
# P(Y = 2) = sigmoid(0) - sigmoid(-1) = 0.23105857
# P(Y = 3) = sigmoid(-1) = 0.26894143
expected_probs = [0.2689414, 0.2310586, 0.23105857, 0.26894143]
expected_survival_probs = 1. - np.cumsum(expected_probs)
dist = tfd.OrderedLogistic(cutpoints=[-1., 0., 1.], loc=0.)
categorical_probs = self.evaluate(dist.categorical_probs())
self.assertAllClose(expected_probs, categorical_probs, atol=1e-6)
probs = np.flip(self.evaluate(dist.prob([3, 2, 1, 0])))
self.assertAllClose(expected_probs, probs, atol=1e-6)
survival_probs = self.evaluate(dist.survival_function([0, 1, 2, 3]))
self.assertAllClose(expected_survival_probs, survival_probs, atol=1e-6)
zero_probs = self.evaluate(dist.prob([-1, 4]))
self.assertAllClose([0., 0.], zero_probs, atol=1e-6)
out_of_bounds_survival_probs = self.evaluate(
dist.survival_function([-2, -1, 4, 5]))
self.assertAllClose(
[1., 1., 0., 0.], out_of_bounds_survival_probs, atol=1e-6)
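  # The values above follow from the parameterization checked here:
  #   P(Y > k) = sigmoid(loc - cutpoints[k])
  # so, e.g., P(Y = 1) = sigmoid(0 - (-1)) - sigmoid(0 - 0) ~= 0.731 - 0.5.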
def testMode(self):
# 2 cutpoints i.e. 3 possible outcomes. 3 "batched" distributions with the
# logistic distribution location well within the large cutpoint spacing so
# mode is obvious
dist = tfd.OrderedLogistic(cutpoints=[-10., 10.], loc=[-20., 0., 20.])
mode = self.evaluate(dist.mode())
self.assertAllEqual([0, 1, 2], mode)
def testSample(self):
# as per `testProbs`
dist = tfd.OrderedLogistic(cutpoints=[-1., 0., 1.], loc=0.)
samples = self.evaluate(dist.sample(int(1e5), seed=test_util.test_seed()))
expected_probs = [0.2689414, 0.2310586, 0.23105857, 0.26894143]
for k, p in enumerate(expected_probs):
self.assertAllClose(np.mean(samples == k), p, atol=0.01)
def testEntropyAgainstCategoricalDistribution(self):
cutpoints = self._random_cutpoints([3])
loc = self._random_location([2])
dist = tfd.OrderedLogistic(cutpoints=cutpoints, loc=loc)
categorical_dist = tfd.Categorical(dist.categorical_log_probs())
expected_entropy = self.evaluate(categorical_dist.entropy())
entropy = self.evaluate(dist.entropy())
self.assertAllClose(expected_entropy, entropy)
def testEntropyAgainstSampling(self):
cutpoints = self._random_cutpoints([4])
loc = self._random_location([])
dist = tfd.OrderedLogistic(cutpoints=cutpoints, loc=loc)
samples = dist.sample(int(1e5), seed=test_util.test_seed())
sampled_entropy = self.evaluate(-tf.reduce_mean(dist.log_prob(samples)))
entropy = self.evaluate(dist.entropy())
self.assertAllClose(sampled_entropy, entropy, atol=0.01)
@parameterized.parameters(1, 10, 25)
def testKLAgainstCategoricalDistribution(self, batch_size):
cutpoints = self._random_cutpoints([100])
a_loc = self._random_location([batch_size])
b_loc = self._random_location([batch_size])
a = tfd.OrderedLogistic(
cutpoints=cutpoints, loc=a_loc, validate_args=True)
b = tfd.OrderedLogistic(
cutpoints=cutpoints, loc=b_loc, validate_args=True)
a_cat = tfd.Categorical(
logits=a.categorical_log_probs(), validate_args=True)
b_cat = tfd.Categorical(
logits=b.categorical_log_probs(), validate_args=True)
kl = self.evaluate(tfd.kl_divergence(a, b))
self.assertEqual(kl.shape, (batch_size,))
kl_expected = self.evaluate(tfd.kl_divergence(a_cat, b_cat))
self.assertAllClose(kl, kl_expected)
kl_same = self.evaluate(tfd.kl_divergence(a, a))
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
def testKLAgainstSampling(self):
a_cutpoints = self._random_cutpoints([4])
b_cutpoints = self._random_cutpoints([4])
loc = self._random_location([])
a = tfd.OrderedLogistic(cutpoints=a_cutpoints, loc=loc)
b = tfd.OrderedLogistic(cutpoints=b_cutpoints, loc=loc)
samples = a.sample(int(1e5), seed=test_util.test_seed())
sampled_kl = self.evaluate(
tf.reduce_mean(a.log_prob(samples) - b.log_prob(samples)))
kl = self.evaluate(tfd.kl_divergence(a, b))
self.assertAllClose(sampled_kl, kl, atol=2e-2)
def testLatentLogistic(self):
loc = self._random_location([2])
cutpoints = self._random_cutpoints([2])
latent = tfd.Logistic(loc=loc, scale=1.)
ordered = tfd.OrderedLogistic(cutpoints=cutpoints, loc=loc)
ordered_cdf = self.evaluate(ordered.cdf([0, 1]))
latent_cdf = self.evaluate(latent.cdf(cutpoints))
self.assertAllClose(ordered_cdf, latent_cdf)
def testUnorderedCutpointsFails(self):
with self.assertRaisesRegexp(
ValueError, 'Argument `cutpoints` must be non-decreasing.'):
dist = tfd.OrderedLogistic(
cutpoints=[1., 0.9], loc=0.0, validate_args=True)
self.evaluate(dist.mode())
if __name__ == '__main__':
test_util.main()
|
|
"""
Functions for converting spherical harmonic coefficients to different
normalization conventions.
"""
import numpy as _np
import warnings as _warnings
from scipy.special import factorial as _factorial
def convert(coeffs_in, normalization_in=None, normalization_out=None,
csphase_in=None, csphase_out=None, lmax=None):
"""
Convert an array of spherical harmonic coefficients to a different
normalization convention.
Usage
-----
coeffs_out = convert(coeffs_in, [normalization_in, normalization_out,
csphase_in, csphase_out, lmax])
Returns
-------
coeffs_out : ndarray, size (2, lmax+1, lmax+1)
An array of spherical harmonic coefficients with the new
normalization convention.
Parameters
----------
coeffs_in : ndarray
        The array of input spherical harmonic coefficients.
    normalization_in : str, optional, default = None
        Normalization of the input coefficients: '4pi', 'ortho',
        'schmidt', or 'unnorm', for geodesy 4pi normalized,
        orthonormalized, Schmidt semi-normalized, or unnormalized
        coefficients, respectively.
    normalization_out : str, optional, default = None
        Normalization of the output coefficients: '4pi', 'ortho',
        'schmidt', or 'unnorm', for geodesy 4pi normalized,
        orthonormalized, Schmidt semi-normalized, or unnormalized
        coefficients, respectively.
csphase_in : int, optional, default = None
Condon-Shortley phase convention of the input coefficients: 1 to
exclude the phase factor, or -1 to include it.
csphase_out : int, optional, default = None
Condon-Shortley phase convention of the output coefficients: 1 to
exclude the phase factor, or -1 to include it.
lmax : int, optional, default = coeffs.shape[1] - 1
Maximum spherical harmonic degree to output. If lmax is larger than
that of the input coefficients, the output array will be zero
padded.
Notes
-----
This routine will convert an array of spherical harmonic coefficients
to a different normalization convention and different Condon-Shortley
phase convention. Optionally, a different maximum spherical harmonic
    degree can be specified. If this degree is smaller than that of the
    input coefficients, the input coefficients will be truncated. If this
    degree is larger than that of the input coefficients, the output
    coefficients will be zero padded.
"""
# check argument consistency
if normalization_in is not None:
if type(normalization_in) != str:
raise ValueError('normalization_in must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization_in))))
if normalization_in.lower() not in ('4pi', 'ortho', 'schmidt',
'unnorm'):
raise ValueError(
"normalization_in must be '4pi', 'ortho', 'schmidt', or " +
"'unnorm'. Provided value was {:s}"
.format(repr(normalization_in))
)
if normalization_out is None:
raise ValueError("normalization_in and normalization_out " +
"must both be specified.")
if normalization_out is not None:
if type(normalization_out) != str:
raise ValueError('normalization_out must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization_out))))
if normalization_out.lower() not in ('4pi', 'ortho', 'schmidt',
'unnorm'):
raise ValueError(
"normalization_out must be '4pi', 'ortho', 'schmidt', or" +
" 'unnorm'. Provided value was {:s}"
.format(repr(normalization_out))
)
if normalization_in is None:
raise ValueError("normalization_in and normalization_out " +
"must both be specified.")
if csphase_in is not None:
if csphase_in != 1 and csphase_in != -1:
raise ValueError(
"csphase_in must be 1 or -1. Input value was {:s}"
.format(repr(csphase_in)))
if csphase_out is None:
raise ValueError("csphase_in and csphase_out must both be " +
"specified.")
if csphase_out is not None:
if csphase_out != 1 and csphase_out != -1:
raise ValueError(
"csphase_out must be 1 or -1. Input value was {:s}"
.format(repr(csphase_out)))
if csphase_in is None:
raise ValueError("csphase_in and csphase_out must both be " +
"specified.")
    # the branches below compare lowercase names, so normalize the case here
    if normalization_in is not None:
        normalization_in = normalization_in.lower()
        normalization_out = normalization_out.lower()
    lmaxin = coeffs_in.shape[1] - 1
if lmax is None:
lmaxout = lmaxin
else:
lmaxout = lmax
lconv = min(lmaxin, lmaxout)
if ((normalization_in == 'unnorm' or normalization_out ==
'unnorm') and lconv > 85):
_warnings.warn("Conversions with unnormalized coefficients are " +
"stable only for degrees less than or equal to " +
"85. lmax of the output coefficients will be " +
"truncated after degree 85. The spherical " +
"harmonic degree of the input coefficients was " +
"{:d}.".format(lmaxin), category=RuntimeWarning)
lconv = 85
degrees = _np.arange(lconv + 1)
if _np.iscomplexobj(coeffs_in):
coeffs = _np.zeros((2, lmaxout+1, lmaxout+1), dtype=complex)
else:
coeffs = _np.zeros((2, lmaxout+1, lmaxout+1))
coeffs[:, :lconv+1, :lconv+1] = coeffs_in[:, :lconv+1, :lconv+1]
if normalization_in == normalization_out:
pass
elif normalization_in == '4pi' and normalization_out == 'schmidt':
for l in degrees:
coeffs[:, l, :l+1] *= _np.sqrt(2. * l + 1.)
elif normalization_in == '4pi' and normalization_out == 'ortho':
coeffs *= _np.sqrt(4. * _np.pi)
elif normalization_in == '4pi' and normalization_out == 'unnorm':
for l in degrees:
ms = _np.arange(l+1)
conv = (2. * l + 1.) * _factorial(l-ms) / _factorial(l+ms)
if not _np.iscomplexobj(coeffs):
conv[1:] *= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
elif normalization_in == 'schmidt' and normalization_out == '4pi':
for l in degrees:
coeffs[:, l, :l+1] /= _np.sqrt(2. * l + 1.)
elif normalization_in == 'schmidt' and normalization_out == 'ortho':
for l in degrees:
coeffs[:, l, :l+1] *= _np.sqrt(4. * _np.pi / (2. * l + 1.))
elif normalization_in == 'schmidt' and normalization_out == 'unnorm':
for l in degrees:
ms = _np.arange(l+1)
conv = _factorial(l-ms) / _factorial(l+ms)
if not _np.iscomplexobj(coeffs):
conv[1:] *= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
elif normalization_in == 'ortho' and normalization_out == '4pi':
coeffs /= _np.sqrt(4. * _np.pi)
elif normalization_in == 'ortho' and normalization_out == 'schmidt':
for l in degrees:
coeffs[:, l, :l+1] *= _np.sqrt((2. * l + 1.) / (4. * _np.pi))
elif normalization_in == 'ortho' and normalization_out == 'unnorm':
for l in degrees:
ms = _np.arange(l+1)
conv = (2. * l + 1.) * _factorial(l-ms) \
/ 4. / _np.pi / _factorial(l+ms)
if not _np.iscomplexobj(coeffs):
conv[1:] *= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
elif normalization_in == 'unnorm' and normalization_out == '4pi':
for l in degrees:
ms = _np.arange(l+1)
conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
if not _np.iscomplexobj(coeffs):
conv[1:] /= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
elif normalization_in == 'unnorm' and normalization_out == 'schmidt':
for l in degrees:
ms = _np.arange(l+1)
conv = _factorial(l+ms) / _factorial(l-ms)
if not _np.iscomplexobj(coeffs):
conv[1:] /= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
elif normalization_in == 'unnorm' and normalization_out == 'ortho':
for l in degrees:
ms = _np.arange(l+1)
conv = 4. * _np.pi * _factorial(l+ms) / (2. * l + 1.) / \
_factorial(l-ms)
if not _np.iscomplexobj(coeffs):
conv[1:] /= 2.
coeffs[:, l, :l+1] *= _np.sqrt(conv)
if csphase_in != csphase_out:
for m in degrees:
if m % 2 == 1:
coeffs[:, m:lconv+1, m] = - coeffs[:, m:lconv+1, m]
return coeffs
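# A minimal usage sketch (hypothetical coefficients): converting a single
# degree-1 4pi-normalized coefficient to Schmidt semi-normalization scales
# it by sqrt(2 * l + 1) = sqrt(3):
#
#   c4pi = _np.zeros((2, 3, 3))
#   c4pi[0, 1, 0] = 1.
#   cschmidt = convert(c4pi, normalization_in='4pi',
#                      normalization_out='schmidt')
#   # cschmidt[0, 1, 0] == _np.sqrt(3.)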
|
|
# coding: spec
from bespin.option_spec import stack_specs as specs, stack_objs as objs
from bespin.option_spec import artifact_objs
from bespin.errors import BadSpecValue
from tests.helpers import BespinCase
from noseOfYeti.tokeniser.support import noy_sup_setUp
from input_algorithms.meta import Meta
from option_merge import MergedOptions
import mock
describe BespinCase, "Var spec":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
it "creates a Static variable if only one item is given":
self.assertEqual(specs.var_spec().normalise(self.meta, 1), objs.StaticVariable("1"))
self.assertEqual(specs.var_spec().normalise(self.meta, "1"), objs.StaticVariable("1"))
self.assertEqual(specs.var_spec().normalise(self.meta, ["1"]), objs.StaticVariable("1"))
it "creates a Dynamic variable if only one item is given":
stack = self.unique_val()
output = self.unique_val()
bespin = self.unique_val()
self.meta.everything = MergedOptions.using({"bespin": bespin})
self.assertEqual(specs.var_spec().normalise(self.meta, [stack, output]), objs.DynamicVariable(stack, output, bespin))
describe BespinCase, "artifact_path_spec":
before_each:
self.meta = mock.Mock(name="artifact_path_spec")
it "creates an artifact_path from the two items":
host_path = self.unique_val()
artifact_path = self.unique_val()
self.assertEqual(specs.artifact_path_spec().normalise(self.meta, [host_path, artifact_path]), artifact_objs.ArtifactPath(host_path, artifact_path))
describe BespinCase, "Env spec":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
self.env_name = self.unique_val()
self.fallback_val = self.unique_val()
it "takes in just the env_name":
assert ":" not in self.env_name
assert "=" not in self.env_name
made = specs.env_spec().normalise(self.meta, self.env_name)
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, None)
self.assertEqual(made.default_val, None)
it "takes in env as a list with 1 item":
assert ":" not in self.env_name
assert "=" not in self.env_name
made = specs.env_spec().normalise(self.meta, [self.env_name])
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, None)
self.assertEqual(made.default_val, None)
it "takes in env as a list with 2 items":
assert ":" not in self.env_name
assert "=" not in self.env_name
made = specs.env_spec().normalise(self.meta, [self.env_name, self.fallback_val])
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, None)
self.assertEqual(made.default_val, self.fallback_val)
it "takes in env with blank default if suffixed with a colon":
made = specs.env_spec().normalise(self.meta, self.env_name + ":")
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, None)
self.assertEqual(made.default_val, "")
it "takes in env with blank set if suffixed with an equals sign":
made = specs.env_spec().normalise(self.meta, self.env_name + "=")
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, "")
self.assertEqual(made.default_val, None)
it "takes in default value if seperated by a colon":
made = specs.env_spec().normalise(self.meta, self.env_name + ":" + self.fallback_val)
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, None)
self.assertEqual(made.default_val, self.fallback_val)
it "takes in set value if seperated by an equals sign":
made = specs.env_spec().normalise(self.meta, self.env_name + "=" + self.fallback_val)
self.assertEqual(made.env_name, self.env_name)
self.assertEqual(made.set_val, self.fallback_val)
self.assertEqual(made.default_val, None)
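# In short, env_spec accepts three forms: "NAME" passes the variable
# through, "NAME:fallback" sets default_val, and "NAME=value" sets set_val.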
describe BespinCase, "params_json_spec":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
it "complains if any item has no ParameterKey or ParameterValue":
errors = [
BadSpecValue("Expected a value but got none", meta=self.meta.indexed_at(0).at("ParameterKey"))
, BadSpecValue("Expected a value but got none", meta=self.meta.indexed_at(0).at("ParameterValue"))
]
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[BadSpecValue(meta=self.meta.indexed_at(0), _errors=errors)]):
specs.params_json_spec().normalise(self.meta, [{"whatever": "blah"}])
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[BadSpecValue(meta=self.meta.indexed_at(0), _errors=[errors[1]])]):
specs.params_json_spec().normalise(self.meta, [{"ParameterKey": "blah"}])
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[BadSpecValue(meta=self.meta.indexed_at(0), _errors=[errors[0]])]):
specs.params_json_spec().normalise(self.meta, [{"ParameterValue": "blah"}])
it "works if all items have ParameterKey and ParameterValue":
spec = [{"ParameterKey": "one", "ParameterValue": 1}, {"ParameterKey": "two", "ParameterValue": "2"}]
self.assertEqual(specs.params_json_spec().normalise(self.meta, spec), spec)
describe BespinCase, "stack_json_spec":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
it "complains if there is no Resources":
error = BadSpecValue("Expected a value but got none", meta=self.meta.at("Resources"))
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[error]):
specs.stack_json_spec().normalise(self.meta, {})
it "complains if any resource has no Type parameter":
with self.fuzzyAssertRaisesError(BadSpecValue
, _errors = [ BadSpecValue(meta=self.meta.at("Resources")
, _errors = [ BadSpecValue(meta=self.meta.at("Resources").at("resource1")
, _errors = [ BadSpecValue("Expected a value but got none", meta=self.meta.at("Resources").at("resource1").at("Type")) ]
)]
)]
):
specs.stack_json_spec().normalise(self.meta, {"Resources": {"resource1": {"blah": 1}}})
it "complains if any resource has Properties is not a dictionary":
with self.fuzzyAssertRaisesError(BadSpecValue
, _errors = [ BadSpecValue(meta=self.meta.at("Resources")
, _errors = [ BadSpecValue(meta=self.meta.at("Resources").at("resource1")
, _errors = [ BadSpecValue("Expected a dictionary", got=int, meta=self.meta.at("Resources").at("resource1").at("Properties")) ]
)]
)]
):
specs.stack_json_spec().normalise(self.meta, {"Resources": {"resource1": {"Type": "something", "Properties": 1}}})
it "complains if parameters and outputs is not a dictionary of string to dictionary":
for key in ("Parameters", "Outputs"):
value = {"Resources": {}, key: []}
error = BadSpecValue("Expected a dictionary", got=list, meta=self.meta.at(key))
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[error]):
specs.stack_json_spec().normalise(self.meta, value)
value = {"Resources": {}, key: {1:1}}
error = BadSpecValue("Expected a string", got=int, meta=self.meta.at(key).at(1))
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[BadSpecValue(meta=self.meta.at(key), _errors=[error])]):
specs.stack_json_spec().normalise(self.meta, value)
value = {"Resources": {}, key: {"1":1}}
error = BadSpecValue("Expected a dictionary", got=int, meta=self.meta.at(key).at("1"))
with self.fuzzyAssertRaisesError(BadSpecValue, _errors=[BadSpecValue(meta=self.meta.at(key), _errors=[error])]):
specs.stack_json_spec().normalise(self.meta, value)
describe BespinCase, "artifact_command_spec":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
it "makes copy as a list of ArtifactPath":
p1 = self.unique_val()
p2 = self.unique_val()
p3 = self.unique_val()
p4 = self.unique_val()
copy = [[p1, p2], "{0}:{1}".format(p3, p4)]
self.assertEqual(
specs.artifact_command_spec().normalise(self.meta, {"copy": copy}).copy
, [artifact_objs.ArtifactPath(p1, p2), artifact_objs.ArtifactPath(p3, p4)]
)
it "makes add_into_tar as a list of ArtifactPath":
p1 = self.unique_val()
p2 = self.unique_val()
p3 = self.unique_val()
p4 = self.unique_val()
add_into_tar = [[p1, p2], "{0}:{1}".format(p3, p4)]
self.assertEqual(
specs.artifact_command_spec().normalise(self.meta, {"add_into_tar": add_into_tar}).add_into_tar
, [artifact_objs.ArtifactPath(p1, p2), artifact_objs.ArtifactPath(p3, p4)]
)
it "makes command as a list of formatted string":
one = self.unique_val()
meta = Meta(MergedOptions.using({"one": one}), [])
command = "blah {one} meh"
expected = ["blah {0} meh".format(one)]
self.assertEqual(specs.artifact_command_spec().normalise(meta, {"command": command}).command, expected)
it "makes modify as a dictionary":
res = specs.artifact_command_spec().normalise(self.meta, {"modify": {"somewhere": {"append": "stuff"}}})
self.assertEqual(res.modify, {"somewhere": {"append": ["stuff"]}})
describe BespinCase, "s3_address":
before_each:
self.meta = mock.Mock(name="meta", spec=Meta)
it "returns an S3 Address":
res = specs.s3_address().normalise(self.meta, "s3://blah/and/stuff")
self.assertEqual(res, objs.S3Address("blah", "/and/stuff", 600))
res = specs.s3_address().normalise(self.meta, "s3://blah")
self.assertEqual(res, objs.S3Address("blah", "/", 600))
res = specs.s3_address().normalise(self.meta, "s3://blah/")
self.assertEqual(res, objs.S3Address("blah", "/", 600))
it "can have a timeout specified as well":
res = specs.s3_address().normalise(self.meta, ["s3://blah/and/stuff", 700])
self.assertEqual(res, objs.S3Address("blah", "/and/stuff", 700))
it "complains if the address is invalid":
for val in ("http://somewhere", "amds"):
with self.fuzzyAssertRaisesError(BadSpecValue, "Not a valid s3 address", got=val, meta=self.meta):
specs.s3_address().normalise(self.meta, val)
|
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
LOG = logging.getLogger(__name__)
def hpo_terms(case_obj):
"""Extract all phenotype-associated terms for a case. Drawback of this method is that
it returns the same phenotype terms for each affected individual
of the case.
Args:
case_obj(dict): a scout case object
Returns:
features(list): a list of phenotype objects that looks like this:
[
{
"id": "HP:0001644",
"label": "Dilated cardiomyopathy",
"observed": "yes"
},
...
]
"""
LOG.info("Collecting phenotype terms for case {}".format(case_obj.get("display_name")))
features = []
case_features = case_obj.get("phenotype_terms")
if case_features:
# re-structure case features to mirror matchmaker feature fields:
for feature in case_features:
feature_obj = {
"id": feature.get("phenotype_id"),
"label": feature.get("feature"),
"observed": "yes",
}
features.append(feature_obj)
return features
def omim_terms(store, case_obj):
"""Extract all OMIM phenotypes available for the case
Args:
store(scout.adapter.MongoAdapter)
case_obj(dict): a scout case object
Returns:
disorders(list): a list of OMIM disorder objects
"""
LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get("display_name")))
disorders = []
case_disorders = case_obj.get("diagnosis_phenotypes") # array of OMIM terms
if case_disorders:
for disorder in case_disorders:
omim_term = store.disease_term(disorder)
if omim_term is None:
LOG.warning(f"Disease term {disorder} could not be found in database.")
continue
disorder_obj = {
"id": ":".join(["MIM", str(disorder)]),
"label": omim_term.get("description"),
}
disorders.append(disorder_obj)
return disorders
def genomic_features(store, case_obj, sample_name, candidate_vars, genes_only):
"""Extract and parse matchmaker-like genomic features from pinned variants
of a patient
Args:
store(MongoAdapter) : connection to the database
case_obj(dict): a scout case object
sample_name(str): sample display name
candidate_vars(list): a list of variants/genes chosen from the user. Example: ["4c7d5c70d955875504db72ef8e1abe77", "77d69d4d78a8e272365bdabe4f607327|TIPIN"]
genes_only(bool): if True only gene names will be included in genomic features
Returns:
g_features(list): a list of genomic feature objects that looks like this:
[
{
"gene": {
"id": "LIMS2"
},
"variant": {
"alternateBases": "C",
"assembly": "GRCh37",
"end": 128412081,
"referenceBases": "G",
"referenceName": "2",
"start": 128412080
},
"zygosity": 1
},
....
]
"""
g_features = []
for var in candidate_vars:
vari_id = var.split("|")[0]
gene_symbol = None
var_obj = store.sample_variant(vari_id, sample_name)
if var_obj is None:
continue
if "|" in var: # Share a gene symbol from a SV
gene_symbol = var.split("|")[1]
g_feature = {"gene": {"id": gene_symbol}}
g_features.append(g_feature)
continue
# SNV variant
hgnc_genes = var_obj.get("hgnc_ids")
if not hgnc_genes:
continue
for hgnc_id in hgnc_genes:
gene_caption = store.hgnc_gene_caption(hgnc_id, case_obj["genome_build"])
if not gene_caption:
continue
g_feature = {"gene": {"id": gene_caption.get("hgnc_symbol")}}
if genes_only is True: # Disclose only gene info
g_features.append(g_feature)
continue
# share Variant-level information
g_feature["variant"] = {
"referenceName": var_obj["chromosome"],
"start": var_obj["position"],
"end": var_obj["end"],
"assembly": "GRCh38" if "38" in str(case_obj.get("genome_build", "")) else "GRCh37",
"referenceBases": var_obj["reference"],
"alternateBases": var_obj["alternative"],
"shareVariantLevelData": True,
}
zygosity = None
# collect zygosity for the given sample
            # list with the zygosity status of each sample of the case
            zygosities = var_obj["samples"]
for zyg in zygosities:
if zyg.get("display_name") == sample_name: # sample of interest
zygosity = zyg["genotype_call"].count("1") + zyg["genotype_call"].count("2")
g_feature["zygosity"] = zygosity
g_features.append(g_feature)
return g_features
def parse_matches(patient_id, match_objs):
"""Parse a list of matchmaker matches objects and returns
a readable list of matches to display in matchmaker matches view.
Args:
patient_id(str): id of a mme patient
match_objs(list): list of match objs returned by MME server for the patient
# match_objs looks like this:
[
{
'node' : { id : node_id , label: node_label},
'patients' : [
{ 'patient': {patient1_data} },
{ 'patient': {patient2_data} },
..
]
},
..
]
Returns:
parsed_matches(list): a list of parsed match objects
"""
LOG.info("Parsing MatchMaker matches for patient {}".format(patient_id))
parsed_matches = []
for match_obj in match_objs:
# convert match date from millisecond to readable date
milliseconds_date = match_obj["created"]["$date"]
mdate = datetime.datetime.fromtimestamp(milliseconds_date / 1000.0)
match_type = "external"
matching_patients = []
parsed_match = {
"match_oid": match_obj["_id"]["$oid"], # save match object ID
"match_date": mdate,
}
# if patient was used as query patient:
if match_obj["data"]["patient"]["id"] == patient_id:
match_results = match_obj["results"] # List of matching patients
for node_result in match_results:
if match_obj["match_type"] == "internal":
match_type = "internal"
for patient in node_result["patients"]:
match_patient = {
"patient_id": patient["patient"]["id"],
"score": patient["score"],
"patient": patient["patient"],
"node": node_result["node"],
}
matching_patients.append(match_patient)
else: # else if patient was returned as a match result for another patient
m_patient = match_obj["data"]["patient"]
contact_institution = m_patient["contact"].get("institution")
if contact_institution and "Scout software user" in contact_institution:
match_type = "internal"
# loop over match results to capture score for matching
score = None
for res in match_obj["results"]:
for patient in res["patients"]:
LOG.info("Looping in else, patient:{}".format(patient["patient"]["id"]))
if patient["patient"]["id"] == patient_id:
score = patient["score"]
match_patient = {
"patient_id": m_patient["id"],
"score": score,
"patient": m_patient,
"node": res["node"],
}
matching_patients.append(match_patient)
parsed_match["match_type"] = match_type
parsed_match["patients"] = matching_patients
parsed_matches.append(parsed_match)
    # sort results by descending match date
parsed_matches = sorted(parsed_matches, key=lambda k: k["match_date"], reverse=True)
return parsed_matches
|
|
"""
Bernie's L System demo in Python.
This program implements a simple context free L System,
and renders two dimensional images in a window.
It needs (at least) python 2.6, and uses the turtle
graphics library, which is based on TK graphics.
To try it out, either load into Python and run the
demo functions, or run it from the command line:
python lsystem.py
Author: Bernie Pope: www.cs.mu.oz.au/~bjpop/
Licence: unrestricted.
Feel free to play with the code as much as you like
and share your changes with the world.
Some remarks about the implementation:
An L-System is a term rewriting system made up of one or
more rules. Rules have the form:
head -> body
where head is a variable, and body is a non-empty string
made up of variables and constants. Variables are
denoted by upper-case alphabetic letters. Constants
are denoted by any character which is not a variable.
Here is an example L-System:
X -> F-[[X]+X]+F[+FX]-X
F -> FF
In this program the convention is that the first rule
is taken as the starting point.
An LSystem object is constructed like so:
rules = ['X -> F-[[X]+X]+F[+FX]-X', 'F -> FF']
my_system = LSystem(rules)
That is, the LSystem constructor takes a list of strings
as its argument, where each string is a single rule.
An LSystem object doesn't do anything on its own - it must
be interpreted.
LSystem objects have a run method which takes two parameters:
   1) A non-negative integer which indicates how many times
the rules should be iterated.
2) An interpreter object which implements an 'interpretTokens'
method.
Here is a simple example:
class SimpleInterp(object):
def interpretTokens(self, tokens): return tokens
   answer = my_system.run(6, SimpleInterp())
A more sophisticated interpreter is defined called Visualise
which renders the result of iterating an LSystem as turtle
graphics.
The Visualise constructor takes a dictionary mapping LSystem
variables to functions, here is an example:
{ '-': lambda _ : left(left_angle)
, '+': lambda _ : right(right_angle)
, 'F': lambda _ : forward(fwd_distance)
, '[': lambda obj : obj.push()
, ']': lambda obj : obj.pop()
}
"""
import sys
# import the turtle graphics library
try:
from turtle import *
except ImportError:
print("This program requires the turtle graphics library.")
print("Unfortunately Python cannot find that library on your computer.")
print("See the documentation at: http://docs.python.org/library/turtle.html")
sys.exit(-1)
from collections import deque
py_version = sys.version_info[:2]
if py_version < (2,6):
print("This program requires Python version 2.6 or greater to run.")
print("Your version of Python is " + '.'.join(map(str,py_version)) + ", which is too old.")
sys.exit(-1)
# Some demo functions, which make it relatively easy to use
def interactive_demo():
    def show_demo(name, action):
        print(name)
        action()
        reply = raw_input("Press any key to continue or q/Q to quit: ")
        if reply.lower() == 'q':
            sys.exit(0)
show_demo("Bushy tree", demo1)
show_demo("Twiggy tree", demo2)
show_demo("Koch curve", demo3)
show_demo("Sierpinski triangle", demo4)
show_demo("Peano Gosper curve", demo5)
show_demo("Conifer-like tree", demo6)
show_demo("Tiles", demo7)
show_demo("Crystal", demo8)
show_demo("Peano curve", demo9)
def demo0():
class SimpleInterp(object):
def interpretTokens(self, tokens): return tokens
return bushy_tree().run(6, SimpleInterp())
def demo1():
def init():
initPosition()
left(90)
vis = Visualise(basic_actions(25,25,5), init)
bushy_tree().run(5,vis)
def bushy_tree():
return LSystem(["F -> FF-[-F+F+F]+[+F-F-F]"])
def demo2():
def init():
initPosition()
left(90)
vis = Visualise(basic_actions(25,25,2), init)
twiggy_tree().run(7,vis)
def twiggy_tree():
rules = ['X -> F-[[X]+X]+F[+FX]-X', 'F -> FF']
return LSystem(rules)
def demo3():
def init(): initPosition(lambda width, height : (-9*width/20, -height/4))
actions = basic_actions(None,None,0.06)
actions['-'] = lambda _ : right(68)
actions['+'] = lambda _ : left(68)
vis = Visualise(actions, init)
koch().run(7,vis)
def koch():
return LSystem(['F -> F+F-F-F+F'])
def demo4():
def init():
initPosition(lambda width, height : (-3*width/8, -height/4))
actions = basic_actions(60,60,0)
actions['A'] = lambda _ : forward(0.5)
actions['B'] = lambda _ : forward(0.5)
actions['-'] = lambda _ : right(60)
actions['+'] = lambda _ : left(60)
vis = Visualise(actions, init)
sierpinski().run(10,vis)
def sierpinski():
return LSystem(['A -> B-A-B', 'B -> A+B+A'])
def demo5():
def init():
initPosition(lambda width, height : (width/4, 3*height/8))
actions = basic_actions(60,60,4)
vis = Visualise(actions, init)
peano_gosper().run(5,vis)
def peano_gosper():
rules = [ 'X -> X+YF++YF-FX--FXFX-YF+'
, 'Y -> -FX+YFYF++YF+FX--FX-Y'
, 'F -> F' ]
return LSystem(rules)
def demo6():
def init():
initPosition(lambda width, height : (0, -3*height/8))
left(90)
actions = basic_actions(20,20,11)
vis = Visualise(actions, init)
conifer().run(12,vis)
def conifer():
rules = [ 'I -> VZFFF'
, 'V -> [+++W][---W]YV'
, 'W -> +X[-W]Z'
, 'X -> -W[+X]Z'
, 'Y -> YZ'
, 'Z -> [-FFF][+FFF]F'
, 'F -> F' ]
return LSystem(rules)
def demo7():
def init():
initPosition(lambda width, height : (-width/5, 0))
actions = basic_actions(90,90,4)
vis = Visualise(actions, init)
tiles().run(6,vis)
def tiles():
rules = [ 'I -> F+F+F+F'
, 'F -> FF+F-F+F+FF' ]
return LSystem(rules)
def demo8():
def init():
initPosition(lambda width, height : (-width/3, -height/3))
left(90)
actions = basic_actions(90,90,2)
vis = Visualise(actions, init)
crystal().run(6,vis)
def crystal():
rules = ['I -> F+F+F+F', 'F -> FF+F++F+F']
return LSystem(rules)
def demo9():
def init():
initPosition(lambda width, height : (-width/3, -height/3))
left(90)
actions = basic_actions(90,90,2)
vis = Visualise(actions, init)
peano_curve().run(5,vis)
def peano_curve():
rules = [ 'X -> XFYFX+F+YFXFY-F-XFYFX'
, 'Y -> YFXFY-F-XFYFX+F+YFXFY'
, 'F -> F' ]
return LSystem(rules)
class LSystem(object):
def __init__(self, rules):
        if len(rules) > 0:
            # exec each compiled rule into a shared namespace so that the
            # generated rule functions can call one another recursively
            namespace = {}
            for r in rules:
                exec(compile(r), namespace)
            # by convention the first rule is the starting point
            firstRuleName, _ = decomposeRule(rules[0])
            self.rule = namespace[firstRuleName]
        else:
            self.rule = lambda _ : ''
def run(self, maxIterations, interpreter):
return interpreter.interpretTokens(self.rule(maxIterations))
class Visualise (object):
    def __init__(self, actions, initCommand=None):
        self.actions = actions
self.initCommand = initCommand
self.stack = deque()
def interpretTokens(self, tokens):
initDisplay()
        if self.initCommand is not None: self.initCommand()
def action_fun(token):
return self.actions.get(token, lambda _ : None)(self)
self.stack = deque()
        for token in tokens:
            action_fun(token)
def push(self):
orient = heading()
pos = position()
self.stack.append((orient, pos))
def pop(self):
stack = self.stack
if len(stack) == 0:
raise Exception('Attempt to pop empty stack')
(orient, pos) = stack.pop()
up()
goto(pos)
setheading(orient)
down()
def basic_actions (left_angle, right_angle, fwd_distance):
return { '-': lambda _ : left(left_angle)
, '+': lambda _ : right(right_angle)
, 'F': lambda _ : forward(fwd_distance)
, '[': lambda obj : obj.push()
, ']': lambda obj : obj.pop()
}
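# basic_actions builds the same action dictionary shown in the module docstring
# above; every callable receives the Visualise object, which is how '[' and ']'
# reach its push/pop stack, while the other actions simply ignore it.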
# Configuration of graphics window
def initDisplay(drawColour="black"):
title ("Bernie's L System demo")
setup()
reset()
degrees()
color(drawColour)
# Try to make the animation of drawing reasonably fast.
tracer(50,0) # Only draw every 50th update, set delay to zero.
hideturtle() # don't draw the turtle; increase drawing speed.
def initPosition(mover=lambda width, height : (0, -height/2)):
height = window_height()
width = window_width()
up()
goto (mover (width, height))
down()
"""
The input rule:
X -> X+X+F
is compiled to:
def X(n):
if n > 0:
xn = X(n-1)
fn = F(n-1)
return ''.join([xn,'+',xn,'+',fn])
else:
return 'X'
"""
def compile(rule):
(name, body) = decomposeRule(rule)
(vars,listIds) = varsIds(body)
defPart = 'def ' + name + '(n):'
varBinds = list(map(mkVarBind,vars))
joinListPart = "''.join([" + ','.join(listIds) + '])'
ifHead = 'if n > 0:'
ifBody = varBinds + ['return ' + joinListPart]
elsePart = 'else: return ' + quote(name)
    return '\n'.join([defPart] +
                     list(map(indent, [ifHead] +
                                      list(map(indent, ifBody)) +
                                      [elsePart])))
def decomposeRule(rule):
splitRule = rule.split('->')
if len(splitRule) != 2:
raise Exception("badly formed L-System rule:\
{0}".format(quote(str(rule))))
name = splitRule[0].strip()
body = splitRule[1].strip()
if len(name) != 1 or len(body) == 0:
raise Exception("badly formed L-System rule:\
{0}".format(quote(str(rule))))
return (name, body)
def mkVarBind(var):
return var.lower() + 'n = {0}(n-1)'.format(var)
def quote(inputstr):
return "'{0}'".format(inputstr)
def indent(inputstr):
    return ' ' + inputstr
def varsIds(inputstr):
    vars = set()
    ids = []
    for c in inputstr:
        if c.isupper():
            vars.add(c)
            ids.append(c.lower()+'n')
        else:
            ids.append(quote(c))
    return (vars, ids)
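# A small sanity check of the rule compiler (an illustrative addition, not part
# of the original demo). It prints the Python source generated for the example
# rule from the docstring above; actually running the generated X(n) would also
# require compiling the other variables it mentions (here F).
def compile_demo():
    src = compile('X -> X+X+F')
    print(src)
    return src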
if __name__ == "__main__":
interactive_demo()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from decimal import Decimal
from test_framework.blocktools import (
create_coinbase,
)
from test_framework.messages import (
CBlock,
CBlockHeader,
)
from test_framework.mininode import (
P2PDataStore,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from test_framework.script import CScriptNum
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate(template_request={'data': block.serialize().hex(), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def mine_chain(self):
self.log.info('Create some old blocks')
for _ in range(0, 200):
self.bump_mocktime(156)
self.nodes[0].generate(1)
mining_info = self.nodes[0].getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblocksize'], 1000)
self.restart_node(0)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
def run_test(self):
self.mine_chain()
node = self.nodes[0]
def assert_submitblock(block, result_str_1, result_str_2=None):
block.solve()
result_str_2 = result_str_2 or 'duplicate-invalid'
assert_equal(result_str_1, node.submitblock(hexdata=block.serialize().hex()))
assert_equal(result_str_2, node.submitblock(hexdata=block.serialize().hex()))
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], self.chain)
assert 'currentblocksize' not in mining_info
assert 'currentblocktx' not in mining_info
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.01282051282051282'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generatetoaddress(1, node.get_deterministic_priv_key().address)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
next_height = int(tmpl["height"])
coinbase_tx = create_coinbase(height=next_height)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
# round-trip the encoded bip34 block height commitment
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), next_height)
# round-trip negative and multi-byte CScriptNums to catch python regression
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))), 1500)
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))), -1500)
assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1)
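        # (script numbers are serialized as minimally-encoded little-endian
        # sign-and-magnitude integers, so the negative and multi-byte values
        # above exercise distinct encode/decode paths)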
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, block.serialize()[:-15].hex())
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, bad_block.serialize().hex())
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': block.serialize()[:-1].hex(), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
assert_submitblock(bad_block, 'bad-txns-duplicate', 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
assert_submitblock(bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
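        # (an 80-byte header = 4-byte version + 32-byte prev-block hash +
        # 32-byte merkle root + 4-byte time + 4-byte nBits + 4-byte nonce)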
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': bad_block_sn.hex(), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
assert_submitblock(bad_block, 'time-too-old', 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
assert_submitblock(bad_block, 'prev-blk-not-found', 'prev-blk-not-found')
self.log.info('submitheader tests')
assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='xx' * 80))
assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='ff' * 78))
assert_raises_rpc_error(-25, 'Must submit previous header', lambda: node.submitheader(hexdata='ff' * 80))
block.nTime += 1
block.solve()
def filter_tip_keys(chaintips):
"""
            Dash's getchaintips RPC returns extra info in each tip (difficulty,
            chainwork, and forkpoint); filter each tip down to the keys checked
            in this test.
"""
check_keys = ["hash", "height", "branchlen", "status"]
filtered_tips = []
for tip in chaintips:
filtered_tips.append({k: tip[k] for k in check_keys})
return filtered_tips
def chain_tip(b_hash, *, status='headers-only', branchlen=1):
return {'hash': b_hash, 'height': 202, 'branchlen': branchlen, 'status': status}
assert chain_tip(block.hash) not in filter_tip_keys(node.getchaintips())
node.submitheader(hexdata=block.serialize().hex())
assert chain_tip(block.hash) in filter_tip_keys(node.getchaintips())
node.submitheader(hexdata=CBlockHeader(block).serialize().hex()) # Noop
assert chain_tip(block.hash) in filter_tip_keys(node.getchaintips())
bad_block_root = copy.deepcopy(block)
bad_block_root.hashMerkleRoot += 2
bad_block_root.solve()
assert chain_tip(bad_block_root.hash) not in filter_tip_keys(node.getchaintips())
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert chain_tip(bad_block_root.hash) in filter_tip_keys(node.getchaintips())
# Should still reject invalid blocks, even if we have the header:
assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
assert chain_tip(bad_block_root.hash) in filter_tip_keys(node.getchaintips())
# We know the header for this invalid block, so should just return early without error:
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert chain_tip(bad_block_root.hash) in filter_tip_keys(node.getchaintips())
bad_block_lock = copy.deepcopy(block)
bad_block_lock.vtx[0].nLockTime = 2**32 - 1
bad_block_lock.vtx[0].rehash()
bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
bad_block_lock.solve()
assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'bad-txns-nonfinal')
assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'duplicate-invalid')
# Build a "good" block on top of the submitted bad block
bad_block2 = copy.deepcopy(block)
bad_block2.hashPrevBlock = bad_block_lock.sha256
bad_block2.solve()
assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
# Should reject invalid header right away
bad_block_time = copy.deepcopy(block)
bad_block_time.nTime = 1
bad_block_time.solve()
assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
# Should ask for the block from a p2p node, if they announce the header as well:
node.add_p2p_connection(P2PDataStore())
node.p2p.wait_for_getheaders(timeout=5) # Drop the first getheaders
node.p2p.send_blocks_and_test(blocks=[block], node=node)
# Must be active now:
assert chain_tip(block.hash, status='active', branchlen=0) in filter_tip_keys(node.getchaintips())
# Building a few blocks should give the same results
node.generatetoaddress(10, node.get_deterministic_priv_key().address)
assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
node.submitheader(hexdata=CBlockHeader(block).serialize().hex())
node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
assert_equal(node.submitblock(hexdata=block.serialize().hex()), 'duplicate') # valid
if __name__ == '__main__':
MiningTest().main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module deals with the management of configuration files,
presumably for a cache server
"""
import logging
import os
import re
import typing
from base64 import b64decode
from trafficops.restapi import OperationError, InvalidJSONError, LoginError
from .configuration import Configuration
from .utils import getYesNoResponse as getYN
#: Holds the set of names of services whose configuration files have changed,
#: and which therefore need their configurations reloaded.
RELOADS_REQUIRED = set()
#: A constant that holds the absolute path to the backup directory for configuration files
BACKUP_DIR = "/opt/ort/backups"
#: a pre-compiled regular expression to use in parsing
SSL_KEY_REGEX = re.compile(r'^\s*ssl_cert_name\=(.*)\s+ssl_key_name\=(.*)\s*$')
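# It captures the certificate and key file names from lines like the following
# (hypothetical example):
#     ssl_cert_name=example.com.cer ssl_key_name=example.com.key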
class ConfigurationError(Exception):
"""
Represents an error updating configuration files
"""
pass
class ConfigFile():
"""
Represents a configuration file on a host system.
"""
fname = "" #: The base name of the file
location = "" #: An absolute path to the directory containing the file
URI = "" #: A URI where the actual file contents can be found
contents = "" #: The full contents of the file - as configured in TO, not the on-disk contents
sanitizedContents = "" #: Will store the contents after sanitization
def __init__(self, raw:dict = None, toURL:str = "", tsroot:str = "/", *unused_args, contents: str = None, path: str = None):
"""
Constructs a :class:`ConfigFile` object from a raw API response
:param raw: A raw config file from an API response
:param toURL: The URL of a valid Traffic Ops host
:param tsroot: The absolute path to the root of an Apache Traffic Server installation
:param contents: Directly constructs a ConfigFile from the passed contents. Must be used with path, and causes raw to be ignored.
:param path: Sets the full path to the file when constructing ConfigFiles directly from contents.
:raises ValueError: if ``raw`` does not faithfully represent a configuration file
>>> a = ConfigFile({"fnameOnDisk": "test",
... "location": "/path/to",
... "apiUri":"/test",
... "scope": "servers"}, "http://example.com/")
>>> a
ConfigFile(path='/path/to/test', URI='http://example.com/test', scope='servers')
>>> a.SSLdir
'/etc/trafficserver/ssl'
>>> ConfigFile(contents='testquest', path='/path/to/test')
ConfigFile(path='/path/to/test', URI=None, scope=None)
"""
self.SSLdir = os.path.join(tsroot, "etc", "trafficserver", "ssl")
if contents is not None:
if path is None:
raise ValueError("cannot construct from direct contents without setting path")
self.location, self.fname = os.path.split(path)
self.contents = contents
self.scope = None
return
if raw is not None:
try:
self.fname = raw["fnameOnDisk"]
self.location = raw["location"]
if "apiUri" in raw:
self.URI = toURL + raw["apiUri"].lstrip('/')
else:
self.URI = raw["url"]
self.scope = raw["scope"]
except (KeyError, TypeError, IndexError) as e:
raise ValueError from e
def __repr__(self) -> str:
"""
Implements ``repr(self)``
>>> repr(ConfigFile({"fnameOnDisk": "test",
... "location": "/path/to",
... "apiUri": "test",
... "scope": "servers"}, "http://example.com/"))
"ConfigFile(path='/path/to/test', URI='http://example.com/test', scope='servers')"
"""
return "ConfigFile(path=%r, URI=%r, scope=%r)" %\
(self.path, self.URI if self.URI else None, self.scope)
@property
def path(self) -> str:
"""
The full path to the file on disk
        :returns: :attr:`location` and :attr:`fname` joined by the system's path separator
"""
return os.path.join(self.location, self.fname)
def fetchContents(self, api:'to_api.API'):
"""
Fetches the file contents from :attr:`URI` if possible. Sets :attr:`contents` when
successful.
:param api: A valid, authenticated API session for use when interacting with Traffic Ops
:raises ConnectionError: if something goes wrong fetching the file contents from Traffic
Ops
"""
logging.info("Fetching file %s", self.fname)
try:
self.contents = api.getRaw(self.URI)
except ValueError as e:
raise ConnectionError from e
logging.info("fetched")
def backup(self, contents:str, mode:Configuration.Modes):
"""
Creates a backup of this file under the :data:`BACKUP_DIR` directory
:param contents: The actual, on-disk contents from the original file
:param mode: The current run-mode of :program:`traffic_ops_ort`
:raises OSError: if the backup directory does not exist, or a backup of this file
could not be written into it.
"""
backupfile = os.path.join(BACKUP_DIR, self.fname)
willClobber = False
if os.path.isfile(backupfile):
willClobber = True
if mode is Configuration.Modes.INTERACTIVE:
prmpt = ("Write backup file %s%%s?" % backupfile)
prmpt %= " - will clobber existing file by the same name - " if willClobber else ''
if not getYN(prmpt, default='Y'):
return
elif willClobber:
logging.warning("Clobbering existing backup file '%s'!", backupfile)
if mode is not Configuration.Modes.REPORT:
with open(backupfile, 'w') as fp:
fp.write(contents)
logging.info("Backup File written")
def update(self, conf:Configuration) -> bool:
"""
Updates the file if required, backing up as necessary
:param conf: An object that represents the configuration of :program:`traffic_ops_ort`
:returns: whether or not the file on disk actually changed
:raises OSError: when reading/writing files fails for some reason
"""
from .services import NEEDED_RELOADS, FILES_THAT_REQUIRE_RELOADS
if not self.contents:
self.fetchContents(conf.api)
finalContents = sanitizeContents(self.contents, conf)
elif self.URI:
finalContents = self.contents
else:
finalContents = sanitizeContents(self.contents, conf)
# Ensure POSIX-compliant files
if not finalContents.endswith('\n'):
finalContents += '\n'
logging.info("Sanitized output: \n%s", finalContents)
self.sanitizedContents = finalContents
if not os.path.isdir(self.location):
if (conf.mode is Configuration.Modes.INTERACTIVE and
not getYN("Create configuration directory %s?" % self.path, 'Y')):
logging.warning("%s will not be created - some services may not work properly!",
self.path)
return False
logging.info("Directory %s will be created", self.location)
logging.info("File %s will be created", self.path)
if conf.mode is not Configuration.Modes.REPORT:
os.makedirs(self.location)
with open(self.path, 'x') as fp:
fp.write(finalContents)
return True
if not os.path.isfile(self.path):
if (conf.mode is Configuration.Modes.INTERACTIVE and\
not getYN("Create configuration file %s?"%self.path, default='Y')):
logging.warning("%s will not be created - some services may not work properly!",
self.path)
return False
logging.info("File %s will be created", self.path)
if conf.mode is not Configuration.Modes.REPORT:
with open(self.path, 'x') as fp:
fp.write(finalContents)
if self.fname == "ssl_multicert.config":
return self.advancedSSLProcessing(conf)
return True
written = False
with open(self.path, 'r+') as fp:
onDiskContents = fp.readlines()
if filesDiffer(finalContents.splitlines(), onDiskContents):
self.backup(''.join(onDiskContents), conf.mode)
if conf.mode is not Configuration.Modes.REPORT:
fp.seek(0)
fp.truncate()
fp.write(finalContents)
written = True
logging.info("File written to %s", self.path)
else:
logging.info("File doesn't differ from disk; nothing to do")
        # Now we need to do some advanced processing for a couple of specific
        # filenames... unfortunately. But ONLY if the object wasn't directly constructed.
if self.fname == "ssl_multicert.config" and self.URI:
return self.advancedSSLProcessing(conf) or written
return written
def advancedSSLProcessing(self, conf:Configuration):
"""
Does advanced processing on ssl_multicert.config files
:param conf: An object that represents the configuration of :program:`traffic_ops_ort`
:raises OSError: when reading/writing files fails for some reason
"""
global SSL_KEY_REGEX
logging.info("Doing advanced SSL key processing for CDN '%s'", conf.ServerInfo.cdnName)
try:
r = conf.api.get_cdn_ssl_keys(cdn_name=conf.ServerInfo.cdnName)
if r[1].status_code != 200 and r[1].status_code != 204:
raise OSError("Bad response code: %d - raw response: %s" %
(r[1].status_code, r[1].text))
except (OperationError, LoginError, InvalidJSONError, ValueError) as e:
raise OSError("Invalid values encountered when communicating with Traffic Ops!") from e
logging.debug("Raw response from Traffic Ops: %s", r[1].text)
written = False
for l in self.sanitizedContents.splitlines()[1:]:
logging.debug("advanced processing for line: %s", l)
# for some reason, pylint is detecting this regular expression as a string
#pylint: disable=E1101
m = SSL_KEY_REGEX.search(l)
#pylint: enable=E1101
if m is None:
continue
full = m.group(2)
if full.endswith(".key"):
full = full[:-4]
wildcard = full.find('.')
if wildcard >= 0:
wildcard = '*'+full[wildcard:]
else:
# Not sure if this is a reasonable default - if there's no '.' in the key name,
# then there's probably something wrong...
wildcard = "*." + full
logging.debug("Searching for '%s' or '%s' matches", full, wildcard)
for cert in r[0]:
if cert.hostname == full or cert.hostname == wildcard:
key = ConfigFile()
key.location = self.SSLdir
key.fname = m.group(2)
key.contents = b64decode(cert.certificate.key).decode()
logging.info("Processing private SSL key %s ...", key.fname)
written = key.update(conf)
logging.info("Done.")
crt = ConfigFile()
crt.location = self.SSLdir
crt.fname = m.group(1)
crt.contents = b64decode(cert.certificate.crt).decode()
logging.info("Processing SSL certificate %s ...", crt.fname)
written = crt.update(conf)
logging.info("Done.")
break
else:
logging.critical("Failed to find SSL key in %s for '%s' or by wildcard '%s'!",
conf.ServerInfo.cdnName, full, wildcard)
raise OSError("No cert/key pair for ssl_multicert.config line '%s'" % l)
# If even one key was written, we need to make ATS aware of the configuration changes
return written
def filesDiffer(a:typing.List[str], b:typing.List[str]) -> bool:
"""
    Compares two files for meaningful differences. Traffic Ops headers are
    stripped out of the file contents before comparison, and trailing
    whitespace is ignored.
    :param a: The contents of the first file, as a list of its lines
    :param b: The contents of the second file, as a list of its lines
    :returns: :const:`True` if the files have any differences, :const:`False` otherwise
"""
a = [l.rstrip() for l in a if l.rstrip() and not l.startswith("# DO NOT EDIT") and\
not l.startswith("# TRAFFIC OPS NOTE:")]
b = [l.rstrip() for l in b if l.rstrip() and not l.startswith("# DO NOT EDIT") and\
not l.startswith("# TRAFFIC OPS NOTE:")]
if len(a) != len(b):
return True
for i, l in enumerate(a):
if l != b[i]:
return True
return False
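# Illustrative example (not part of the original module): Traffic Ops header
# lines and trailing whitespace are ignored, so these two compare as identical:
#     filesDiffer(["# DO NOT EDIT", "foo\t"], ["foo"])  # -> False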
def sanitizeContents(raw:str, conf:Configuration) -> str:
"""
Performs pre-processing on a raw configuration file
:param raw: The raw contents of the file as returned by a request to its URL
:param conf: An object that represents the configuration of :program:`traffic_ops_ort`
:returns: The same contents, but with special replacement strings parsed out and HTML-encoded
symbols decoded to their literal values
"""
out = []
lines = (conf.ServerInfo.sanitize(raw, conf.hostname) if conf.ServerInfo else raw).splitlines()
for line in lines:
tmp=(" ".join(line.split())).strip() #squeezes spaces and trims leading and trailing spaces
tmp=tmp.replace("&", '&') #decodes HTML-encoded ampersands
tmp=tmp.replace(">", '>') #decodes HTML-encoded greater-than symbols
tmp=tmp.replace("<", '<') #decodes HTML-encoded less-than symbols
out.append(tmp)
return '\n'.join(out)
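# Illustrative example (assuming conf.ServerInfo is unset): repeated whitespace
# is squeezed and HTML entities are decoded, e.g.
#     sanitizeContents("foo    &gt;=  bar", conf)  # -> 'foo >= bar'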
def initBackupDir(mode:Configuration.Modes):
"""
Initializes a backup directory as a subdirectory of the directory containing
this ORT script.
:param mode: The current run-mode of :program:`traffic_ops_ort`
:raises OSError: if the backup directory initialization fails
"""
global BACKUP_DIR
logging.info("Initializing backup dir %s", BACKUP_DIR)
if not os.path.isdir(BACKUP_DIR):
if mode is not Configuration.Modes.REPORT:
os.mkdir(BACKUP_DIR)
else:
logging.error("Cannot create non-existent backup dir in REPORT mode!")
else:
logging.info("Backup dir already exists - nothing to do")
|
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
is_gae = request.env.web2py_runtime_gae or False
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
hosts = (http_host, socket.gethostname(),
socket.gethostbyname(http_host),
'::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
hosts = (http_host, )
if request.is_https:
session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
(request.function != 'manage'):
raise HTTP(200, T('appadmin is disabled because insecure channel'))
if request.function == 'manage':
if not 'auth' in globals() or not request.args:
redirect(URL(request.controller, 'index'))
manager_action = auth.settings.manager_actions.get(request.args(0), None)
if manager_action is None and request.args(0) == 'auth':
manager_action = dict(role=auth.settings.auth_manager_role,
heading=T('Manage Access Control'),
tables=[auth.table_user(),
auth.table_group(),
auth.table_permission()])
manager_role = manager_action.get('role', None) if manager_action else None
auth.requires_membership(manager_role)(lambda: None)()
menu = False
elif (request.application == 'admin' and not session.authorized) or \
(request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
redirect(URL('admin', 'default', 'index',
vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
response.subtitle = T('Database Administration (appadmin)')
menu = True
ignore_rw = True
response.view = 'appadmin.html'
# if menu:
# response.menu = [[T('design'), False, URL('admin', 'default', 'design',
# args=[request.application])], [T('db'), False,
# URL('index')], [T('state'), False,
# URL('state')], [T('cache'), False,
# URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
if False and request.tickets_db:
from gluon.restricted import TicketStorage
ts = TicketStorage()
ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
dbs = {}
for (key, value) in global_env.items():
try:
cond = isinstance(value, GQLDB)
except:
cond = isinstance(value, SQLDB)
if cond:
dbs[key] = value
return dbs
databases = get_databases(None)
def eval_in_global_env(text):
exec ('_ret=%s' % text, {}, global_env)
return global_env['_ret']
def get_database(request):
if request.args and request.args[0] in databases:
return eval_in_global_env(request.args[0])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_table(request):
db = get_database(request)
if len(request.args) > 1 and request.args[1] in db.tables:
return (db, request.args[1])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_query(request):
try:
return eval_in_global_env(request.vars.query)
except Exception:
return None
def query_by_table_type(tablename, db, request=request):
keyed = hasattr(db[tablename], '_primarykey')
if keyed:
firstkey = db[tablename][db[tablename]._primarykey[0]]
cond = '>0'
if firstkey.type in ['string', 'text']:
cond = '!=""'
qry = '%s.%s.%s%s' % (
request.args[0], request.args[1], firstkey.name, cond)
else:
qry = '%s.%s.id>0' % tuple(request.args[:2])
return qry
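# Illustrative example (assuming request.args == ['db', 'person']): a plain
# id-based table yields the query string 'db.person.id>0', while a keyed table
# whose first primary-key field is a string named 'code' would yield
# 'db.person.code!=""'.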
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
return dict(databases=databases,
disabled_sites=current.REDIS_CLIENT.smembers("disabled_retrieval"))
def disable_site():
sitename = request.vars.get("sitename", None)
if request.vars.get("addremove", None) == "on":
session.flash = "Retrieval enabled for " + sitename
current.REDIS_CLIENT.srem("disabled_retrieval", sitename)
else:
session.flash = "Retrieval disabled for " + sitename
current.REDIS_CLIENT.sadd("disabled_retrieval", sitename)
redirect(URL("appadmin", "index"))
return dict()
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
(db, table) = get_table(request)
form = SQLFORM(db[table], ignore_rw=ignore_rw)
if form.accepts(request.vars, session):
response.flash = T('new record inserted')
return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
import os
db = get_database(request)
return response.download(request, db)
def csv():
import gluon.contenttype
response.headers['Content-Type'] = \
gluon.contenttype.contenttype('.csv')
db = get_database(request)
query = get_query(request)
if not query:
return None
response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
% tuple(request.vars.query.split('.')[:2])
return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
table.import_from_csv_file(file)
def select():
import re
db = get_database(request)
dbname = request.args[0]
try:
is_imap = db._uri.startswith("imap://")
except (KeyError, AttributeError, TypeError):
is_imap = False
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
if request.vars.query:
match = regex.match(request.vars.query)
if match:
request.vars.query = '%s.%s.%s==%s' % (request.args[0],
match.group('table'), match.group('field'),
match.group('value'))
else:
request.vars.query = session.last_query
query = get_query(request)
if request.vars.start:
start = int(request.vars.start)
else:
start = 0
nrows = 0
step = 100
fields = []
if is_imap:
step = 3
stop = start + step
table = None
rows = []
orderby = request.vars.orderby
if orderby:
orderby = dbname + '.' + orderby
if orderby == session.last_orderby:
if orderby[0] == '~':
orderby = orderby[1:]
else:
orderby = '~' + orderby
session.last_orderby = orderby
session.last_query = request.vars.query
form = FORM(DIV(DIV(INPUT(_style='width:400px',
_name='query',
_id='query_inp',
_value=request.vars.query or '',
requires=IS_NOT_EMPTY(
error_message=T("Cannot be empty"))),
LABEL("Query:", _for='query_inp'),
_class="input-field col offset-s3 s6"),
_class="row"),
DIV(DIV(INPUT(_name='update_check',
_id='update_inp',
_type='checkbox',
value=False),
LABEL("Update", _for='update_inp'),
_class="col offset-s3 s6"),
_class="row"),
DIV(DIV(INPUT(_style='width:400px',
_name='update_fields',
_id='update_field_inp',
_value=request.vars.update_fields or ''),
LABEL("Update Fields:", _for='update_fields_inp'),
_class="input-field col offset-s3 s6"),
_class="row"),
DIV(DIV(INPUT(_name='delete_check',
_class='delete',
_id='delete_inp',
_type='checkbox',
value=False),
LABEL("Delete:", _for='delete_inp'),
_class="col offset-s3 s6"),
_class="row"),
DIV(DIV(INPUT(_type='submit',
_value=T('submit')),
_class="input-field col offset-s3 s6"),
_class="row"),
_action=URL(r=request, args=request.args),
_class="row col s12")
tb = None
if form.accepts(request.vars, formname=None):
regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
match = regex.match(form.vars.query.strip())
if match:
table = match.group('table')
try:
nrows = db(query, ignore_common_filters=True).count()
if form.vars.update_check and form.vars.update_fields:
db(query, ignore_common_filters=True).update(
**eval_in_global_env('dict(%s)' % form.vars.update_fields))
response.flash = T('%s %%{row} updated', nrows)
elif form.vars.delete_check:
db(query, ignore_common_filters=True).delete()
response.flash = T('%s %%{row} deleted', nrows)
nrows = db(query, ignore_common_filters=True).count()
if is_imap:
fields = [db[table][name] for name in
("id", "uid", "created", "to",
"sender", "subject")]
if orderby:
rows = db(query, ignore_common_filters=True).select(
*fields, limitby=(start, stop),
orderby=eval_in_global_env(orderby))
else:
rows = db(query, ignore_common_filters=True).select(
*fields, limitby=(start, stop))
except Exception, e:
import traceback
tb = traceback.format_exc()
(rows, nrows) = ([], 0)
response.flash = DIV(T('Invalid Query'), PRE(str(e)))
# begin handle upload csv
csv_table = table or request.vars.table
if csv_table:
formcsv = FORM(DIV(str(T('or import from csv file')) + " ", _class="row"),
DIV(DIV(DIV(SPAN("File"),
INPUT(_type='file', _name='csvfile'),
_class="btn"),
DIV(INPUT(_class="file-path", _type="text"),
_class="file-path-wrapper"),
_class="col offset-s4 s4 file-field input-field"),
_class="row"),
DIV(DIV(INPUT(_type='hidden', _value=csv_table, _name='table'),
INPUT(_type='submit', _value=T('import')),
_class="col offset-s4 s4"),
_class="row"),
_class="row center")
else:
formcsv = None
if formcsv and formcsv.process().accepted:
try:
import_csv(db[request.vars.table],
request.vars.csvfile.file)
response.flash = T('data uploaded')
except Exception, e:
response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
# end handle upload csv
return dict(
form=form,
table=table,
start=start,
stop=stop,
step=step,
nrows=nrows,
rows=rows,
query=request.vars.query,
formcsv=formcsv,
tb=tb
)
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
(db, table) = get_table(request)
keyed = hasattr(db[table], '_primarykey')
record = None
db[table]._common_filter = None
if keyed:
key = [f for f in request.vars if f in db[table]._primarykey]
if key:
record = db(db[table][key[0]] == request.vars[key[
0]]).select().first()
else:
record = db(db[table].id == request.args(
2)).select().first()
if not record:
qry = query_by_table_type(table, db)
session.flash = T('record does not exist')
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
if keyed:
for k in db[table]._primarykey:
db[table][k].writable = False
form = SQLFORM(
db[table], record, deletable=True, delete_label=T('Check to delete'),
ignore_rw=ignore_rw and not keyed,
linkto=URL('select',
args=request.args[:1]), upload=URL(r=request,
f='download', args=request.args[:1]))
if form.accepts(request.vars, session):
session.flash = T('Done !!')
qry = query_by_table_type(table, db)
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
return dict()
def ccache():
if is_gae:
form = FORM(
P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
else:
cache.ram.initialize()
cache.disk.initialize()
form = FORM(
P(TAG.BUTTON(
T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON(
T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON(
T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
session.flash = ""
if is_gae:
if request.vars.yes:
cache.ram.clear()
session.flash += T("Cache Cleared")
else:
clear_ram = False
clear_disk = False
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += T("Ram Cleared")
if clear_disk:
cache.disk.clear()
session.flash += T("Disk Cleared")
redirect(URL(r=request))
try:
from guppy import hpy
hp = hpy()
except ImportError:
hp = False
import shelve
import os
import copy
import time
import math
from gluon import portalocker
ram = {
'entries': 0,
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time(),
'keys': []
}
disk = copy.copy(ram)
total = copy.copy(ram)
disk['keys'] = []
total['keys'] = []
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
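    # illustrative note (not in the original): GetInHMS(3661) -> (1.0, 1.0, 1.0)
    # under Python 2's float-returning math.floor; the '%02d:%02d:%02d' format
    # in key_table below renders it as 01:01:01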
if is_gae:
gae_stats = cache.ram.client.get_stats()
try:
gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
(gae_stats['hits'] + gae_stats['misses']))
except ZeroDivisionError:
gae_stats['ratio'] = T("?")
gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
total.update(gae_stats)
else:
for key, value in cache.ram.storage.iteritems():
if isinstance(value, dict):
ram['hits'] = value['hit_total'] - value['misses']
ram['misses'] = value['misses']
try:
ram['ratio'] = ram['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
else:
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
ram['entries'] += 1
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
ram['keys'].append((key, GetInHMS(time.time() - value[0])))
for key in cache.disk.storage:
value = cache.disk.storage[key]
if isinstance(value, dict):
disk['hits'] = value['hit_total'] - value['misses']
disk['misses'] = value['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
disk['entries'] += 1
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
disk['keys'].append((key, GetInHMS(time.time() - value[0])))
ram_keys = ram.keys() # ['hits', 'objects', 'ratio', 'entries', 'keys', 'oldest', 'bytes', 'misses']
ram_keys.remove('ratio')
ram_keys.remove('oldest')
for key in ram_keys:
total[key] = ram[key] + disk[key]
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] +
total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
def key_table(keys):
return TABLE(
TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
*[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
**dict(_class='cache-keys',
_style="border-collapse: separate; border-spacing: .5em;"))
if not is_gae:
ram['keys'] = key_table(ram['keys'])
disk['keys'] = key_table(disk['keys'])
total['keys'] = key_table(total['keys'])
return dict(form=form, total=total,
ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
from gluon.html import TR, TD, TABLE, TAG
def FONT(*args, **kwargs):
return TAG.font(*args, **kwargs)
def types(field):
f_type = field.type
if not isinstance(f_type,str):
return ' '
elif f_type == 'string':
return field.length
elif f_type == 'id':
return B('pk')
elif f_type.startswith('reference') or \
f_type.startswith('list:reference'):
return B('fk')
else:
return ' '
    # This is horrible HTML, but it is the only kind graphviz understands
rows = []
cellpadding = 4
color = "#000000"
bgcolor = "#FFFFFF"
face = "Helvetica"
face_bold = "Helvetica Bold"
border = 0
rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
_colspan=3, _cellpadding=cellpadding,
_align="center", _bgcolor=color)))
for row in db[table]:
rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(row.type, _color=color, _face=face),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(types(row), _color=color, _face=face),
_align="center", _cellpadding=cellpadding,
_border=border)))
return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
_cellborder=0, _cellspacing=0)
).xml()
def bg_graph_model():
graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
subgraphs = dict()
for tablename in db.tables:
if hasattr(db[tablename],'_meta_graphmodel'):
meta_graphmodel = db[tablename]._meta_graphmodel
else:
meta_graphmodel = dict(group=request.application, color='#ECECEC')
group = meta_graphmodel['group'].replace(' ', '')
if not subgraphs.has_key(group):
subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
subgraphs[group]['tables'].append(tablename)
graph.add_node(tablename, name=tablename, shape='plaintext',
label=table_template(tablename))
for n, key in enumerate(subgraphs.iterkeys()):
graph.subgraph(nbunch=subgraphs[key]['tables'],
name='cluster%d' % n,
style='filled',
color=subgraphs[key]['meta']['color'],
label=subgraphs[key]['meta']['group'])
for tablename in db.tables:
for field in db[tablename]:
f_type = field.type
if isinstance(f_type,str) and (
f_type.startswith('reference') or
f_type.startswith('list:reference')):
referenced_table = f_type.split()[1].split('.')[0]
n1 = graph.get_node(tablename)
n2 = graph.get_node(referenced_table)
graph.add_edge(n1, n2, color="#4C4C4C", label='')
graph.layout()
if not request.args:
response.headers['Content-Type'] = 'image/png'
return graph.draw(format='png', prog='dot')
else:
response.headers['Content-Disposition']='attachment;filename=graph.%s'%request.args(0)
if request.args(0) == 'dot':
return graph.string()
else:
return graph.draw(format=request.args(0), prog='dot')
def graph_model():
return dict(databases=databases, pgv=pgv)
def manage():
tables = manager_action['tables']
if isinstance(tables[0], str):
db = manager_action.get('db', auth.db)
db = globals()[db] if isinstance(db, str) else db
tables = [db[table] for table in tables]
if request.args(0) == 'auth':
auth.table_user()._plural = T('Users')
auth.table_group()._plural = T('Roles')
auth.table_membership()._plural = T('Memberships')
auth.table_permission()._plural = T('Permissions')
if request.extension != 'load':
return dict(heading=manager_action.get('heading',
T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
tablenames=[table._tablename for table in tables],
labels=[table._plural.title() for table in tables])
table = tables[request.args(1, cast=int)]
formname = '%s_grid' % table._tablename
linked_tables = orderby = None
if request.args(0) == 'auth':
auth.table_group()._id.readable = \
auth.table_membership()._id.readable = \
auth.table_permission()._id.readable = False
auth.table_membership().user_id.label = T('User')
auth.table_membership().group_id.label = T('Role')
auth.table_permission().group_id.label = T('Role')
auth.table_permission().name.label = T('Permission')
if table == auth.table_user():
linked_tables=[auth.settings.table_membership_name]
elif table == auth.table_group():
orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
elif table == auth.table_permission():
orderby = 'group_id'
kwargs = dict(user_signature=True, maxtextlength=1000,
orderby=orderby, linked_tables=linked_tables)
smartgrid_args = manager_action.get('smartgrid_args', {})
kwargs.update(**smartgrid_args.get('DEFAULT', {}))
kwargs.update(**smartgrid_args.get(table._tablename, {}))
grid = SQLFORM.smartgrid(table,
args=request.args[:2],
formname=formname,
**kwargs)
return grid
def hooks():
import functools
import inspect
list_op=['_%s_%s' %(h,m) for h in ['before', 'after'] for m in ['insert','update','delete']]
tables=[]
with_build_it=False
for db_str in sorted(databases):
db = databases[db_str]
for t in db.tables:
method_hooks=[]
for op in list_op:
functions = []
for f in getattr(db[t], op):
if hasattr(f, '__call__'):
try:
if isinstance(f, (functools.partial)):
f = f.func
filename = inspect.getsourcefile(f)
details = {'funcname':f.__name__,
'filename':filename[len(request.folder):] if request.folder in filename else None,
'lineno': inspect.getsourcelines(f)[1]}
                            if details['filename']: # Built-in functions such as delete_uploaded_files are not editable
details['url'] = URL(a='admin',c='default',f='edit', args=[request['application'], details['filename']],vars={'lineno':details['lineno']})
if details['filename'] or with_build_it:
functions.append(details)
# compiled app and windows build don't support code inspection
except:
pass
if len(functions):
method_hooks.append({'name':op, 'functions':functions})
if len(method_hooks):
tables.append({'name':"%s.%s" % (db_str,t), 'slug': IS_SLUG()("%s.%s" % (db_str,t))[0], 'method_hooks':method_hooks})
# Render
ul_main = UL(_class='nav nav-list')
for t in tables:
ul_main.append(A(t['name'], _onclick="collapse('a_%s')" % t['slug']))
ul_t = UL(_class='nav nav-list', _id="a_%s" % t['slug'], _style='display:none')
for op in t['method_hooks']:
ul_t.append(LI (op['name']))
ul_t.append(UL([LI(A(f['funcname'], _class="editor_filelink", _href=f['url']if 'url' in f else None, **{'_data-lineno':f['lineno']-1})) for f in op['functions']]))
ul_main.append(ul_t)
return ul_main
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
import itertools
from time import sleep
sys.path.append("../common/tests")
from test_utils import *
from vnc_api.vnc_api import *
from device_api.juniper_common_xsd import *
from device_manager.dm_utils import DMUtils
from test_common import *
from test_dm_common import *
from test_dm_utils import FakeDeviceConnect
#
# All BGP related DM test cases should go here
#
class TestBgpDM(TestCommonDM):
def __init__(self, *args, **kwargs):
super(TestBgpDM, self).__init__(*args, **kwargs)
@retries(5, hook=retry_exc_handler)
def check_dm_bgp_hold_time_config(self, bgp_type, hold_time):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(hold_time, [gp.get_hold_time() for gp in bgp_groups or []])
return
# test hold time configuration
def verify_dm_bgp_hold_time_config(self):
bgp_router, pr = self.create_router('router' + self.id() , '1.1.1.1',
product=self.product)
self.set_hold_time(bgp_router, 100)
self._vnc_lib.bgp_router_update(bgp_router)
self.check_dm_bgp_hold_time_config('internal', 100)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
@retries(5, hook=retry_exc_handler)
def check_dm_bgp_export_policy(self, product):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config)
for gp in bgp_groups or []:
if gp.get_type() == 'internal':
if 'qfx5' not in product:
self.assertEqual(gp.get_export(), DMUtils.make_ibgp_export_policy_name())
else:
self.assertIsNone(gp.get_export())
return
if gp.get_type() == 'external':
                self.assertNotEqual(gp.get_export(), DMUtils.make_ibgp_export_policy_name())
return
self.assertTrue(False)
return
# test iBgp export policy configuration
def verify_dm_bgp_export_policy(self):
bgp_router, pr = self.create_router('router' + self.id() , '1.1.1.1',
product=self.product)
self.check_dm_bgp_export_policy(self.product)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
    # Test Auth Configuration
@retries(5, hook=retry_exc_handler)
def check_bgp_auth_config(self, bgp_type, key):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(key, [gp.get_authentication_key() for gp in bgp_groups or []])
return
@retries(5, hook=retry_exc_handler)
def check_bgp_auth_neighbour_config(self, bgp_type, key):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(key, [neigh.get_authentication_key() for neigh in
itertools.chain.from_iterable([gp.get_neighbor() for gp in bgp_groups or []])])
return
# test bgp auth configuration
def verify_dm_md5_auth_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
self.set_auth_data(bgp_router, 0, 'bgppswd', 'md5')
self._vnc_lib.bgp_router_update(bgp_router)
self.check_bgp_auth_config('internal', 'bgppswd')
#bgp peering, auth validate
bgp_router_peer, _ = self.create_router('router2' + self.id() , '20.2.2.2', product=self.product, ignore_pr=True)
families = AddressFamilies(['route-target', 'inet-vpn', 'e-vpn'])
auth = AuthenticationData('md5', [AuthenticationKeyItem(0, 'bgppswd-neigh')])
bgp_sess_attrs = [BgpSessionAttributes(address_families=families, auth_data=auth)]
bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)]
bgp_router.add_bgp_router(bgp_router_peer, BgpPeeringAttributes(session=bgp_sessions))
self._vnc_lib.bgp_router_update(bgp_router)
self.check_bgp_auth_config('internal', 'bgppswd')
self.check_bgp_auth_config('external', 'bgppswd')
self.check_bgp_auth_neighbour_config('external', 'bgppswd-neigh')
bgp_peer_fq = bgp_router_peer.get_fq_name()
self.delete_routers(bgp_router_peer)
self.wait_for_routers_delete(bgp_peer_fq)
bgp_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_fq, pr_fq)
#end test_dm_md5_auth_config
@retries(5, hook=retry_exc_handler)
def check_lo0_ip_config(self, ip_check=''):
config = FakeDeviceConnect.get_xml_config()
intfs = self.get_interfaces(config, "lo0")
if ip_check:
ips = self.get_ip_list(intfs[0], "v4", "0")
self.assertEqual(ip_check, ips[0])
else:
if not intfs or not self.get_ip_list(intfs[0], "v4", "0"):
return
self.assertTrue(False)
return
# end check_lo0_ip_config
@retries(5, hook=retry_exc_handler)
def check_tunnel_source_ip(self, ip_check='', look_for=True):
config = FakeDeviceConnect.get_xml_config()
tunnels = self.get_dynamic_tunnels(config) or DynamicTunnels()
if look_for:
self.assertIn(ip_check, [tunnel.source_address
for tunnel in tunnels.get_dynamic_tunnel()])
else:
self.assertNotIn(ip_check, [tunnel.source_address
for tunnel in tunnels.get_dynamic_tunnel()])
return
# end check_tunnel_source_ip
# test loopback ip configuration
def verify_dm_lo0_ip_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
self.check_lo0_ip_config()
tunnels_needed = True
if 'qfx5' in self.product:
tunnels_needed = False
pr.set_physical_router_loopback_ip("10.10.0.1")
self._vnc_lib.physical_router_update(pr)
self.check_lo0_ip_config("10.10.0.1/32")
self.check_tunnel_source_ip("10.10.0.1", tunnels_needed)
pr.set_physical_router_dataplane_ip("20.20.0.1")
self._vnc_lib.physical_router_update(pr)
self.check_tunnel_source_ip("20.20.0.1", tunnels_needed)
self.check_lo0_ip_config("10.10.0.1/32")
pr.set_physical_router_loopback_ip('')
self._vnc_lib.physical_router_update(pr)
self.check_lo0_ip_config()
self.check_tunnel_source_ip("20.20.0.1", tunnels_needed)
pr.set_physical_router_dataplane_ip('')
self._vnc_lib.physical_router_update(pr)
self.check_tunnel_source_ip("10.10.0.1", False)
self.check_tunnel_source_ip("20.20.0.1", False)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
@retries(5, hook=retry_exc_handler)
def check_router_id_config(self, ip_check=''):
config = FakeDeviceConnect.get_xml_config()
ri_opts = config.get_routing_options()
self.assertIsNotNone(ri_opts)
self.assertEqual(ip_check, ri_opts.get_router_id())
# end check_router_id_config
# test router id configuration
def verify_dm_router_id_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
# defaults to bgp address
self.check_router_id_config('1.1.1.1')
params = self.get_obj_param(bgp_router, 'bgp_router_parameters') or BgpRouterParams()
self.set_obj_param(params, 'identifier', '5.5.5.5')
self.set_obj_param(bgp_router, 'bgp_router_parameters', params)
self._vnc_lib.bgp_router_update(bgp_router)
        # if identifier is set, use it to configure router-id
self.check_router_id_config('5.5.5.5')
# cleanup
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
    # end verify_dm_router_id_config
# end TestBgpDM
|
|
import collections
import json as jsonlib
import random
import re
from operator import attrgetter
from urlparse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms import CheckboxInput
from django.utils import translation
from django.utils.encoding import smart_unicode
from django.template import defaultfilters
import caching.base as caching
import jinja2
import six
from babel.support import Format
from jingo import register, env
# Needed to make sure our own |f filter overrides the one from jingo.
from jingo import helpers # noqa
from tower import ugettext as _, strip_whitespace
import amo
from amo import utils, urlresolvers
from constants.licenses import PERSONA_LICENSES_IDS
from translations.query import order_by_translation
from translations.helpers import truncate
from versions.models import License
# Yanking filters from Django.
register.filter(defaultfilters.slugify)
# Registering some utils as filters:
urlparams = register.filter(utils.urlparams)
register.filter(utils.epoch)
register.filter(utils.isotime)
register.function(dict)
register.function(utils.randslice)
@register.filter
def link(item):
html = """<a href="%s">%s</a>""" % (item.get_url_path(),
jinja2.escape(item.name))
return jinja2.Markup(html)
@register.filter
def xssafe(value):
"""
Like |safe but for strings with interpolation.
By using |xssafe you assert that you have written tests proving an
XSS can't happen here.
"""
return jinja2.Markup(value)
@register.filter
def babel_datetime(dt, format='medium'):
return _get_format().datetime(dt, format=format) if dt else ''
@register.filter
def babel_date(date, format='medium'):
return _get_format().date(date, format=format) if date else ''
@register.function
def locale_url(url):
"""Take a URL and give it the locale prefix."""
prefixer = urlresolvers.get_url_prefix()
script = prefixer.request.META['SCRIPT_NAME']
parts = [script, prefixer.locale, url.lstrip('/')]
return '/'.join(parts)
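# A usage sketch for locale_url() above (hypothetical request values): with
# SCRIPT_NAME '' and locale 'en-US', locale_url('/firefox/') returns
# '/en-US/firefox/'.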
@register.inclusion_tag('includes/refinements.html')
@jinja2.contextfunction
def refinements(context, items, title, thing):
d = dict(context.items())
d.update(items=items, title=title, thing=thing)
return d
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
add_prefix = kwargs.pop('add_prefix', True)
host = kwargs.pop('host', '')
src = kwargs.pop('src', '')
url = '%s%s' % (host, urlresolvers.reverse(viewname,
args=args,
kwargs=kwargs,
add_prefix=add_prefix))
if src:
url = urlparams(url, src=src)
return url
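# A usage sketch for url() above (the view name and src are illustrative):
#   url('addons.versions', 'some-slug', src='search')
# reverses the view with 'some-slug' as a positional arg and then appends
# '?src=search' via urlparams.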
@register.function
def shared_url(viewname, addon, *args, **kwargs):
"""
Helper specifically for addons or apps to get urls. Requires
the viewname, addon (or app). It's assumed that we'll pass the
slug into the args and we'll look up the right slug (addon or app)
for you.
Viewname should be a normal view eg: `addons.details` or `apps.details`.
`addons.details` becomes `apps.details`, if we've passed an app, etc.
A viewname such as `details` becomes `addons.details` or `apps.details`,
depending on the add-on type.
"""
namespace, dot, latter = viewname.partition('.')
    # If `viewname` is namespaced with `addons.` or `apps.`, strip the
    # namespace so it can be re-prefixed uniformly below.
if namespace in ('addons', 'apps'):
viewname = latter
    # Then slap the `addons.` prefix in front of the bare `viewname`.
viewname = '.'.join(['addons', viewname])
return url(viewname, *([addon.slug] + list(args)), **kwargs)
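# For example (hypothetical addon with slug 'my-addon'):
#   shared_url('details', addon)      ->  url('addons.details', 'my-addon')
#   shared_url('apps.details', addon) ->  the same, after normalization.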
@register.function
def services_url(viewname, *args, **kwargs):
"""Helper for ``url`` with host=SERVICES_URL."""
kwargs.update({'host': settings.SERVICES_URL})
return url(viewname, *args, **kwargs)
@register.filter
def paginator(pager):
return Paginator(pager).render()
@register.filter
def impala_paginator(pager):
t = env.get_template('amo/impala/paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@register.filter
def mobile_paginator(pager):
t = env.get_template('amo/mobile/paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@register.filter
def mobile_impala_paginator(pager):
# Impala-style paginator that is easier to mobilefy.
t = env.get_template('amo/mobile/impala_paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@register.function
def is_mobile(app):
return app == amo.MOBILE
@register.function
def sidebar(app):
"""Populates the sidebar with (categories, types)."""
from addons.models import Category
if app is None:
return [], []
# We muck with query to make order_by and extra_order_by play nice.
q = Category.objects.filter(application=app.id, weight__gte=0,
type=amo.ADDON_EXTENSION)
categories = order_by_translation(q, 'name')
categories.query.extra_order_by.insert(0, 'weight')
Type = collections.namedtuple('Type', 'id name url')
base = urlresolvers.reverse('home')
types = [Type(99, _('Collections'), base + 'collections/')]
shown_types = {
amo.ADDON_PERSONA: urlresolvers.reverse('browse.personas'),
amo.ADDON_DICT: urlresolvers.reverse('browse.language-tools'),
amo.ADDON_SEARCH: urlresolvers.reverse('browse.search-tools'),
amo.ADDON_THEME: urlresolvers.reverse('browse.themes'),
}
titles = dict(amo.ADDON_TYPES,
**{amo.ADDON_DICT: _('Dictionaries & Language Packs')})
for type_, url in shown_types.items():
if type_ in app.types:
types.append(Type(type_, titles[type_], url))
return categories, sorted(types, key=lambda x: x.name)
class Paginator(object):
def __init__(self, pager):
self.pager = pager
self.max = 10
self.span = (self.max - 1) / 2
self.page = pager.number
self.num_pages = pager.paginator.num_pages
self.count = pager.paginator.count
pager.page_range = self.range()
pager.dotted_upper = self.num_pages not in pager.page_range
pager.dotted_lower = 1 not in pager.page_range
def range(self):
"""Return a list of page numbers to show in the paginator."""
page, total, span = self.page, self.num_pages, self.span
if total < self.max:
lower, upper = 0, total
elif page < span + 1:
lower, upper = 0, span * 2
elif page > total - span:
lower, upper = total - span * 2, total
else:
lower, upper = page - span, page + span - 1
return range(max(lower + 1, 1), min(total, upper) + 1)
def render(self):
c = {'pager': self.pager, 'num_pages': self.num_pages,
'count': self.count}
t = env.get_template('amo/paginator.html').render(c)
return jinja2.Markup(t)
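# For example (hypothetical pager): on page 7 of 20, Paginator.range() above
# yields pages 4 through 10 -- a window of at most self.max pages centred
# near the current page, with dotted_lower/dotted_upper flagging that page 1
# and the last page fall outside the window.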
def _get_format():
lang = translation.get_language()
return Format(utils.get_locale_from_lang(lang))
@register.filter
def numberfmt(num, format=None):
return _get_format().decimal(num, format)
@register.filter
def currencyfmt(num, currency):
if num is None:
return ''
return _get_format().currency(num, currency)
def page_name(app=None):
"""Determine the correct page name for the given app (or no app)."""
if app:
return _(u'Add-ons for {0}').format(app.pretty)
else:
return _('Add-ons')
@register.function
@jinja2.contextfunction
def login_link(context):
next = context['request'].path
qs = context['request'].GET.urlencode()
if qs:
next += '?' + qs
l = urlparams(urlresolvers.reverse('users.login'), to=next)
return l
@register.function
@jinja2.contextfunction
def page_title(context, title):
title = smart_unicode(title)
base_title = page_name(context['request'].APP)
return u'%s :: %s' % (title, base_title)
@register.function
@jinja2.contextfunction
def breadcrumbs(context, items=list(), add_default=True, crumb_size=40):
"""
    Show a list of breadcrumbs. If url is None, it won't be a link.
Accepts: [(url, label)]
"""
if add_default:
app = context['request'].APP
crumbs = [(urlresolvers.reverse('home'), page_name(app))]
else:
crumbs = []
# add user-defined breadcrumbs
if items:
try:
crumbs += items
except TypeError:
crumbs.append(items)
crumbs = [(url, truncate(label, crumb_size)) for (url, label) in crumbs]
c = {'breadcrumbs': crumbs}
t = env.get_template('amo/breadcrumbs.html').render(c)
return jinja2.Markup(t)
@register.function
@jinja2.contextfunction
def impala_breadcrumbs(context, items=list(), add_default=True, crumb_size=40):
"""
    Show a list of breadcrumbs. If url is None, it won't be a link.
Accepts: [(url, label)]
"""
if add_default:
base_title = page_name(context['request'].APP)
crumbs = [(urlresolvers.reverse('home'), base_title)]
else:
crumbs = []
# add user-defined breadcrumbs
if items:
try:
crumbs += items
except TypeError:
crumbs.append(items)
crumbs = [(url, truncate(label, crumb_size)) for (url, label) in crumbs]
c = {'breadcrumbs': crumbs, 'has_home': add_default}
t = env.get_template('amo/impala/breadcrumbs.html').render(c)
return jinja2.Markup(t)
@register.filter
def json(s):
return jsonlib.dumps(s)
@register.filter
def absolutify(url, site=None):
"""Takes a URL and prepends the SITE_URL"""
if url.startswith('http'):
return url
else:
return urljoin(site or settings.SITE_URL, url)
@register.filter
def strip_controls(s):
"""
Strips control characters from a string.
"""
# Translation table of control characters.
control_trans = dict((n, None) for n in xrange(32) if n not in [10, 13])
rv = unicode(s).translate(control_trans)
return jinja2.Markup(rv) if isinstance(s, jinja2.Markup) else rv
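# For example: strip_controls(u'a\x00b\ncd') returns u'ab\ncd' -- the NUL
# byte is dropped while newline (10) and carriage return (13) survive.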
@register.filter
def strip_html(s, just_kidding=False):
"""Strips HTML. Confirm lets us opt out easily."""
if just_kidding:
return s
if not s:
return ''
else:
s = re.sub(r'<.*?>', '', smart_unicode(s, errors='ignore'))
return re.sub(r'<.*?>', '', s)
@register.filter
def external_url(url):
"""Bounce a URL off outgoing.mozilla.org."""
return urlresolvers.get_outgoing_url(unicode(url))
@register.filter
def shuffle(sequence):
"""Shuffle a sequence."""
random.shuffle(sequence)
return sequence
@register.function
def license_link(license):
"""Link to a code license, including icon where applicable."""
# If passed in an integer, try to look up the License.
if isinstance(license, (long, int)):
if license in PERSONA_LICENSES_IDS:
# Grab built-in license.
license = PERSONA_LICENSES_IDS[license]
else:
# Grab custom license.
license = License.objects.filter(id=license)
if not license.exists():
return ''
license = license[0]
elif not license:
return ''
if not getattr(license, 'builtin', True):
return _('Custom License')
t = env.get_template('amo/license_link.html').render({'license': license})
return jinja2.Markup(t)
@register.function
def field(field, label=None, **attrs):
if label is not None:
field.label = label
# HTML from Django is already escaped.
return jinja2.Markup(u'%s<p>%s%s</p>' %
(field.errors, field.label_tag(),
field.as_widget(attrs=attrs)))
@register.inclusion_tag('amo/category-arrow.html')
@jinja2.contextfunction
def category_arrow(context, key, prefix):
d = dict(context.items())
d.update(key=key, prefix=prefix)
return d
@register.filter
def timesince(time):
if not time:
return u''
ago = defaultfilters.timesince(time)
# L10n: relative time in the past, like '4 days ago'
return _(u'{0} ago').format(ago)
@register.inclusion_tag('amo/recaptcha.html')
@jinja2.contextfunction
def recaptcha(context, form):
d = dict(context.items())
d.update(form=form)
return d
@register.filter
def is_choice_field(value):
try:
return isinstance(value.field.widget, CheckboxInput)
except AttributeError:
pass
@register.inclusion_tag('amo/mobile/sort_by.html')
def mobile_sort_by(base_url, options=None, selected=None, extra_sort_opts=None,
search_filter=None):
if search_filter:
selected = search_filter.field
options = search_filter.opts
if hasattr(search_filter, 'extras'):
options += search_filter.extras
if extra_sort_opts:
options_dict = dict(options + extra_sort_opts)
else:
options_dict = dict(options)
if selected in options_dict:
current = options_dict[selected]
else:
selected, current = options[0] # Default to the first option.
return locals()
@register.function
@jinja2.contextfunction
def media(context, url, key='MEDIA_URL'):
"""Get a MEDIA_URL link with a cache buster querystring."""
if 'BUILD_ID' in context:
build = context['BUILD_ID']
else:
if url.endswith('.js'):
build = context['BUILD_ID_JS']
elif url.endswith('.css'):
build = context['BUILD_ID_CSS']
else:
build = context['BUILD_ID_IMG']
return urljoin(context[key], utils.urlparams(url, b=build))
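# A usage sketch for media() above (hypothetical context): media(ctx,
# 'js/init.js') returns urljoin(MEDIA_URL, 'js/init.js?b=<BUILD_ID_JS>'),
# so the build id acts as a cache-busting querystring.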
@register.function
@jinja2.contextfunction
def static(context, url):
"""Get a STATIC_URL link with a cache buster querystring."""
return media(context, url, 'STATIC_URL')
@register.function
@jinja2.evalcontextfunction
def attrs(ctx, *args, **kw):
return jinja2.filters.do_xmlattr(ctx, dict(*args, **kw))
@register.function
@jinja2.contextfunction
def side_nav(context, addon_type, category=None):
app = context['request'].APP.id
cat = str(category.id) if category else 'all'
return caching.cached(lambda: _side_nav(context, addon_type, category),
'side-nav-%s-%s-%s' % (app, addon_type, cat))
def _side_nav(context, addon_type, cat):
# Prevent helpers generating circular imports.
from addons.models import Category, AddonType
request = context['request']
qs = Category.objects.filter(weight__gte=0)
if addon_type != amo.ADDON_PERSONA:
qs = qs.filter(application=request.APP.id)
sort_key = attrgetter('weight', 'name')
categories = sorted(qs.filter(type=addon_type), key=sort_key)
if cat:
base_url = cat.get_url_path()
else:
base_url = AddonType(addon_type).get_url_path()
ctx = dict(request=request, base_url=base_url, categories=categories,
addon_type=addon_type, amo=amo)
return jinja2.Markup(env.get_template('amo/side_nav.html').render(ctx))
@register.function
@jinja2.contextfunction
def site_nav(context):
app = context['request'].APP.id
return caching.cached(lambda: _site_nav(context), 'site-nav-%s' % app)
def _site_nav(context):
# Prevent helpers from generating circular imports.
from addons.models import Category
request = context['request']
sorted_cats = lambda qs: sorted(qs, key=attrgetter('weight', 'name'))
extensions = Category.objects.filter(application=request.APP.id,
weight__gte=0, type=amo.ADDON_EXTENSION)
personas = Category.objects.filter(weight__gte=0, type=amo.ADDON_PERSONA)
ctx = dict(request=request, amo=amo,
extensions=sorted_cats(extensions),
personas=sorted_cats(personas))
return jinja2.Markup(env.get_template('amo/site_nav.html').render(ctx))
@register.function
def loc(s):
"""A noop function for strings that are not ready to be localized."""
return strip_whitespace(s)
@register.function
def site_event_type(type):
return amo.SITE_EVENT_CHOICES[type]
@register.function
@jinja2.contextfunction
def remora_url(context, url, lang=None, app=None, prefix=''):
"""Wrapper for urlresolvers.remora_url"""
if lang is None:
_lang = context['LANG']
if _lang:
lang = translation.to_locale(_lang).replace('_', '-')
if app is None:
try:
app = context['APP'].short
except (AttributeError, KeyError):
pass
return urlresolvers.remora_url(url=url, lang=lang, app=app, prefix=prefix)
@register.function
@jinja2.contextfunction
def hasOneToOne(context, obj, attr):
try:
getattr(obj, attr)
return True
except ObjectDoesNotExist:
return False
@register.function
def no_results_amo():
# This prints a "No results found" message. That's all. Carry on.
t = env.get_template('amo/no_results.html').render()
return jinja2.Markup(t)
@register.filter
def f(string, *args, **kwargs):
"""This overrides jingo.helpers.f to convert input to unicode if needed.
This is needed because of
https://github.com/jbalogh/jingo/pull/54#issuecomment-36728948
"""
if not isinstance(string, six.text_type):
string = six.text_type(string)
return string.format(*args, **kwargs)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
  Returns:
    Scalar tensor containing the total sum over all elements of `tensors`.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
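# A minimal usage sketch (illustrative only; the tensors below are
# hypothetical and this helper is not part of the original module):
def _reduce_sum_n_example():
  """Returns a scalar tensor equal to (1 + 2) + (3 + 4) = 10.0."""
  a = ops.convert_to_tensor([1.0, 2.0])
  b = ops.convert_to_tensor([[3.0], [4.0]])
  # Each tensor is first reduced to a scalar, then the scalars are added.
  return reduce_sum_n([a, b])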
@deprecated(None,
"Please switch to tf.confusion_matrix.remove_squeezable_dimensions. Note "
"that order of the inputs and ouputs of labels and predictions have also "
"been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
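# A minimal usage sketch (illustrative only; not part of the original
# module): labels has one extra trailing dim of size 1, so it is squeezed.
def _remove_squeezable_dimensions_example():
  predictions = array_ops.zeros([4, 3])
  labels = array_ops.zeros([4, 3, 1])
  # Static ranks differ by -1, so labels comes back with shape [4, 3].
  return remove_squeezable_dimensions(predictions, labels)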
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
    `tensor`, possibly with a dependent assert operation added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
  If tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
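# A minimal usage sketch (illustrative only; not part of the original
# module): a fully defined static shape is checked immediately.
def _with_shape_example():
  tensor = array_ops.ones([2, 3])
  # Returns the tensor unchanged; an incompatible expected shape such as
  # [3, 2] would raise ValueError here without adding any graph ops.
  return with_shape([2, 3], tensor)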
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
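# For example (illustrative): assert_scalar_int(ops.convert_to_tensor(3))
# returns the int32 tensor for chaining, while a float input raises
# ValueError and a non-scalar input fails the assert_scalar check.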
|
|
from __future__ import division
from __evp_system__ import *
# ============================================================= #
# SIMULATION META PARAMETERS, VARIABLES #
# ============================================================= #
ARGS = None
RESTART_WITH_ITER = None
RESTART_FROM_TIME = None
RUN_TIME_ITER = None
MAX_MOLS_ITER = None
MAX_ITER = None
KEEP_EVERY_NTH_ITER = None
RESTART_FROM_MOLS = None
MOLS_TARGET = None
BULK_TARGET = None
ON_CLUSTER = None
RUN = None
GRO = None
TOP = None
NDX = None
CTR = None
HST = None
SYSTEM_DIR = None
REQUIRED_DIRS = None
# ============================================================= #
SIM_TIME = None
MOL_COUNT = None
BULK_SIZE = None
SIM_ITER = None
EVO_STEP = None
EVO_STEP_CMDS = None
def set_globals(arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9, arg10,
arg11, arg12, arg13, arg14, arg15,
arg16, arg17, arg18, arg19, arg20,
arg21, arg22, arg23, arg24):
global ARGS
global RESTART_WITH_ITER
global RESTART_FROM_TIME
global RUN_TIME_ITER
global MAX_MOLS_ITER
global MAX_ITER
global KEEP_EVERY_NTH_ITER
global RESTART_FROM_MOLS
global MOLS_TARGET
global BULK_TARGET
global ON_CLUSTER
global RUN
global GRO
global TOP
global NDX
global CTR
global HST
global SYSTEM_DIR
global REQUIRED_DIRS
global SIM_TIME
global MOL_COUNT
global BULK_SIZE
global SIM_ITER
global EVO_STEP
global EVO_STEP_CMDS
ARGS = arg23
RESTART_WITH_ITER = arg1
RESTART_FROM_TIME = arg2
RUN_TIME_ITER = arg3
MAX_MOLS_ITER = arg4
MAX_ITER = arg5
KEEP_EVERY_NTH_ITER = arg22
RESTART_FROM_MOLS = arg6
MOLS_TARGET = arg7
BULK_TARGET = arg24
ON_CLUSTER = arg8
RUN = arg9
GRO = arg10
TOP = arg11
NDX = arg12
CTR = arg13
HST = arg14
SYSTEM_DIR = arg15
REQUIRED_DIRS = arg16
SIM_TIME = arg17
MOL_COUNT = arg18
BULK_SIZE = 0
SIM_ITER = arg19
EVO_STEP = arg20
EVO_STEP_CMDS = arg21
def clean_if_applicable(prefix='ITER_'):
global EVO_STEP
global KEEP_EVERY_NTH_ITER
    if EVO_STEP % KEEP_EVERY_NTH_ITER == 0 or True: # 'or True' currently forces cleanup on every step
folders = [ f for f in os.listdir('./') if prefix in f ]
for folder in folders:
sim_iter = int(folder[5:])
if sim_iter % KEEP_EVERY_NTH_ITER == 0:
pass
elif EVO_STEP - sim_iter <= 5:
pass
else:
exe("rm -r ./%1s" % folder)
return True
def obtain_status(gro, top, ndx, ctr, hst, iter_prefix='ITER_'):
global ARGS
next_iter, mols_target, mols_current, iter_time = \
autorestart_find_next_iter(gro, top, ndx, ctr, hst, iter_prefix,
verbose=False)
latest_iter = next_iter-1
logfile = None
rootfiles = [ item for item in os.listdir('./') if not os.path.isdir(item) ]
for rootfile in rootfiles:
if 'log' in rootfile:
logfile = rootfile
break
pid = -1
root = os.getcwd().split('/')[-1]
tag = "___"
    if logfile is not None:
# Retrieve PID
pid = convert_os_cmd(cmd = 'cat %s | grep PID | tail -n 1' % logfile, colidx=-1, typ=int)
is_active = not os.system('ps caux | grep %d > /dev/null' % pid)
if not is_active: pid = '-----'
# Retrieve tag
tag = convert_os_cmd(cmd = 'cat %s | grep "ID tag" | tail -n 1' % logfile, colidx=-1, typ=str)
# Calculate progress (# evaporated / # targeted)
count_current_total = 0
count_target_total = 0
for current,target in zip(mols_current,mols_target):
sp1 = current.split(':')
sp2 = target.split(':')
molname = sp1[0]
assert sp1[0] == sp2[0]
count_current = int(sp1[1])
count_target = int(sp2[1])
count_current_total += count_current
count_target_total += count_target
prog = 100*float(count_current_total)/count_target_total
# Bulk size
os.chdir('%s%d' % (iter_prefix, latest_iter))
system = System(gro, top, ndx, ctr, hst, ARGS, verbose=False)
z0,z1 = system.estimate_bulk_z()
dz = z1-z0
os.chdir('../')
# Queue ID
try:
os.chdir('%s%d' % (iter_prefix, latest_iter+1))
qid = retrieve_qid()
os.chdir('../')
except OSError:
qid = '------'
print "ROOT= %-20s TAG= %-20s PID= %-5s QID= %s TIME= %-5d ITER= %-4d PROG= %3.0f%% EVAP= %4d BULK= %2.1fnm" % \
(root, tag[:-1], pid, qid, iter_time, next_iter, prog, count_current_total, dz)
    if False: # TODO: expose this per-molecule breakdown via a verbose flag
for current,target in zip(mols_current,mols_target):
sp1 = current.split(':')
sp2 = target.split(':')
molname = sp1[0]
assert sp1[0] == sp2[0]
count_current = int(sp1[1])
count_target = int(sp2[1])
if count_target == 0:
print "\t%-15s %4d/%-4d" % \
(molname, count_current, count_target)
else:
print "\t%-15s %4d/%-4d -> %2.0f%% complete" % \
(molname, count_current, count_target,
100*float(count_current)/count_target)
sys.exit(0)
assert False
return
def autorestart_find_next_iter(gro, top, ndx, ctr, hst,
iter_prefix='ITER_', verbose=True):
if verbose: print "Auto-restart from root =", os.getcwd()
iters = get_dirs(regex=iter_prefix)
if iters == []:
return None,None,None,None
iter_ids = []
for iteration in iters:
iter_ids.append(int(iteration.split('_')[-1]))
iter_ids.sort()
if verbose: print "Snapshots available for restart: # = %d" % len(iter_ids)
iter_slot = -1
latest_iter = iter_ids[iter_slot]
mols = []
mols_current = []
iter_time = 0
while latest_iter > 0:
latest_iter = iter_ids[iter_slot]
latest_dir = '%s%d' % (iter_prefix,latest_iter)
os.chdir(latest_dir)
files = os.listdir('./')
try:
assert gro in files
assert top in files
assert ndx in files
assert ctr in files
assert hst in files
# Check for completeness of gro-file
intt = open(gro,'r')
lns = intt.readlines()
lenlns = len(lns)
intt.close()
if lenlns < 3:
if verbose: print "%s in %s is broken" % (gro,latest_iter)
assert False
# Check for completeness in log file
intt = open(hst, 'r')
iter_found = False
for ln in intt.readlines():
ln = ln.replace(',',' ')
sp = ln.split()
if sp == []:
continue
elif sp[0] == 'Log':
iter_nr = int(sp[-1])
iter_time = float(sp[3])
if iter_found:
break
if iter_nr == latest_iter:
iter_found = True
if verbose: print "Found log entry for iteration %d in %s" % (latest_iter,hst)
else: pass
elif sp[0] != 'Log' and iter_found:
mol_name = sp[0]
mol_target = int(sp[-1])
mol_current = int(sp[-3])
mols.append('%s:%d' % (mol_name,mol_target))
mols_current.append('%s:%d' % (mol_name,mol_current))
elif sp[0] != 'Log':
pass
                else: raise ValueError('Malformed line in hist-file: %s' % ln)
if not iter_found:
assert False # Iteration not found in log-file (*.hist)
break
except AssertionError:
if verbose: print "Candidate %s not complete. Continue backwards ..." % latest_dir
iter_slot = iter_slot - 1
os.chdir('../')
os.chdir('../')
if verbose: print "Latest iteration = %d in directory '%s'" % (latest_iter,latest_dir)
return latest_iter+1, mols, mols_current, iter_time
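# For reference, a 'Log' line as written by log_iter() below looks like
#   Log t =  1200, iter (since restart) =    3, iter (since start) =   12
# so after commas are replaced with spaces, sp[3] is the simulation time and
# sp[-1] is the iteration number that autorestart_find_next_iter() parses.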
def originate(in_dir):
global GRO; global TOP; global NDX; global CTR; global HST;
global RESTART_WITH_ITER
global SYSTEM_DIR
global REQUIRED_DIRS
global EVO_STEP
global SIM_TIME
global MOL_COUNT
global MOLS_TARGET
if RESTART_WITH_ITER == 1:
assemble_dir = './%1s0' % in_dir
os.chdir(SYSTEM_DIR)
print "="*80
print "Originating system, step = %1d:" % EVO_STEP
for key in MOL_COUNT.keys():
print "... %-5s %5d/%1d" % (key, MOL_COUNT[key], MOLS_TARGET[key])
print "="*80
system = System(GRO,TOP,NDX,CTR,HST,ARGS)
#system.group_system()
#system.xy_density()
system.assemble_here('../'+assemble_dir)
del system
os.chdir('../')
for DIR in REQUIRED_DIRS:
exe('cp -r %1s %1s' % (DIR, assemble_dir))
else:
restart_dir = './%s%d' % (in_dir, RESTART_WITH_ITER-1)
print "Restarting from iteration %d using %s" % (RESTART_WITH_ITER, restart_dir)
os.chdir(restart_dir)
# Retrieve restart configuration from hist-file (HST)
# ... Set SIM_TIME, MOL_COUNT
intt = open(HST, 'r')
iter_found = False
for ln in intt.readlines():
ln = ln.replace(',',' ')
sp = ln.split()
if sp == []:
continue
elif sp[0] == 'Log':
iter_nr = int(sp[-1])
if iter_found: break
elif iter_nr == RESTART_WITH_ITER-1:
iter_found = True
print "Found log entry for iteration %d in %s" % (RESTART_WITH_ITER-1,HST)
SIM_TIME = float(sp[3])
print "... Restart from time t =", SIM_TIME
else: pass
elif sp[0] != 'Log':
if iter_found:
mol_name = sp[0]
mol_nr = int(sp[1])
RESTART_FROM_MOLS[mol_name] = mol_nr
MOL_COUNT[mol_name] = mol_nr
print "... Restart from # %s = %d" % (mol_name, mol_nr)
else: pass
else: assert False
if not iter_found:
assert False # Iteration not found in log-file (*.hist)
os.chdir('../')
return
def which_mol_to_evap():
global MOL_COUNT
global MOLS_TARGET
ratio = {}
ratios = []
for key in MOLS_TARGET.keys():
if MOLS_TARGET[key] == 0:
ratio[key] = 1.0
else:
ratio[key] = MOL_COUNT[key] / MOLS_TARGET[key]
ratios.append(ratio[key])
min_ratio = min(ratios)
for key in MOLS_TARGET.keys():
if ratio[key] == min_ratio:
print "Decided to evaporate %1s next." % key
return key
else:
pass
print "Could not decide which molecule to evaporate next."
return None
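# For example (hypothetical counts): with MOL_COUNT = {'SOL': 10, 'ION': 2}
# and MOLS_TARGET = {'SOL': 100, 'ION': 50}, the ratios are 0.1 and 0.04,
# so 'ION' -- the species furthest behind its target -- is evaporated next.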
def enough_mols():
global MOL_COUNT
global BULK_SIZE
global MOLS_TARGET
global BULK_TARGET
if BULK_SIZE > BULK_TARGET:
return True
for key in MOLS_TARGET.keys():
if MOL_COUNT[key] < MOLS_TARGET[key]:
return False
else:
pass
return True
def enough_iters():
global SIM_ITER
global MAX_ITER
return SIM_ITER >= MAX_ITER
def evolve(from_dir,to_dir, nr_evaps, t_in, t_run):
global GRO; global TOP; global NDX; global CTR; global HST;
global REQUIRED_DIRS
global MOL_COUNT
global BULK_SIZE
global MOLS_TARGET
global BULK_TARGET
global MAX_MOLS_ITER
global RUN_TIME_ITER
global SIM_TIME
global EVO_STEP
global EVO_STEP_CMDS
EVO_STEP += 1
    if nr_evaps is None:
        nr_evaps = MAX_MOLS_ITER
    if t_in is None:
        t_in = SIM_TIME
    if t_run is None:
        t_run = RUN_TIME_ITER
from_dir = from_dir + '%1d' % (EVO_STEP-1)
to_dir = to_dir + '%1d' % (EVO_STEP)
print "="*80
print "Evolving system, step = %1d:" % EVO_STEP
for key in MOL_COUNT.keys():
print "... %-15s %5d/%1d" % (key, MOL_COUNT[key], MOLS_TARGET[key])
print "="*80
os.chdir('./%1s' % from_dir)
for FREQ_CMD in EVO_STEP_CMDS:
freq = FREQ_CMD[0]
cmd = FREQ_CMD[1]
if EVO_STEP % freq == 0:
print "STEP %1d EXE %1s" % (EVO_STEP, cmd)
exe(cmd)
else:
pass
if 'topol.tpr' in os.listdir('./'):
print "Placing all atoms inside the periodic box to evaluate height profile."
exe('echo "0" | trjconv -f %1s -o %1s -pbc atom -ur tric > /dev/null 2> /dev/null' % (GRO,GRO))
else:
print "NOTE: No topol.tpr in directory. Make sure %1s is wrapped." % GRO
system = System(GRO,TOP,NDX,CTR,HST,ARGS)
system.set_time(t_in,t_run)
system.xy_density() # evaporate_mol() also does this automatically
for n in range(nr_evaps):
evap_mol = which_mol_to_evap()
        if evap_mol is None:
            assert False # No mol. name given
os.chdir('./EVAPORATOR_%1s' % evap_mol)
exe('python evaporate.py')
os.chdir('../')
if system.evaporate_mol(evap_mol):
MOL_COUNT[evap_mol] += 1
if enough_mols():
break
else:
break
system.auto_box()
system.group_system()
system.assemble_here('../%1s' % to_dir)
z0,z1 = system.estimate_bulk_z()
BULK_SIZE = z1-z0
if BULK_SIZE > BULK_TARGET:
print "Bulk size %1.3f exceeds %1.3f -> Enough molecules" % (BULK_SIZE,BULK_TARGET)
else:
print "Bulk size %1.3f <= %1.3f -> More molecules" % (BULK_SIZE,BULK_TARGET)
del system
os.chdir('../')
for DIR in REQUIRED_DIRS:
exe('cp -r %1s ./%1s' % (DIR, to_dir))
return True
def simulate(in_dir):
global RUN
global ON_CLUSTER
global SIM_TIME
global EVO_STEP
global RUN_TIME_ITER
in_dir = in_dir + '%1d' % EVO_STEP
os.chdir('./%1s' % in_dir)
# ... Exe grompp
exe('chmod +x mdp.sh')
exe_safe('./mdp.sh > /dev/null 2> grompp.out')
print "Grompp summary ..."
s1 = os.system("pcregrep -M 'NOTE(.)*\n(.)*\n' grompp.out")
s2 = os.system("pcregrep -M 'WARNING(.)*\n(.)*\n' grompp.out")
s3 = os.system("pcregrep -M 'ERROR(.)*\n(.)*\n' grompp.out")
print "Grompp complete",
if not s1: print "(see notes)",
if not s2: print "(see warnings)",
if not s3: print "(errors!)",
print "."
os.system("rm grompp.out")
if not 'topol.tpr' in os.listdir('./'):
print "Missing topol.tpr. Error in grompp? Abort."
return False
# ... Exe mdrun
if RUN:
if ON_CLUSTER:
# ... Check queue if specified
if ARGS.nicejob:
print "This is a nice job: Checking queue ..."
jobs_waiting = are_jobs_waiting(ARGS.username)
if jobs_waiting:
print "Job waits for queue to be cleared."
while jobs_waiting:
print "Sleep ..."
time.sleep(10)
jobs_waiting = are_jobs_waiting(ARGS.username)
print "No jobs waiting. Submit ..."
# ... Submit job
exe('qsub qmd.sh')
# ... Wait for mdrun
exists = monitor_dir_for_file('confout.gro', verbose = True, t_h = 144)
if not exists:
print "File confout.gro did not pop up in time.";
return False
else:
exe('chmod +x run.sh')
exe('./run.sh')
else:
exe('cp %1s confout.gro' % (GRO))
if not 'confout.gro' in os.listdir('./'):
print "Did GROMACS terminate correctly? Missing confout.gro. Abort."
sys.exit(1)
SIM_TIME += RUN_TIME_ITER
# ... Rename files
exe('mv %1s %1s_initial.gro' % (GRO,GRO[:-4]))
exe('mv confout.gro %1s' % (GRO))
os.chdir('../')
return True
def log_iter(in_dir):
global HST
global SIM_TIME
global EVO_STEP
global MOL_COUNT
global SIM_ITER
in_dir = in_dir + '%1d' % EVO_STEP
SIM_ITER += 1
os.chdir('./%1s' % in_dir)
outt = open(HST,'a')
outt.write('Log t = %5d, iter (since restart) = %4d, iter (since start) = %4d\n' % (SIM_TIME,SIM_ITER,EVO_STEP))
for key in MOLS_TARGET.keys():
outt.write(' %-5s %5d / %-5d\n' % (key, MOL_COUNT[key], MOLS_TARGET[key]))
os.chdir('../')
return True
|
|
"""
The outputs.py module represents all forms of output
from the Automater program, including every variation of
output file. Any addition to the Automater that brings
any other output requirement should be programmed in this module.
Class(es):
SiteDetailOutput -- Wrapper class around all functions that print output
from Automater, to include standard output and file system output.
Function(s):
No global exportable functions are defined.
Exception(s):
No exceptions exported.
"""
import csv
import socket
import re
from datetime import datetime
from operator import attrgetter
import json
class SiteDetailOutput(object):
"""
SiteDetailOutput provides the capability to output information
    to the screen, a text file, a comma-separated value file, or
a file formatted with html markup (readable by web browsers).
Public Method(s):
createOutputInfo
Instance variable(s):
    _listofsites - list storing the site results to report.
"""
def __init__(self,sitelist):
"""
Class constructor. Stores the incoming list of sites in the _listofsites list.
Argument(s):
sitelist -- list containing site result information to be printed.
Return value(s):
Nothing is returned from this Method.
"""
        self._listofsites = sitelist
@property
def ListOfSites(self):
"""
Checks instance variable _listofsites for content.
Returns _listofsites if it has content or None if it does not.
Argument(s):
No arguments are required.
Return value(s):
_listofsites -- list containing list of site results if variable contains data.
None -- if _listofsites is empty or not assigned.
Restriction(s):
This Method is tagged as a Property.
"""
if self._listofsites is None or len(self._listofsites) == 0:
return None
return self._listofsites
def createOutputInfo(self,parser):
"""
        Checks parser information and calls the correct print methods based on parser requirements.
Returns nothing.
Argument(s):
parser -- Parser object storing program input parameters used when program was run.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
self.PrintToScreen()
if parser.hasCEFOutFile():
self.PrintToCEFFile(parser.CEFOutFile)
if parser.hasTextOutFile():
self.PrintToTextFile(parser.TextOutFile)
if parser.hasHTMLOutFile():
self.PrintToHTMLFile(parser.HTMLOutFile)
if parser.hasCSVOutSet():
self.PrintToCSVFile(parser.CSVOutFile)
def PrintToScreen(self):
"""
Formats site information correctly and prints it to the user's standard output.
Returns nothing.
Argument(s):
No arguments are required.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite
                    for index in range(len(site.RegEx)): #the regexes will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if target != site.Target:
print "\n____________________ Results found for: " + site.Target + " ____________________"
target = site.Target
if siteimpprop is None or len(siteimpprop)==0:
print "No results in the " + site.FriendlyName[index] + " category"
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
print "No results found for: " + site.ReportStringForResult[index]
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop[index], basestring):
if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
print "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
print "" + site.ReportStringForResult[index] + " " + str(siteresult)
laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if target != site.Target:
print "\n____________________ Results found for: " + site.Target + " ____________________"
target = site.Target
if siteimpprop is None or len(siteimpprop)==0:
print "No results found in the " + site.FriendlyName
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
print "" + site.ReportStringForResult + " " + str(siteimpprop)
laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop:
if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
print "" + site.ReportStringForResult + " " + str(siteresult)
laststring = "" + site.ReportStringForResult + " " + str(siteresult)
else:
pass
def PrintToCEFFile(self,cefoutfile):
"""
Formats site information correctly and prints it to an output file in CEF format.
CEF format specification from http://mita-tac.wikispaces.com/file/view/CEF+White+Paper+071709.pdf
"Jan 18 11:07:53 host message"
where message:
"CEF:Version|Device Vendor|Device Product|Device Version|Signature ID|Name|Severity|Extension"
Returns nothing.
Argument(s):
cefoutfile -- A string representation of a file that will store the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
curr_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
hostname = socket.gethostname()
prefix = ' '.join([curr_date,hostname])
cef_version = "CEF:Version1.1"
cef_deviceVendor = "TekDefense"
cef_deviceProduct = "Automater"
cef_deviceVersion = "2.1"
cef_SignatureID = "0"
cef_Severity = "2"
cef_Extension = " "
cef_fields = [cef_version,cef_deviceVendor,cef_deviceProduct,cef_deviceVersion, \
cef_SignatureID, cef_Severity, cef_Extension]
pattern = "^\[\+\]\s+"
target = ""
print '\n[+] Generating CEF output: ' + cefoutfile
f = open(cefoutfile, "wb")
csv.register_dialect('escaped',delimiter='|',escapechar='\\',doublequote=False,quoting=csv.QUOTE_NONE)
cefRW = csv.writer(f,'escaped')
#cefRW.writerow(['Target', 'Type', 'Source', 'Result'])
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
                    for index in range(len(site.RegEx)): #the regexes will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
[1] + [tgt])
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
[1] + [tgt])
else:
laststring = ""
#if it's just a string we don't want it to output like a list
                                if isinstance(siteimpprop[index], basestring):
                                    tgt = site.Target
                                    typ = site.TargetType
                                    source = site.FriendlyName[index]
                                    res = siteimpprop[index]
if "" + tgt + typ + source + res != laststring:
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] " + \
re.sub(pattern,"",site.ReportStringForResult[index])+ str(siteimpprop)] + \
[cef_Severity] + [tgt])
laststring = "" + tgt + typ + source + res
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+str(res)])+"] " + \
re.sub(pattern,"",site.ReportStringForResult[index])+ str(siteresult)] + \
[cef_Severity] + [tgt])
laststring = "" + tgt + typ + source + str(res)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = "No results found"
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
[1] + [tgt])
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] " + \
re.sub(pattern,"",site.ReportStringForResult)+ str(siteimpprop)] + \
[cef_Severity] + [tgt])
laststring = "" + tgt + typ + source + res
else:
laststring = ""
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
cefRW.writerow([prefix] + cef_fields[:5] + \
["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+str(res)])+"] " + \
re.sub(pattern,"",site.ReportStringForResult)+ str(siteimpprop)] + \
[cef_Severity] + [tgt])
laststring = "" + tgt + typ + source + str(res)
f.flush()
f.close()
print "" + cefoutfile + " Generated"
def PrintToTextFile(self,textoutfile):
"""
Formats site information correctly and prints it to an output file in text format.
Returns nothing.
Argument(s):
textoutfile -- A string representation of a file that will store the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
print "\n[+] Generating text output: " + textoutfile
f = open(textoutfile, "w")
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite
                    for index in range(len(site.RegEx)): #the regexes will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if target != site.Target:
f.write("\n____________________ Results found for: " + site.Target + " ____________________")
target = site.Target
if siteimpprop is None or len(siteimpprop)==0:
f.write("\nNo results in the " + site.FriendlyName[index] + " category")
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
f.write("\nNo results found for: " + site.ReportStringForResult[index])
else:
laststring = ""
#if it's just a string we don't want it to output like a list
if isinstance(siteimpprop[index], basestring):
if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
f.write("\n" + site.ReportStringForResult[index] + " " + str(siteimpprop))
laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
f.write("\n" + site.ReportStringForResult[index] + " " + str(siteresult))
laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if target != site.Target:
f.write("\n____________________ Results found for: " + site.Target + " ____________________")
target = site.Target
if siteimpprop is None or len(siteimpprop)==0:
f.write("\nNo results found in the " + site.FriendlyName)
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
f.write("\n" + site.ReportStringForResult + " " + str(siteimpprop))
laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
else:
laststring = ""
for siteresult in siteimpprop:
if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
f.write("\n" + site.ReportStringForResult + " " + str(siteresult))
laststring = "" + site.ReportStringForResult + " " + str(siteresult)
f.flush()
f.close()
print "" + textoutfile + " Generated"
def PrintToCSVFile(self,csvoutfile):
f = open(csvoutfile, "wb")
self.PrintToCSVFileHandle(f)
f.close()
def PrintToCSVFileHandle(self,csvoutfilehandle):
"""
        Formats site information correctly and prints it to an output file with comma separators.
Returns nothing.
Argument(s):
        csvoutfilehandle -- An open file handle that will receive the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
f = csvoutfilehandle
csvRW = csv.writer(f, quoting=csv.QUOTE_ALL)
csvRW.writerow(['Target', 'Type', 'Source', 'Result'])
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
                    for index in range(len(site.RegEx)): #the regexes will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
laststring = ""
#if it's just a string we don't want it to output like a list
                                if isinstance(siteimpprop[index], basestring):
                                    tgt = site.Target
                                    typ = site.TargetType
                                    source = site.FriendlyName[index]
                                    res = siteimpprop[index]
if "" + tgt + typ + source + res != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + res
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + str(res)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + res
else:
laststring = ""
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + str(res)
f.flush()
        # The caller owns the handle (see PrintToCSVFile), so don't close it here.
def PrintToHTMLFile(self,htmloutfile):
"""
Formats site information correctly and prints it to an output file using HTML markup.
Returns nothing.
Argument(s):
htmloutfile -- A string representation of a file that will store the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
print '\n[+] Generating HTML output: ' + htmloutfile
f = open(htmloutfile, "w")
f.write(self.getHTMLOpening())
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
                    for index in range(len(site.RegEx)): #the regexes will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
#if it's just a string we don't want it to output like a list
                                if isinstance(siteimpprop[index], basestring):
                                    tgt = site.Target
                                    typ = site.TargetType
                                    source = site.FriendlyName[index]
                                    res = siteimpprop[index]
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteresult
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
f.write(self.getHTMLClosing())
f.flush()
f.close()
print "" + htmloutfile + " Generated"
def getHTMLOpening(self):
"""
Creates HTML markup to provide correct formatting for initial HTML file requirements.
Returns string that contains opening HTML markup information for HTML output file.
Argument(s):
No arguments required.
Return value(s):
string.
Restriction(s):
The Method has no restrictions.
"""
return '''<style type="text/css">
#table-3 {
border: 1px solid #DFDFDF;
background-color: #F9F9F9;
width: 100%;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px;
font-family: Arial,"Bitstream Vera Sans",Helvetica,Verdana,sans-serif;
color: #333;
}
#table-3 td, #table-3 th {
border-top-color: white;
border-bottom: 1px solid #DFDFDF;
color: #555;
}
#table-3 th {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 14px;
}
#table-3 td {
font-size: 12px;
padding: 4px 7px 2px;
vertical-align: top;
}
h1 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: Center;
line-height: 1.3em;
font-size: 40px;
}
h2 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 16px;
}
h4 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 10px;
}
</style>
<html>
<head>
<title> Automater Results </title>
</head>
<body>
<h1> Automater Results </h1>
<table id="table-3">
<tr>
<th>Target</th>
<th>Type</th>
<th>Source</th>
<th>Result</th>
</tr>
'''
def getHTMLClosing(self):
"""
Creates HTML markup to provide correct formatting for closing HTML file requirements.
Returns string that contains closing HTML markup information for HTML output file.
Argument(s):
No arguments required.
Return value(s):
string.
Restriction(s):
The Method has no restrictions.
"""
return '''
</table>
<br>
<br>
<p>Created using Automater.py by @TekDefense <a href="http://www.tekdefense.com">http://www.tekdefense.com</a>; <a href="https://github.com/1aN0rmus/TekDefense">https://github.com/1aN0rmus/TekDefense</a></p>
</body>
</html>
'''
def hashOutput(self):
"""
        Returns the output of Automater as a dict suitable for JSON encoding.
        The format is:
        {<target>: {<source>: [{'Type': <result type>, 'Result': <result>}, ...]}}
"""
sites = sorted(self._listofsites, key=attrgetter('Target'))
target = ""
thash = {}
        def get_hash(h, k):
            if k not in h:
                h[k] = {}
            return h[k]
        def get_array(h, k):
            if k not in h:
                h[k] = []
            return h[k]
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
for index in range(len(site.RegEx)): #the regexs will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
#csvRW.writerow([tgt,typ,source,res])
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
else:
laststring = ""
#if it's just a string we don't want it to output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
laststring = "" + tgt + typ + source + res
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
laststring = "" + tgt + typ + source + str(res)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
                    if siteimpprop is None or len(siteimpprop)==0:
                        tgt = site.Target
                        typ = site.TargetType
                        source = site.FriendlyName
                        res = "No results found"
                        get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
laststring = "" + tgt + typ + source + res
else:
laststring = ""
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName if site.FriendlyName else "UNK"
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
get_array(get_hash(thash,tgt),source).append({'Type':typ,'Result':res})
laststring = "" + tgt + typ + source + str(res)
return thash
def jsonOutput(self):
"""
Returns output of automater as JSON encoded string
"""
return json.dumps(self.hashOutput())
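# Hedged illustration (not part of Automater; target, source names and values
# are hypothetical): the structure produced by hashOutput()/jsonOutput() for
# one target checked against two sources:
#
#   {
#       "1.2.3.4": {
#           "SourceA": [{"Type": "ip", "Result": "listed"}],
#           "SourceB": [{"Type": "ip", "Result": "No results found"}]
#       }
#   }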
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on Cisco IOS network devices
description:
- This module provides declarative management of static
IP routes on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
options:
prefix:
description:
- Network prefix of the static route.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
vrf:
description:
- VRF of the static route.
version_added: "2.8"
interface:
description:
- Interface of the static route.
version_added: "2.8"
name:
description:
- Name of the static route
aliases: ['description']
version_added: "2.8"
admin_distance:
description:
- Admin distance of the static route.
tag:
description:
- Set tag of the static route.
version_added: "2.8"
track:
description:
- Tracked item to depend on for the static route.
version_added: "2.8"
aggregate:
description: List of static route definitions.
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: configure static route
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: configure black hole in vrf blue depending on tracked item 10
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
vrf: blue
interface: null0
track: 10
- name: configure ultimate route with name and tag
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
interface: GigabitEthernet1
name: hello world
tag: 100
- name: remove configuration
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- name: Remove static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0 255.255.255.0 10.0.0.1
"""
from copy import deepcopy
from re import findall
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec, validate_ip_address
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec
def map_obj_to_commands(want, have):
commands = list()
for w in want:
state = w['state']
del w['state']
# Try to match an existing config with the desired config
for h in have:
            # Remove the admin_distance param from have if want does not set it, before comparing the two
if not w.get('admin_distance') and h.get('admin_distance'):
del h['admin_distance']
diff = list(set(w.items()) ^ set(h.items()))
if not diff:
break
# if route is present with name or name already starts with wanted name it will not change
elif len(diff) == 2 and diff[0][0] == diff[1][0] == 'name' and (not w['name'] or h['name'].startswith(w['name'])):
break
# If no matches found, clear `h`
else:
h = None
command = 'ip route'
prefix = w['prefix']
mask = w['mask']
vrf = w.get('vrf')
if vrf:
command = ' '.join((command, 'vrf', vrf, prefix, mask))
else:
command = ' '.join((command, prefix, mask))
for key in ['interface', 'next_hop', 'admin_distance', 'tag', 'name', 'track']:
if w.get(key):
if key == 'name' and len(w.get(key).split()) > 1:
command = ' '.join((command, key, '"%s"' % w.get(key))) # name with multiple words needs to be quoted
elif key in ('name', 'tag', 'track'):
command = ' '.join((command, key, w.get(key)))
else:
command = ' '.join((command, w.get(key)))
if state == 'absent' and h:
commands.append('no %s' % command)
elif state == 'present' and not h:
commands.append(command)
return commands
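# Hedged example (values hypothetical): for a desired route
#   w = {'prefix': '10.0.0.0', 'mask': '255.255.255.0', 'vrf': 'blue',
#        'next_hop': '192.168.1.1', 'name': 'lab net', 'state': 'present'}
# with no matching entry in `have`, map_obj_to_commands() yields
#   ip route vrf blue 10.0.0.0 255.255.255.0 192.168.1.1 name "lab net"
# (the multi-word name is quoted by the key loop above).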
def map_config_to_obj(module):
obj = []
out = get_config(module, flags='| include ip route')
for line in out.splitlines():
splitted_line = findall(r'[^"\s]\S*|".+?"', line) # Split by whitespace but do not split quotes, needed for name parameter
if splitted_line[2] == 'vrf':
route = {'vrf': splitted_line[3]}
del splitted_line[:4] # Removes the words ip route vrf vrf_name
else:
route = {}
del splitted_line[:2] # Removes the words ip route
prefix = splitted_line[0]
mask = splitted_line[1]
route.update({'prefix': prefix, 'mask': mask, 'admin_distance': '1'})
next_word = None
for word in splitted_line[2:]:
if next_word:
route[next_word] = word.strip('"') # Remove quotes which is needed for name
next_word = None
elif validate_ip_address(word):
route.update(next_hop=word)
elif word.isdigit():
route.update(admin_distance=word)
elif word in ('tag', 'name', 'track'):
next_word = word
else:
route.update(interface=word)
obj.append(route)
return obj
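# Hedged parsing example (device output hypothetical): the running-config line
#   ip route vrf blue 10.0.0.0 255.255.255.0 192.168.1.1 20 name "lab net"
# is tokenized by the quote-aware regex above and mapped to
#   {'vrf': 'blue', 'prefix': '10.0.0.0', 'mask': '255.255.255.0',
#    'next_hop': '192.168.1.1', 'admin_distance': '20', 'name': 'lab net'}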
def map_params_to_obj(module, required_together=None):
keys = ['prefix', 'mask', 'state', 'next_hop', 'vrf', 'interface', 'name', 'admin_distance', 'track', 'tag']
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
route = item.copy()
for key in keys:
if route.get(key) is None:
route[key] = module.params.get(key)
route = dict((k, v) for k, v in route.items() if v is not None)
module._check_required_together(required_together, route)
obj.append(route)
else:
module._check_required_together(required_together, module.params)
route = dict()
for key in keys:
if module.params.get(key) is not None:
route[key] = module.params.get(key)
obj.append(route)
return obj
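# Hedged illustration (values hypothetical): with an aggregate, keys missing
# from an item fall back to the module-level parameters, so
#   aggregate: [{prefix: 172.16.32.0, mask: 255.255.255.0}]
#   state: absent
# produces a single route object whose 'state' is 'absent'.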
def main():
""" main entry point for module execution
"""
element_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
vrf=dict(type='str'),
interface=dict(type='str'),
name=dict(type='str', aliases=['description']),
admin_distance=dict(type='str'),
track=dict(type='str'),
        tag=dict(type='str'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'mask']]
mutually_exclusive = [['aggregate', 'prefix']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_together=required_together)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
import factory
from factory.fuzzy import FuzzyDate, FuzzyInteger
import random
import six
from django.contrib.auth import models as auth
from timepiece.contracts import models as contracts
from timepiece.crm import models as crm
from timepiece.entries import models as entries
from timepiece import utils
class User(factory.DjangoModelFactory):
class Meta:
model = auth.User
# FIXME: Some tests depend on first_name/last_name being unique.
first_name = factory.Sequence(lambda n: 'Sam{0}'.format(n))
last_name = factory.Sequence(lambda n: 'Blue{0}'.format(n))
username = factory.Sequence(lambda n: 'user{0}'.format(n))
email = factory.Sequence(lambda n: 'user{0}@example.com'.format(n))
@factory.post_generation
def password(self, create, extracted, **kwargs):
self.set_password(extracted or "password")
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if create and extracted:
for perm in extracted:
if isinstance(perm, six.string_types):
app_label, codename = perm.split('.')
perm = auth.Permission.objects.get(
content_type__app_label=app_label,
codename=codename,
)
self.user_permissions.add(perm)
class Superuser(User):
is_superuser = True
is_staff = True
class Group(factory.DjangoModelFactory):
class Meta:
model = auth.Group
name = factory.Sequence(lambda n: 'group{0}'.format(n))
class ProjectContract(factory.DjangoModelFactory):
class Meta:
model = contracts.ProjectContract
name = factory.Sequence(lambda n: 'contract{0}'.format(n))
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
    status = contracts.ProjectContract.STATUS_CURRENT
type = contracts.ProjectContract.PROJECT_PRE_PAID_HOURLY
@factory.post_generation
def contract_hours(self, create, extracted, **kwargs):
if create:
num_hours = extracted or random.randint(10, 400)
for i in range(2):
ContractHour(contract=self, hours=Decimal(str(num_hours/2.0)))
@factory.post_generation
def projects(self, create, extracted, **kwargs):
if create and extracted:
self.projects.add(*extracted)
class ContractHour(factory.DjangoModelFactory):
class Meta:
model = contracts.ContractHour
date_requested = datetime.date.today()
status = contracts.ContractHour.APPROVED_STATUS
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
class ContractAssignment(factory.DjangoModelFactory):
class Meta:
model = contracts.ContractAssignment
user = factory.SubFactory('timepiece.tests.factories.User')
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
class HourGroup(factory.DjangoModelFactory):
class Meta:
model = contracts.HourGroup
name = factory.Sequence(lambda n: 'hourgroup{0}'.format(n))
class EntryGroup(factory.DjangoModelFactory):
class Meta:
model = contracts.EntryGroup
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
end = FuzzyDate(datetime.date.today() - relativedelta(months=1))
class TypeAttribute(factory.DjangoModelFactory):
class Meta:
model = crm.Attribute
label = factory.Sequence(lambda n: 'type{0}'.format(n))
type = crm.Attribute.PROJECT_TYPE
class StatusAttribute(factory.DjangoModelFactory):
class Meta:
model = crm.Attribute
label = factory.Sequence(lambda n: 'status{0}'.format(n))
type = crm.Attribute.PROJECT_STATUS
class Business(factory.DjangoModelFactory):
class Meta:
model = crm.Business
name = factory.Sequence(lambda n: 'business{0}'.format(n))
class Project(factory.DjangoModelFactory):
class Meta:
model = crm.Project
name = factory.Sequence(lambda n: 'project{0}'.format(n))
business = factory.SubFactory('timepiece.tests.factories.Business')
point_person = factory.SubFactory('timepiece.tests.factories.User')
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute')
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute')
class BillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=True)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=True)
class NonbillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=False)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=False)
class RelationshipType(factory.DjangoModelFactory):
class Meta:
model = crm.RelationshipType
name = factory.Sequence(lambda n: 'reltype{0}'.format(n))
class ProjectRelationship(factory.DjangoModelFactory):
class Meta:
model = crm.ProjectRelationship
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
class UserProfile(factory.DjangoModelFactory):
class Meta:
model = crm.UserProfile
user = factory.SubFactory('timepiece.tests.factories.User')
class Activity(factory.DjangoModelFactory):
class Meta:
model = entries.Activity
code = factory.Sequence(lambda n: 'a{0}'.format(n))
name = factory.Sequence(lambda n: 'activity{0}'.format(n))
class BillableActivityFactory(Activity):
billable = True
class NonbillableActivityFactory(Activity):
billable = False
class ActivityGroup(factory.DjangoModelFactory):
class Meta:
model = entries.ActivityGroup
name = factory.Sequence(lambda n: 'activitygroup{0}'.format(n))
class Location(factory.DjangoModelFactory):
class Meta:
model = entries.Location
name = factory.Sequence(lambda n: 'location{0}'.format(n))
slug = factory.Sequence(lambda n: 'location{0}'.format(n))
class Entry(factory.DjangoModelFactory):
class Meta:
model = entries.Entry
status = entries.Entry.UNVERIFIED
user = factory.SubFactory('timepiece.tests.factories.User')
activity = factory.SubFactory('timepiece.tests.factories.Activity')
project = factory.SubFactory('timepiece.tests.factories.Project')
location = factory.SubFactory('timepiece.tests.factories.Location')
class ProjectHours(factory.DjangoModelFactory):
class Meta:
model = entries.ProjectHours
week_start = utils.get_week_start()
project = factory.SubFactory('timepiece.tests.factories.Project')
user = factory.SubFactory('timepiece.tests.factories.User')
hours = FuzzyInteger(0, 20)
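# Hedged usage sketch (values hypothetical; not part of the test suite): the
# post-generation hooks above let tests write, for example,
#   user = User(password='s3cret', permissions=['auth.add_user'])
#   project = BillableProject(point_person=user)
# where the permission string is resolved to an auth.Permission instance.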
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of nova-cells RPC API (for talking to the nova-cells service
within a cell).
This is different from communication between child and parent nova-cells
services. That communication is handled by the cells driver via the
messaging module.
"""
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import proxy as rpc_proxy
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPI(rpc_proxy.RpcProxy):
'''Cells client-side RPC API
API version history:
1.0 - Initial version.
1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
1.2 - Adds service_get_all(), service_get_by_compute_host(),
and proxy_rpc_to_compute_manager()
1.3 - Adds task_log_get_all()
1.4 - Adds compute_node_get(), compute_node_get_all(), and
compute_node_stats()
1.5 - Adds actions_get(), action_get_by_request_id(), and
action_events_get()
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(CellsAPI, self).__init__(topic=CONF.cells.topic,
default_version=self.BASE_RPC_API_VERSION)
def cast_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a cast to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
self.cast(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=False))
def call_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a call to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
return self.call(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=True))
def schedule_run_instance(self, ctxt, **kwargs):
"""Schedule a new instance for creation."""
self.cast(ctxt, self.make_msg('schedule_run_instance',
host_sched_kwargs=kwargs))
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
if not CONF.cells.enable:
return
# Make sure we have a dict, not a SQLAlchemy model
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance_p))
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy instance at API level."""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_destroy_at_top',
instance=instance_p))
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""Delete instance everywhere. delete_type may be 'soft'
or 'hard'. This is generally only used to resolve races
when API cell doesn't know to what cell an instance belongs.
"""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_delete_everywhere',
instance=instance_p,
delete_type=delete_type))
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top."""
if not CONF.cells.enable:
return
instance_fault_p = jsonutils.to_primitive(instance_fault)
self.cast(ctxt, self.make_msg('instance_fault_create_at_top',
instance_fault=instance_fault_p))
def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None):
"""Broadcast upwards that bw_usage was updated."""
if not CONF.cells.enable:
return
bw_update_info = {'uuid': uuid,
'mac': mac,
'start_period': start_period,
'bw_in': bw_in,
'bw_out': bw_out,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'last_refreshed': last_refreshed}
self.cast(ctxt, self.make_msg('bw_usage_update_at_top',
bw_update_info=bw_update_info))
def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
"""Broadcast up that an instance's info_cache has changed."""
if not CONF.cells.enable:
return
iicache = jsonutils.to_primitive(instance_info_cache)
instance = {'uuid': iicache['instance_uuid'],
'info_cache': iicache}
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance))
def get_cell_info_for_neighbors(self, ctxt):
"""Get information about our neighbor cells from the manager."""
if not CONF.cells.enable:
return []
return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'),
version='1.1')
def sync_instances(self, ctxt, project_id=None, updated_since=None,
deleted=False):
"""Ask all cells to sync instance data."""
if not CONF.cells.enable:
return
return self.cast(ctxt, self.make_msg('sync_instances',
project_id=project_id,
updated_since=updated_since,
deleted=deleted),
version='1.1')
def service_get_all(self, ctxt, filters=None):
"""Ask all cells for their list of services."""
return self.call(ctxt,
self.make_msg('service_get_all',
filters=filters),
version='1.2')
def service_get_by_compute_host(self, ctxt, host_name):
"""Get the service entry for a host in a particular cell. The
cell name should be encoded within the host_name.
"""
return self.call(ctxt, self.make_msg('service_get_by_compute_host',
host_name=host_name),
version='1.2')
def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
timeout=None):
"""Proxy RPC to a compute manager. The host in the topic
should be encoded with the target cell name.
"""
return self.call(ctxt, self.make_msg('proxy_rpc_to_manager',
topic=topic,
rpc_message=rpc_message,
call=call,
timeout=timeout),
timeout=timeout,
version='1.2')
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get the task logs from the DB in child cells."""
return self.call(ctxt, self.make_msg('task_log_get_all',
task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state),
version='1.3')
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
return self.call(ctxt, self.make_msg('compute_node_get',
compute_id=compute_id),
version='1.4')
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells, optionally
filtering by hypervisor host.
"""
return self.call(ctxt,
self.make_msg('compute_node_get_all',
hypervisor_match=hypervisor_match),
version='1.4')
def compute_node_stats(self, ctxt):
"""Return compute node stats from all cells."""
return self.call(ctxt, self.make_msg('compute_node_stats'),
version='1.4')
def actions_get(self, ctxt, instance):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('actions_get',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid']),
version='1.5')
def action_get_by_request_id(self, ctxt, instance, request_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_get_by_request_id',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid'],
request_id=request_id),
version='1.5')
def action_events_get(self, ctxt, instance, action_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_events_get',
cell_name=instance['cell_name'],
action_id=action_id),
version='1.5')
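# Hedged usage sketch (context object and cell name hypothetical): casting a
# compute API method into a specific cell through the proxy defined above:
#   cells_api = CellsAPI()
#   cells_api.cast_compute_api_method(ctxt, 'parent!child', 'pause',
#                                     instance_uuid)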
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
engine
~~~~~~~
Core functionality.
"""
import sys
import types
import time
from functools import wraps
from concurrent import futures
from .tasks import Task, MultiTask, ProcessTask, MultiProcessTask
try:
# needed for type checks for list of tasks
from .gevent_tasks import GTask, MultiGTask
except ImportError:
GTask = MultiGTask = None
# TODO method to execute something in gui thread
# TODO should i call multiprocessing.freeze_support() ?
# TODO documentation
# TODO callbacks
# TODO cancel tasks, or stop engine
POOL_TIMEOUT = 0.02
class ReturnResult(Exception):
""" Exception Used to return result from generator
"""
def __init__(self, result):
super(ReturnResult, self).__init__()
self.result = result
class Engine(object):
""" Engine base class
    After creating an engine instance, set the :attr:`main_app` property
    (not needed with PyQt/PySide).
    Decorate a generator with :meth:`@async <async>` to execute tasks yielded
    from the generator in a separate executor and the remaining operations in
    the GUI thread.
Subclasses should implement :meth:`update_gui`.
"""
def __init__(self, pool_timeout=POOL_TIMEOUT):
"""
:param pool_timeout: time in seconds which GUI can spend in a loop
"""
self.pool_timeout = pool_timeout
#: main application instance
self.main_app = None
def async(self, func):
""" Decorator for asynchronous generators.
        Any :class:`Task`, :class:`ProcessTask` or :class:`GTask` yielded from
        the generator will be executed in a separate thread, process or
        greenlet accordingly. For example, a GUI application can have the
        following button click handler::
engine = PyQtEngine()
...
@engine.async
def on_button_click():
# do something in GUI thread
data = yield Task(do_time_consuming_work, param)
update_gui_with(data) # in main GUI thread
        If some task raises :class:`ReturnResult`, its value will be returned
.. seealso:: :func:`return_result`
"""
@wraps(func)
        def wrapper(*args, **kwargs):
            gen = func(*args, **kwargs)
            if isinstance(gen, types.GeneratorType):
                return self.create_runner(gen).run()
            return gen  # plain (non-generator) callable: pass its result through
        return wrapper
def create_runner(self, gen):
""" Creates :class:`Runner` instance
:param gen: generator which returns async tasks
Can be overridden if you want custom ``Runner``
"""
return Runner(self, gen)
def update_gui(self):
""" Allows GUI to process events
Should be overridden in subclass
"""
time.sleep(self.pool_timeout)
class Runner(object):
""" Internal class that runs tasks returned by generator
"""
def __init__(self, engine, gen):
"""
:param engine: :class:`Engine` instance
:param gen: Generator which yields tasks
"""
self.engine = engine
self.gen = gen
def run(self):
""" Runs generator and executes tasks
"""
gen = self.gen
try:
task = next(gen) # start generator and receive first task
except StopIteration:
return
while True:
try:
if isinstance(task, (list, tuple)):
assert len(task), "Empty tasks sequence"
first_task = task[0]
if isinstance(first_task, ProcessTask):
task = MultiProcessTask(task)
elif GTask and isinstance(first_task, GTask):
task = MultiGTask(task)
else:
task = MultiTask(task)
with task.executor_class(task.max_workers) as executor:
if isinstance(task, MultiTask):
task = self._execute_multi_task(gen, executor, task)
else:
task = self._execute_single_task(gen, executor, task)
except StopIteration:
break
except ReturnResult as e:
gen.close()
return e.result
def _execute_single_task(self, gen, executor, task):
future = executor.submit(task)
while True:
try:
result = future.result(self.engine.pool_timeout)
except futures.TimeoutError:
self.engine.update_gui()
# TODO canceled error
except Exception:
return gen.throw(*sys.exc_info())
else:
return gen.send(result)
def _execute_multi_task(self, gen, executor, task):
if task.unordered:
results_gen = self._execute_multi_gen_task(gen, executor, task)
return gen.send(results_gen)
future_tasks = [executor.submit(t) for t in task.tasks]
while True:
if not task.wait(executor, future_tasks, self.engine.pool_timeout):
self.engine.update_gui()
else:
break
if task.skip_errors:
results = []
for f in future_tasks:
try:
results.append(f.result())
except Exception:
pass
else:
try:
results = [f.result() for f in future_tasks]
except Exception:
return gen.throw(*sys.exc_info())
return gen.send(results)
def _execute_multi_gen_task(self, gen, executor, task):
unfinished = set(executor.submit(t) for t in task.tasks)
while unfinished:
if not task.wait(executor, unfinished, self.engine.pool_timeout):
self.engine.update_gui()
done = set(f for f in unfinished if f.done())
for f in done:
try:
result = f.result()
except Exception:
if not task.skip_errors:
raise
else:
yield result
unfinished.difference_update(done)
def return_result(result):
""" Allows to return result from generator
Internally it raises :class:`ReturnResult` exception, so take in mind, that
it can be catched in catch-all block
"""
raise ReturnResult(result)
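# Minimal sketch (the engine instance and slow_io are hypothetical): returning
# a value from an @async generator via return_result():
#
#   @engine.async
#   def fetch():
#       data = yield Task(slow_io)
#       return_result(data.upper())  # fetch() returns the uppercased data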
|
|
import logging
import os
from typing import List, Dict
import numpy as np
import pymc3 as pm
from . import io_commons
from . import io_consts
from .. import types
from ..models import commons as model_commons
from ..models.model_ploidy import PloidyModelConfig
from ..models.model_ploidy import PloidyWorkspace, PloidyModel
from ..structs.metadata import SampleReadDepthMetadata, SamplePloidyMetadata
_logger = logging.getLogger(__name__)
class PloidyModelWriter:
"""Writes global ploidy model parameters to disk."""
def __init__(self,
ploidy_config: PloidyModelConfig,
ploidy_workspace: PloidyWorkspace,
ploidy_model: PloidyModel,
ploidy_model_approx: pm.MeanField,
output_path: str):
io_commons.assert_output_path_writable(output_path)
self.ploidy_config = ploidy_config
self.ploidy_workspace = ploidy_workspace
self.ploidy_model = ploidy_model
self.ploidy_model_approx = ploidy_model_approx
self.output_path = output_path
(self._approx_var_set, self._approx_mu_map,
self._approx_std_map) = io_commons.extract_mean_field_posterior_parameters(self.ploidy_model_approx)
def __call__(self):
# write gcnvkernel version
io_commons.write_gcnvkernel_version(self.output_path)
# write ploidy config
io_commons.write_dict_to_json_file(
os.path.join(self.output_path, io_consts.default_ploidy_config_json_filename),
self.ploidy_config.__dict__,
{'contig_ploidy_prior_map', 'contig_set', 'num_ploidy_states'})
# write global variables in the posterior
for var_name in self.ploidy_model.global_var_registry:
assert var_name in self._approx_var_set, \
"a variable named {0} does not exist in the approximation".format(var_name)
_logger.info("Writing {0}...".format(var_name))
var_mu = self._approx_mu_map[var_name]
var_std = self._approx_std_map[var_name]
var_mu_out_path = os.path.join(self.output_path, 'mu_' + var_name + '.tsv')
io_commons.write_ndarray_to_tsv(var_mu_out_path, var_mu)
var_std_out_path = os.path.join(self.output_path, 'std_' + var_name + '.tsv')
io_commons.write_ndarray_to_tsv(var_std_out_path, var_std)
class SamplePloidyWriter:
"""Writes sample-specific ploidy model parameters and associated workspace variables to disk."""
def __init__(self,
ploidy_config: PloidyModelConfig,
ploidy_workspace: PloidyWorkspace,
ploidy_model: PloidyModel,
ploidy_model_approx: pm.MeanField,
ploidy_gq_filter: int,
output_path: str):
self.ploidy_config = ploidy_config
self.ploidy_workspace = ploidy_workspace
self.ploidy_model = ploidy_model
self.ploidy_model_approx = ploidy_model_approx
self.ploidy_gq_filter = ploidy_gq_filter
self.output_path = output_path
(self._approx_var_set, self._approx_mu_map,
self._approx_std_map) = io_commons.extract_mean_field_posterior_parameters(self.ploidy_model_approx)
@staticmethod
def _write_sample_contig_ploidy(sample_posterior_path: str,
sample_ploidy_metadata: SamplePloidyMetadata,
extra_comment_lines: List[str] = None,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char):
with open(os.path.join(sample_posterior_path, io_consts.default_sample_contig_ploidy_tsv_filename), 'w') as f:
if extra_comment_lines is not None:
for comment_line in extra_comment_lines:
f.write(comment + comment_line + '\n')
header = delimiter.join([io_consts.contig_column_name,
io_consts.ploidy_column_name,
io_consts.ploidy_gq_column_name])
f.write(header + '\n')
for j, contig in enumerate(sample_ploidy_metadata.contig_list):
f.write(delimiter.join([contig,
repr(sample_ploidy_metadata.ploidy_j[j]),
repr(sample_ploidy_metadata.ploidy_genotyping_quality_j[j])]) + '\n')
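    # Hedged illustration of the TSV written above (column names are taken
    # from io_consts and may differ; values are hypothetical):
    #
    #   CONTIG  PLOIDY  PLOIDY_GQ
    #   chr1    2       120.3
    #   chrX    1       95.7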
@staticmethod
def _write_sample_read_depth(sample_posterior_path: str,
sample_read_depth_metadata: SampleReadDepthMetadata,
extra_comment_lines: List[str] = None,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char):
with open(os.path.join(sample_posterior_path, io_consts.default_sample_read_depth_tsv_filename), 'w') as f:
if extra_comment_lines is not None:
for comment_line in extra_comment_lines:
f.write(comment + comment_line + '\n')
header = delimiter.join([io_consts.global_read_depth_column_name,
io_consts.average_ploidy_column_name])
f.write(header + '\n')
f.write(delimiter.join([repr(sample_read_depth_metadata.global_read_depth),
repr(sample_read_depth_metadata.average_ploidy)]) + '\n')
def __call__(self):
for si, sample_name in enumerate(self.ploidy_workspace.sample_names):
sample_name_comment_line = [io_consts.sample_name_sam_header_prefix + sample_name]
sample_posterior_path = os.path.join(self.output_path, io_consts.sample_folder_prefix + repr(si))
io_commons.assert_output_path_writable(sample_posterior_path, try_creating_output_path=True)
_logger.info("Saving posteriors for sample \"{0}\" in \"{1}\"...".format(
sample_name, sample_posterior_path))
# find best contig ploidy calls and calculate ploidy genotyping quality
ploidy_j = np.zeros((self.ploidy_workspace.num_contigs,), dtype=types.small_uint)
ploidy_genotyping_quality_j = np.zeros((self.ploidy_workspace.num_contigs,), dtype=types.floatX)
log_q_ploidy_jk = self.ploidy_workspace.log_q_ploidy_sjk.get_value(borrow=True)[si, :, :]
for j in range(self.ploidy_workspace.num_contigs):
ploidy_j[j], ploidy_genotyping_quality_j[j] = model_commons.perform_genotyping(log_q_ploidy_jk[j, :])
                if ploidy_genotyping_quality_j[j] < self.ploidy_gq_filter:
                    contig = self.ploidy_workspace.interval_list_metadata.ordered_contig_list[j]
                    ploidy_j[j] = np.argmax(self.ploidy_config.contig_ploidy_prior_map[contig])
                    _logger.warning("Ploidy call for contig {0} is lower quality than the specified GQ "
                                    "threshold of {1}; using the ploidy with the highest prior instead.".format(
                                        contig, self.ploidy_gq_filter))
# generate sample ploidy metadata
sample_ploidy_metadata = SamplePloidyMetadata(
sample_name, ploidy_j, ploidy_genotyping_quality_j,
self.ploidy_workspace.interval_list_metadata.ordered_contig_list)
# generate sample read depth metadata
sample_read_depth_metadata = SampleReadDepthMetadata.generate_sample_read_depth_metadata(
self.ploidy_workspace.sample_metadata_collection.get_sample_coverage_metadata(sample_name),
sample_ploidy_metadata,
self.ploidy_workspace.interval_list_metadata)
# write contig ploidy
self._write_sample_contig_ploidy(
sample_posterior_path, sample_ploidy_metadata, extra_comment_lines=sample_name_comment_line)
# write read depth
self._write_sample_read_depth(
sample_posterior_path, sample_read_depth_metadata, extra_comment_lines=sample_name_comment_line)
# write sample name
io_commons.write_sample_name_to_txt_file(sample_posterior_path, sample_name)
# write sample-specific posteriors in the approximation
io_commons.write_mean_field_sample_specific_params(
si, sample_posterior_path, self._approx_var_set, self._approx_mu_map, self._approx_std_map,
self.ploidy_model, sample_name_comment_line)
class PloidyModelReader:
"""Reads ploidy model parameters from disk and updates the provided approximation accordingly.
Note:
It is assumed that the provided model instance and approximation are compatible with the model
parameters to be read. This has to be asserted beforehand by the CLI tool.
"""
def __init__(self,
ploidy_model: PloidyModel,
ploidy_model_approx: pm.MeanField,
input_path: str):
self.ploidy_model = ploidy_model
self.ploidy_model_approx = ploidy_model_approx
self.input_path = input_path
def __call__(self):
# check if the model is created with the same gcnvkernel version
io_commons.check_gcnvkernel_version_from_path(self.input_path)
# read model params
io_commons.read_mean_field_global_params(self.input_path, self.ploidy_model_approx, self.ploidy_model)
def get_contig_ploidy_prior_map_from_tsv_file(input_path: str,
comment=io_consts.default_comment_char,
delimiter=io_consts.default_delimiter_char) -> Dict[str, np.ndarray]:
contig_ploidy_prior_pd = io_commons.read_csv(input_path,
dtypes_dict=io_consts.ploidy_prior_dtypes_dict,
comment=comment,
delimiter=delimiter)
columns = [str(x) for x in contig_ploidy_prior_pd.columns.values]
assert len(columns) > 1
assert columns[0] == io_consts.ploidy_prior_contig_name_column
    contig_list = [str(x) for x in contig_ploidy_prior_pd[io_consts.ploidy_prior_contig_name_column].values]
assert all([len(column) > len(io_consts.ploidy_prior_prefix)
and column[:len(io_consts.ploidy_prior_prefix)] == io_consts.ploidy_prior_prefix
for column in columns[1:]])
ploidy_values = [int(column[len(io_consts.ploidy_prior_prefix):]) for column in columns[1:]]
num_ploidy_states = np.max(ploidy_values) + 1
contig_ploidy_prior_map: Dict[str, np.ndarray] = dict()
for contig in contig_list:
contig_ploidy_prior_map[contig] = np.zeros((num_ploidy_states,), dtype=types.floatX)
for ploidy in range(num_ploidy_states):
column_name = io_consts.ploidy_prior_prefix + str(ploidy)
if column_name in columns:
values = [float(x) for x in contig_ploidy_prior_pd[column_name].values]
for j, contig in enumerate(contig_list):
contig_ploidy_prior_map[contig][ploidy] = values[j]
return contig_ploidy_prior_map
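# Hedged example (column prefix and values hypothetical, assuming
# io_consts.ploidy_prior_prefix == 'PLOIDY_PRIOR_'): a prior table such as
#
#   CONTIG_NAME  PLOIDY_PRIOR_0  PLOIDY_PRIOR_1  PLOIDY_PRIOR_2
#   chr1         0.00            0.01            0.99
#   chrX         0.00            0.50            0.50
#
# parses to {'chr1': array([0., 0.01, 0.99]), 'chrX': array([0., 0.5, 0.5])}.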
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_cmark = False
args.test_swiftpm = False
args.test_swift_driver = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftformat = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
args.test_swiftdocc = False
# --test implies --test-early-swift-driver
# (unless explicitly skipped with `--skip-test-early-swift-driver`)
if args.test and (args.build_early_swift_driver and
args.test_early_swift_driver is None):
args.test_early_swift_driver = True
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
    # --skip-test-watchos is merely a shorthand for host and simulator
    # tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
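# Illustrative sketch (flag values hypothetical): with only
# `--build-variant RelWithDebInfo` on the command line, the cascade above
# propagates the variant to every component left unset:
#   args.llvm_build_variant         == 'RelWithDebInfo'
#   args.swift_build_variant        == 'RelWithDebInfo'
#   args.swift_stdlib_build_variant == 'RelWithDebInfo'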
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option(['--reconfigure'], store_true,
help="Reconfigure all projects as we build")
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
           help='also build for tvOS, but disallow tests that require a tvOS '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
           help='also build for watchOS, but disallow tests that require a '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--swift-disable-dead-stripping', toggle_true,
help="Turn off Darwin-specific dead stripping for Swift host tools")
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--relocate-xdg-cache-home-under-build-subdir',
store_true,
help='relocate $XDG_CACHE_HOME to the same location '
'where build products will be placed; '
'this supports having multiple runs for different branches '
'in CI bots for Linux')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--native-swift-tools-path', store_path,
help='the path to a directory that contains prebuilt Swift tools '
'that are executable on the host platform')
option('--native-clang-tools-path', store_path,
help='the path to a directory that contains prebuilt Clang tools '
'that are executable on the host platform')
option('--native-llvm-tools-path', store_path,
help='the path to a directory that contains prebuilt LLVM tools '
'that are executable on the host platform')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--sccache', toggle_true,
default=os.environ.get('SWIFT_USE_SCCACHE') == '1',
help='use sccache')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--dsymutil-jobs', store_int,
default=defaults.DSYMUTIL_JOBS,
metavar='COUNT',
help='the maximum number of parallel dsymutil jobs to use when '
'extracting symbols. Tweak with caution, since dsymutil '
'is memory intensive.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semicolon-separated list of llvm components to install')
option('--libswift', store('libswift_mode'),
choices=['off', 'hosttools', 'bootstrapping', 'bootstrapping-with-hostlibs'],
const='hosttools',
default=None,
help='The libswift build mode. For details see libswift/README.md')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Space separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', toggle_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], toggle_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], toggle_true('build_llbuild'),
help='build llbuild')
option(['--back-deploy-concurrency'], toggle_true('build_backdeployconcurrency'),
help='build back-deployment support for concurrency')
option(['--install-back-deploy-concurrency'],
toggle_true('install_backdeployconcurrency'),
help='install back-deployment support libraries for concurrency')
option(['--libcxx'], toggle_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], toggle_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], toggle_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], toggle_true('build_swiftformat'),
help='build swift-format')
option(['--swiftevolve'], toggle_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--swiftdocc'], toggle_true('build_swiftdocc'),
help='build Swift DocC')
option(['--skip-early-swift-driver'], toggle_false('build_early_swift_driver'),
help='skip building the early swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current main')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--install-swiftdocc'], toggle_true('install_swiftdocc'),
help='install Swift DocC')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
toggle_true('install_playgroundsupport'),
help='install playground support')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], toggle_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option(['--clean-install-destdir'], store_true,
help='Clean the install destroot before building.')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
option('--darwin-symroot-path-filters', append,
type=argparse.ShellSplitType(),
help='Space separated list of patterns used to match '
'a subset of files to generate symbols for. '
'Only supported on Darwin. Can be called multiple times '
'to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
option(['--min-size-release'], store('build_variant'),
const='MinSizeRel',
help='build the MinSizeRel variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
'SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' causes
#       `-ti` to be treated as `-t=i` (see the commented illustration below).
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
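# A minimal standalone illustration of the nargs='?' pitfall (plain
# argparse; hypothetical parser, not part of this script):
#
#   import argparse
#   p = argparse.ArgumentParser()
#   p.add_argument('-t', nargs='?', const=True)
#   p.add_argument('-i', action='store_true')
#   args = p.parse_args(['-ti'])
#   print(args.t, args.i)  # 'i' False -- '-ti' was parsed as '-t=i'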
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build external benchmarks in addition to the Swift Benchmark '
'Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
default=False,
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-watchos-32bit-simulator',
toggle_false('test_watchos_32bit_simulator'),
default=True,
help='skip testing watchOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-clean-libdispatch', toggle_false('clean_libdispatch'),
help='skip cleaning up libdispatch')
option('--skip-clean-foundation', toggle_false('clean_foundation'),
help='skip cleaning up foundation')
option('--skip-clean-xctest', toggle_false('clean_xctest'),
help='skip cleaning up xctest')
option('--skip-clean-llbuild', toggle_false('clean_llbuild'),
help='skip cleaning up llbuild')
option('--clean-early-swift-driver', toggle_true('clean_early_swift_driver'),
help='Clean up the early SwiftDriver')
option('--skip-test-early-swift-driver',
store('test_early_swift_driver', const=False),
help='skip testing the early SwiftDriver against the host toolchain')
option('--skip-clean-swiftpm', toggle_false('clean_swiftpm'),
help='skip cleaning up swiftpm')
option('--skip-clean-swift-driver', toggle_false('clean_swift_driver'),
help='skip cleaning up Swift driver')
option('--skip-test-cmark', toggle_false('test_cmark'),
help='skip testing cmark')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
option('--skip-test-swiftdocc', toggle_false('test_swiftdocc'),
help='skip testing swift-docc')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
option('--llvm-ninja-targets', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'instead of the default ones. Only supported when using '
'ninja to build. Can be called multiple times '
'to add multiple such options.')
option('--llvm-ninja-targets-for-cross-compile-hosts', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'in cross compile hosts instead of the ones specified in '
'llvm-ninja-targets (or the default ones). '
'Can be called multiple times '
'to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64', 'x86_64'],
default='armv7',
help='The target architecture when building for Android. '
'Currently, only armv7, aarch64, and x86_64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
option('--enable-experimental-concurrency', toggle_true,
default=True,
help='Enable experimental Swift concurrency model.')
option('--enable-experimental-distributed', toggle_true,
default=True,
help='Enable experimental Swift distributed actors.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand abbreviated arguments like --skip-test-swift to the only
# known argument --skip-test-swiftevolve (illustrated below).
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
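# A minimal illustration of argparse's prefix matching (plain argparse;
# hypothetical parser, not part of this script):
#
#   import argparse
#   p = argparse.ArgumentParser()
#   p.add_argument('--skip-test-swiftevolve', action='store_true')
#   args = p.parse_args(['--skip-test-swift'])
#   print(args.skip_test_swiftevolve)  # True: the abbreviation silently
#   # matched, because allow_abbrev defaults to True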
# -------------------------------------------------------------------------
return builder.build()
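# Hedged usage sketch of the parser built above (assuming this function is
# exposed as create_argument_parser(); the name is illustrative):
#
#   parser = create_argument_parser()
#   args = parser.parse_args(['--release', '--test'])
#   assert args.build_variant == 'Release' and args.test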
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB) incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm-project
/swift
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects.
Clone the LLDB sources as well if you intend to use the -l, -L, --lldb,
or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and the Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \\
--test \\
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`.
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
|
|
import unittest
from jarn.mkrelease.mkrelease import ReleaseMaker
from jarn.mkrelease.testing import JailSetup
from jarn.mkrelease.testing import quiet
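# Minimal stand-in for a mkrelease server entry. The tests below pass either
# the class itself (all attributes None, i.e. unset) or a configured instance.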
class serverinfo:
sign = None
identity = None
register = None
def __init__(self, sign=None, identity=None, register=None):
self.sign = sign
self.identity = identity
self.register = register
class GetOptionsTests(JailSetup):
def test_defaults(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.skipcommit, False)
self.assertEqual(rm.skiptag, False)
self.assertEqual(rm.skipregister, False)
self.assertEqual(rm.skipupload, False)
self.assertEqual(rm.sign, False)
self.assertEqual(rm.push, True)
self.assertEqual(rm.develop, False)
self.assertEqual(rm.quiet, False)
self.assertEqual(rm.identity, '')
self.assertEqual(rm.formats, [])
self.assertNotEqual(rm.distributions, [])
self.assertNotEqual(rm.infoflags, [])
def test_dry_run(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
self.assertEqual(rm.skipcommit, True)
self.assertEqual(rm.skiptag, True)
self.assertEqual(rm.skipregister, True)
self.assertEqual(rm.skipupload, True)
self.assertEqual(rm.sign, False)
self.assertEqual(rm.push, True)
self.assertEqual(rm.develop, False)
self.assertEqual(rm.quiet, False)
self.assertEqual(rm.identity, '')
self.assertEqual(rm.formats, [])
self.assertNotEqual(rm.distributions, [])
self.assertNotEqual(rm.infoflags, [])
def test_dry_run_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
commit = no
tag = no
register = no
upload = no
""")
rm = ReleaseMaker(['-c', 'my.cfg'])
rm.get_options()
self.assertEqual(rm.skipcommit, True)
self.assertEqual(rm.skiptag, True)
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_register_server_precedence_yes(self):
# register=yes in server section overrides default
self.mkfile('my.cfg', """\
[mkrelease]
register = no
upload = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=True)})
self.assertEqual(rm.get_skipregister('pypi'), False)
self.assertEqual(rm.get_skipupload(), False)
def test_register_server_precedence_yes_upload_no(self):
# register=yes in server section does NOT override default if
# upload is disabled
self.mkfile('my.cfg', """\
[mkrelease]
register = no
upload = no
""")
rm = ReleaseMaker(['-c', 'my.cfg']) # -d not required
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=True)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_register_server_precedence_yes_upload_flag_no(self):
# register=yes in server section does NOT override default if
# -S flag is given
self.mkfile('my.cfg', """\
[mkrelease]
register = no
upload = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-S']) # -d not required
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=True)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_register_server_precedence_yes_upload_no_register_flag_no(self):
# register=yes in server section does NOT override default if
# -R flag is given
self.mkfile('my.cfg', """\
[mkrelease]
register = yes
upload = no
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-R']) # -d not required
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=True)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_register_server_precedence_no(self):
# register=no in server section overrides default
self.mkfile('my.cfg', """\
[mkrelease]
register = yes
upload = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=False)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), False)
def test_register_server_precedence_no_upload_no(self):
# register=no in server section overrides default independent
# of upload setting
self.mkfile('my.cfg', """\
[mkrelease]
register = yes
upload = no
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=False)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_register_server_precedence_no_upload_flag_no(self):
# register=no in server section overrides default independent
# of upload setting
self.mkfile('my.cfg', """\
[mkrelease]
register = yes
upload = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs', '-S'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(register=False)})
self.assertEqual(rm.get_skipregister('pypi'), True)
self.assertEqual(rm.get_skipupload(), True)
def test_build_without_upload(self):
# -RS makes -d requirement go away
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-R', '-S']) # -d not required
rm.get_options()
self.assertEqual(rm.skipregister, True)
self.assertEqual(rm.skipupload, True)
def test_formats(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-bgwz'])
rm.get_options()
self.assertEqual(rm.formats, ['egg', 'gztar', 'wheel', 'zip'])
self.assertEqual(rm.distributions, [
('bdist', ['--formats="egg"']),
('sdist', ['--formats="gztar"']),
('bdist_wheel', []),
('sdist', ['--formats="zip"']),
])
def test_duplicate_formats(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-zz'])
rm.get_options()
self.assertEqual(rm.formats, ['zip', 'zip'])
self.assertEqual(rm.distributions, [
('sdist', ['--formats="zip"']),
('sdist', ['--formats="zip"']),
])
def test_formats_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
formats = zip wheel
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
self.assertEqual(rm.formats, ['zip', 'wheel'])
self.assertEqual(rm.distributions, [
('sdist', ['--formats="zip"']),
('bdist_wheel', []),
])
def test_empty_formats_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
formats =
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
self.assertEqual(rm.formats, [])
# Fall back to the default distributions (gztar sdist + wheel)
self.assertEqual(rm.distributions, [('sdist', ['--formats="gztar"']), ('bdist_wheel', [])])
@quiet
def test_bad_formats_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
formats = rpm
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
self.assertEqual(rm.formats, ['rpm'])
# Fall back to the default distributions (gztar sdist + wheel)
self.assertEqual(rm.distributions, [('sdist', ['--formats="gztar"']), ('bdist_wheel', [])])
def test_develop(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-e', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.develop, True)
self.assertEqual(rm.infoflags, [])
# Implied --no-tag
self.assertEqual(rm.skiptag, True)
def test_develop_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
develop = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.develop, True)
self.assertEqual(rm.infoflags, [])
# No implied --no-tag here
self.assertEqual(rm.skiptag, False)
def test_misc(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-pq'])
rm.get_options()
self.assertEqual(rm.push, True)
self.assertEqual(rm.quiet, True)
def test_misc_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
push = yes
quiet = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
self.assertEqual(rm.push, True)
self.assertEqual(rm.quiet, True)
def test_sign(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-s'])
rm.get_options()
self.assertEqual(rm.sign, True)
self.assertEqual(rm.identity, '')
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign'])
def test_sign_and_id(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-s', '-i', 'fred@bedrock.com'])
rm.get_options()
self.assertEqual(rm.sign, True)
self.assertEqual(rm.identity, 'fred@bedrock.com')
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign', '--identity="fred@bedrock.com"'])
def test_id_only(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n', '-i', 'fred@bedrock.com'])
rm.get_options()
self.assertEqual(rm.sign, False)
self.assertEqual(rm.identity, 'fred@bedrock.com')
# Implied --sign
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign', '--identity="fred@bedrock.com"'])
def test_sign_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
sign = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign'])
def test_sign_and_id_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
sign = yes
identity = fred@bedrock.com
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign', '--identity="fred@bedrock.com"'])
def test_id_only_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
identity = fred@bedrock.com
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
# No implied --sign here
rm.defaults.servers.update({'pypi': serverinfo})
self.assertEqual(rm.get_uploadflags('pypi'), [])
def test_sign_server_precedence_yes(self):
self.mkfile('my.cfg', """\
[mkrelease]
sign = no
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(sign=True)})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign'])
def test_sign_server_precedence_no(self):
self.mkfile('my.cfg', """\
[mkrelease]
sign = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(sign=False)})
self.assertEqual(rm.get_uploadflags('pypi'), [])
def test_id_server_precedence(self):
self.mkfile('my.cfg', """\
[mkrelease]
sign = yes
identity = fred@bedrock.com
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-n'])
rm.get_options()
rm.defaults.servers.update({'pypi': serverinfo(identity='barney@rubble.com')})
self.assertEqual(rm.get_uploadflags('pypi'), ['--sign', '--identity="barney@rubble.com"'])
def test_prefer_manifest(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-m', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.manifest, True)
def test_prefer_manifest_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
manifest-only = yes
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.manifest, True)
def test_dist_location(self):
self.mkfile('my.cfg', """\
[mkrelease]
""")
rm = ReleaseMaker(['-c', 'my.cfg', '-d', 'jarn.com:eggs'])
rm.get_options()
self.assertEqual(rm.locations.locations, ['jarn.com:eggs'])
def test_dist_location_from_config(self):
self.mkfile('my.cfg', """\
[mkrelease]
dist-location = jarn.com:eggs
""")
rm = ReleaseMaker(['-c', 'my.cfg'])
rm.get_options()
self.assertEqual(rm.locations.locations, ['jarn.com:eggs'])
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.vision_v1p4beta1.types import product_search_service
class ListProductSetsPager:
"""A pager for iterating through ``list_product_sets`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductSetsResponse` object, and
provides an ``__iter__`` method to iterate through its
``product_sets`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListProductSets`` requests and continue to iterate
through the ``product_sets`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductSetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., product_search_service.ListProductSetsResponse],
request: product_search_service.ListProductSetsRequest,
response: product_search_service.ListProductSetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductSetsRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductSetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductSetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[product_search_service.ListProductSetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[product_search_service.ProductSet]:
for page in self.pages:
yield from page.product_sets
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListProductSetsAsyncPager:
"""A pager for iterating through ``list_product_sets`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductSetsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``product_sets`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListProductSets`` requests and continue to iterate
through the ``product_sets`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductSetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[product_search_service.ListProductSetsResponse]
],
request: product_search_service.ListProductSetsRequest,
response: product_search_service.ListProductSetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductSetsRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductSetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductSetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterator[product_search_service.ListProductSetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[product_search_service.ProductSet]:
async def async_generator():
async for page in self.pages:
for response in page.product_sets:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListProductsPager:
"""A pager for iterating through ``list_products`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductsResponse` object, and
provides an ``__iter__`` method to iterate through its
``products`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListProducts`` requests and continue to iterate
through the ``products`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., product_search_service.ListProductsResponse],
request: product_search_service.ListProductsRequest,
response: product_search_service.ListProductsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductsRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[product_search_service.ListProductsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[product_search_service.Product]:
for page in self.pages:
yield from page.products
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListProductsAsyncPager:
"""A pager for iterating through ``list_products`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``products`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListProducts`` requests and continue to iterate
through the ``products`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[product_search_service.ListProductsResponse]],
request: product_search_service.ListProductsRequest,
response: product_search_service.ListProductsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductsRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[product_search_service.ListProductsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[product_search_service.Product]:
async def async_generator():
async for page in self.pages:
for response in page.products:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListReferenceImagesPager:
"""A pager for iterating through ``list_reference_images`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse` object, and
provides an ``__iter__`` method to iterate through its
``reference_images`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListReferenceImages`` requests and continue to iterate
through the ``reference_images`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., product_search_service.ListReferenceImagesResponse],
request: product_search_service.ListReferenceImagesRequest,
response: product_search_service.ListReferenceImagesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListReferenceImagesRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListReferenceImagesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[product_search_service.ListReferenceImagesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[product_search_service.ReferenceImage]:
for page in self.pages:
yield from page.reference_images
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListReferenceImagesAsyncPager:
"""A pager for iterating through ``list_reference_images`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``reference_images`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListReferenceImages`` requests and continue to iterate
through the ``reference_images`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[product_search_service.ListReferenceImagesResponse]
],
request: product_search_service.ListReferenceImagesRequest,
response: product_search_service.ListReferenceImagesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListReferenceImagesRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListReferenceImagesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListReferenceImagesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterator[product_search_service.ListReferenceImagesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[product_search_service.ReferenceImage]:
async def async_generator():
async for page in self.pages:
for response in page.reference_images:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListProductsInProductSetPager:
"""A pager for iterating through ``list_products_in_product_set`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse` object, and
provides an ``__iter__`` method to iterate through its
``products`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListProductsInProductSet`` requests and continue to iterate
through the ``products`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., product_search_service.ListProductsInProductSetResponse],
request: product_search_service.ListProductsInProductSetRequest,
response: product_search_service.ListProductsInProductSetResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductsInProductSetRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductsInProductSetRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(
self,
) -> Iterator[product_search_service.ListProductsInProductSetResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[product_search_service.Product]:
for page in self.pages:
yield from page.products
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListProductsInProductSetAsyncPager:
"""A pager for iterating through ``list_products_in_product_set`` requests.
This class thinly wraps an initial
:class:`google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse` object, and
provides an ``__aiter__`` method to iterate through its
``products`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListProductsInProductSet`` requests and continue to iterate
through the ``products`` field on the
corresponding responses.
All the usual :class:`google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[product_search_service.ListProductsInProductSetResponse]
],
request: product_search_service.ListProductsInProductSetRequest,
response: product_search_service.ListProductsInProductSetResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vision_v1p4beta1.types.ListProductsInProductSetRequest):
The initial request object.
response (google.cloud.vision_v1p4beta1.types.ListProductsInProductSetResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = product_search_service.ListProductsInProductSetRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterator[product_search_service.ListProductsInProductSetResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[product_search_service.Product]:
async def async_generator():
async for page in self.pages:
for response in page.products:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_rs
short_description: Manage KubeVirt virtual machine replica sets
description:
    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine replica sets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine replica sets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine replica set.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine replica set exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine."
required: true
type: dict
replicas:
description:
            - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
            - Replicas defaults to 1 for a newly created replica set.
type: int
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine replica set 'myvmir'
kubevirt_rs:
state: present
name: myvmir
namespace: vms
wait: true
replicas: 3
memory: 64M
labels:
myvmi: myvmi
selector:
matchLabels:
myvmi: myvmi
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Remove virtual machine replica set 'myvmir'
kubevirt_rs:
state: absent
name: myvmir
namespace: vms
wait: true
'''
RETURN = '''
kubevirt_rs:
description:
    - The virtual machine replica set managed by the user.
    - "This dictionary contains all values returned by the KubeVirt API; all options
       are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
)
KIND = 'VirtualMachineInstanceReplicaSet'
VMIR_ARG_SPEC = {
'replicas': {'type': 'int'},
'selector': {'type': 'dict'},
}
class KubeVirtVMIRS(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
return argument_spec
def wait_for_replicas(self, replicas):
""" Wait for ready_replicas to equal the requested number of replicas. """
resource = self.find_supported_resource(KIND)
return_obj = None
for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
readyReplicas = status.get('readyReplicas', 0)
if readyReplicas == replicas:
return_obj = entity
break
if not return_obj:
self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas is None:
self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas != replicas:
self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
"the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
return return_obj.to_dict()
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
selector = self.params.get('selector')
replicas = self.params.get('replicas')
if selector:
definition['spec']['selector'] = selector
if replicas is not None:
definition['spec']['replicas'] = replicas
# defaults for template
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
        # Execute the CRUD of the VM:
template = definition['spec']['template']
dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
result_crud = self.execute_crud(KIND, definition)
changed = result_crud['changed']
result = result_crud.pop('result')
# When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
# wait logic work correctly
if changed and result_crud['method'] == 'create' and replicas is None:
replicas = 1
# Wait for the new number of ready replicas after a CRUD update
# Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
# Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
# achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
result = self.wait_for_replicas(replicas)
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_rs': result,
'result': result_crud,
})
def main():
module = KubeVirtVMIRS()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
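# Behavior sketch (illustrative): for the first EXAMPLES entry above, the
# module builds roughly the following definition before calling
# execute_crud():
#
#   {'spec': {'replicas': 3,
#             'selector': {'matchLabels': {'myvmi': 'myvmi'}},
#             'template': ...}}   # filled in by construct_vm_definition()
#
# With state=present and wait=true it then blocks in wait_for_replicas()
# until status.readyReplicas reaches the requested count or wait_timeout
# expires.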
|
|
from __future__ import print_function
import copy
import sys
import math
import numpy as np
from sas.sascalc.dataloader.data_info import Data1D
from sas.sascalc.dataloader.data_info import Data2D
_SMALLVALUE = 1.0e-10
class FitHandler(object):
"""
Abstract interface for fit thread handler.
The methods in this class are called by the optimizer as the fit
progresses.
Note that it is up to the optimizer to call the fit handler correctly,
reporting all status changes and maintaining the 'done' flag.
"""
done = False
"""True when the fit job is complete"""
result = None
"""The current best result of the fit"""
def improvement(self):
"""
Called when a result is observed which is better than previous
results from the fit.
result is a FitResult object, with parameters, #calls and fitness.
"""
def error(self, msg):
"""
Model had an error; print traceback
"""
def progress(self, current, expected):
"""
Called each cycle of the fit, reporting the current and the
expected amount of work. The meaning of these values is
optimizer dependent, but they can be converted into a percent
complete using (100*current)//expected.
Progress is updated each iteration of the fit, whatever that
means for the particular optimization algorithm. It is called
after any calls to improvement for the iteration so that the
update handler can control I/O bandwidth by suppressing
intermediate improvements until the fit is complete.
"""
def finalize(self):
"""
Fit is complete; best results are reported
"""
def abort(self):
"""
Fit was aborted.
"""
# TODO: not sure how these are used, but they are needed for running the fit
def update_fit(self, last=False): pass
def set_result(self, result=None): self.result = result
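# A minimal concrete handler (illustrative sketch, not part of the original
# module): an optimizer would call progress() each cycle, improvement() when
# a better result is found, and finalize() at the end.
class ConsoleFitHandler(FitHandler):
    """Report fit progress on stdout."""
    def progress(self, current, expected):
        print("fit %3d%% complete" % ((100 * current) // expected))
    def improvement(self):
        if self.result is not None:
            print("new best fitness: %s" % self.result.fitness)
    def finalize(self):
        self.done = True
        print("fit finished; best result: %s" % self.result)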
class Model:
"""
Fit wrapper for SAS models.
"""
def __init__(self, sas_model, sas_data=None, **kw):
"""
:param sas_model: the sas model to wrap for fitting
"""
self.model = sas_model
self.name = sas_model.name
self.data = sas_data
def get_params(self, fitparams):
"""
        Return a list of values for the parameters to fit
        :param fitparams: list of parameter names to fit
"""
return [self.model.getParam(k) for k in fitparams]
def set_params(self, paramlist, params):
"""
        Set values for the parameters to fit
        :param paramlist: list of parameter names to fit
        :param params: list of values for the parameters to fit
"""
        for k, v in zip(paramlist, params):
            self.model.setParam(k, v)
def set(self, **kw):
self.set_params(*zip(*kw.items()))
def eval(self, x):
"""
Override eval method of model.
:param x: the x value used to compute a function
"""
        return self.model.evalDistribution(x)
def eval_derivs(self, x, pars=[]):
"""
Evaluate the model and derivatives wrt pars at x.
pars is a list of the names of the parameters for which derivatives
are desired.
This method needs to be specialized in the model to evaluate the
        model function. Alternatively, the model can implement its own
version of residuals which calculates the residuals directly
instead of calling eval.
"""
raise NotImplementedError('no derivatives available')
def __call__(self, x):
return self.eval(x)
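# Wrapping sketch (hedged): `kernel` stands for any object exposing
# getParam/setParam/evalDistribution, e.g. a sasmodels model -- hypothetical
# here:
#
#   m = Model(kernel)
#   m.set(radius=60.0, scale=1.0)         # forwards to kernel.setParam
#   y = m(np.linspace(0.001, 0.3, 50))    # __call__ -> evalDistribution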
class FitData1D(Data1D):
"""
Wrapper class for SAS data
FitData1D inherits from DataLoader.data_info.Data1D. Implements
a way to get residuals from data.
"""
def __init__(self, x, y, dx=None, dy=None, smearer=None, data=None, lam=None, dlam=None):
"""
:param smearer: is an object of class QSmearer or SlitSmearer
that will smear the theory data (slit smearing or resolution
smearing) when set.
The proper way to set the smearing object would be to
do the following: ::
from sas.sascalc.data_util.qsmearing import smear_selection
smearer = smear_selection(some_data)
fitdata1d = FitData1D( x= [1,3,..,],
y= [3,4,..,8],
dx=None,
dy=[1,2...], smearer= smearer)
        :Note: some_data _HAS_ to be of
class DataLoader.data_info.Data1D
Setting it back to None will turn smearing off.
"""
Data1D.__init__(self, x=x, y=y, dx=dx, dy=dy, lam=lam, dlam=dlam)
self.num_points = len(x)
self.sas_data = data
self.smearer = smearer
self._first_unsmeared_bin = None
self._last_unsmeared_bin = None
# Check error bar; if no error bar found, set it constant(=1)
# TODO: Should provide an option for users to set it like percent,
# constant, or dy data
        dy = None if dy is None else np.asarray(dy)
        if dy is None or dy.size == 0 or not dy.all():
            self.dy = np.ones(len(y))
        else:
            self.dy = dy.copy()
## Min Q-value
#Skip the Q=0 point, especially when y(q=0)=None at x[0].
if min(self.x) == 0.0 and self.x[0] == 0 and\
not np.isfinite(self.y[0]):
self.qmin = min(self.x[self.x != 0])
else:
self.qmin = min(self.x)
## Max Q-value
self.qmax = max(self.x)
# Range used for input to smearing
self._qmin_unsmeared = self.qmin
self._qmax_unsmeared = self.qmax
# Identify the bin range for the unsmeared and smeared spaces
self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
& (self.x <= self._qmax_unsmeared)
def set_fit_range(self, qmin=None, qmax=None):
""" to set the fit range"""
# Skip Q=0 point, (especially for y(q=0)=None at x[0]).
# ToDo: Find better way to do it.
        if qmin == 0.0 and not np.isfinite(self.y[0]):
self.qmin = min(self.x[self.x != 0])
elif qmin is not None:
self.qmin = qmin
if qmax is not None:
self.qmax = qmax
# Determine the range needed in unsmeared-Q to cover
# the smeared Q range
self._qmin_unsmeared = self.qmin
self._qmax_unsmeared = self.qmax
self._first_unsmeared_bin = 0
self._last_unsmeared_bin = len(self.x) - 1
if self.smearer is not None:
self._first_unsmeared_bin, self._last_unsmeared_bin = \
self.smearer.get_bin_range(self.qmin, self.qmax)
self._qmin_unsmeared = self.x[self._first_unsmeared_bin]
self._qmax_unsmeared = self.x[self._last_unsmeared_bin]
# Identify the bin range for the unsmeared and smeared spaces
self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
## zero error can not participate for fitting
self.idx = self.idx & (self.dy != 0)
self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
& (self.x <= self._qmax_unsmeared)
def get_fit_range(self):
"""
Return the range of data.x to fit
"""
return self.qmin, self.qmax
def size(self):
"""
Number of measurement points in data set after masking, etc.
"""
return len(self.x)
def residuals(self, fn):
"""
Compute residuals.
        If self.smearer has been set, use it to smear
        the theory data before computing chi squared.
:param fn: function that return model value
:return: residuals
"""
# Compute theory data f(x)
fx = np.zeros(len(self.x))
fx[self.idx_unsmeared] = fn(self.x[self.idx_unsmeared])
## Smear theory data
if self.smearer is not None:
fx = self.smearer(fx, self._first_unsmeared_bin,
self._last_unsmeared_bin)
## Sanity check
if np.size(self.dy) != np.size(fx):
msg = "FitData1D: invalid error array "
msg += "%d <> %d" % (np.shape(self.dy), np.size(fx))
raise RuntimeError, msg
return (self.y[self.idx] - fx[self.idx]) / self.dy[self.idx], fx[self.idx]
def residuals_deriv(self, model, pars=[]):
"""
        :return: residuals derivatives
        :note: in this case just returns an empty array
"""
return []
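# Chi-squared sketch (illustrative helper, not used by the module): the
# weighted residuals returned by FitData1D.residuals() make chi^2 a plain
# sum of squares.
def _chisqr_from_residuals(fitdata, fn):
    """Return chi^2 of callable model `fn` against a FitData1D `fitdata`."""
    res, _theory = fitdata.residuals(fn)
    return np.sum(res ** 2)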
class FitData2D(Data2D):
"""
Wrapper class for SAS data
"""
def __init__(self, sas_data2d, data=None, err_data=None):
Data2D.__init__(self, data=data, err_data=err_data)
# Data can be initialized with a sas plottable or with vectors.
self.res_err_image = []
self.num_points = 0 # will be set by set_data
self.idx = []
self.qmin = None
self.qmax = None
self.smearer = None
self.radius = 0
self.res_err_data = []
self.sas_data = sas_data2d
self.set_data(sas_data2d)
def set_data(self, sas_data2d, qmin=None, qmax=None):
"""
Determine the correct qx_data and qy_data within range to fit
"""
self.data = sas_data2d.data
self.err_data = sas_data2d.err_data
self.qx_data = sas_data2d.qx_data
self.qy_data = sas_data2d.qy_data
self.mask = sas_data2d.mask
x_max = max(math.fabs(sas_data2d.xmin), math.fabs(sas_data2d.xmax))
y_max = max(math.fabs(sas_data2d.ymin), math.fabs(sas_data2d.ymax))
## fitting range
if qmin is None:
self.qmin = 1e-16
if qmax is None:
self.qmax = math.sqrt(x_max * x_max + y_max * y_max)
## new error image for fitting purpose
        if self.err_data is None or np.size(self.err_data) == 0:
self.res_err_data = np.ones(len(self.data))
else:
self.res_err_data = copy.deepcopy(self.err_data)
#self.res_err_data[self.res_err_data==0]=1
self.radius = np.sqrt(self.qx_data**2 + self.qy_data**2)
# Note: mask = True: for MASK while mask = False for NOT to mask
self.idx = ((self.qmin <= self.radius) &\
(self.radius <= self.qmax))
self.idx = (self.idx) & (self.mask)
self.idx = (self.idx) & (np.isfinite(self.data))
self.num_points = np.sum(self.idx)
def set_smearer(self, smearer):
"""
Set smearer
"""
if smearer is None:
return
self.smearer = smearer
self.smearer.set_index(self.idx)
self.smearer.get_data()
def set_fit_range(self, qmin=None, qmax=None):
"""
To set the fit range
"""
if qmin == 0.0:
self.qmin = 1e-16
elif qmin is not None:
self.qmin = qmin
if qmax is not None:
self.qmax = qmax
self.radius = np.sqrt(self.qx_data**2 + self.qy_data**2)
self.idx = ((self.qmin <= self.radius) &\
(self.radius <= self.qmax))
self.idx = (self.idx) & (self.mask)
self.idx = (self.idx) & (np.isfinite(self.data))
self.idx = (self.idx) & (self.res_err_data != 0)
def get_fit_range(self):
"""
return the range of data.x to fit
"""
return self.qmin, self.qmax
def size(self):
"""
Number of measurement points in data set after masking, etc.
"""
return np.sum(self.idx)
def residuals(self, fn):
"""
return the residuals
"""
if self.smearer is not None:
fn.set_index(self.idx)
gn = fn.get_value()
else:
gn = fn([self.qx_data[self.idx],
self.qy_data[self.idx]])
# use only the data point within ROI range
res = (self.data[self.idx] - gn) / self.res_err_data[self.idx]
return res, gn
def residuals_deriv(self, model, pars=[]):
"""
        :return: residuals derivatives
        :note: in this case just returns an empty array
"""
return []
class FitAbort(Exception):
"""
    Exception raised to stop the fit
"""
class FitEngine:
def __init__(self):
"""
Base class for the fit engine
"""
        # Dictionary of FitArrange elements (fit problems)
self.fit_arrange_dict = {}
self.fitter_id = None
def set_model(self, model, id, pars=[], constraints=[], data=None):
"""
        Set a model for a given id in the fit engine.
:param model: sas.models type
:param id: is the key of the fitArrange dictionary where model is saved as a value
:param pars: the list of parameters to fit
        :param constraints: list of
            tuples (parameter name, parameter value expression);
            the value must be a string to constrain two different
            parameters.
            Example:
            to fit two models M1 and M2 that both have parameters A and B,
            constraints can be ``constraints = [('M1.A', 'M2.B + 2'), ('M1.B', 'M2.A * 5'), ...]``
        :note: pars must contain only names of existing model parameters
"""
if not pars:
raise ValueError("no fitting parameters")
if model is None:
raise ValueError("no model to fit")
if not issubclass(model.__class__, Model):
model = Model(model, data)
sasmodel = model.model
available_parameters = sasmodel.getParamList()
for p in pars:
if p not in available_parameters:
raise ValueError("parameter %s not available in model %s; use one of [%s] instead"
%(p, sasmodel.name, ", ".join(available_parameters)))
if id not in self.fit_arrange_dict:
self.fit_arrange_dict[id] = FitArrange()
self.fit_arrange_dict[id].set_model(model)
self.fit_arrange_dict[id].pars = pars
self.fit_arrange_dict[id].vals = [sasmodel.getParam(name) for name in pars]
self.fit_arrange_dict[id].constraints = constraints
def set_data(self, data, id, smearer=None, qmin=None, qmax=None):
"""
        Receives a plottable, creates a list of data to fit, sets the data
        in a FitArrange object and adds that object to a dictionary
        with key id.
:param data: data added
:param id: unique key corresponding to a fitArrange object with data
"""
if data.__class__.__name__ == 'Data2D':
fitdata = FitData2D(sas_data2d=data, data=data.data,
err_data=data.err_data)
else:
fitdata = FitData1D(x=data.x, y=data.y,
dx=data.dx, dy=data.dy, smearer=smearer)
fitdata.sas_data = data
fitdata.set_fit_range(qmin=qmin, qmax=qmax)
        # A FitArrange object may already exist at this id, containing only a model
if id in self.fit_arrange_dict:
self.fit_arrange_dict[id].add_data(fitdata)
else:
            # no FitArrange object has been created with this id
fitproblem = FitArrange()
fitproblem.add_data(fitdata)
self.fit_arrange_dict[id] = fitproblem
def get_model(self, id):
"""
:param id: id is key in the dictionary containing the model to return
:return: a model at this id or None if no FitArrange element was
created with this id
"""
if id in self.fit_arrange_dict:
return self.fit_arrange_dict[id].get_model()
else:
return None
def remove_fit_problem(self, id):
"""remove fitarrange in id"""
if id in self.fit_arrange_dict:
del self.fit_arrange_dict[id]
def select_problem_for_fit(self, id, value):
"""
        Select the model/data pair at the given id in the dictionary
        and set its self.selected value to value.
        :param value: flag that allows fitting; must be 0 or 1
"""
if id in self.fit_arrange_dict:
self.fit_arrange_dict[id].set_to_fit(value)
def get_problem_to_fit(self, id):
"""
        Return the self.selected value of the fit problem with the given id
        :param id: the id of the problem
        """
        if id in self.fit_arrange_dict:
            return self.fit_arrange_dict[id].get_to_fit()
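# Wiring sketch (hedged; `sas_model` and `data1d` stand for a real SAS model
# and a loaded Data1D -- hypothetical here).  Model and data are paired by id:
#
#   engine = FitEngine()
#   engine.set_model(sas_model, id=1, pars=['radius', 'scale'])
#   engine.set_data(data1d, id=1, qmin=0.001, qmax=0.3)
#   engine.select_problem_for_fit(1, 1)   # mark problem 1 as selected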
class FitArrange:
def __init__(self):
"""
        Class FitArrange contains a set of data for a given model
        to perform the fit. FitArrange must contain exactly one model
        and at least one data set for the fit to be performed.
        model: the model selected by the user
        Ldata: a list of data the user wants to fit
"""
self.model = None
self.data_list = []
self.pars = []
self.vals = []
self.selected = 0
def set_model(self, model):
"""
        set_model saves a reference to the model
:param model: the model being set
"""
self.model = model
def add_data(self, data):
"""
        add_data fills self.data_list with data to fit
:param data: Data to add in the list
"""
        if data not in self.data_list:
self.data_list.append(data)
def get_model(self):
"""
:return: saved model
"""
return self.model
def get_data(self):
"""
        :return: the first data set in data_list
"""
return self.data_list[0]
def remove_data(self, data):
"""
Remove one element from the list
:param data: Data to remove from data_list
"""
if data in self.data_list:
self.data_list.remove(data)
def set_to_fit(self, value=0):
"""
        Set self.selected to 0 or 1; raise ValueError for any other value
        :param value: integer, either 0 or 1
        """
        if value not in (0, 1):
            raise ValueError("value must be 0 or 1, got %r" % (value,))
        self.selected = value
def get_to_fit(self):
"""
return self.selected value
"""
return self.selected
class FResult(object):
"""
    Stores a fit result
"""
def __init__(self, model=None, param_list=None, data=None):
self.calls = None
self.fitness = None
self.chisqr = None
self.pvec = []
self.cov = []
self.info = None
self.mesg = None
self.success = None
self.stderr = None
self.residuals = []
self.index = []
self.model = model
self.data = data
self.theory = []
self.param_list = param_list
self.iterations = 0
self.inputs = []
self.fitter_id = None
if self.model is not None and self.data is not None:
self.inputs = [(self.model, self.data)]
def set_model(self, model):
"""
"""
self.model = model
def set_fitness(self, fitness):
"""
"""
self.fitness = fitness
def __str__(self):
"""
"""
if self.pvec is None and self.model is None and self.param_list is None:
return "No results"
sasmodel = self.model.model
pars = enumerate(sasmodel.getParamList())
msg1 = "[Iteration #: %s ]" % self.iterations
msg3 = "=== goodness of fit: %s ===" % (str(self.fitness))
msg2 = ["P%-3d %s......|.....%s" % (i, v, sasmodel.getParam(v))
for i,v in pars if v in self.param_list]
msg = [msg1, msg3] + msg2
return "\n".join(msg)
def print_summary(self):
"""
"""
print(str(self))
|
|
from confess import app
from confess.utils import *
from confess.constants import *
from confess.models.user import *
from confess.models.post import *
from confess.models.vote import *
from confess.models.comment import *
from confess.controllers.comment import get_comment_dict
import os
import json
from flask import (
send_from_directory,
request,
redirect,
render_template
)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static'),
'favicon.png',
mimetype='image/png'
)
# Display the home page
@app.route('/')
@get_user()
def index():
mit = is_mit(request)
# Find post id if any
p_id = -1
for k, v in request.args.items():
if v == '':
try:
p_id = int(k)
            except Exception:
pass
if 'p' in request.args:
try:
p = int(request.args['p'])
        except Exception:
return "Error"
else:
p = 0
sel = 'hot'
if 's' in request.args:
s = request.args['s']
if s == 'top':
order = Post.top.desc()
sel = s
elif s == 'new':
order = Post.timestamp.desc()
sel = s
else:
order = Post.hn_score.desc()
else:
order = Post.hn_score.desc()
if p_id < 0:
posts = db.session.query(Post).order_by(order).limit(PAGE_SIZE).offset(PAGE_SIZE*p)
else:
posts = Post.query.filter_by(id=p_id)
# This iteration is slow, but since it's only PAGE_SIZE for now, it's kinda fine
votes = []
comment_counts = []
if user:
for pp in posts:
v = Vote.query.filter((Vote.post_id == pp.id) & (Vote.user == user.id)).first()
comment_counts.append(Comment.query.filter_by(post_id = pp.id).count())
if v:
votes.append(v.value)
else:
votes.append(0)
else:
for pp in posts:
comment_counts.append(Comment.query.filter_by(post_id = pp.id).count())
votes.append(0)
last_page = False
if len(votes) < PAGE_SIZE:
last_page = True
comments = None
c_p = None
if p_id >= 0:
comments = get_comment_dict(p_id, user)
c_p = posts[0].message
error = None
if 'error' in request.args:
error = request.args['error']
error = None if error == '' else error
return render_template('home.html',
user=user,
posts=zip(posts, votes, comment_counts),
sel=sel, page=p,
last_page=last_page, p_id=p_id,
comments=comments, error=error, c_p=c_p,
mit=mit)
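# URL sketch (illustrative): /?s=top&p=2 renders the third page ordered by
# Post.top, /?s=new orders by timestamp, and anything else falls back to the
# hn_score "hot" ranking; /?<post_id> (a bare query key with an empty value)
# opens that post's comment view.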
def gen_vote_status(final, c):
return json.dumps({'vote': final, 'count': c})
@app.route('/vote/<int:id>/<vote>')
@requires_auth()
def cast_vote(id, vote):
my_votes = list(Vote.query.filter((Vote.post_id == id) & (Vote.user == user.id)))
assert len(my_votes) <= 1
post = Post.query.filter_by(id=id).first()
if not post:
return "Post doesn't exist."
if vote == "upvote":
vote_parsed = 1
elif vote == "downvote":
vote_parsed = -1
else:
return "Incorrect vote type."
if len(my_votes) == 0:
# Never voted
v = Vote(post_id=id, user=user.id, value=vote_parsed)
db.session.add(v)
if vote_parsed > 0:
post.upvotes += 1
else:
post.downvotes += 1
db.session.commit()
return gen_vote_status(vote_parsed, post.upvotes - post.downvotes)
my_vote = my_votes[0]
if my_vote.value == vote_parsed:
# Repeating the same vote means removing the vote.
Vote.query.filter_by(id=my_vote.id).delete()
if vote_parsed > 0:
# Original vote was upvote
post.upvotes -= 1
else:
# Original vote was downvote
post.downvotes -= 1
db.session.commit()
return gen_vote_status(0, post.upvotes - post.downvotes)
# Else we just change the value.
my_vote.value = vote_parsed
if vote_parsed > 0:
# Downvote -> Upvote
post.downvotes -= 1
post.upvotes += 1
else:
post.downvotes += 1
post.upvotes -= 1
db.session.commit()
return gen_vote_status(vote_parsed, post.upvotes - post.downvotes)
@app.route('/comment/vote/<int:id>/<vote>')
@requires_auth()
def cast_comment_vote(id, vote):
my_votes = list(CommentVote.query.filter((CommentVote.comment_id == id) & (CommentVote.user == user.id)))
assert len(my_votes) <= 1
post = Comment.query.filter_by(id=id).first()
if not post:
return "Post doesn't exist."
if vote == "upvote":
vote_parsed = 1
elif vote == "downvote":
vote_parsed = -1
else:
return "Incorrect vote type."
if len(my_votes) == 0:
# Never voted
v = CommentVote(comment_id=id, user=user.id, value=vote_parsed)
db.session.add(v)
if vote_parsed > 0:
post.upvotes += 1
else:
post.downvotes += 1
db.session.commit()
return gen_vote_status(vote_parsed, post.upvotes - post.downvotes)
my_vote = my_votes[0]
if my_vote.value == vote_parsed:
# Repeating the same vote means removing the vote.
CommentVote.query.filter_by(id=my_vote.id).delete()
if vote_parsed > 0:
# Original vote was upvote
post.upvotes -= 1
else:
# Original vote was downvote
post.downvotes -= 1
db.session.commit()
return gen_vote_status(0, post.upvotes - post.downvotes)
# Else we just change the value.
my_vote.value = vote_parsed
if vote_parsed > 0:
# Downvote -> Upvote
post.downvotes -= 1
post.upvotes += 1
else:
post.downvotes += 1
post.upvotes -= 1
db.session.commit()
return gen_vote_status(vote_parsed, post.upvotes - post.downvotes)
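# The two vote routes above share the same toggle/flip rules.  A pure sketch
# of that state transition (illustrative helper, not used by the routes):
def _vote_transition(previous, requested):
    """Given the caller's stored vote (-1, 0 or +1) and the requested vote
    (-1 or +1), return the vote that ends up stored."""
    if previous == requested:
        return 0          # repeating the same vote removes it
    return requested      # first vote, or a flip to the other direction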
@app.route('/app/name')
def meta_name():
return app.CONFIG['APP_NAME']
|
|
import json
import sys
from glob import glob
from os import environ, remove, listdir
from os.path import exists
from time import sleep
from socket import gethostbyname
from shutil import copytree, rmtree, ignore_patterns
from multiprocessing import Value
from fabric.api import task, serial, parallel, runs_once, \
run, local, cd, env, get, put, settings, hide
from fabric.utils import abort
from fabric.colors import red
from fabric.contrib.files import upload_template, append, sed
from fabric.decorators import with_settings
from fablib import execute_task_name, rexists
from fablib.decorators import retry
from fablib.logging import log_success, log_error, log_info, log_warn
import platforms
env.PROFILE = environ.get("SWIFT_CLUSTER_PROFILE")
env.NAME = environ.get("SWIFT_CLUSTER_NAME")
env.TEST_CONFIG = environ.get("SWIFT_TEST_CONFIG_FILE")
env.host_prefix = env.NAME
env.abort_on_prompts = True
env.running_hosts = 0
env.disable_known_hosts = True
env.use_ssh_config = True
env.user = 'root'
env.shell = '/bin/bash -c'
env.roledefs = {
'proxy': [],
'storage': [],
}
running_hosts = Value('H', 0)
packaged_code = Value('H', 0)
total_hosts = Value('H', 0)
tests_running = Value('H', 0)
prep_auth_done = Value('H', 0)
PlatformManager = None
platform = None
def platform_init():
global PlatformManager
global platform
namespace, class_name = config('platform')
try:
log_info('Loading %s in %s' % (class_name, namespace))
PlatformManager = platforms.load_manager(namespace, class_name)
except Exception as e:
log_error('Cannot load %s from %s: %s' % (class_name, namespace, e))
sys.exit(1)
helpers = {'log_success': log_success,
'log_info': log_info,
'log_warn': log_warn,
'log_error': log_error,
'execute_handler': execute_task_name,
'run_handler': run,
'host_broker_handler': host_broker_handler,
'rexists': rexists}
platform = PlatformManager(config('platform_options'), **helpers)
def host_broker_handler(vmhost):
reimage_vm.hosts = [vmhost]
create_datadisks.hosts = [vmhost]
def load_profile(profile_name):
profile = json.load(open("profiles/%s" % profile_name))
if 'platform' not in profile:
log_error('Profile %s does not include a platform key, skipping' %
profile_name)
return None
return profile
def hosts_by_prefix(prefix):
return ['%s%d.stack.local' % (prefix, d,) for d in xrange(1, 5)]
def index_by_host(host):
return env.hosts.index(host)
def roledefs_from_hosts():
env.roledefs['proxy'].append(env.hosts[0])
env.roledefs['storage'] = env.hosts[1:]
# To load the correct profile and cluster name, we need
# these two values.
if not env.PROFILE or not env.NAME:
print "Critical environment variables missing"
print "\tSWIFT_CLUSTER_PROFILE=(debian-6-libvirt|" \
"debian-6-softlayer-cci|" \
"ubuntu-1204-libvirt|"\
"freebsd-libvirt)"
print "\tSWIFT_CLUSTER_NAME=swdev"
sys.exit(1)
# Load the profile into our config constant and generate
# the hosts list. This modifies the env.
CONFIG = {}
for profile in listdir("profiles"):
p = load_profile(profile)
if not p:
continue
CONFIG[profile.split(".")[0]] = p
if not env.roles:
env.roles = ['proxy', 'storage']
# Generate a list of hosts from the environment if a list
# was not provided as an argument.
if not env.hosts:
env.hosts = hosts_by_prefix(env.host_prefix)
# Regardless of how we got here, the roledefs must be
# assigned.
roledefs_from_hosts()
def current_profile(host=None):
profile = env.PROFILE
# We need to be able to also pass this in for reimaging.
host = host or env.host
# If our profile environment setting isn't a comma-separated list,
# return it as its value every time.
# eg: debian == debian,debian,debian,debian
    if ',' not in profile:
return profile
return profile.split(',')[env.hosts.index(host)]
def config(key, host=None):
return CONFIG[current_profile(host)][key]
def config_package_index(name, package_set="packages"):
for idx, pkg in enumerate(config(package_set)):
if pkg[0] == name:
return idx
return None
def enable_service(service):
if current_profile() == "freebsd":
__enable_service_freebsd(service)
elif current_profile() in ["ubuntu", "debian"]:
__enable_service_ubuntu(service)
def service(service, action='start'):
svc_cmd = config("service_manager")
log_info("Attempting to %s %s" % (action, service))
result = run(svc_cmd.format(service=service, action=action), pty=False)
return result and result.succeeded
def __enable_service_ubuntu(service):
sed("/etc/default/{service}".format(service=service.lower()),
"{service}_ENABLE=.*".format(service=service.upper()),
"{service}_ENABLE=true".format(service=service.upper()))
def __enable_service_freebsd(service):
append("/etc/rc.conf", "{service}_enable=YES".format(
service=service.lower()), escape=False)
def format_drives():
if current_profile() == "freebsd":
__format_drives_freebsd()
elif current_profile() in ["ubuntu", "debian"]:
__format_drives_ubuntu()
def __format_drives_freebsd():
for zone, disk in enumerate(['ada'] * config("zone_count"), 1):
device = "{1}{0}".format(zone, disk)
run("zpool labelclear -f /dev/{device}".format(device=device))
run("zpool create -m /srv/node/disk{zone} {device} /dev/{device}"
.format(device=device, zone=zone))
def __format_drives_ubuntu():
disks = run("ls -1 /dev/sd[b-z]").split()
for zone, disk in enumerate(disks, 1):
run("sgdisk -Z %s || true" % disk)
run("sgdisk --clear %s" % disk)
run("sgdisk -N 1 %s" % disk) # default is linux data
run("mkfs.xfs -f %s1" % disk)
append("/etc/fstab", "%s1 /srv/node/disk%d xfs "
"noatime 0 2" % (disk, zone))
run("mkdir -p /srv/node/disk%d" % zone)
run("mount /srv/node/disk%d" % zone)
def get_address(host, private=False):
if private:
host = host.replace(".", ".p.", 1)
return gethostbyname(host)
def get_short_name(host):
return host.split('.', 1)[0]
def current_role(role):
return env.host in env.roledefs[role]
@task
@parallel(5)
@with_settings(hide('stdout'))
def cluster_prep(name=None):
run("hostname %s" % (env.host_string))
run("cp -f /usr/share/zoneinfo/America/Chicago /etc/localtime")
run("cp -f /etc/motd /etc/motd.bak")
run("echo '' > /etc/motd")
if rexists("/etc/rc.conf"):
sed('/etc/rc.conf', '^hostname=.*$', 'hostname="%s"' %
(env.host_string))
elif rexists("/etc/hostname"):
run("echo '' > /etc/hostname")
append("/etc/hostname", env.host_string)
zshrc = "config/%s/.zshrc" % name
if exists(zshrc) and rexists("/usr/local/bin/zsh"):
put(zshrc, "~")
run("chsh -s /usr/local/bin/zsh")
@runs_once
def check_swift_package_deps():
for package in config("packages"):
pkg, branch, url = package
if not exists("work/%s" % pkg):
log_error("Cannot locate %s." % pkg)
abort(red("Please run 'fab swift_deps' first."))
def swift_package_set():
packages = set()
for host in env.hosts:
for package in config("packages", host):
packages.add(tuple(package))
return packages
@task
@runs_once
def swift_package():
if packaged_code.value == True: # NOQA
return
packaged_code.value = True
for package in swift_package_set():
pkg, branch, url = package
try:
rmtree("tmp/%s" % pkg)
except OSError:
print "No temp copy of %s found." % pkg
copytree("work/%s" % pkg, "tmp/%s" % pkg,
ignore=ignore_patterns('.git', '.hg'))
for patch in patches_for_package(pkg, branch):
local("patch -d tmp/{0} -p1 < {1}".format(pkg, patch))
local("cd tmp && tar czf {0}.tar.gz {0}".format(pkg))
def patches_for_package(package, branch):
patches = glob("patches/%s/%s/*.patch" % (package, branch))
patches.sort()
return patches
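# Layout sketch (illustrative naming): patches live under
# patches/<package>/<branch>/*.patch and are applied in sorted order by
# swift_package() via `patch -p1`.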
@task
@runs_once
def swift_deps():
for package in config("packages"):
pkg, branch, url = package
if not exists("work/%s" % pkg):
if url.find('git') >= 0:
local("cd work && git clone %s" % url)
local("cd work/%s && git checkout %s" % (pkg, branch))
elif url.find('bitbucket') >= 0:
local("cd work && hg clone %s" % url)
local("cd work/%s && hg checkout %s" % (pkg, branch))
else:
log_error("Unknown repository server for %s" % pkg)
@task
@runs_once
def swift_update_deps():
for repo in glob("work/*"):
local("cd %s && git pull; true" % repo)
@with_settings(hide('stdout'))
def swift_deploy_from_local(limit_packages=None):
src_dir = "/root/src"
run("rm -rf {0} && mkdir -p {0}".format(src_dir))
with cd(src_dir):
for package in config("packages"):
pkg, branch, url = package
if limit_packages:
if pkg not in limit_packages:
log_info("Skipping: %s" % pkg)
continue
with settings(warn_only=True):
run("pip uninstall %s -y" % pkg)
put("tmp/%s.tar.gz" % pkg, src_dir)
run("rm -rf %s" % pkg)
run("tar xvf %s.tar.gz" % pkg)
with(cd(pkg)):
run("python setup.py build")
run("python setup.py install")
@task
@parallel(5)
def refresh_code(*args):
check_swift_package_deps()
swift_package()
swift_deploy_from_local(args)
swift_restart()
@task
@parallel(5)
def refresh_config(*args):
check_swift_package_deps()
upload_proxy_config()
upload_storage_config()
swift_restart()
@task
@parallel(5)
def rebuild_cluster():
platform_init()
instance = platform.find_instance(env.host)
check_swift_package_deps()
swift_package()
reset_vm(instance)
wait_for_vm()
system_base()
python_base()
add_user()
swift_deploy_from_local()
swift_client()
swift_config_proxy()
swift_config_storage()
swift_restart()
prep_auth()
swift_test(wait_for_prep=True)
@serial
def reimage_vm(disk, instance):
print 'reimage_vm(%s, %s)' % (disk, instance)
platform.reimage_instance_os(instance, disk)
@serial
def create_datadisks(disk=None, command=None):
if not disk or not command:
return
if not rexists(disk):
run(command)
def destroy_vm(instance):
platform.reimage_instance(instance)
def reset_local():
try:
log_info("Removing local configs")
remove("tmp/swift.conf")
except:
pass
try:
log_info("Removing local rings")
for f in glob("tmp/*.ring.gz"):
remove(f)
except:
pass
def reset_vm(instance):
log_warn("Killing %s" % instance.name)
destroy_vm(instance)
def wait_for_vm():
online = False
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True):
while not online:
try:
run("echo")
log_success("VM is up!")
online = True
except:
log_info("Waiting for VM to boot...")
sleep(4)
def system_base():
packages = config('system_packages')
with settings(hide('warnings', 'running', 'stdout', 'stderr')):
if current_profile() in ["ubuntu", "debian"]:
run("apt-get update")
for package in packages:
log_info("Installing: {0}".format(package))
run("{mgr} {pkg}; true".format(
mgr=config("package_manager"),
pkg=package))
@task
def python_base():
packages = config("python_packages")
with settings(hide('warnings', 'running', 'stdout', 'stderr')):
for package in packages:
log_info("Installing: {0}".format(package))
run("pip install {0}; true".format(package))
def add_user():
prefix = ''
if current_profile() == "freebsd":
prefix = '/usr/local'
run("echo 'swift::::::Swift::/bin/sh:' | adduser -f - -w no")
else:
run("adduser --system --no-create-home --shell /bin/sh "
"--disabled-password --group swift")
run("echo 'swift ALL=(ALL) ALL' >> {prefix}/etc/sudoers".format(
prefix=prefix))
run("test -d /etc/swift || mkdir -p /etc/swift")
run("chown -R swift:swift /etc/swift")
@task
@runs_once
def swift_create_user(auth_key, account, username, password):
if not current_role('proxy'):
return
run("swauth-add-user -A http://{0}/auth/"
" -K {1} -a {2} {3} {4}".format(env.host, auth_key, account, username,
password))
@runs_once
def initialize_swauth(auth_key):
run("swauth-initialize -A http://{0}/auth/"
" -K {1}".format(env.host, auth_key))
def init_logfiles():
logfiles = ["all.log", "swift/proxy.log", "swift/proxy.error",
"swift/log_processor.log"]
# Create log directories all the way down to
# the hourly folder.
run("test -d /var/log/swift/hourly || mkdir -p /var/log/swift/hourly")
# Finally touch and chmod all the log files
for logfile in logfiles:
run("touch /var/log/%s" % logfile)
run("chmod 600 /var/log/%s" % logfile)
def swift_config_proxy():
if not current_role('proxy'):
return
reset_local()
init_logfiles()
upload_proxy_config()
for svc in config("proxy_services"):
enable_service(svc)
service(svc, action='restart')
run("openssl req -new -x509 -nodes -batch -out /etc/swift/cert.crt "
"-keyout /etc/swift/cert.key")
with cd("/etc/swift"):
run("swift-ring-builder account.builder create 12 2 1")
run("swift-ring-builder container.builder create 12 2 1")
run("swift-ring-builder object.builder create 12 2 1")
for node in env.roledefs['storage']:
for zone in xrange(1, config("zone_count"), 1):
conf = (zone, get_address(node, private=True), zone, 100)
run("swift-ring-builder account.builder "
"add z{0}-{1}:6002/disk{2} {3}".format(*conf))
run("swift-ring-builder container.builder "
"add z{0}-{1}:6001/disk{2} {3}".format(*conf))
run("swift-ring-builder object.builder "
"add z{0}-{1}:6000/disk{2} {3}".format(*conf))
run("swift-ring-builder account.builder rebalance")
run("swift-ring-builder container.builder rebalance")
run("swift-ring-builder object.builder rebalance")
get("/etc/swift/swift.conf", "tmp")
get("/etc/swift/*.ring.gz", "tmp")
log_success("Downloaded config and rings from proxy")
def swift_config_storage():
if not current_role('storage'):
return
init_logfiles()
format_drives()
run("mkdir -p /etc/swift/{object,container,account}")
run("mkdir -p /var/cache/swift")
run("mkdir -p /var/lock")
run("chmod o+x /var/cache")
run("chown -R swift:swift /srv/node /etc/swift /var/cache/swift")
# try and make /proc available in BSD
if current_profile() == 'freebsd':
run("mkdir -p /compat/linux/proc")
run("rmdir /proc ; ln -s /compat/linux/proc /proc")
append("/etc/fstab", "linprocfs /compat/linux/proc linprocfs rw 0 0",
escape=False)
append("/boot/loader.conf.local", "linprocfs_load=YES", escape=False)
upload_storage_config()
for svc in config("storage_services"):
enable_service(svc)
service(svc, action='restart')
# While the ring files and conf don't exist, wait.
while (not exists("tmp/swift.conf")
or not exists("tmp/account.ring.gz")
or not exists("tmp/container.ring.gz")
or not exists("tmp/object.ring.gz")):
log_info("Waiting for local config and ring files...")
sleep(5)
put("tmp/swift.conf", "/etc/swift")
put("tmp/*.ring.gz", "/etc/swift")
def upload_storage_config():
if not current_role('storage'):
return
address = get_address(env.host, private=True)
host = get_short_name(env.host)
upload_template("config/account-server.conf",
"/etc/swift/account-server.conf",
{'private_address': address,
'host_short': host}, backup=False)
upload_template("config/container-server.conf",
"/etc/swift/container-server.conf",
{'private_address': address,
'host_short': host}, backup=False)
upload_template("config/object-server.conf",
"/etc/swift/object-server.conf",
{'private_address': address,
'host_short': host}, backup=False)
prefix = ''
if current_profile() == 'freebsd':
prefix = '/usr/local'
upload_template("config/rsyncd.conf", "{prefix}/etc/rsyncd.conf".format(
prefix=prefix), {'private_address': address}, backup=False)
upload_template("config/rsyslog.conf", "{prefix}/etc/rsyslog.conf".format(
prefix=prefix), backup=False)
def upload_proxy_config():
if not current_role('proxy'):
return
address = get_address(env.host, private=True)
# Upload proxy configs
if not rexists("/etc/swift/swift.conf"):
# Generate a secure secret server-side
log_info("Not swift.conf found, generating ring!")
hash_prefix = local("od -t x4 -N 8 -A n </dev/random"
"| sed -e 's/ //g'", capture=True)
else:
hash_prefix = run("grep swift_hash_path_suffix /etc/swift/swift.conf "
"| sed -e 's/.*=[[:space:]]*//'")
swift_sync_key = hash_prefix
super_admin_key = hash_prefix
upload_template("config/dispersion.conf", "/etc/swift/dispersion.conf",
{'private_address': address}, backup=False)
upload_template("config/proxy-server.conf", "/etc/swift/proxy-server.conf",
{'private_address': address,
'host': env.host,
'swift_sync_key': swift_sync_key,
'super_admin_key': super_admin_key,
'host_prefix': env.host_prefix,
'host_short': get_short_name(env.host)}, backup=False)
upload_template("config/swift.conf", "/etc/swift/swift.conf",
{'hash_prefix': hash_prefix}, backup=False)
prefix = ''
if current_profile() == 'freebsd':
prefix = '/usr/local'
upload_template("config/rsyslog.conf", "{prefix}/etc/rsyslog.conf".format(
prefix=prefix), backup=False)
@task
@runs_once
def swift_test(wait_for_prep=False):
if tests_running.value == True: # NOQA
return
tests_running.value = True
if wait_for_prep:
while not all([running_hosts.value >= len(env.hosts),
prep_auth_done.value == True]): # NOQA
log_info("Waiting for all hosts to be available "
"before testing...")
sleep(1)
if not env.TEST_CONFIG:
abort(red("Please set your SWIFT_TEST_CONFIG_FILE environment "
"variable to a valid config file location."))
log_success("Running functional test suite...")
local("cd tmp/swift && ./.functests")
@task
@parallel(5)
def swift_restart():
global running_hosts
while not rexists("/etc/swift/swift.conf"):
log_info("Still need swift.conf...")
sleep(2)
while not rexists("/etc/swift/account.ring.gz"):
log_info("Still need account.ring.gz...")
sleep(2)
while not rexists("/etc/swift/container.ring.gz"):
log_info("Still need container.ring.gz...")
sleep(2)
while not rexists("/etc/swift/object.ring.gz"):
log_info("Still need object.ring.gz...")
sleep(2)
run("swift-init stop all; true")
run("swift-init start all; true")
log_success("Restarted!")
running_hosts.value += 1
while running_hosts.value < len(env.hosts):
log_info("%d/%d hosts running" % (running_hosts.value, len(env.hosts)))
sleep(1)
@task
@retry(5)
def prep_auth():
if not current_role('proxy'):
return
hash_prefix = run("grep super_admin_key /etc/swift/proxy-server.conf "
"| sed -e 's/.*=[[:space:]]*//'")
run("swauth-prep -A http://{0}/auth/ -K {1}".format(env.host, hash_prefix))
users = []
users.append(('test', 'tester', 'testing'))
users.append(('test2', 'tester2', 'testing2'))
users.append(('test', 'tester3', 'testing3'))
accounts = []
accounts.append('myaccount')
for user in users:
run("swauth-add-user -A http://{0}/auth/ -K {1} "
"-a {2} {3} {4}".format(env.host, hash_prefix, *user))
for account in accounts:
run("swauth-add-account -A http://{0}/auth/ -K {1} "
"{2}".format(env.host, hash_prefix, account))
prep_auth_done.value = True
@task
def swift_client():
src_dir = "/root/src"
with cd(src_dir):
run("git clone git://github.com/openstack/python-swiftclient.git "
"swift-client")
with cd("swift-client"):
with settings(warn_only=True):
run("pip uninstall swiftclient -y")
run("python setup.py build")
run("python setup.py install")
|
|
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
        if dx == 0. and dy == 0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
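# Conversion sketch (illustrative; `ax` is an Axes3D -- this is essentially
# what Axes3D.plot does internally after drawing a 2D line):
#
#   line = lines.Line2D([0, 1], [0, 1])
#   line_2d_to_3d(line, zs=[0, 1], zdir='z')
#   ax.add_line(line)    # now drawn as a Line3D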
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment with path codes.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
codes = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
codes.append(code)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d, codes
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments with path codes.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
codes_list = []
for path, pathz in zip(paths, zs):
segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
segments.append(segs)
codes_list.append(codes)
return segments, codes_list
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
        self._segment3d = [juggle_axes(x, y, z, zdir)
                           for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
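# Editor's sketch (demo name is hypothetical): the same in-place class swap
# for patches, here placing a flat circle in the x=2 plane.
def _demo_patch_2d_to_3d():
    from matplotlib.patches import Circle
    circle = Circle((0, 0), radius=1)
    patch_2d_to_3d(circle, z=2, zdir='x')
    assert isinstance(circle, Patch3D)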
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
        if vzs.size > 0:
            return min(vzs)
        else:
            return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs* The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
zsort = kwargs.pop('zsort', True)
PolyCollection.__init__(self, verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
        if len(segments3d) > 0:
            xs, ys, zs = list(zip(*points))
        else:
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_verts_and_codes(self, verts, codes):
'''Sets 3D vertices with path codes'''
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
    def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
indices = range(len(xyzlist))
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec,
idx) for (xs, ys, zs), fc, ec, idx in
zip(xyzlist, cface, cedge, indices)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
            raise ValueError("Cannot project a Poly3DCollection with z-sorting disabled")
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
        elif tzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
        else:
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
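# Editor's note (demo name is hypothetical): what the two reorderings above
# do to a single (x, y, z) triple.
def _demo_juggle_axes():
    xs, ys, zs = [1], [2], [3]
    # Plot 2D data in the plane orthogonal to the y axis:
    assert juggle_axes(xs, ys, zs, 'y') == ([1], [3], [2])
    # A leading '-' asks for the compensating rotation instead:
    assert juggle_axes(xs, ys, zs, '-x') == ([3], [1], [2])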
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
    except TypeError:
return False
return False
def get_colors(c, num):
    """Stretch the color argument to provide the required number *num*."""
    if isinstance(c, str):
        c = mcolors.to_rgba(c)
    if iscolor(c):
        return [c] * num
    if len(c) == num:
        return c
    elif len(c) == 0:  # if edgecolor or facecolor is specified as 'none'
        return [[0, 0, 0, 0]] * num
    elif iscolor(c[0]):
        return [c[0]] * num
    else:
        raise ValueError('unknown color format %s' % c)
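# Editor's sketch (demo name is hypothetical): get_colors broadcasts a
# single color, or the first entry of a wrong-length sequence, to `num`.
def _demo_get_colors():
    assert get_colors('red', 3) == [(1.0, 0.0, 0.0, 1.0)] * 3
    assert get_colors([(0., 0., 1., 1.)], 2) == [(0., 0., 1., 1.)] * 2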
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
    if zs.size > 0:
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
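# Editor's sketch (demo name is hypothetical): points with larger projected
# z (i.e. farther from the viewer) have their alpha scaled down, bottoming
# out at 0.3 of the original value.
def _demo_zalpha():
    shaded = zalpha('black', np.array([0.0, 1.0]))
    assert shaded[0][3] == 1.0
    assert abs(shaded[1][3] - 0.3) < 1e-9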
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Speckle connection module for Google API."""
import logging
import os
import google
try:
import apiclient
except ImportError:
try:
import google_sql
google_sql.fix_sys_path(google_sql.GOOGLE_SQL_EXTRA_PATHS)
except ImportError:
logging.warning(
'Attempt to automatically load Google Cloud SQL dependencies failed! '
'Ensure that the App Engine SDK directory has been added to your '
'PYTHONPATH when using this backend.')
from apiclient import errors
from apiclient import http
from apiclient import model
import httplib2
from oauth2client import client
from oauth2client import file as oauth_file
from google.storage.speckle.proto import sql_pb2
from google.storage.speckle.python.api import rdbms
__path__ = rdbms.__path__
CLIENT_ID = '877927577750.apps.googleusercontent.com'
CLIENT_SECRET = '7nBqns87ugMSNBrOM1FdHMK6'
USER_AGENT = 'Google SQL Service/1.0'
def GetFlow(state=None):
"""Get a client.OAuth2WebServerFlow for performing OAuth 2.0 authentication.
Args:
state: Value to use for the OAuth 2.0 state parameter.
Returns:
A client.OAuth2WebServerFlow instance populated with default values for
getting access to the SQL Service over Google API.
"""
return client.OAuth2WebServerFlow(
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope='https://www.googleapis.com/auth/sqlservice',
user_agent=USER_AGENT,
state=state)
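# Editor's sketch, based on the oauth2client API of the same era as this
# module (the redirect URI below is the standard out-of-band value; treat
# the exact method signature as an assumption, not part of this module):
def _demo_get_flow_url():
    flow = GetFlow(state='state-token')
    return flow.step1_get_authorize_url(
        redirect_uri='urn:ietf:wg:oauth:2.0:oob')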
class RdbmsGoogleApiClient(object):
"""A Google API client for rdbms."""
def __init__(self, api_url='https://www.googleapis.com/sql/v1/',
oauth_credentials_path=None, oauth_storage=None,
developer_key=None):
"""Constructs an RdbmsGoogleApiClient.
Args:
api_url: The base of the URL for the rdbms Google API.
oauth_credentials_path: The filesystem path to use for OAuth 2.0
credentials storage.
oauth_storage: A client.Storage instance to use for OAuth 2.0 credential
storage instead of the default file based storage.
developer_key: A Google APIs developer key to use when connecting to the
SQL service.
"""
self._api_url = api_url
self._developer_key = developer_key
if oauth_storage is None:
if oauth_credentials_path is None:
oauth_credentials_path = os.path.expanduser(
rdbms.OAUTH_CREDENTIALS_PATH)
oauth_storage = oauth_file.Storage(oauth_credentials_path)
credentials = oauth_storage.get()
if credentials is None or credentials.invalid:
from oauth2client import tools
credentials = tools.run(GetFlow(), oauth_storage)
self._transport = credentials.authorize(httplib2.Http())
def OpenConnection(self, request):
return self._MakeRequest(
'jdbc/openConnection', request, sql_pb2.OpenConnectionResponse)
def CloseConnection(self, request):
return self._MakeRequest(
'jdbc/closeConnection', request, sql_pb2.CloseConnectionResponse)
def Exec(self, request):
return self._MakeRequest('jdbc/exec', request, sql_pb2.ExecResponse)
def ExecOp(self, request):
return self._MakeRequest('jdbc/execOp', request, sql_pb2.ExecOpResponse)
def GetMetadata(self, request):
return self._MakeRequest(
'jdbc/getMetadata', request, sql_pb2.MetadataResponse)
def _MakeRequest(self, method, request, response_class):
"""Executes a request to the Google API server.
Args:
method: The method to invoke.
request: The request protocol buffer from sql_pb2.
response_class: The response protocol buffer class from sql_pb2.
Returns:
A protocol buffer instance of the given response_class type.
"""
pb_model = model.ProtocolBufferModel(response_class)
query_params = {}
if self._developer_key:
query_params['key'] = self._developer_key
headers, unused_params, query, body = pb_model.request(
{}, {}, query_params, request)
request = http.HttpRequest(
self._transport, pb_model.response, self._api_url + method + query,
method='POST', body=body, headers=headers)
return request.execute()
class GoogleApiConnection(rdbms.Connection):
"""Google API specific rdbms connection."""
def __init__(self, *args, **kwargs):
"""Constructs a GoogleApiConnection.
In addition to all of the arguments taken by rdbms.Connection.__init__, this
also accepts the following optional keyword arguments:
oauth_credentials_path: The filesystem path to the file used for OAuth 2.0
credential storage.
oauth_storage: A client.Storage instance to use for OAuth 2.0 credential
storage instead of the default file based storage.
developer_key: A Google APIs developer key to use when connecting to the SQL
service.
Args:
args: Positional arguments to pass to parent method.
kwargs: Keyword arguments to pass to parent method.
"""
self._oauth_credentials_path = kwargs.pop('oauth_credentials_path', None)
self._oauth_storage = kwargs.pop('oauth_storage', None)
self._developer_key = kwargs.pop('developer_key', None)
super(GoogleApiConnection, self).__init__(*args, **kwargs)
def SetupClient(self):
"""Opens a Google API connection to rdbms."""
kwargs = {'developer_key': self._developer_key,
'oauth_storage': self._oauth_storage}
if self._dsn:
kwargs['api_url'] = self._dsn
if self._oauth_credentials_path:
kwargs['oauth_credentials_path'] = self._oauth_credentials_path
self._client = RdbmsGoogleApiClient(**kwargs)
def MakeRequestImpl(self, stub_method, request):
"""Makes a Google API request, and possibly raises an appropriate exception.
Args:
stub_method: A string, the name of the method to call.
request: A protobuf; 'instance' and 'connection_id' will be set
when available.
Returns:
A protobuf.
Raises:
OperationalError: httplib2 transport failure, or non 2xx http response.
"""
try:
response = getattr(self._client, stub_method)(request)
        except (errors.Error, client.Error, httplib2.HttpLib2Error) as e:
raise OperationalError('could not connect: ' + str(e))
return response
apilevel = rdbms.apilevel
threadsafety = rdbms.threadsafety
paramstyle = rdbms.paramstyle
version_info = rdbms.version_info
Binary = rdbms.Binary
Date = rdbms.Date
Time = rdbms.Time
Timestamp = rdbms.Timestamp
DateFromTicks = rdbms.DateFromTicks
TimeFromTicks = rdbms.TimeFromTicks
TimestampFromTicks = rdbms.TimestampFromTicks
STRING = rdbms.STRING
BINARY = rdbms.BINARY
NUMBER = rdbms.NUMBER
DATETIME = rdbms.DATETIME
ROWID = rdbms.ROWID
Warning = rdbms.Warning
Error = rdbms.Error
InterfaceError = rdbms.InterfaceError
DatabaseError = rdbms.DatabaseError
DataError = rdbms.DataError
OperationalError = rdbms.OperationalError
IntegrityError = rdbms.IntegrityError
InternalError = rdbms.InternalError
ProgrammingError = rdbms.ProgrammingError
NotSupportedError = rdbms.NotSupportedError
connect = GoogleApiConnection
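# Editor's usage sketch (instance and database names are placeholders, and
# the keyword names assume the rdbms.Connection interface inherited above):
# since `connect` aliases GoogleApiConnection, the module behaves like a
# standard DB-API driver.
def _demo_connect():
    conn = connect(instance='myproject:myinstance', database='mydb')
    cursor = conn.cursor()
    cursor.execute('SELECT 1')
    print(cursor.fetchone())
    conn.close()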
|
|
# The MIT License (MIT)
# Copyright (c) 2017 Levak Borok <levak92@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import discord
import asyncio
import datetime
class Handle:
def __init__(self, bot, message=None, member=None, channel=None):
self.bot = bot
self.member = member
self.channel = channel
self.message = message
if self.message:
self.member = self.message.author
self.channel = self.message.channel
self.team = None
if self.member and self.member.id != bot.client.user.id:
try:
db, error, _ = bot.find_cup_db(self.member.guild, captain=self.member.id)
if not error:
self.team = db['captains'][self.member.id].team
else:
print('WARNING: {} (member: {})'.format(error, self.member.id))
            except KeyError:
                print('WARNING: Could not find team for {}'.format(self.member.id))
## Override pickle serialization
def __getstate__(self):
state = dict(self.__dict__)
# We cannot serialize Discord.Message because of WeakSet
# thus, remove them
state['bot'] = None
        state['_msg_ch'] = (self.message.channel.id
                            if self.message and self.message.channel
                            else self.channel.id if self.channel else None)
        state['_msg_id'] = self.message.id if self.message else None
        state['_msg_am'] = (self.message.author.id
                            if self.message and self.message.author
                            else self.member.id if self.member else None)
state['message'] = None
state['member'] = None
state['channel'] = None
state['team'] = None
return state
## Once the bot is ready, restore the message
async def resume(self, guild, bot):
        channel = guild.get_channel(self._msg_ch) if self._msg_ch else None
        try:
            message = (await channel.fetch_message(self._msg_id)
                       if channel and self._msg_id else None)
        except discord.errors.HTTPException:
            print('WARNING: Could not find message id {}'.format(self._msg_id))
            message = None
        member = guild.get_member(self._msg_am) if self._msg_am else None
if message:
self.__init__(bot, message=message)
elif channel and member:
self.__init__(bot, channel=channel, member=member)
def clone(self):
h = Handle(self.bot)
h.member = self.member
h.channel = self.channel
h.message = self.message
h.team = self.team
return h
async def reply(self, msg):
if self.member:
return await self.send('{} {}'.format(self.member.mention, msg))
else:
return await self.send(msg)
async def react(self, reaction, err_count=0):
if not self.message:
return None
try:
return await self.message.add_reaction(reaction)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.react(reaction, err_count=err_count)
async def unreact(self, reaction, user, err_count=0):
if not self.message:
return None
try:
print('removing reaction {} from {}'.format(reaction, str(user)))
return await self.message.remove_reaction(reaction, user)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.unreact(reaction, user, err_count=err_count)
async def send(self, msg, err_count=0):
try:
return await self.channel.send(content=msg)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.send(msg, err_count=err_count)
async def send_file(self, file, name, msg, err_count=0):
try:
return await self.channel.send(file=discord.File(fp=file, filename=name), content=msg)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.send_file(file, name, msg, err_count=err_count)
async def edit(self, msg, err_count=0):
try:
return await self.message.edit(content=msg)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.edit(msg, err_count=err_count)
async def embed(self, title, msg, color, fields=[], err_count=0):
try:
embed = discord.Embed(title=title,
type='rich',
description=msg,
timestamp=datetime.datetime.utcnow(),
color=color)
for field in fields:
embed.add_field(name=field['name'], value=field['value'], inline=False)
return await self.channel.send(embed=embed)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.embed(title, msg, color, fields=fields, err_count=err_count)
async def edit_embed(self, title, msg, color, fields=[], err_count=0):
try:
embed = discord.Embed(title=title,
type='rich',
description=msg,
timestamp=datetime.datetime.utcnow(),
color=color)
for field in fields:
embed.add_field(name=field['name'], value=field['value'], inline=False)
return await self.message.edit(embed=embed)
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.edit_embed(title, msg, color, fields=fields, err_count=err_count)
async def delete(self, err_count=0):
try:
return await self.message.delete()
except discord.errors.HTTPException as e:
print('WARNING: HTTPexception: {}'.format(str(e)))
err_count += 1
if err_count > 5:
return
await asyncio.sleep(10)
return await self.delete(err_count=err_count)
async def broadcast(self, bcast_id, msg):
if not self.bot.is_broadcast_enabled(self.channel.guild):
return
channels = []
try:
channels = self.bot.config['guilds'][self.channel.guild.name]['rooms'][bcast_id]
        except KeyError:
            print('WARNING: No broadcast configuration for "{}"'.format(bcast_id))
for channel_name in channels:
channel = discord.utils.get(self.channel.guild.channels, name=channel_name)
if channel:
try:
await channel.send(content=msg)
                except discord.errors.Forbidden:
                    print('WARNING: No permission to write in "{}"'.format(channel_name))
else:
                print('WARNING: Missing channel {}'.format(channel_name))
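# Editor's sketch (hypothetical wiring, not part of the original file): a
# Handle is normally built from an incoming message, after which the
# coroutines above can be awaited; `bot` is assumed to be the application
# object expected by Handle.__init__.
async def demo_on_message(bot, message):
    handle = Handle(bot, message=message)
    await handle.react('\N{THUMBS UP SIGN}')
    await handle.reply('Command received!')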
|
|
import os
import time
import socket
import atexit
import signal
import pprint
import logging
import cPickle
import subprocess
import json
import numpy
import theano
from theano import tensor
import blocks
from blocks.initialization import Uniform, Constant
from blocks.algorithms import (
    Adam, GradientDescent, StepClipping, CompositeRule)
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.filter import VariableFilter
from blocks.extensions import FinishAfter, Timing, Printing
from blocks.extensions.training import TrackTheBest
from blocks.extensions.saveload import Load, Checkpoint
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
from blocks.extensions.stopping import FinishIfNoImprovementAfter
from blocks.extensions.predicates import OnLogRecord
from blocks.main_loop import MainLoop
from blocks.serialization import load_parameters
import fuel
from fuel.streams import ServerDataStream
from dictlearn.util import rename, masked_root_mean_square, get_free_port
from dictlearn.theano_util import parameter_stats
from dictlearn.data import LanguageModellingData
from dictlearn.extensions import (
DumpTensorflowSummaries, StartFuelServer, LoadNoUnpickling,
RetrievalPrintStats, IntermediateCheckpoint)
from dictlearn.language_model import LanguageModel
from dictlearn.retrieval import Retrieval, Dictionary
from dictlearn.vocab import Vocabulary
from tests.util import temporary_content_path
logger = logging.getLogger()
def initialize_data_and_model(config):
c = config
fuel_path = fuel.config.data_path[0]
vocab_main = None
if c['vocab_path']:
vocab_main = Vocabulary(
os.path.join(fuel.config.data_path[0], c['vocab_path']))
data = LanguageModellingData(c['data_path'], c['layout'], vocab=vocab_main)
vocab_main = data.vocab
retrieval = None
if c['dict_path'] and not c['embedding_path']:
dict_full_path = os.path.join(fuel_path, c['dict_path'])
dict_ = Dictionary(dict_full_path)
logger.debug("Loaded dictionary with {} entries"
.format(dict_.num_entries()))
vocab_def = data.vocab
if c['dict_vocab_path']:
if not c['standalone_def_lookup']:
raise ValueError("Standalone def lookup mandatory with separate vocabs")
vocab_def = Vocabulary(
os.path.join(fuel.config.data_path[0], c['dict_vocab_path']))
retrieval = Retrieval(vocab_main, dict_,
c['max_def_length'], with_too_long_defs='drop',
                              exclude_top_k=c['exclude_top_k'],
                              vocab_def=vocab_def,
max_def_per_word=c['max_def_per_word'])
elif c['embedding_path']:
        assert c['dict_path']
emb_full_path = os.path.join(fuel_path, c['embedding_path'])
embedding_matrix = numpy.load(emb_full_path)
dict_full_path = os.path.join(fuel_path, c['dict_path'])
dict_ = Dictionary(dict_full_path) # should be key=value=word
if not c['standalone_def_lookup']:
raise ValueError("Standalone def lookup mandatory")
vocab_def = data.vocab
if c['dict_vocab_path']:
vocab_def = Vocabulary(
os.path.join(fuel.config.data_path[0], c['dict_vocab_path']))
retrieval = Retrieval(data.vocab, dict_, max_def_length=1,
with_too_long_defs='drop',
exclude_top_k=c['exclude_top_k'],
                              vocab_def=vocab_def,
max_def_per_word=1, add_bod_eod=False)
lm = LanguageModel(c['emb_dim'], c['emb_def_dim'], c['dim'], c['num_input_words'],
c['def_num_input_words'], c['num_output_words'], data.vocab,
retrieval,
c['def_reader'],
c['standalone_def_lookup'],
c['standalone_def_rnn'],
c['disregard_word_embeddings'],
c['compose_type'],
very_rare_threshold=c['very_rare_threshold'],
cache_size=c['cache_size'],
weights_init=Uniform(width=0.1),
biases_init=Constant(0.))
lm.initialize()
if c['embedding_path']:
lm.set_def_embeddings(embedding_matrix)
logger.debug("Embeddings loaded")
return (data, lm, retrieval)
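# Editor's note: a minimal sketch of the config dictionary read above. The
# constant name and all values are the editor's placeholders; keys not
# exercised by a given branch (e.g. dict_path) can be left falsy.
EXAMPLE_CONFIG_SKETCH = {
    'data_path': 'lm_data', 'layout': 'standard', 'vocab_path': '',
    'dict_path': '', 'dict_vocab_path': '', 'embedding_path': '',
    'standalone_def_lookup': False, 'standalone_def_rnn': False,
    'max_def_length': 100, 'exclude_top_k': 0, 'max_def_per_word': 10,
    'emb_dim': 500, 'emb_def_dim': 500, 'dim': 500,
    'num_input_words': 10000, 'def_num_input_words': 10000,
    'num_output_words': 10000, 'def_reader': 'LSTM',
    'disregard_word_embeddings': False, 'compose_type': 'sum',
    'very_rare_threshold': 10, 'cache_size': 0,
}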
def train_language_model(new_training_job, config, save_path, params,
fast_start, fuel_server, seed):
c = config
if seed:
fuel.config.default_seed = seed
blocks.config.config.default_seed = seed
data, lm, retrieval = initialize_data_and_model(config)
# full main loop can be saved...
main_loop_path = os.path.join(save_path, 'main_loop.tar')
# or only state (log + params) which can be useful not to pickle embeddings
state_path = os.path.join(save_path, 'training_state.tar')
stream_path = os.path.join(save_path, 'stream.pkl')
best_tar_path = os.path.join(save_path, "best_model.tar")
words = tensor.ltensor3('words')
words_mask = tensor.matrix('words_mask')
if theano.config.compute_test_value != 'off':
test_value_data = next(
data.get_stream('train', batch_size=4, max_length=5)
.get_epoch_iterator())
words.tag.test_value = test_value_data[0]
words_mask.tag.test_value = test_value_data[1]
costs, updates = lm.apply(words, words_mask)
cost = rename(costs.mean(), 'mean_cost')
cg = Model(cost)
if params:
logger.debug("Load parameters from {}".format(params))
with open(params) as src:
cg.set_parameter_values(load_parameters(src))
length = rename(words.shape[1], 'length')
perplexity, = VariableFilter(name='perplexity')(cg)
perplexities = VariableFilter(name_regex='perplexity.*')(cg)
monitored_vars = [length, cost] + perplexities
if c['dict_path']:
num_definitions, = VariableFilter(name='num_definitions')(cg)
monitored_vars.extend([num_definitions])
parameters = cg.get_parameter_dict()
trained_parameters = parameters.values()
saved_parameters = parameters.values()
if c['embedding_path']:
logger.debug("Exclude word embeddings from the trained parameters")
trained_parameters = [p for p in trained_parameters
if not p == lm.get_def_embeddings_params()]
saved_parameters = [p for p in saved_parameters
if not p == lm.get_def_embeddings_params()]
if c['cache_size'] != 0:
logger.debug("Enable fake recursivity for looking up embeddings")
trained_parameters = [p for p in trained_parameters
if not p == lm.get_cache_params()]
logger.info("Cost parameters" + "\n" +
pprint.pformat(
[" ".join((
key, str(parameters[key].get_value().shape),
'trained' if parameters[key] in trained_parameters else 'frozen'))
for key in sorted(parameters.keys())],
width=120))
rules = []
if c['grad_clip_threshold']:
rules.append(StepClipping(c['grad_clip_threshold']))
rules.append(Adam(learning_rate=c['learning_rate'],
beta1=c['momentum']))
algorithm = GradientDescent(
cost=cost,
parameters=trained_parameters,
step_rule=CompositeRule(rules))
if c['cache_size'] != 0:
algorithm.add_updates(updates)
train_monitored_vars = list(monitored_vars)
if c['grad_clip_threshold']:
train_monitored_vars.append(algorithm.total_gradient_norm)
word_emb_RMS, = VariableFilter(name='word_emb_RMS')(cg)
main_rnn_in_RMS, = VariableFilter(name='main_rnn_in_RMS')(cg)
train_monitored_vars.extend([word_emb_RMS, main_rnn_in_RMS])
if c['monitor_parameters']:
train_monitored_vars.extend(parameter_stats(parameters, algorithm))
# We use a completely random seed on purpose. With Fuel server
# it's currently not possible to restore the state of the training
# stream. That's why it's probably better to just have it stateless.
stream_seed = numpy.random.randint(0, 10000000) if fuel_server else None
training_stream = data.get_stream(
'train', batch_size=c['batch_size'], max_length=c['max_length'],
seed=stream_seed)
valid_stream = data.get_stream('valid', batch_size=c['batch_size_valid'],
max_length=c['max_length'], seed=stream_seed)
original_training_stream = training_stream
if fuel_server:
# the port will be configured by the StartFuelServer extension
training_stream = ServerDataStream(
sources=training_stream.sources,
produces_examples=training_stream.produces_examples)
validation = DataStreamMonitoring(
monitored_vars,
valid_stream,
prefix="valid").set_conditions(
before_first_epoch=not fast_start,
        on_resumption=True,
every_n_batches=c['mon_freq_valid'])
track_the_best = TrackTheBest(
validation.record_name(perplexity),
choose_best=min).set_conditions(
        on_resumption=True,
after_epoch=True,
every_n_batches=c['mon_freq_valid'])
    # don't save the entire main loop, to avoid pickling everything
if c['fast_checkpoint']:
load = (LoadNoUnpickling(state_path, load_iteration_state=True, load_log=True)
.set_conditions(before_training=not new_training_job))
cp_args = {
'save_main_loop': False,
            'save_separately': ['log', 'iteration_state'],
'parameters': saved_parameters
}
checkpoint = Checkpoint(state_path,
before_training=not fast_start,
every_n_batches=c['save_freq_batches'],
after_training=not fast_start,
**cp_args)
if c['checkpoint_every_n_batches']:
intermediate_cp = IntermediateCheckpoint(
state_path,
every_n_batches=c['checkpoint_every_n_batches'],
after_training=False,
**cp_args)
else:
load = (Load(main_loop_path, load_iteration_state=True, load_log=True)
.set_conditions(before_training=not new_training_job))
cp_args = {
            'save_separately': ['iteration_state'],
'parameters': saved_parameters
}
checkpoint = Checkpoint(main_loop_path,
before_training=not fast_start,
every_n_batches=c['save_freq_batches'],
after_training=not fast_start,
**cp_args)
if c['checkpoint_every_n_batches']:
intermediate_cp = IntermediateCheckpoint(
main_loop_path,
every_n_batches=c['checkpoint_every_n_batches'],
after_training=False,
**cp_args)
checkpoint = checkpoint.add_condition(
['after_batch', 'after_epoch'],
OnLogRecord(track_the_best.notification_name),
(best_tar_path,))
extensions = [
load,
StartFuelServer(original_training_stream,
stream_path,
before_training=fuel_server),
Timing(every_n_batches=c['mon_freq_train'])
]
if retrieval:
extensions.append(
RetrievalPrintStats(retrieval=retrieval,
every_n_batches=c['mon_freq_train'],
before_training=not fast_start))
extensions.extend([
TrainingDataMonitoring(
train_monitored_vars, prefix="train",
every_n_batches=c['mon_freq_train']),
validation,
track_the_best,
checkpoint])
if c['checkpoint_every_n_batches']:
extensions.append(intermediate_cp)
extensions.extend([
DumpTensorflowSummaries(
save_path,
every_n_batches=c['mon_freq_train'],
after_training=True),
Printing(on_resumption=True,
every_n_batches=c['mon_freq_train']),
FinishIfNoImprovementAfter(
track_the_best.notification_name,
iterations=50 * c['mon_freq_valid'],
every_n_batches=c['mon_freq_valid']),
FinishAfter(after_n_batches=c['n_batches'])
])
logger.info("monitored variables during training:" + "\n" +
pprint.pformat(train_monitored_vars, width=120))
logger.info("monitored variables during valid:" + "\n" +
pprint.pformat(monitored_vars, width=120))
main_loop = MainLoop(
algorithm,
training_stream,
model=Model(cost),
extensions=extensions)
main_loop.run()
|
|
import multiprocessing
import numpy as np
from .model import Model
from ..grid import SphericalPolarGrid
from ..util.interpolate import interp1d_fast
from .analytical_yso_model import AnalyticalYSOModel
def find_last_iteration(file_handle):
max_iteration = 0
for group_name in file_handle:
if "iteration" in group_name:
iteration = int(group_name.split('_')[1])
max_iteration = max(iteration, max_iteration)
return max_iteration
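# Editor's sketch (file name is a placeholder): hyperion output files are
# HDF5, so the usual caller passes an open h5py file handle whose groups
# are named like 'iteration_00005'.
def _demo_find_last_iteration(path='model.rtout'):
    import h5py
    with h5py.File(path, 'r') as f:
        return find_last_iteration(f)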
def tau_to_radius(model, tau, wav):
"""
Given a Model instance with a spherical polar coordinate grid, find the
radius from which the optical depth to escape radially is a fixed value.
This only works for spherical polar grids, but works for 1-, 2-, and
3-d grids.
Parameters
----------
model : `~hyperion.model.Model` instance
tau : float
The optical depth for which to find the surface
wav : float
The wavelength at which the optical depth is defined
Returns
-------
r : np.ndarray
The radius or radii at which the optical depth to escape radially
is ``tau`` at ``wav``. This is a scalar, a 1-d, or a 2-d array
depending on the dimensionality of the grid.
"""
if not isinstance(model, Model):
raise TypeError("model should be a Model instance")
if model.grid is None:
raise Exception("Coordinate grid has not been defined")
if not isinstance(model.grid, SphericalPolarGrid):
raise TypeError("This method can only be called for spherical polar grids")
# Initialize cumulative optical depth array
tau_all = np.zeros(model.grid.shape)
# Loop over envelopes and add cumulative column density
for i, item in enumerate(model.grid['density']):
# Find density
rho = item.array
# Find optical depth in all cells in radial direction
dtau = model.grid.widths[0, :, :, :] * rho * model.dust[i].optical_properties.interp_chi_wav(wav)
        # Find cumulative sum starting from the outside
tau_all += np.cumsum(dtau[:, :, ::-1], axis=2)
r = np.zeros(model.grid.shape[:2])
for ip in range(model.grid.shape[0]):
for it in range(model.grid.shape[1]):
tau_col = np.hstack([0., tau_all[ip, it, :]])
if tau < np.max(tau_col):
r[ip, it] = interp1d_fast(tau_col, model.grid.r_wall[::-1], tau)
else:
r[ip, it] = 0.
return r
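# Editor's usage sketch (file name, tau and wavelength are placeholders;
# the model must use a spherical polar grid):
def _demo_tau_to_radius():
    m = Model.read('model.rtout')
    return tau_to_radius(m, tau=1., wav=1.)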
def hseq_profile(w, z, temperature, mstar, mu=2.279):
"""
Compute the new (normalized) density profile
corresponding to a given temperature profile
Parameters
----------
w : float
The cylindrical radius at which to compute the profile (in cm)
z : np.ndarray
The z coordinates of the cells at radius w (in cm)
temperature : np.ndarray
The temperatures in the cells (in K)
    mstar : float
        The mass of the star (in g)
    mu : float, optional
        The mean molecular weight of the gas, defaulting to 2.279 for an
        H2 + He mix (see the comment following this function)
    """
from hyperion.util.constants import G, m_h, k
from ..util.integrate import integrate, integrate_subset
# Compute the integrand
integrand = z / temperature / (w ** 2 + z ** 2) ** 1.5
# Compute the integral for all cells
# TODO - inefficient to compute integral from scratch - optimize
i = np.array([integrate_subset(z, integrand, 0., zmax) for zmax in z])
i[z < 0] = -i[z < 0]
# Compute the factor for the integrand
factor = G * mstar * mu * m_h / k
# Compute the profile
density = np.exp(-i * factor) / temperature
# Normalize the density profile
density = density / integrate(z, density)
return density
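# Editor's sketch (values are placeholders; units follow the docstring):
# recomputing the normalized vertical profile of one column with a toy
# isothermal temperature structure.
def _demo_hseq_profile():
    from hyperion.util.constants import au, msun
    z = np.linspace(-20 * au, 20 * au, 101)
    temperature = np.full(z.shape, 30.)  # 30 K everywhere
    return hseq_profile(100 * au, z, temperature, mstar=msun)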
# The mean molecular weight of H2 + He is given by:
#
# mu = 4 * (X + 1) / (X + 2)
#
# where X is the mass fraction of Helium to Hydrogen. Assuming
#
# X = 0.32476319350473615
#
# gives:
#
# mu = 2.279
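# Editor's check of the quoted value (pure arithmetic; function name is the
# editor's):
def _check_mu(X=0.32476319350473615):
    mu = 4 * (X + 1) / (X + 2)
    assert abs(mu - 2.279) < 5e-4
    return mu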
def run_with_vertical_hseq(prefix, model, n_iter=10, mpi=False,
n_processes=multiprocessing.cpu_count(),
overwrite=False):
"""
Run a model with vertical hydrostatic equilibrium.
.. note:: this is an experimental function that is currently in
development. Please use with care!
The hydrostatic equilibrium condition is only applied to the disk
components. The following requirements must be met:
- The model should be an AnalyticalYSOModel
- The model should be defined on a cylindrical polar grid
- The stellar mass should be set
- The model should include at least one disk
The dust properties for the model can be specified as dust or dust+gas
densities as this does not have an impact on this calculation - however,
the hydrostatic equilibrium is computed assuming an H2 + He mix of gas
(i.e. mu=2.279). Note that this calculation also ignores the effects of
self-gravity in the disk, which might be important for more massive disks.
Parameters
----------
prefix : str
The prefix for the output
model : `~hyperion.model.analytical_yso_model.AnalyticalYSOModel`
The model to run
n_iter : int, optional
The number of iterations to run the model for
mpi : bool, optional
Whether to run the model in parallel
n_processes : int, optional
The number of processes to use if ``mpi`` is ``True``
overwrite : bool, optional
Whether to overwrite previous files
"""
from ..grid import CylindricalPolarGrid
from .model_output import ModelOutput
from ..util.integrate import integrate
if not isinstance(model, AnalyticalYSOModel):
raise TypeError("Can only run hydrostatic equilibrium for AnalyticalYSOModel instances")
if not isinstance(model.grid, CylindricalPolarGrid):
raise TypeError("Can only run hydrostatic equilibrium for models with cylindrical polar grids")
if model.star.mass is None:
raise ValueError("Stellar mass needs to be defined for calculation of hydrostatic equilibrium")
if len(model.disks) == 0:
raise ValueError("Can only run hydrostatic equilibrium for models with disks")
else:
n_disks = len(model.disks)
# Write out initial model
model.write(prefix + '_00000.rtin', overwrite=overwrite, merge_if_possible=False)
# Run the initial model
mo = model.run(prefix + '_00000.rtout', overwrite=overwrite,
mpi=mpi, n_processes=n_processes)
previous = prefix + '_00000.rtout'
for iteration in range(1, n_iter + 1):
# Read in output
mo = ModelOutput(previous)
# Extract the quantities
g = mo.get_quantities()
# Get wall positions
rw, zw = g.w_wall, g.z_wall
# Make a 2-d grid of wall positions
R, Z = np.meshgrid(rw, zw)
# Extract density and temperature
density = g['density']
temperature = g['temperature']
# TODO: need to find a better way than just assuming the first n
# density grids are disks
for idisk in range(n_disks):
# Vertically extrapolate temperatures
for i in range(len(g.w)):
for j in range(len(g.p)):
reset = temperature[idisk].array[j, :, i] < 1.
temperature[idisk].array[j, reset, i] = np.max(temperature[idisk].array[j, :, i]) # shouldn't be max, but will do for now
# Compute new density
for i in range(len(g.w)):
for j in range(len(g.p)):
density[idisk].array[j, :, i] = hseq_profile(g.w[i], g.z, temperature[idisk].array[j, :, i], model.star.mass) * integrate(g.z, density[idisk].array[j, :, i])
# Instantiate new model based on previous
m = Model.read(previous)
# Override the density
m.grid['density'] = density
# Write and run
m.write('{0:s}_{1:05d}.rtin'.format(prefix, iteration), overwrite=overwrite)
m.run('{0:s}_{1:05d}.rtout'.format(prefix, iteration),
overwrite=overwrite, mpi=mpi, n_processes=n_processes)
previous = '{0:s}_{1:05d}.rtout'.format(prefix, iteration)
|
|
"""Module implementing StandardRunner."""
from __future__ import print_function
import argparse
import os
import json
import importlib
import tensorflow as tf
from .. import config
from .. import testproblems
from . import runner_utils
class StandardRunner(object):
"""Provides functionality to run optimizers on DeepOBS testproblems including
the logging of important performance metrics.
Args:
optimizer_class: Optimizer class, which should inherit from
tf.train.Optimizer and/or obey the same interface for ``.minimize()``.
hyperparams: A list describing the optimizer's hyperparameters other
than learning rate. Each entry of the list is a dictionary describing
one of the hyperparameters. This dictionary is expected to have the
following two fields:
- hyperparams["name"] must contain the name of the parameter (i.e.,
the exact name of the corresponding keyword argument to the
optimizer class' init function.
- hyperparams["type"] specifies the type of the parameter (e.g.,
``int``, ``float``, ``bool``).
Optionally, the dictionary can have a third field indexed by the key
"default", which specifies a default value for the hyperparameter.
Example
--------
    >>> optimizer_class = tf.train.MomentumOptimizer
    >>> hyperparams = [
    ...     {"name": "momentum", "type": float},
    ...     {"name": "use_nesterov", "type": bool, "default": False}]
    >>> runner = StandardRunner(optimizer_class, hyperparams)
"""
def __init__(self, optimizer_class, hyperparams):
"""Creates a new StandardRunner.
Args:
optimizer_class: Optimizer class, which should inherit from
tf.train.Optimizer and/or obey the same interface for ``.minimize()``.
hyperparams: A list describing the optimizer's hyperparameters other
than learning rate. Each entry of the list is a dictionary describing
one of the hyperparameters. This dictionary is expected to have the
following two fields:
        - hyperparams["name"] must contain the name of the parameter (i.e.,
          the exact name of the corresponding keyword argument to the
          optimizer class's init function).
- hyperparams["type"] specifies the type of the parameter (e.g.,
``int``, ``float``, ``bool``).
Optionally, the dictionary can have a third field indexed by the key
"default", which specifies a default value for the hyperparameter.
Example:
optimizer_class = tf.train.MomentumOptimizer
hyperparams = [
{"name": "momentum", "type": float},
{"name": "use_nesterov", "type": bool, "default": False}]
        runner = StandardRunner(optimizer_class, hyperparams)
"""
self._optimizer_class = optimizer_class
self._optimizer_name = optimizer_class.__name__
self._hyperparams = hyperparams
# This function is a wrapper around _run() which grabs all non-specified
# arguments from the command line.
def run(self,
testproblem=None,
weight_decay=None,
batch_size=None,
num_epochs=None,
learning_rate=None,
lr_sched_epochs=None,
lr_sched_factors=None,
random_seed=None,
data_dir=None,
output_dir=None,
train_log_interval=None,
print_train_iter=None,
tf_logging=None,
no_logs=None,
**optimizer_hyperparams):
"""Runs a given optimizer on a DeepOBS testproblem.
This method receives all relevant options to run the optimizer on a DeepOBS
testproblem, including the hyperparameters of the optimizers, which can be
passed as keyword arguments (based on the names provided via ``hyperparams``
in the init function).
Options which are *not* passed here will
automatically be added as command line arguments. (Some of those will be
required, others will have defaults; run the script with the ``--help`` flag
to see a description of the command line interface.)
Training statistics (train/test loss/accuracy) are collected and will be
saved to a ``JSON`` output file, together with metadata. The training
statistics can optionally also be saved in TensorFlow output files and read
during training using `Tensorboard`.
Args:
testproblem (str): Name of a DeepOBS test problem.
weight_decay (float): The weight decay factor to use.
batch_size (int): The mini-batch size to use.
num_epochs (int): The number of epochs to train.
learning_rate (float): The learning rate to use. This will function as the
base learning rate when implementing a schedule using
``lr_sched_epochs`` and ``lr_sched_factors`` (see below).
lr_sched_epochs (list): A list of epoch numbers (positive integers) that
mark learning rate changes. The base learning rate is passed via
``learning_rate`` and the factors by which to change are passed via
``lr_sched_factors``.
Example: ``learning_rate=0.3``, ``lr_sched_epochs=[50, 100]``,
        ``lr_sched_factors=[0.1, 0.01]`` will start with a learning rate of
``0.3``, then decrease to ``0.1*0.3=0.03`` after training for ``50``
epochs, and decrease to ``0.01*0.3=0.003`` after training for ``100``
epochs.
      lr_sched_factors (list): A list of factors (floats) by which to change the
        learning rate. The base learning rate has to be passed via
        ``learning_rate`` and the epochs at which to change the learning rate
        have to be passed via ``lr_sched_epochs``.
Example: ``learning_rate=0.3``, ``lr_sched_epochs=[50, 100]``,
        ``lr_sched_factors=[0.1, 0.01]`` will start with a learning rate of
``0.3``, then decrease to ``0.1*0.3=0.03`` after training for ``50``
epochs, and decrease to ``0.01*0.3=0.003`` after training for ``100``
epochs.
random_seed (int): Random seed to use. If unspecified, it defaults to
``42``.
data_dir (str): Path to the DeepOBS data directory. If unspecified,
DeepOBS uses its default `/data_deepobs`.
output_dir (str): Path to the output directory. Within this directory,
subfolders for the testproblem and the optimizer are automatically
created. If unspecified, defaults to '/results'.
train_log_interval (int): Interval of steps at which to log training loss.
If unspecified it defaults to ``10``.
print_train_iter (bool): If ``True``, training loss is printed to screen.
If unspecified it defaults to ``False``.
tf_logging (bool): If ``True`` log all statistics with tensorflow summaries,
which can be viewed in real time with tensorboard. If unspecified it
defaults to ``False``.
no_logs (bool): If ``True`` no ``JSON`` files are created. If unspecified
it defaults to ``False``.
optimizer_hyperparams (dict): Keyword arguments for the hyperparameters of
the optimizer. These are the ones specified in the ``hyperparams``
dictionary passed to the ``__init__``.
"""
# We will go through all the arguments, check whether they have been passed
# to this function. If yes, we collect the (name, value) pairs in ``args``.
# If not, we add corresponding command line arguments.
args = {}
parser = argparse.ArgumentParser(
description="Run {0:s} on a DeepOBS test problem.".format(
self._optimizer_name))
if testproblem is None:
parser.add_argument(
"testproblem",
help="""Name of the DeepOBS testproblem
              (e.g. 'cifar10_3c3d')""")
else:
args["testproblem"] = testproblem
if weight_decay is None:
parser.add_argument(
"--weight_decay",
"--wd",
type=float,
help="""Factor
          used for the weight decay. If not given, the default weight decay for
this model is used. Note that not all models use weight decay and this
value will be ignored in such a case.""")
else:
args["weight_decay"] = weight_decay
if batch_size is None:
parser.add_argument(
"--batch_size",
"--bs",
required=True,
type=int,
help="The batch size (positive integer).")
else:
args["batch_size"] = batch_size
if num_epochs is None:
parser.add_argument(
"-N",
"--num_epochs",
required=True,
type=int,
help="Total number of training epochs.")
else:
args["num_epochs"] = num_epochs
if learning_rate is None:
parser.add_argument(
"--learning_rate",
"--lr",
required=True,
type=float,
help=
"""Learning rate (positive float) to use. Can be used as the base
of a learning rate schedule when used in conjunction with
--lr_sched_epochs and --lr_sched_factors.""")
else:
args["learning_rate"] = learning_rate
if lr_sched_epochs is None:
parser.add_argument(
"--lr_sched_epochs",
nargs="+",
type=int,
help="""One or more epoch numbers (positive integers) that mark
learning rate changes. The base learning rate has to be passed via
          '--learning_rate' and the factors by which to change have to be passed
via '--lr_sched_factors'. Example: '--lr 0.3 --lr_sched_epochs 50 100
--lr_sched_factors 0.1 0.01' will start with a learning rate of 0.3,
then decrease to 0.1*0.3=0.03 after training for 50 epochs, and
          decrease to 0.01*0.3=0.003 after training for 100 epochs.""")
else:
args["lr_sched_epochs"] = lr_sched_epochs
if lr_sched_factors is None:
parser.add_argument(
"--lr_sched_factors",
nargs="+",
type=float,
help=
"""One or more factors (floats) by which to change the learning
          rate. The base learning rate has to be passed via '--learning_rate' and
          the epochs at which to change the learning rate have to be passed via
          '--lr_sched_epochs'. Example: '--lr 0.3 --lr_sched_epochs 50 100
--lr_sched_factors 0.1 0.01' will start with a learning rate of 0.3,
then decrease to 0.1*0.3=0.03 after training for 50 epochs, and
          decrease to 0.01*0.3=0.003 after training for 100 epochs.""")
else:
args["lr_sched_factors"] = lr_sched_factors
if random_seed is None:
parser.add_argument(
"-r",
"--random_seed",
type=int,
default=42,
help="An integer to set as tensorflow's random seed.")
else:
args["random_seed"] = random_seed
if data_dir is None:
parser.add_argument(
"--data_dir",
help="""Path to the base data dir. If
not specified, DeepOBS uses its default.""")
else:
args["data_dir"] = data_dir
if output_dir is None:
parser.add_argument(
"--output_dir",
type=str,
default="results",
help="""Path to the base directory in which output files will be
stored. Results will automatically be sorted into subdirectories of
the form 'testproblem/optimizer'.""")
else:
args["output_dir"] = output_dir
if train_log_interval is None:
parser.add_argument(
"--train_log_interval",
type=int,
default=10,
help="Interval of steps at which training loss is logged.")
else:
args["train_log_interval"] = train_log_interval
if print_train_iter is None:
parser.add_argument(
"--print_train_iter",
action="store_const",
const=True,
default=False,
help="""Add this flag to print mini-batch training loss to
stdout on each (logged) iteration.""")
else:
args["print_train_iter"] = print_train_iter
if tf_logging is None:
parser.add_argument(
"--tf_logging",
action="store_const",
const=True,
default=False,
help="""Add this flag to log statistics using tensorflow
(to view in tensorboard).""")
else:
args["tf_logging"] = tf_logging
if no_logs is None:
parser.add_argument(
"--no_logs",
action="store_const",
const=True,
default=False,
help="""Add this flag to not save any json logging files.""")
else:
args["no_logs"] = no_logs
# Optimizer hyperparams
for hp in self._hyperparams:
hp_name = hp["name"]
if hp_name in optimizer_hyperparams:
args[hp_name] = optimizer_hyperparams[hp_name]
else: # hp_name not in optimizer_hyperparams
hp_type = hp["type"]
if "default" in hp:
hp_default = hp["default"]
parser.add_argument(
"--{0:s}".format(hp_name),
type=hp_type,
default=hp_default,
help="""Hyperparameter {0:s} of {1:s} ({2:s};
defaults to {3:s}).""".format(hp_name, self._optimizer_name,
str(hp_type), str(hp_default)))
else:
parser.add_argument(
"--{0:s}".format(hp_name),
type=hp_type,
required=True,
help="Hyperparameter {0:s} of {1:s} ({2:s}).".format(
hp_name, self._optimizer_name, str(hp_type)))
# Get the command line arguments and add them to the ``args`` dict. Then
# call the _run function with those arguments.
cmdline_args = vars(parser.parse_args())
args.update(cmdline_args)
self._run(**args)
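    # A hypothetical command line for the parser assembled above (script name
    # and the 'momentum' hyperparameter are illustrative; the required flags
    # per the definitions are the positional testproblem, --bs, -N and --lr):
    #
    #   python my_runner_script.py cifar10_3c3d --bs 128 -N 100 --lr 0.01 --momentum 0.9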
def _run(self, testproblem, weight_decay, batch_size, num_epochs,
learning_rate, lr_sched_epochs, lr_sched_factors, random_seed,
data_dir, output_dir, train_log_interval, print_train_iter,
tf_logging, no_logs, **optimizer_hyperparams):
"""Performs the actual run, given all the arguments."""
# Set data directory of DeepOBS.
if data_dir is not None:
config.set_data_dir(data_dir)
# Find testproblem by name and instantiate with batch size and weight decay.
try:
testproblem_mod = importlib.import_module(testproblem)
testproblem_cls = getattr(testproblem_mod, testproblem)
print("Loading local testproblem.")
        except (ImportError, AttributeError):
testproblem_cls = getattr(testproblems, testproblem)
if weight_decay is not None:
tproblem = testproblem_cls(batch_size, weight_decay)
else:
tproblem = testproblem_cls(batch_size)
# Set up the testproblem.
tf.reset_default_graph()
tf.set_random_seed(random_seed)
tproblem.set_up()
loss = tf.reduce_mean(tproblem.losses) + tproblem.regularizer
# Set up the optimizer and create learning rate schedule.
global_step = tf.Variable(0, trainable=False)
learning_rate_var = tf.Variable(learning_rate, trainable=False)
opt = self._optimizer_class(learning_rate_var, **optimizer_hyperparams)
lr_schedule = runner_utils.make_lr_schedule(
learning_rate, lr_sched_epochs, lr_sched_factors)
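        # Per the --lr_sched_* help text above, learning_rate=0.3 with
        # lr_sched_epochs=[50, 100] and lr_sched_factors=[0.1, 0.01] should
        # yield an epoch -> learning rate mapping along the lines of
        # {0: 0.3, 50: 0.03, 100: 0.003}, which the training loop below
        # applies via learning_rate_var.assign().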
# Call optimizer's minimize on loss to update all variables in the
# TRAINABLE_VARIABLES collection (with a dependency on performing all ops
        # in the UPDATE_OPS collection for batch norm, etc.).
with tf.control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
step = opt.minimize(loss, global_step=global_step)
# Create output folder
if not no_logs:
run_folder_name, file_name = runner_utils.make_run_name(
weight_decay, batch_size, num_epochs, learning_rate,
lr_sched_epochs, lr_sched_factors, random_seed,
**optimizer_hyperparams)
directory = os.path.join(output_dir, testproblem, self._optimizer_name,
run_folder_name)
if not os.path.exists(directory):
os.makedirs(directory)
# Lists to track train/test loss and accuracy.
train_losses = []
test_losses = []
minibatch_train_losses = []
train_accuracies = []
test_accuracies = []
# Tensorboard summaries
if tf_logging:
# per iteration
mb_train_loss_summary = tf.summary.scalar(
"training/minibatch_train_losses",
loss,
collections=[tf.GraphKeys.SUMMARIES, "per_iteration"])
# per epoch
lr_summary = tf.summary.scalar(
"hyperparams/learning_rate",
learning_rate_var,
collections=[tf.GraphKeys.SUMMARIES, "per_epoch"])
batch_summary = tf.summary.scalar(
"hyperparams/batch_size",
batch_size,
collections=[tf.GraphKeys.SUMMARIES, "per_epoch"])
per_iter_summaries = tf.summary.merge_all(key="per_iteration")
per_epoch_summaries = tf.summary.merge_all(key="per_epoch")
summary_writer = tf.summary.FileWriter(directory)
# Start tensorflow session and initialize variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Wrapper functions for the evaluation phase.
def evaluate(test=True):
"""Computes average loss and accuracy in the evaluation phase."""
if test:
sess.run(tproblem.test_init_op)
msg = "TEST:"
loss_list = test_losses
acc_list = test_accuracies
else:
sess.run(tproblem.train_eval_init_op)
msg = "TRAIN:"
loss_list = train_losses
acc_list = train_accuracies
# Compute average loss and (if applicable) accuracy.
loss_ = 0.0
num_iters = 0.0
acc_ = 0.0
if tproblem.accuracy is not None:
while True:
try:
l_, a_ = sess.run([loss, tproblem.accuracy])
loss_ += l_
acc_ += a_
num_iters += 1.0
except tf.errors.OutOfRangeError:
break
else: # accuracy is None
acc_ = 0.0
while True:
try:
l_ = sess.run(loss)
loss_ += l_
num_iters += 1.0
except tf.errors.OutOfRangeError:
break
loss_ /= num_iters
acc_ /= num_iters
# Print and log the results.
loss_list.append(loss_)
acc_list.append(acc_)
# Log results to tensorflow summaries
if tf_logging:
if test:
tag = "epoch/test_"
else:
tag = "epoch/train_"
summary = tf.Summary()
summary.value.add(tag=tag + "loss_", simple_value=loss_)
summary.value.add(tag=tag + "acc_", simple_value=acc_)
per_epoch_summary_ = sess.run(per_epoch_summaries)
summary_writer.add_summary(per_epoch_summary_,
len(loss_list) - 1)
summary_writer.add_summary(summary, len(loss_list) - 1)
summary_writer.flush()
print("{0:s} loss {1:g}, acc {2:f}".format(msg, loss_, acc_))
# Start of training loop.
for n in range(num_epochs + 1):
# Evaluate at beginning of epoch.
print("********************************")
print("Evaluating after {0:d} of {1:d} epochs...".format(
n, num_epochs))
evaluate(test=False)
evaluate(test=True)
print("********************************")
# Break from train loop after the last round of evaluation
if n == num_epochs:
break
# Training
if n in lr_schedule:
sess.run(learning_rate_var.assign(lr_schedule[n]))
print("Setting learning rate to {0:f}".format(lr_schedule[n]))
sess.run(tproblem.train_init_op)
s = 0
while True:
try:
# Training step, with logging if we hit the train_log_interval
if s % train_log_interval == 0:
if tf_logging:
_, loss_, per_iter_summary_ = sess.run(
[step, loss, per_iter_summaries])
summary_writer.add_summary(per_iter_summary_,
sess.run(global_step))
else:
_, loss_ = sess.run([step, loss])
minibatch_train_losses.append(loss_.astype(float))
if print_train_iter:
print("Epoch {0:d}, step {1:d}: loss {2:g}".format(
n, s, loss_))
else:
sess.run(step)
s += 1
except tf.errors.OutOfRangeError:
break
sess.close()
# --- End of training loop.
# Put results into output dictionary.
output = {
"train_losses": train_losses,
"test_losses": test_losses,
"minibatch_train_losses": minibatch_train_losses
}
if tproblem.accuracy is not None:
output["train_accuracies"] = train_accuracies
output["test_accuracies"] = test_accuracies
# Put all run parameters into output dictionary.
output["optimizer"] = self._optimizer_name
output["testproblem"] = testproblem
output["weight_decay"] = weight_decay
output["batch_size"] = batch_size
output["num_epochs"] = num_epochs
output["learning_rate"] = learning_rate
output["lr_sched_epochs"] = lr_sched_epochs
output["lr_sched_factors"] = lr_sched_factors
output["random_seed"] = random_seed
output["train_log_interval"] = train_log_interval
# Add optimizer hyperparameters as a sub-dictionary.
output["hyperparams"] = optimizer_hyperparams
# Dump output into json file.
if not no_logs:
with open(os.path.join(directory, file_name + ".json"), "w") as f:
json.dump(output, f)
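# A minimal usage sketch for this runner (assuming it is DeepOBS's tensorflow
# StandardRunner and that the argument-parsing method above is its public
# run(); optimizer choice and hyperparameter values are illustrative):
#
#   import tensorflow as tf
#   import deepobs.tensorflow as tfobs
#
#   runner = tfobs.runners.StandardRunner(
#       tf.train.MomentumOptimizer,
#       [{"name": "momentum", "type": float, "default": 0.9}])
#   runner.run(testproblem="cifar10_3c3d", batch_size=128, num_epochs=100,
#              learning_rate=0.01)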
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import os
import pytest
from unittest import mock
from unittest.mock import patch
import google.auth
from google.auth import credentials
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.metadata.metadata import metadata_service
from google.cloud.aiplatform.constants import base as constants
from google.cloud.aiplatform import utils
from google.cloud.aiplatform_v1.services.model_service import (
client as model_service_client,
)
_TEST_PROJECT = "test-project"
_TEST_PROJECT_2 = "test-project-2"
_TEST_LOCATION = "us-central1"
_TEST_LOCATION_2 = "europe-west4"
_TEST_INVALID_LOCATION = "test-invalid-location"
_TEST_EXPERIMENT = "test-experiment"
_TEST_DESCRIPTION = "test-description"
_TEST_STAGING_BUCKET = "test-bucket"
class TestInit:
def setup_method(self):
importlib.reload(initializer)
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
def test_init_project_sets_project(self):
initializer.global_config.init(project=_TEST_PROJECT)
assert initializer.global_config.project == _TEST_PROJECT
def test_not_init_project_gets_default_project(self, monkeypatch):
def mock_auth_default():
return None, _TEST_PROJECT
monkeypatch.setattr(google.auth, "default", mock_auth_default)
assert initializer.global_config.project == _TEST_PROJECT
def test_init_location_sets_location(self):
initializer.global_config.init(location=_TEST_LOCATION)
assert initializer.global_config.location == _TEST_LOCATION
def test_not_init_location_gets_default_location(self):
assert initializer.global_config.location == constants.DEFAULT_REGION
def test_init_location_with_invalid_location_raises(self):
with pytest.raises(ValueError):
initializer.global_config.init(location=_TEST_INVALID_LOCATION)
@patch.object(metadata_service, "set_experiment")
def test_init_experiment_sets_experiment(self, set_experiment_mock):
initializer.global_config.init(experiment=_TEST_EXPERIMENT)
set_experiment_mock.assert_called_once_with(
experiment=_TEST_EXPERIMENT, description=None
)
@patch.object(metadata_service, "set_experiment")
def test_init_experiment_sets_experiment_with_description(
self, set_experiment_mock
):
initializer.global_config.init(
experiment=_TEST_EXPERIMENT, experiment_description=_TEST_DESCRIPTION
)
set_experiment_mock.assert_called_once_with(
experiment=_TEST_EXPERIMENT, description=_TEST_DESCRIPTION
)
def test_init_experiment_description_fail_without_experiment(self):
with pytest.raises(ValueError):
initializer.global_config.init(experiment_description=_TEST_DESCRIPTION)
def test_init_staging_bucket_sets_staging_bucket(self):
initializer.global_config.init(staging_bucket=_TEST_STAGING_BUCKET)
assert initializer.global_config.staging_bucket == _TEST_STAGING_BUCKET
def test_init_credentials_sets_credentials(self):
creds = credentials.AnonymousCredentials()
initializer.global_config.init(credentials=creds)
assert initializer.global_config.credentials is creds
def test_common_location_path_returns_parent(self):
initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
true_resource_parent = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
assert true_resource_parent == initializer.global_config.common_location_path()
def test_common_location_path_overrides(self):
initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
true_resource_parent = (
f"projects/{_TEST_PROJECT_2}/locations/{_TEST_LOCATION_2}"
)
assert true_resource_parent == initializer.global_config.common_location_path(
project=_TEST_PROJECT_2, location=_TEST_LOCATION_2
)
def test_create_client_returns_client(self):
initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
client = initializer.global_config.create_client(
client_class=utils.ModelClientWithOverride
)
assert client._client_class is model_service_client.ModelServiceClient
assert isinstance(client, utils.ModelClientWithOverride)
assert (
client._transport._host == f"{_TEST_LOCATION}-{constants.API_BASE_PATH}:443"
)
def test_create_client_overrides(self):
initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
creds = credentials.AnonymousCredentials()
client = initializer.global_config.create_client(
client_class=utils.ModelClientWithOverride,
credentials=creds,
location_override=_TEST_LOCATION_2,
prediction_client=True,
)
assert isinstance(client, utils.ModelClientWithOverride)
assert (
client._transport._host
== f"{_TEST_LOCATION_2}-{constants.API_BASE_PATH}:443"
)
assert client._transport._credentials == creds
def test_create_client_user_agent(self):
initializer.global_config.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
client = initializer.global_config.create_client(
client_class=utils.ModelClientWithOverride
)
for wrapped_method in client._transport._wrapped_methods.values():
# wrapped_method._metadata looks like:
# [('x-goog-api-client', 'model-builder/0.3.1 gl-python/3.7.6 grpc/1.30.0 gax/1.22.2 gapic/0.3.1')]
user_agent = wrapped_method._metadata[0][1]
assert user_agent.startswith("model-builder/")
@pytest.mark.parametrize(
"init_location, location_override, expected_endpoint",
[
("us-central1", None, "us-central1-aiplatform.googleapis.com"),
("us-central1", "europe-west4", "europe-west4-aiplatform.googleapis.com",),
("asia-east1", None, "asia-east1-aiplatform.googleapis.com"),
(
"asia-southeast1",
"australia-southeast1",
"australia-southeast1-aiplatform.googleapis.com",
),
],
)
def test_get_client_options(
self, init_location: str, location_override: str, expected_endpoint: str,
):
initializer.global_config.init(location=init_location)
assert (
initializer.global_config.get_client_options(
location_override=location_override
).api_endpoint
== expected_endpoint
)
def test_get_client_options_with_api_override(self):
initializer.global_config.init(location="asia-east1")
client_options = initializer.global_config.get_client_options(
api_base_path_override="override.googleapis.com"
)
assert client_options.api_endpoint == "asia-east1-override.googleapis.com"
class TestThreadPool:
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
@pytest.mark.parametrize(
"cpu_count, expected", [(4, 20), (32, 32), (None, 4), (2, 10)]
)
def test_max_workers(self, cpu_count, expected):
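        # The expected worker counts are consistent with
        # min(32, 5 * cpu_count), with a small fallback (here 4) when
        # os.cpu_count() returns None -- an inference from these cases, not a
        # documented formula.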
with mock.patch.object(os, "cpu_count") as cpu_count_mock:
cpu_count_mock.return_value = cpu_count
importlib.reload(initializer)
assert initializer.global_pool._max_workers == expected
|
|
"""
Internal module for the plugin system;
the API is exposed via __init__.py.
"""
import django.contrib.auth.context_processors
import django.contrib.messages.context_processors
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db import DatabaseError
from django.forms import Media, MediaDefiningClass
from django.http import HttpResponsePermanentRedirect, HttpResponseRedirect
from django.template import context_processors
from django.template.context import Context
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.html import escape, linebreaks
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
from future.builtins import str
from future.utils import with_metaclass
from fluent_contents.cache import get_placeholder_cache_key, get_rendering_cache_key
from fluent_contents.forms import ContentItemForm
from fluent_contents.models import DEFAULT_TIMEOUT, ContentItemOutput, ImmutableMedia
from fluent_contents.utils.search import clean_join, get_search_field_values
# Some standard request processors to use in the plugins,
# Naturally, you want STATIC_URL to be available in plugins.
def _add_debug(request):
return {"debug": settings.DEBUG}
_STANDARD_REQUEST_CONTEXT_PROCESSORS = (
context_processors.csrf,
context_processors.debug,
context_processors.i18n,
context_processors.media,
context_processors.request,
context_processors.static,
django.contrib.auth.context_processors.auth,
django.contrib.messages.context_processors.messages,
_add_debug,
)
class PluginContext(Context):
"""
    A template Context class similar to :class:`~django.template.context.RequestContext`, that comes with some pre-filled data.
This ensures that variables such as ``STATIC_URL`` and ``request`` are available in the plugin templates.
"""
def __init__(self, request, dict=None, current_app=None):
        # If there is any reason to have site-global context processors for plugins,
        # I'd like to know the use case, and it could be implemented here.
if current_app is None:
# Avoid RemovedInDjango110Warning
Context.__init__(self, dict)
else:
Context.__init__(self, dict, current_app=current_app)
for processor in _STANDARD_REQUEST_CONTEXT_PROCESSORS:
self.update(processor(request))
def frontend_media_property(cls):
# Identical to the media_property, adapted to read the "FrontendMedia" class
# and optimized to avoid useless object creation.
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.frontend_media
except AttributeError:
base = ImmutableMedia.empty_instance
# Get the media definition for this class
definition = getattr(cls, "FrontendMedia", None)
if definition:
media = Media(definition)
# Not supporting extend=('js',) here, not documented in Django either.
if (
getattr(definition, "extend", True)
and base is not ImmutableMedia.empty_instance
):
return base + media
return media
else:
return base
return property(_media)
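# A sketch of the "FrontendMedia" declaration that frontend_media_property()
# reads -- the same attributes Django's Media class accepts (plugin name and
# asset paths are hypothetical):
#
#   class MyPlugin(ContentPlugin):
#       class FrontendMedia:
#           css = {"all": ("myplugin/appearance.css",)}
#           js = ("myplugin/behavior.js",)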
class PluginMediaDefiningClass(MediaDefiningClass):
"Metaclass for classes that can have media definitions"
def __new__(cls, name, bases, attrs):
new_class = super(PluginMediaDefiningClass, cls).__new__(
cls, name, bases, attrs
)
if "frontend_media" not in attrs and "FrontendMedia" in attrs:
new_class.frontend_media = frontend_media_property(new_class)
return new_class
class ContentPlugin(with_metaclass(PluginMediaDefiningClass, object)):
"""
The base class for all content plugins.
A plugin defines the rendering for a :class:`~fluent_contents.models.ContentItem`, settings and presentation in the admin interface.
To create a new plugin, derive from this class and call :func:`plugin_pool.register <PluginPool.register>` to enable it.
For example:
.. code-block:: python
from fluent_contents.extensions import plugin_pool, ContentPlugin
@plugin_pool.register
class AnnouncementBlockPlugin(ContentPlugin):
model = AnnouncementBlockItem
render_template = "plugins/announcementblock.html"
category = _("Simple blocks")
As minimal configuration, specify the :attr:`model` and :attr:`render_template` fields.
The :attr:`model` should be a subclass of the :class:`~fluent_contents.models.ContentItem` model class.
.. note::
When the plugin is registered in the :attr:`plugin_pool`, it will be instantiated only once.
It is therefore not possible to store per-request state at the plugin object.
This is similar to the behavior of the :class:`~django.contrib.admin.ModelAdmin` classes in Django.
To customize the admin, the :attr:`admin_form_template` and :attr:`form` can be defined.
Some well known properties of the :class:`~django.contrib.admin.ModelAdmin` class can also be specified on plugins;
such as:
* :attr:`~django.contrib.admin.ModelAdmin.fieldsets`
* :attr:`~django.contrib.admin.ModelAdmin.filter_horizontal`
* :attr:`~django.contrib.admin.ModelAdmin.filter_vertical`
* :attr:`~django.contrib.admin.ModelAdmin.prepopulated_fields`
* :attr:`~django.contrib.admin.ModelAdmin.radio_fields`
* :attr:`~django.contrib.admin.ModelAdmin.raw_id_fields`
* :attr:`~django.contrib.admin.ModelAdmin.readonly_fields`
* A ``class Media`` to provide extra CSS and JavaScript files for the admin interface.
The rendered output of a plugin is cached by default, assuming that most content is static.
This also avoids extra database queries to retrieve the model objects.
In case the plugin needs to output content dynamically, include ``cache_output = False`` in the plugin definition.
"""
#: .. versionadded:: 1.1
#: Category for media
MEDIA = _("Media")
#: .. versionadded:: 1.1
#: Category for programming plugins
PROGRAMMING = _("Programming")
#: .. versionadded:: 1.1
#: Category for interactive plugins (e.g. forms, comments)
INTERACTIVITY = _("Interactivity")
#: .. versionadded:: 1.1
#: Category for advanced plugins (e.g. raw HTML, iframes)
ADVANCED = _("Advanced")
# -- Settings to override:
#: The model to use, must derive from :class:`fluent_contents.models.ContentItem`.
model = None
#: The form to use in the admin interface. By default it uses a :class:`fluent_contents.models.ContentItemForm`.
form = ContentItemForm
#: The template to render the admin interface with
admin_form_template = "admin/fluent_contents/contentitem/admin_form.html"
#: An optional template which is included in the admin interface, to initialize components (e.g. JavaScript)
admin_init_template = None
#: The fieldsets for the admin view.
fieldsets = None
#: The template to render the frontend HTML output.
render_template = None
#: By default, rendered output is cached, and updated on admin changes.
cache_output = True
#: .. versionadded:: 0.9
#: Cache the plugin output per :django:setting:`SITE_ID`.
cache_output_per_site = False
#: .. versionadded:: 1.0
#: Cache the plugin output per language.
#: This can be useful for sites which either:
#:
#: * Display fallback content on pages, but still use ``{% trans %}`` inside templates.
#: * Dynamically switch the language per request, and *share* content between multiple languages.
#:
#: This option does not have to be used for translated CMS pages,
    #: as each page can have its own set of :class:`~fluent_contents.models.ContentItem` objects.
#: It's only needed for rendering the *same* item in different languages.
cache_output_per_language = False
    #: .. versionadded:: 1.0
#: Set a custom cache timeout value
cache_timeout = DEFAULT_TIMEOUT
#: .. versionadded:: 1.0
#: Tell which languages the plugin will cache.
#: It defaults to the language codes from the :django:setting:`LANGUAGES` setting.
cache_supported_language_codes = [code for code, _ in settings.LANGUAGES]
#: The category title to place the plugin into.
#: This is only used for the "Add Plugin" menu.
#: You can provide a string here, :func:`~django.utils.translation.ugettext_lazy`
    #: or one of the predefined constants (:attr:`MEDIA`, :attr:`INTERACTIVITY`, :attr:`PROGRAMMING` and :attr:`ADVANCED`).
category = None
#: .. versionadded:: 1.0
#: By default, the plugin is rendered in the :attr:`language_code` it's written in.
#: It can be disabled explicitly in case the content should be rendered language agnostic.
#: For plugins that cache output per language, this will be done already.
#:
#: See also: :attr:`cache_output_per_language`
render_ignore_item_language = False
#: Alternative template for the view.
ADMIN_TEMPLATE_WITHOUT_LABELS = (
"admin/fluent_contents/contentitem/admin_form_without_labels.html"
)
#: .. versionadded:: 0.8.5
#: The ``HORIZONTAL`` constant for the :attr:`radio_fields`.
HORIZONTAL = admin.HORIZONTAL
#: .. versionadded:: 0.8.5
#: The ``VERTICAL`` constant for the :attr:`radio_fields`.
VERTICAL = admin.VERTICAL
#: The fields to display as raw ID
raw_id_fields = ()
#: The fields to display in a vertical filter
filter_vertical = ()
#: The fields to display in a horizontal filter
filter_horizontal = ()
#: The fields to display as radio choice. For example::
#:
#: radio_fields = {
#: 'align': ContentPlugin.VERTICAL,
#: }
#:
#: The value can be :attr:`ContentPlugin.HORIZONTAL` or :attr:`ContentPlugin.VERTICAL`.
radio_fields = {}
#: Fields to automatically populate with values
prepopulated_fields = {}
#: Overwritten formfield attributes, e.g. the 'widget'. Allows both the class and fieldname as key.
formfield_overrides = {}
#: The fields to display as readonly.
readonly_fields = ()
#: Define which fields could be used for indexing the plugin in a site (e.g. haystack)
search_fields = []
#: Define whether the full output should be used for indexing.
search_output = None
def __repr__(self):
return "<{0} for {1} model>".format(
self.__class__.__name__, self.model.__name__
)
@property
def verbose_name(self):
"""
The title for the plugin, by default it reads the ``verbose_name`` of the model.
"""
return self.model._meta.verbose_name
@property
def name(self):
"""
        Return the classname of the plugin; this is mainly provided for templates.
This value can also be used in :func:`PluginPool`.
"""
return self.__class__.__name__
@property
def type_name(self):
"""
        Return the classname of the model; this is mainly provided for templates.
"""
return self.model.__name__
@cached_property
def type_id(self):
"""
Shortcut to retrieving the ContentType id of the model.
"""
try:
return ContentType.objects.get_for_model(
self.model, for_concrete_model=False
).id
except DatabaseError as e:
raise DatabaseError(
"Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(
str(e)
)
)
def get_model_instances(self):
"""
Return the model instances the plugin has created.
"""
return self.model.objects.all()
def _render_contentitem(self, request, instance):
# Internal wrapper for render(), to allow updating the method signature easily.
# It also happens to really simplify code navigation.
result = self.render(request=request, instance=instance)
if isinstance(result, ContentItemOutput):
# Return in new 1.0 format
# Also include the statically declared FrontendMedia, inserted before any extra added files.
# These could be included already in the ContentItemOutput object, but duplicates are removed.
media = self.get_frontend_media(instance)
if media is not ImmutableMedia.empty_instance:
result._insert_media(media)
return result
elif isinstance(result, (HttpResponseRedirect, HttpResponsePermanentRedirect)):
            # Can't return an HTTP response from a plugin that is rendered as a string in a template.
# However, this response can be translated into our custom exception-based redirect mechanism.
return self.redirect(result["Location"], result.status_code)
else:
# Old 0.9 syntax, wrap it.
# The 'cacheable' is implied in the rendering already, but this is just for completeness.
media = self.get_frontend_media(instance)
return ContentItemOutput(
result,
media,
cacheable=self.cache_output,
cache_timeout=self.cache_timeout,
)
def get_output_cache_base_key(self, placeholder_name, instance):
"""
.. versionadded:: 1.0
Return the default cache key, both :func:`get_output_cache_key` and :func:`get_output_cache_keys` rely on this.
By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`.
"""
return get_rendering_cache_key(placeholder_name, instance)
def get_output_cache_key(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the default cache key which is used to store a rendered item.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
"""
cachekey = self.get_output_cache_base_key(placeholder_name, instance)
if self.cache_output_per_site:
cachekey = "{0}-s{1}".format(cachekey, settings.SITE_ID)
# Append language code
if self.cache_output_per_language:
# NOTE: Not using self.language_code, but using the current language instead.
# That is what the {% trans %} tags are rendered as after all.
# The render_placeholder() code can switch the language if needed.
user_language = get_language()
if user_language not in self.cache_supported_language_codes:
user_language = "unsupported"
cachekey = "{0}.{1}".format(cachekey, user_language)
return cachekey
def get_output_cache_keys(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the possible cache keys for a rendered item.
        This method should be overwritten when implementing a custom :func:`set_cached_output` method
        or a custom :func:`get_output_cache_key` function.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
"""
base_key = self.get_output_cache_base_key(placeholder_name, instance)
cachekeys = [base_key]
if self.cache_output_per_site:
site_ids = list(Site.objects.values_list("pk", flat=True))
if settings.SITE_ID not in site_ids:
site_ids.append(settings.SITE_ID)
base_key = get_rendering_cache_key(placeholder_name, instance)
cachekeys = ["{0}-s{1}".format(base_key, site_id) for site_id in site_ids]
if self.cache_output_per_language or self.render_ignore_item_language:
# Append language code to all keys,
# have to invalidate a lot more items in memcache.
# Also added "None" suffix, since get_parent_language_code() may return that.
# TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed.
total_list = []
cache_languages = list(self.cache_supported_language_codes) + [
"unsupported",
"None",
]
# All variants of the Placeholder (for full page caching)
placeholder = instance.placeholder
total_list.extend(
get_placeholder_cache_key(placeholder, lc) for lc in cache_languages
)
# All variants of the ContentItem in different languages
for user_language in cache_languages:
total_list.extend(
"{0}.{1}".format(base, user_language) for base in cachekeys
)
cachekeys = total_list
return cachekeys
def get_cached_output(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the cached output for a rendered item, or ``None`` if no output is cached.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`get_output_cache_key`
and retrieves the results from the configured Django cache backend (e.g. memcached).
"""
cachekey = self.get_output_cache_key(placeholder_name, instance)
return cache.get(cachekey)
def set_cached_output(self, placeholder_name, instance, output):
"""
.. versionadded:: 0.9
Store the cached output for a rendered item.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`
and stores the results in the configured Django cache backend (e.g. memcached).
When custom cache keys are used, also include those in :func:`get_output_cache_keys`
so the cache will be cleared when needed.
.. versionchanged:: 1.0
The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object.
"""
cachekey = self.get_output_cache_key(placeholder_name, instance)
if self.cache_timeout is not DEFAULT_TIMEOUT:
cache.set(cachekey, output, self.cache_timeout)
else:
# Don't want to mix into the default 0/None issue.
cache.set(cachekey, output)
def render(self, request, instance, **kwargs):
"""
The rendering/view function that displays a plugin model instance.
:param instance: An instance of the ``model`` the plugin uses.
:param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters.
:param kwargs: An optional slot for any new parameters.
To render a plugin, either override this function, or specify the :attr:`render_template` variable,
and optionally override :func:`get_context`.
It is recommended to wrap the output in a ``<div>`` tag,
to prevent the item from being displayed right next to the previous plugin.
.. versionadded:: 1.0
The function may either return a string of HTML code,
or return a :class:`~fluent_contents.models.ContentItemOutput` object
which holds both the CSS/JS includes and HTML string.
For the sake of convenience and simplicity, most examples
only return a HTML string directly.
When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect`
or call the :func:`redirect` method.
To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML.
"""
render_template = self.get_render_template(request, instance, **kwargs)
if not render_template:
return str(
_(u"{No rendering defined for class '%s'}" % self.__class__.__name__)
)
context = self.get_context(request, instance, **kwargs)
return self.render_to_string(request, render_template, context)
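    # A minimal override sketch, as an alternative to setting render_template
    # (hypothetical ``html`` field; ``mark_safe`` comes from
    # django.utils.safestring):
    #
    #   def render(self, request, instance, **kwargs):
    #       return mark_safe(u"<div>%s</div>" % escape(instance.html))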
def render_to_string(self, request, template, context, content_instance=None):
"""
Render a custom template with the :class:`~PluginContext` as context instance.
"""
if not content_instance:
content_instance = PluginContext(request)
content_instance.update(context)
return render_to_string(template, content_instance.flatten(), request=request)
def render_error(self, error):
"""
A default implementation to render an exception.
"""
return (
'<div style="color: red; border: 1px solid red; padding: 5px;">'
"<p><strong>%s</strong></p>%s</div>"
% (_("Error:"), linebreaks(escape(str(error))))
)
def redirect(self, url, status=302):
"""
.. versionadded:: 1.0
Request a redirect to be performed for the user.
Usage example:
.. code-block:: python
def get_context(self, request, instance, **kwargs):
context = super(IdSearchPlugin, self).get_context(request, instance, **kwargs)
if request.method == "POST":
form = MyForm(request.POST)
if form.is_valid():
self.redirect("/foo/")
else:
form = MyForm()
context['form'] = form
return context
To handle redirects, :class:`fluent_contents.middleware.HttpRedirectRequestMiddleware`
should be added to the :django:setting:`MIDDLEWARE_CLASSES`.
"""
raise HttpRedirectRequest(url, status=status)
def get_render_template(self, request, instance, **kwargs):
"""
        Return the template to render for the specific model `instance` or `request`.
By default it uses the ``render_template`` attribute.
"""
return self.render_template
def get_context(self, request, instance, **kwargs):
"""
Return the context to use in the template defined by ``render_template`` (or :func:`get_render_template`).
By default, it returns the model instance as ``instance`` field in the template.
"""
return {"instance": instance}
@property
def frontend_media(self):
"""
.. versionadded:: 1.0
The frontend media, typically declared using a ``class FrontendMedia`` definition.
"""
# By adding this property, frontend_media_property() is further optimized.
return ImmutableMedia.empty_instance
def get_frontend_media(self, instance):
"""
Return the frontend media for a specific instance.
By default, it returns ``self.frontend_media``, which derives
from the ``class FrontendMedia`` of the plugin.
"""
return self.frontend_media
def get_search_text(self, instance):
"""
Return a custom search text for a given instance.
.. note:: This method is called when :attr:`search_fields` is set.
"""
bits = get_search_field_values(instance)
return clean_join(u" ", bits)
class HttpRedirectRequest(Exception):
"""
.. versionadded:: 1.0
Request for a redirect from within a view.
"""
def __init__(self, url, status=302):
super(HttpRedirectRequest, self).__init__(
"A redirect to '{0}' was requested by a plugin.\n"
"Please add 'fluent_contents.middleware.HttpRedirectRequestMiddleware' "
"to MIDDLEWARE_CLASSES to handle redirects by plugins.".format(url)
)
self.url = str(url) if url else url
self.status = status
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that reads omahaproxy and gsutil to determine version of SDK to put
in manifest.
"""
# pylint is convinced the email module is missing attributes
# pylint: disable=E1101
import buildbot_common
import csv
import cStringIO
import difflib
import email
import json
import manifest_util
import optparse
import os
import posixpath
import re
import smtplib
import subprocess
import sys
import time
import traceback
import urllib2
MANIFEST_BASENAME = 'naclsdk_manifest2.json'
SCRIPT_DIR = os.path.dirname(__file__)
REPO_MANIFEST = os.path.join(SCRIPT_DIR, 'json', MANIFEST_BASENAME)
GS_BUCKET_PATH = 'gs://nativeclient-mirror/nacl/nacl_sdk/'
GS_SDK_MANIFEST = GS_BUCKET_PATH + MANIFEST_BASENAME
GS_MANIFEST_BACKUP_DIR = GS_BUCKET_PATH + 'manifest_backups/'
CANARY_BUNDLE_NAME = 'pepper_canary'
CANARY = 'canary'
NACLPORTS_ARCHIVE_NAME = 'naclports.tar.bz2'
def SplitVersion(version_string):
"""Split a version string (e.g. "18.0.1025.163") into its components.
Note that this function doesn't handle versions in the form "trunk.###".
"""
return tuple(map(int, version_string.split('.')))
def JoinVersion(version_tuple):
"""Create a string from a version tuple.
The tuple should be of the form (18, 0, 1025, 163).
"""
return '.'.join(map(str, version_tuple))
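# Round-trip example, per the docstrings above:
#   SplitVersion('18.0.1025.163') == (18, 0, 1025, 163)
#   JoinVersion((18, 0, 1025, 163)) == '18.0.1025.163'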
def GetTimestampManifestName():
"""Create a manifest name with a timestamp.
Returns:
A manifest name with an embedded date. This should make it easier to roll
back if necessary.
"""
return time.strftime('naclsdk_manifest2.%Y_%m_%d_%H_%M_%S.json',
time.gmtime())
def GetPlatformArchiveName(platform):
"""Get the basename of an archive given a platform string.
Args:
platform: One of ('win', 'mac', 'linux').
Returns:
The basename of the sdk archive for that platform.
"""
return 'naclsdk_%s.tar.bz2' % platform
def GetCanonicalArchiveName(url):
"""Get the canonical name of an archive given its URL.
This will convert "naclsdk_linux.bz2" -> "naclsdk_linux.tar.bz2", and also
remove everything but the filename of the URL.
  This is used below to determine if an expected bundle is found in a version
directory; the archives all have the same name, but may not exist for a given
version.
Args:
url: The url to parse.
Returns:
The canonical name as described above.
"""
name = posixpath.basename(url)
match = re.match(r'naclsdk_(.*?)(?:\.tar)?\.bz2', name)
if match:
return 'naclsdk_%s.tar.bz2' % match.group(1)
return name
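# Examples, per the docstring and regex above:
#   GetCanonicalArchiveName('gs://bucket/18.0.1025.164/naclsdk_linux.bz2')
#     -> 'naclsdk_linux.tar.bz2'
#   GetCanonicalArchiveName('naclsdk_win.tar.bz2') -> 'naclsdk_win.tar.bz2'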
class Delegate(object):
"""Delegate all external access; reading/writing to filesystem, gsutil etc."""
def GetRepoManifest(self):
"""Read the manifest file from the NaCl SDK repository.
This manifest is used as a template for the auto updater; only pepper
bundles with no archives are considered for auto updating.
Returns:
A manifest_util.SDKManifest object read from the NaCl SDK repo."""
raise NotImplementedError()
def GetHistory(self):
"""Read Chrome release history from omahaproxy.appspot.com
Here is an example of data from this URL:
cros,stable,18.0.1025.168,2012-05-01 17:04:05.962578\n
win,canary,20.0.1123.0,2012-05-01 13:59:31.703020\n
mac,canary,20.0.1123.0,2012-05-01 11:54:13.041875\n
win,stable,18.0.1025.168,2012-04-30 20:34:56.078490\n
mac,stable,18.0.1025.168,2012-04-30 20:34:55.231141\n
...
Where each line has comma separated values in the following format:
platform, channel, version, date/time\n
Returns:
A list where each element is a line from the document, represented as a
tuple."""
raise NotImplementedError()
def GetTrunkRevision(self, version):
"""Given a Chrome version, get its trunk revision.
Args:
version: A version string of the form '18.0.1025.64'
Returns:
The revision number for that version, as a string."""
raise NotImplementedError()
def GsUtil_ls(self, url):
"""Runs gsutil ls |url|
Args:
url: The commondatastorage url to list.
Returns:
A list of URLs, all with the gs:// schema."""
raise NotImplementedError()
def GsUtil_cat(self, url):
"""Runs gsutil cat |url|
Args:
url: The commondatastorage url to read from.
Returns:
A string with the contents of the file at |url|."""
raise NotImplementedError()
def GsUtil_cp(self, src, dest, stdin=None):
"""Runs gsutil cp |src| |dest|
Args:
src: The file path or url to copy from.
dest: The file path or url to copy to.
stdin: If src is '-', this is used as the stdin to give to gsutil. The
effect is that text in stdin is copied to |dest|."""
raise NotImplementedError()
def Print(self, *args):
"""Print a message."""
raise NotImplementedError()
class RealDelegate(Delegate):
def __init__(self, dryrun=False, gsutil=None, verbose=False):
super(RealDelegate, self).__init__()
self.dryrun = dryrun
self.verbose = verbose
if gsutil:
self.gsutil = gsutil
else:
self.gsutil = buildbot_common.GetGsutil()
def GetRepoManifest(self):
"""See Delegate.GetRepoManifest"""
with open(REPO_MANIFEST, 'r') as sdk_stream:
sdk_json_string = sdk_stream.read()
manifest = manifest_util.SDKManifest()
manifest.LoadDataFromString(sdk_json_string, add_missing_info=True)
return manifest
def GetHistory(self):
"""See Delegate.GetHistory"""
url_stream = urllib2.urlopen('https://omahaproxy.appspot.com/history')
return [(platform, channel, version, date)
for platform, channel, version, date in csv.reader(url_stream)]
def GetTrunkRevision(self, version):
"""See Delegate.GetTrunkRevision"""
url = 'http://omahaproxy.appspot.com/revision.json?version=%s' % (version,)
data = json.loads(urllib2.urlopen(url).read())
return 'trunk.%s' % int(data['chromium_revision'])
def GsUtil_ls(self, url):
"""See Delegate.GsUtil_ls"""
try:
stdout = self._RunGsUtil(None, 'ls', url)
except subprocess.CalledProcessError:
return []
# filter out empty lines
return filter(None, stdout.split('\n'))
def GsUtil_cat(self, url):
"""See Delegate.GsUtil_cat"""
return self._RunGsUtil(None, 'cat', url)
def GsUtil_cp(self, src, dest, stdin=None):
"""See Delegate.GsUtil_cp"""
if self.dryrun:
self.Trace("Skipping upload: %s -> %s" % (src, dest))
return
return self._RunGsUtil(stdin, 'cp', '-a', 'public-read', src, dest)
def Print(self, *args):
sys.stdout.write(' '.join(map(str, args)) + '\n')
def Trace(self, *args):
if self.verbose:
self.Print(*args)
def _RunGsUtil(self, stdin, *args):
"""Run gsutil as a subprocess.
Args:
stdin: If non-None, used as input to the process.
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The stdout from the process."""
cmd = [self.gsutil] + list(args)
self.Trace("Running: %s" % str(cmd))
if stdin:
stdin_pipe = subprocess.PIPE
else:
stdin_pipe = None
try:
process = subprocess.Popen(cmd, stdin=stdin_pipe, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate(stdin)
except OSError as e:
raise manifest_util.Error("Unable to run '%s': %s" % (cmd[0], str(e)))
if process.returncode:
sys.stderr.write(stderr)
raise subprocess.CalledProcessError(process.returncode, ' '.join(cmd))
return stdout
class VersionFinder(object):
"""Finds a version of a pepper bundle that all desired platforms share.
Args:
delegate: See Delegate class above.
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
extra_archives: A sequence of tuples: (archive_basename, minimum_version),
e.g. [('foo.tar.bz2', '18.0.1000.0'), ('bar.tar.bz2', '19.0.1100.20')]
These archives must exist to consider a version for inclusion, as
long as that version is greater than the archive's minimum version.
"""
def __init__(self, delegate, platforms, extra_archives=None):
self.delegate = delegate
self.history = delegate.GetHistory()
self.platforms = platforms
self.extra_archives = extra_archives
def GetMostRecentSharedVersion(self, major_version):
"""Returns the most recent version of a pepper bundle that exists on all
given platforms.
Specifically, the resulting version should be the most recently released
(meaning closest to the top of the listing on
omahaproxy.appspot.com/history) version that has a Chrome release on all
given platforms, and has a pepper bundle archive for each platform as well.
Args:
major_version: The major version of the pepper bundle, e.g. 19.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is one of ('stable', 'beta', or 'dev').
|archives| is a list of archive URLs."""
def GetPlatformHistory(platform):
return self._GetPlatformMajorVersionHistory(major_version, platform)
shared_version_generator = self._FindNextSharedVersion(self.platforms,
GetPlatformHistory)
return self._DoGetMostRecentSharedVersion(shared_version_generator,
allow_trunk_revisions=False)
def GetMostRecentSharedCanary(self):
"""Returns the most recent version of a canary pepper bundle that exists on
all given platforms.
Canary is special-cased because we don't care about its major version; we
always use the most recent canary, regardless of major version.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is always 'canary'. |archives| is a list of
archive URLs."""
# We don't ship canary on Linux, so it won't appear in self.history.
# Instead, we can use the matching Linux trunk build for that version.
shared_version_generator = self._FindNextSharedVersion(
set(self.platforms) - set(('linux',)),
self._GetPlatformCanaryHistory)
return self._DoGetMostRecentSharedVersion(shared_version_generator,
allow_trunk_revisions=True)
def GetAvailablePlatformArchivesFor(self, version, allow_trunk_revisions):
"""Returns a sequence of archives that exist for a given version, on the
given platforms.
The second element of the returned tuple is a list of all platforms that do
not have an archive for the given version.
Args:
version: The version to find archives for. (e.g. "18.0.1025.164")
allow_trunk_revisions: If True, will search for archives using the
trunk revision that matches the branch version.
Returns:
A tuple (archives, missing_archives). |archives| is a list of archive
URLs, |missing_archives| is a list of archive names.
"""
archive_urls = self._GetAvailableArchivesFor(version)
platform_archives = set(GetPlatformArchiveName(p) for p in self.platforms)
expected_archives = platform_archives
if self.extra_archives:
for extra_archive, extra_archive_min_version in self.extra_archives:
if SplitVersion(version) >= SplitVersion(extra_archive_min_version):
expected_archives.add(extra_archive)
found_archives = set(GetCanonicalArchiveName(a) for a in archive_urls)
missing_archives = expected_archives - found_archives
if allow_trunk_revisions and missing_archives:
# Try to find trunk versions of any missing archives.
trunk_version = self.delegate.GetTrunkRevision(version)
trunk_archives = self._GetAvailableArchivesFor(trunk_version)
for trunk_archive_url in trunk_archives:
trunk_archive = GetCanonicalArchiveName(trunk_archive_url)
if trunk_archive in missing_archives:
archive_urls.append(trunk_archive_url)
missing_archives.discard(trunk_archive)
# Only return archives that are "expected".
def IsExpected(url):
return GetCanonicalArchiveName(url) in expected_archives
expected_archive_urls = [u for u in archive_urls if IsExpected(u)]
return expected_archive_urls, missing_archives
def _DoGetMostRecentSharedVersion(self, shared_version_generator,
allow_trunk_revisions):
"""Returns the most recent version of a pepper bundle that exists on all
given platforms.
This function does the real work for the public GetMostRecentShared* above.
Args:
shared_version_generator: A generator that will yield (version, channel)
tuples in order of most recent to least recent.
allow_trunk_revisions: If True, will search for archives using the
trunk revision that matches the branch version.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is one of ('stable', 'beta', 'dev',
'canary'). |archives| is a list of archive URLs."""
version = None
skipped_versions = []
channel = ''
while True:
try:
version, channel = shared_version_generator.next()
except StopIteration:
msg = 'No shared version for platforms: %s\n' % (
', '.join(self.platforms))
msg += 'Last version checked = %s.\n' % (version,)
if skipped_versions:
msg += 'Versions skipped due to missing archives:\n'
for version, channel, missing_archives in skipped_versions:
archive_msg = '(missing %s)' % (', '.join(missing_archives))
msg += ' %s (%s) %s\n' % (version, channel, archive_msg)
raise Exception(msg)
archives, missing_archives = self.GetAvailablePlatformArchivesFor(
version, allow_trunk_revisions)
if not missing_archives:
return version, channel, archives
skipped_versions.append((version, channel, missing_archives))
def _GetPlatformMajorVersionHistory(self, with_major_version, with_platform):
"""Yields Chrome history for a given platform and major version.
Args:
with_major_version: The major version to filter for. If 0, match all
versions.
with_platform: The name of the platform to filter for.
Returns:
A generator that yields a tuple (channel, version) for each version that
matches the platform and major version. The version returned is a tuple as
returned from SplitVersion.
"""
for platform, channel, version, _ in self.history:
version = SplitVersion(version)
if (with_platform == platform and
(with_major_version == 0 or with_major_version == version[0])):
yield channel, version
def _GetPlatformCanaryHistory(self, with_platform):
"""Yields Chrome history for a given platform, but only for canary
versions.
Args:
with_platform: The name of the platform to filter for.
Returns:
A generator that yields a tuple (channel, version) for each version that
matches the platform and uses the canary channel. The version returned is
a tuple as returned from SplitVersion.
"""
for platform, channel, version, _ in self.history:
version = SplitVersion(version)
if with_platform == platform and channel == CANARY:
yield channel, version
def _FindNextSharedVersion(self, platforms, generator_func):
"""Yields versions of Chrome that exist on all given platforms, in order of
newest to oldest.
Versions are compared in reverse order of release. That is, the most
recently updated version will be tested first.
Args:
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
generator_func: A function which takes a platform and returns a
generator that yields (channel, version) tuples.
Returns:
A generator that yields a tuple (version, channel) for each version that
matches all platforms and the major version. The version returned is a
string (e.g. "18.0.1025.164").
"""
platform_generators = []
for platform in platforms:
platform_generators.append(generator_func(platform))
shared_version = None
platform_versions = [(tuple(), '')] * len(platforms)
while True:
try:
for i, platform_gen in enumerate(platform_generators):
if platform_versions[i][1] != shared_version:
platform_versions[i] = platform_gen.next()
except StopIteration:
return
shared_version = min(v for c, v in platform_versions)
if all(v == shared_version for c, v in platform_versions):
# grab the channel from an arbitrary platform
first_platform = platform_versions[0]
channel = first_platform[0]
yield JoinVersion(shared_version), channel
# force increment to next version for all platforms
shared_version = None
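  # Illustration with hypothetical histories: if 'mac' yields versions
  # 19.0.0.2, 19.0.0.1, 18.0.0.9 and 'win' yields 19.0.0.1, 18.0.0.9, the
  # generators advance in lockstep toward the min() until every platform
  # reports the same version, yielding '19.0.0.1' and then '18.0.0.9'.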
def _GetAvailableArchivesFor(self, version_string):
"""Downloads a list of all available archives for a given version.
Args:
version_string: The version to find archives for. (e.g. "18.0.1025.164")
Returns:
A list of strings, each of which is a platform-specific archive URL. (e.g.
"gs://nativeclient_mirror/nacl/nacl_sdk/18.0.1025.164/"
"naclsdk_linux.tar.bz2").
All returned URLs will use the gs:// schema."""
files = self.delegate.GsUtil_ls(GS_BUCKET_PATH + version_string)
    assert all(f.startswith('gs://') for f in files)
archives = [f for f in files if not f.endswith('.json')]
manifests = [f for f in files if f.endswith('.json')]
# don't include any archives that don't have an associated manifest.
return filter(lambda a: a + '.json' in manifests, archives)
class Updater(object):
def __init__(self, delegate):
self.delegate = delegate
self.versions_to_update = []
self.online_manifest = manifest_util.SDKManifest()
self._FetchOnlineManifest()
def AddVersionToUpdate(self, bundle_name, version, channel, archives):
"""Add a pepper version to update in the uploaded manifest.
Args:
bundle_name: The name of the pepper bundle, e.g. 'pepper_18'
version: The version of the pepper bundle, e.g. '18.0.1025.64'
channel: The stability of the pepper bundle, e.g. 'beta'
archives: A sequence of archive URLs for this bundle."""
self.versions_to_update.append((bundle_name, version, channel, archives))
def Update(self, manifest):
"""Update a manifest and upload it.
Note that bundles will not be updated if the current version is newer.
That is, the updater will never automatically update to an older version of
a bundle.
Args:
manifest: The manifest used as a template for updating. Only pepper
bundles that contain no archives will be considered for auto-updating."""
# Make sure there is only one stable branch: the one with the max version.
# All others are post-stable.
stable_major_versions = [SplitVersion(version)[0] for _, version, channel, _
in self.versions_to_update if channel == 'stable']
# Add 0 in case there are no stable versions.
max_stable_version = max([0] + stable_major_versions)
for bundle_name, version, channel, archives in self.versions_to_update:
self.delegate.Print('Updating %s to %s...' % (bundle_name, version))
bundle = manifest.GetBundle(bundle_name)
for archive in archives:
platform_bundle = self._GetPlatformArchiveBundle(archive)
# Normally the manifest snippet's bundle name matches our bundle name.
        # pepper_canary, however, is called "pepper_###" in the manifest
# snippet.
platform_bundle.name = bundle_name
bundle.MergeWithBundle(platform_bundle)
# Check to ensure this bundle is newer than the online bundle.
online_bundle = self.online_manifest.GetBundle(bundle_name)
if online_bundle and online_bundle.revision >= bundle.revision:
self.delegate.Print(
            ' Revision %s is not newer than the online revision %s. '
'Skipping.' % (bundle.revision, online_bundle.revision))
manifest.SetBundle(online_bundle)
continue
major_version = SplitVersion(version)[0]
if major_version < max_stable_version and channel == 'stable':
bundle.stability = 'post_stable'
else:
bundle.stability = channel
# We always recommend the stable version.
if channel == 'stable':
bundle.recommended = 'yes'
else:
bundle.recommended = 'no'
self._UploadManifest(manifest)
self.delegate.Print('Done.')
def _GetPlatformArchiveBundle(self, archive):
"""Downloads the manifest "snippet" for an archive, and reads it as a
Bundle.
Args:
archive: A full URL of a platform-specific archive, using the gs schema.
Returns:
An object of type manifest_util.Bundle, read from a JSON file storing
metadata for this archive.
"""
stdout = self.delegate.GsUtil_cat(archive + '.json')
bundle = manifest_util.Bundle('')
bundle.LoadDataFromString(stdout)
# Some snippets were uploaded with revisions and versions as strings. Fix
# those here.
bundle.revision = int(bundle.revision)
bundle.version = int(bundle.version)
# HACK. The naclports archive specifies host_os as linux. Change it to all.
for archive in bundle.GetArchives():
if NACLPORTS_ARCHIVE_NAME in archive.url:
archive.host_os = 'all'
return bundle
def _UploadManifest(self, manifest):
"""Upload a serialized manifest_util.SDKManifest object.
Upload one copy to gs://<BUCKET_PATH>/naclsdk_manifest2.json, and a copy to
gs://<BUCKET_PATH>/manifest_backups/naclsdk_manifest2.<TIMESTAMP>.json.
Args:
manifest: The new manifest to upload.
"""
new_manifest_string = manifest.GetDataAsString()
online_manifest_string = self.online_manifest.GetDataAsString()
if self.delegate.dryrun:
self.delegate.Print(''.join(list(difflib.unified_diff(
online_manifest_string.splitlines(1),
new_manifest_string.splitlines(1)))))
return
else:
online_manifest = manifest_util.SDKManifest()
online_manifest.LoadDataFromString(online_manifest_string)
if online_manifest == manifest:
      self.delegate.Print('New manifest doesn\'t differ from online manifest. '
                          'Skipping upload.')
return
timestamp_manifest_path = GS_MANIFEST_BACKUP_DIR + \
GetTimestampManifestName()
self.delegate.GsUtil_cp('-', timestamp_manifest_path,
stdin=manifest.GetDataAsString())
    # Copy the timestamped manifest over the official manifest.
self.delegate.GsUtil_cp(timestamp_manifest_path, GS_SDK_MANIFEST)
def _FetchOnlineManifest(self):
try:
online_manifest_string = self.delegate.GsUtil_cat(GS_SDK_MANIFEST)
except subprocess.CalledProcessError:
# It is not a failure if the online manifest doesn't exist.
online_manifest_string = ''
if online_manifest_string:
self.online_manifest.LoadDataFromString(online_manifest_string)
def Run(delegate, platforms, extra_archives, fixed_bundle_versions=None):
"""Entry point for the auto-updater.
Args:
delegate: The Delegate object to use for reading Urls, files, etc.
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
extra_archives: A sequence of tuples: (archive_basename, minimum_version),
e.g. [('foo.tar.bz2', '18.0.1000.0'), ('bar.tar.bz2', '19.0.1100.20')]
These archives must exist for a version to be considered for inclusion,
as long as that version is greater than the archive's minimum version.
fixed_bundle_versions: A sequence of tuples (bundle_name, version_string).
e.g. ('pepper_21', '21.0.1145.0')
"""
if fixed_bundle_versions:
fixed_bundle_versions = dict(fixed_bundle_versions)
else:
fixed_bundle_versions = {}
manifest = delegate.GetRepoManifest()
auto_update_bundles = []
for bundle in manifest.GetBundles():
if not bundle.name.startswith('pepper_'):
continue
archives = bundle.GetArchives()
if not archives:
auto_update_bundles.append(bundle)
if not auto_update_bundles:
delegate.Print('No versions need auto-updating.')
return
version_finder = VersionFinder(delegate, platforms, extra_archives)
updater = Updater(delegate)
for bundle in auto_update_bundles:
if bundle.name == CANARY_BUNDLE_NAME:
version, channel, archives = version_finder.GetMostRecentSharedCanary()
else:
version, channel, archives = version_finder.GetMostRecentSharedVersion(
bundle.version)
if bundle.name in fixed_bundle_versions:
# Ensure this version is valid for all platforms.
# If it is, use the channel found above (because the channel for this
# version may not be in the history).
version = fixed_bundle_versions[bundle.name]
delegate.Print('Fixed bundle version: %s, %s' % (bundle.name, version))
allow_trunk_revisions = bundle.name == CANARY_BUNDLE_NAME
archives, missing = version_finder.GetAvailablePlatformArchivesFor(
version, allow_trunk_revisions)
if missing:
delegate.Print(
'Some archives for version %s of bundle %s don\'t exist: '
'Missing %s' % (version, bundle.name, ', '.join(missing)))
return
updater.AddVersionToUpdate(bundle.name, version, channel, archives)
updater.Update(manifest)
def SendMail(send_from, send_to, subject, text, smtp='localhost'):
"""Send an email.
Args:
send_from: The sender's email address.
send_to: A list of addresses to send to.
subject: The subject of the email.
text: The text of the email.
smtp: The smtp server to use. Default is localhost.
"""
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = send_from
msg['To'] = ', '.join(send_to)
msg['Date'] = email.Utils.formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(email.MIMEText.MIMEText(text))
smtp_obj = smtplib.SMTP(smtp)
smtp_obj.sendmail(send_from, send_to, msg.as_string())
smtp_obj.close()
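# Minimal usage sketch (hypothetical addresses; assumes an SMTP server is
# reachable on localhost):
#
#   SendMail('buildbot@example.com', ['sdk-team@example.com'],
#            '[update_nacl_manifest] Failed', 'See stderr log below.')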
class CapturedFile(object):
"""A file-like object that captures text written to it, but also passes it
through to an underlying file-like object."""
def __init__(self, passthrough):
self.passthrough = passthrough
self.written = cStringIO.StringIO()
def write(self, s):
self.written.write(s)
if self.passthrough:
self.passthrough.write(s)
def getvalue(self):
return self.written.getvalue()
def main(args):
parser = optparse.OptionParser()
parser.add_option('--gsutil', help='path to gsutil.')
parser.add_option('-d', '--debug', help='run in debug mode.',
action='store_true')
parser.add_option('--mailfrom', help='email address of sender.')
parser.add_option('--mailto', help='send error mails to...', action='append')
parser.add_option('-n', '--dryrun', help="don't upload the manifest.",
action='store_true')
parser.add_option('-v', '--verbose', help='print more diagnostic messages.',
action='store_true')
parser.add_option('--bundle-version',
help='Manually set a bundle version. This can be passed more than once. '
'format: --bundle-version pepper_24=24.0.1312.25', action='append')
options, args = parser.parse_args(args[1:])
if (options.mailfrom is None) != (not options.mailto):
options.mailfrom = None
options.mailto = None
sys.stderr.write('warning: Disabling email, one of --mailto or --mailfrom '
'was missing.\n')
# Parse bundle versions.
fixed_bundle_versions = {}
if options.bundle_version:
for bundle_version_string in options.bundle_version:
bundle_name, version = bundle_version_string.split('=')
fixed_bundle_versions[bundle_name] = version
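# e.g. '--bundle-version pepper_24=24.0.1312.25' yields
# fixed_bundle_versions == {'pepper_24': '24.0.1312.25'}.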
if options.mailfrom and options.mailto:
# Capture stderr so it can be emailed, if necessary.
sys.stderr = CapturedFile(sys.stderr)
try:
try:
delegate = RealDelegate(options.dryrun, options.gsutil, options.verbose)
# Only look for naclports archives > 26.0.1391.0 = r178222
extra_archives = [('naclports.tar.bz2', '26.0.1391.0')]
Run(delegate, ('mac', 'win', 'linux'), extra_archives,
fixed_bundle_versions)
except Exception:
if options.mailfrom and options.mailto:
traceback.print_exc()
scriptname = os.path.basename(sys.argv[0])
subject = '[%s] Failed to update manifest' % (scriptname,)
text = '%s failed.\n\nSTDERR:\n%s\n' % (scriptname,
sys.stderr.getvalue())
SendMail(options.mailfrom, options.mailto, subject, text)
sys.exit(1)
else:
raise
except manifest_util.Error as e:
if options.debug:
raise
print e
sys.exit(1)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
from .buffer import Buffer
from .delay import DelayObservable, DelaySubscription, DelayTime
from .generate import Generate
from .sample import SampleWithObservable, SampleWithTime
from .throttle import ThrottleObservable, ThrottleTime
from .timeInterval import TimeInterval
from .timeout import TimeoutAbsolute, TimeoutRelative, TimeoutObservable
from .timer import Timer
from .timestamp import TimeStamp
from .window import Window
from rx.exceptions import TimeoutException
from rx.observable import Observable
from rx.scheduler import Scheduler
####################
# Time #
####################
def bufferWithTime(self, timeSpan, timeShift=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if timeShift is None:
timeShift = timeSpan
return Buffer(self, timeSpan=timeSpan, timeShift=timeShift, scheduler=scheduler)
Observable.bufferWithTime = bufferWithTime
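# Usage sketch (hypothetical source; relies only on operators defined in
# this module):
#
#   ticks = Observable.interval(100)      # a value every 100 time units
#   chunks = ticks.bufferWithTime(1000)   # lists of values per 1000 units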
def bufferWithTimeAndCount(self, timeSpan, count, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if count is None:
count = timeSpan
return Buffer(self, timeSpan=timeSpan, count=count, scheduler=scheduler)
Observable.bufferWithTimeAndCount = bufferWithTimeAndCount
def delayRelative(self, dueTime, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return DelayTime(self, dueTime, False, scheduler)
Observable.delayRelative = delayRelative
def delayAbsolute(self, dueTime, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return DelayTime(self, dueTime, True, scheduler)
Observable.delayAbsolute = delayAbsolute
def delayIndividual(self, delayDurationSelector, subscriptionDelayObservable=None):
assert isinstance(self, Observable)
assert callable(delayDurationSelector)
if subscriptionDelayObservable is not None:
assert isinstance(subscriptionDelayObservable, Observable)
return DelayObservable(self, subscriptionDelayObservable, delayDurationSelector)
Observable.delayIndividual = delayIndividual
def delaySubscriptionRelative(self, dueTime, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return DelaySubscription(self, dueTime, False, scheduler)
Observable.delaySubscriptionRelative = delaySubscriptionRelative
def delaySubscriptionAbsolute(self, dueTime, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return DelaySubscription(self, dueTime, True, scheduler)
Observable.delaySubscriptionAbsolute = delaySubscriptionAbsolute
def generateRelative(initialState, condition, iterate, resultSelector, timeSelector, scheduler=Scheduler.timeBasedOperation):
assert callable(condition)
assert callable(iterate)
assert callable(resultSelector)
assert callable(timeSelector)
assert isinstance(scheduler, Scheduler)
return Generate(initialState, condition, iterate, resultSelector, timeSelector, False, scheduler)
Observable.generateRelative = generateRelative
def generateAbsolute(initialState, condition, iterate, resultSelector, timeSelector, scheduler=Scheduler.timeBasedOperation):
assert callable(condition)
assert callable(iterate)
assert callable(resultSelector)
assert callable(timeSelector)
assert isinstance(scheduler, Scheduler)
return Generate(initialState, condition, iterate, resultSelector, timeSelector, True, scheduler)
Observable.generateAbsolute = generateAbsolute
def interval(period, scheduler=Scheduler.timeBasedOperation):
assert isinstance(scheduler, Scheduler)
return Timer(period, False, period, scheduler)
Observable.interval = interval
def sampleWithTime(self, interval, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return SampleWithTime(self, interval, scheduler)
Observable.sampleWithTime = sampleWithTime
def sampleWithObservable(self, sampler):
assert isinstance(self, Observable)
assert isinstance(sampler, Observable)
return SampleWithObservable(self, sampler)
Observable.sampleWithObservable = sampleWithObservable
def throttle(self, dueTime, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return ThrottleTime(self, dueTime, scheduler)
Observable.throttle = throttle
def throttleIndividual(self, durationSelector):
assert isinstance(self, Observable)
assert callable(durationSelector)
return ThrottleObservable(self, durationSelector)
Observable.throttleIndividual = throttleIndividual
def timeInterval(self, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return TimeInterval(self, scheduler)
Observable.timeInterval = timeInterval
def timeoutRelative(self, dueTime, other=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if other is None:
other = Observable.throw(TimeoutException())
assert isinstance(other, Observable)
return TimeoutRelative(self, dueTime, other, scheduler)
Observable.timeoutRelative = timeoutRelative
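# Usage sketch (hypothetical source): raise TimeoutException if no value
# arrives within 5000 time units of subscription; pass `other` to substitute
# a fallback Observable instead of the default error:
#
#   guarded = source.timeoutRelative(5000)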
def timeoutAbsolute(self, dueTime, other=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if other is None:
other = Observable.throw(TimeoutException())
assert isinstance(other, Observable)
return TimeoutAbsolute(self, dueTime, other, scheduler)
Observable.timeoutAbsolute = timeoutAbsolute
def timeoutIndividual(self, durationSelector, firstTimeout=None, other=None):
assert isinstance(self, Observable)
if firstTimeout is None:
firstTimeout = Observable.never()
if other is None:
other = Observable.throw(TimeoutException())
assert isinstance(firstTimeout, Observable)
assert isinstance(other, Observable)
return TimeoutObservable(self, firstTimeout, durationSelector, other)
Observable.timeoutIndividual = timeoutIndividual
def timerRelative(dueTime, period=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(scheduler, Scheduler)
return Timer(dueTime, False, period, scheduler)
Observable.timerRelative = timerRelative
def timerAbsolute(dueTime, period=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(scheduler, Scheduler)
return Timer(dueTime, True, period, scheduler)
Observable.timerAbsolute = timerAbsolute
def timestamp(self, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
return TimeStamp(self, scheduler)
Observable.timestamp = timestamp
def windowWithTime(self, timeSpan, timeShift=None, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if timeShift is None:
timeShift = timeSpan
return Window(self, timeSpan=timeSpan, timeShift=timeShift, scheduler=scheduler)
Observable.windowWithTime = windowWithTime
def windowWithTimeAndCount(self, timeSpan, count, scheduler=Scheduler.timeBasedOperation):
assert isinstance(self, Observable)
assert isinstance(scheduler, Scheduler)
if count is None:
count = timeSpan
return Window(self, timeSpan=timeSpan, count=count, scheduler=scheduler)
Observable.windowWithTimeAndCount = windowWithTimeAndCount
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2014 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This tool is used to generate component attribution based on a set of .ABOUT
files. Optionally, one can pass a subset list of specific components for a
set of .ABOUT files to generate attribution.
"""
from __future__ import print_function
import csv
import errno
import logging
import optparse
import os
import sys
from os.path import exists, dirname, join, abspath, isdir, basename, expanduser, normpath
from about import Collector
import genabout
LOG_FILENAME = 'error.log'
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setLevel(logging.CRITICAL)
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)
file_logger = logging.getLogger(__name__ + '_file')
__version__ = '2.0.0'
__about_spec_version__ = '1.0.0' # See http://dejacode.org
__copyright__ = """
Copyright (c) 2013-2014 nexB Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def component_subset_to_sublist(input_list):
sublist = [row["about_file"] for row in input_list
if "about_file" in row.keys()]
return sublist
def update_path_to_about(input_list):
output_list = []
for row in input_list:
if not row.endswith('.ABOUT'):
if row.endswith('/'):
row += basename(dirname(row))
output_list.append(row + '.ABOUT')
else:
output_list.append(row)
return output_list
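# Worked example (hypothetical paths):
#   update_path_to_about(['docs/foo.ABOUT', 'docs/bar/', 'docs/baz'])
#   == ['docs/foo.ABOUT', 'docs/bar/bar.ABOUT', 'docs/baz.ABOUT']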
def convert_dict_key_to_lower_case(input_list):
output_list = []
for line in input_list:
lower_dict = {}
for key in line:
lower_dict[key.lower()] = line[key]
output_list.append(lower_dict)
return output_list
def check_about_file_existence_and_format(input_list):
try:
for row in input_list:
# Force the path to start with a '/' to match the project
# structure
if not row['about_file'].startswith('/'):
row['about_file'] = '/' + row['about_file']
return input_list
except Exception:
return []
USAGE_SYNTAX = """\
Input can be a file or directory.
Output of rendered template must be a file (e.g. .html).
Optional:
Component List must be a .csv file which has at least an "about_file" column.
"""
VERBOSITY_HELP = """\
Print more or fewer verbose messages while processing ABOUT files
0 - Do not print any warning or error messages, just a total count (default)
1 - Print error messages
2 - Print error and warning messages
"""
TEMPLATE_LOCATION_HELP = """\
Use the custom template for the Attribution Generation
"""
MAPPING_HELP = """\
Configure the mapping key from the MAPPING.CONFIG
"""
VERIFICATION_HELP = """\
Create a verification CSV output for the attribution
"""
def main(parser, options, args):
overwrite = options.overwrite
verbosity = options.verbosity
mapping_config = options.mapping
template_location = options.template_location
verification_location = options.verification_location
if options.version:
print('ABOUT tool {0}\n{1}'.format(__version__, __copyright__))
sys.exit(0)
if verbosity == 1:
handler.setLevel(logging.ERROR)
elif verbosity >= 2:
handler.setLevel(logging.WARNING)
if mapping_config:
if not exists('MAPPING.CONFIG'):
print("The file 'MAPPING.CONFIG' does not exist.")
sys.exit(errno.EINVAL)
if template_location:
template_location = abspath(expanduser(template_location))
if not exists(template_location):
print('The defined template location does not exist.')
parser.print_help()
sys.exit(errno.EINVAL)
if verification_location:
verification_location = abspath(expanduser(verification_location))
if not verification_location.endswith('.csv'):
print('The verification output must end with ".csv".')
parser.print_help()
sys.exit(errno.EINVAL)
if not exists(dirname(verification_location)):
print('The verification output directory does not exist.')
parser.print_help()
sys.exit(errno.EINVAL)
if not 2 <= len(args) <= 3:
print('The number of arguments is incorrect.\n')
parser.print_help()
sys.exit(errno.EEXIST)
input_path = args[0]
output_path = args[1]
if len(args) == 3:
component_subset_path = args[2]
else:
component_subset_path = ""
# TODO: need more path normalization (normpath, expanduser)
input_path = expanduser(normpath(input_path))
output_path = expanduser(normpath(output_path))
# Add the following to solve the
# UnicodeEncodeError: 'ascii' codec can't encode character
# FIXME: these two lines do not make sense
reload(sys)
sys.setdefaultencoding('utf-8') # @UndefinedVariable
if not exists(input_path):
print('Input path does not exist.')
parser.print_help()
sys.exit(errno.EEXIST)
if isdir(output_path):
print('Output must be an HTML file.')
parser.print_help()
sys.exit(errno.EISDIR)
# We only support HTML currently
if not output_path.endswith('.html'):
print('Output must be an HTML file.')
parser.print_help()
sys.exit(errno.EINVAL)
if exists(output_path) and not overwrite:
print('Output file already exists. Select a different file name '
'or use the --overwrite option.')
parser.print_help()
sys.exit(errno.EEXIST)
if component_subset_path and not exists(component_subset_path):
print('Component Subset path does not exist.')
parser.print_help()
sys.exit(errno.EEXIST)
if not exists(output_path) or (exists(output_path) and overwrite):
collector = Collector(input_path)
outlist = None
if not component_subset_path:
sublist = None
else:
input_list = []
with open(component_subset_path, "rU") as f:
input_dict = csv.DictReader(f)
for row in input_dict:
input_list.append(row)
updated_list = convert_dict_key_to_lower_case(input_list)
if mapping_config:
mapping_list = genabout.GenAbout().get_mapping_list()
updated_list = genabout.GenAbout().convert_input_list(updated_list, mapping_list)
if not check_about_file_existence_and_format(updated_list):
print('The required key "about_file" was not found.')
print('Please use the "--mapping" option to map the input '
'keys and verify the mapping information are correct.')
print('OR, correct the header keys from the component list.')
parser.print_help()
sys.exit(errno.EISDIR)
sublist = component_subset_to_sublist(updated_list)
outlist = update_path_to_about(sublist)
attrib_str = collector.generate_attribution(template_path=template_location, limit_to=outlist, verification=verification_location)
errors = collector.get_genattrib_errors()
if attrib_str:
try:
with open(output_path, "w") as f:
f.write(attrib_str)
except Exception as e:
print("Problem occurs. Attribution was not generated.")
print(e)
# Remove the previous log file if it exists
log_path = join(dirname(output_path), LOG_FILENAME)
if exists(log_path):
os.remove(log_path)
file_handler = logging.FileHandler(log_path)
file_logger.addHandler(file_handler)
for error_msg in errors:
logger.error(error_msg)
file_logger.error(error_msg)
if errors:
print("%d errors detected." % len(errors))
else:
# we should never reach this
assert False, "Unsupported option(s)."
def get_parser():
class MyFormatter(optparse.IndentedHelpFormatter):
def _format_text(self, text):
"""
Overridden to allow description to be printed without
modification
"""
return text
def format_option(self, option):
"""
Overridden to allow options help text to be printed without
modification
"""
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = help_text.split('\n')
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
parser = optparse.OptionParser(
usage='%prog [options] input_path output_path [component_list]',
description=USAGE_SYNTAX,
add_help_option=False,
formatter=MyFormatter(),
)
parser.add_option("-h", "--help", action="help", help="Display help")
parser.add_option("-v", "--version", action="store_true",
help='Display current version, license notice, and copyright notice')
parser.add_option('--overwrite', action='store_true',
help='Overwrites the output file if it exists')
parser.add_option('--verbosity', type=int,
help=VERBOSITY_HELP)
parser.add_option('--template_location', type='string',
help=TEMPLATE_LOCATION_HELP)
parser.add_option('--mapping', action='store_true',
help=MAPPING_HELP)
parser.add_option('--verification_location', type='string',
help=VERIFICATION_HELP)
return parser
if __name__ == "__main__":
parser = get_parser()
options, args = parser.parse_args()
main(parser, options, args)
|
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from openerp.tools.translate import _
from werkzeug import url_encode
SALE_ORDER_LINE_EDI_STRUCT = {
'sequence': True,
'name': True,
#custom: 'date_planned'
'product_id': True,
'product_uom': True,
'price_unit': True,
#custom: 'product_qty'
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
SALE_ORDER_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
#custom: 'partner_ref'
'date_order': True,
'partner_id': True,
#custom: 'partner_address'
#custom: 'notes'
'order_line': SALE_ORDER_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
'payment_term': True,
'order_policy': True,
'user_id': True,
'state': True,
}
class sale_order(osv.osv, EDIMixin):
_inherit = 'sale.order'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a Sale order"""
edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner_obj = self.pool.get('res.partner')
edi_doc_list = []
for order in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, order, context=context)
# Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
edi_doc.update({
# force trans-typing to purchase.order upon import
'__import_model': 'purchase.order',
'__import_module': 'purchase',
'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
context=context)[0],
'partner_ref': order.client_order_ref or False,
'notes': order.note or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
company_address_edi['customer'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document['partner_invoice_id'] = partner_edi_m2o
edi_document['partner_shipping_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
# TODO: refactor into common place for purchase/sale, e.g. into product module
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
pricelist = partner.property_product_pricelist
if not pricelist:
pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context)
if pricelist.currency_id != currency:
# look for a pricelist with the right type and currency, or make a new one
pricelist_type = 'sale'
product_pricelist = self.pool.get('product.pricelist')
match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
('currency_id','=',currency.id)])
if match_pricelist_ids:
pricelist_id = match_pricelist_ids[0]
else:
pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
'type': pricelist_type,
'currency_id': currency.id,
})
self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
'pricelist_id': pricelist_id})
pricelist = product_pricelist.browse(cr, uid, pricelist_id)
return self.edi_m2o(cr, uid, pricelist, context=context)
def edi_import(self, cr, uid, edi_document, context=None):
self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
#import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# currency for rounding the discount calculations and for the pricelist
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
order_currency = res_currency.browse(cr, uid, currency_id)
partner_ref = edi_document.pop('partner_ref', False)
edi_document['client_order_ref'] = edi_document['name']
edi_document['name'] = partner_ref or edi_document['name']
edi_document['note'] = edi_document.pop('notes', False)
edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
# discard web preview fields, if present
edi_document.pop('amount_total', None)
edi_document.pop('amount_tax', None)
edi_document.pop('amount_untaxed', None)
order_lines = edi_document['order_line']
for order_line in order_lines:
self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
order_line['product_uom_qty'] = order_line['product_qty']
del order_line['product_qty']
# discard web preview fields, if present
order_line.pop('price_subtotal', None)
return super(sale_order,self).edi_import(cr, uid, edi_document, context=context)
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy in ('prepaid', 'manual') and \
order.company_id.paypal_account and order.state != 'draft':
params = {
"cmd": "_xclick",
"business": order.company_id.paypal_account,
"item_name": order.company_id.name + " Order " + order.name,
"invoice": order.name,
"amount": order.amount_total,
"currency_code": order.pricelist_id.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name,
}
res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class sale_order_line(osv.osv, EDIMixin):
_inherit='sale.order.line'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Overridden to provide sale order line fields with the expected names
(sale and purchase orders have different column names)"""
edi_struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT)
edi_doc_list = []
for line in records:
edi_doc = super(sale_order_line,self).edi_export(cr, uid, [line], edi_struct, context)[0]
edi_doc['__import_model'] = 'purchase.order.line'
edi_doc['product_qty'] = line.product_uom_qty
if line.product_uos:
edi_doc.update(product_uom=line.product_uos,
product_qty=line.product_uos_qty)
edi_doc_list.append(edi_doc)
return edi_doc_list
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import glob
import deepdish as dd
import sys
import os
# some general plot styles for consistency
from galaxy_analysis.plot import plot_styles as ps
from galaxy_analysis.utilities import utilities
colors = {'Disk' : ps.black,
'CNM' : ps.purple,
'WNM' : ps.purple,
'HIM' : ps.purple,
'Molecular' : 'red',
'FullBox' : ps.magenta,
'stars' : ps.orange,
'GravBound' : ps.blue,
'OutsideBox' : ps.green }
ls = {'Disk' : '-',
'CNM' : '-',
'WNM' : '--',
'HIM' : ':',
'Molecular' : '-',
'FullBox' : '-',
'stars' : '-',
'GravBound' : '-',
'OutsideBox' : '-'}
def plot_sequestering(directory = './', fields = None, elements = None,
fraction = None):
"""
Given a directory, goes through all data outputs in that
directory and plots the time evolution of the mass contained
in specified gas phases for a given species. The default behavior
is to plot all fields, with a separate plot for each element.
fields : list, default is to use all of: Disk, CNM, WNM, HIM, FullBox,
stars, Molecular, OutsideBox, GravBound
elements : list, default is to loop through all elements: H, He, C, N,
O, Mg, Ca, Si, Mn, S, Fe, Y, Eu, and Metals
fraction : optional, string. Plot the mass fraction. Normalize all
lines by one of the fields listed above.
"""
output_dir = directory + '/sequestering/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if fraction is not None:
output_dir += fraction + '/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
sfields = fields
if sfields is None:
sfields = ['Disk', 'CNM', 'WNM', 'HIM', 'FullBox',
'stars', 'Molecular', 'OutsideBox', 'GravBound']
#
# get all data
#
all_output = np.sort(glob.glob(directory + '/DD*.h5'))
all_elements = elements
if all_elements is None:
# this is a hacky way of getting the metal fields - should just save this to file
metal_fields = dd.io.load(all_output[0], '/gas_meta_data/masses/FullBox')
exclude = ['H','HI','HII','H2','Metals','Total','He','HeI','HeII','HeIII']
metal_fields = utilities.sort_by_anum([x for x in metal_fields if (not any([y in x for y in exclude]))])
individual_metals = metal_fields
all_elements = ['H','He','Metals'] + individual_metals
for element in all_elements:
fig, ax = plt.subplots()
# construct dictionary and time array
plot_data = {}
t = np.zeros(len(all_output))
for s in sfields:
plot_data[s] = np.zeros(len(all_output))
# loop through all output, gathering time and mass values
for i in np.arange(len(all_output)):
t[i] = dd.io.load(all_output[i], '/meta_data/Time')
x = dd.io.load(all_output[i], '/gas_meta_data/masses')
# now loop through all fields
for s in sfields:
if element == 'Metals' and s == 'stars':
plot_data[s][i] = x[s]['metals']
else:
plot_data[s][i] = x[s][element]
# normalize if necessary
norm = np.ones(np.size(plot_data[sfields[0]]))
if fraction is not None:
norm = 1.0 / plot_data[fraction]
ymax = np.max(plot_data['FullBox'] * norm)
if fraction is None:
ymax = 5.0 * ymax
ymin = 1.0E-8 * ymax
# try and set something reasonable for minimum if it exists
ymin = np.max( [ymin, np.min(plot_data['stars'] * norm)*10.0] )
for s in sfields:
# ignore fields that are too tiny to show up on plot
if np.max(plot_data[s] * norm) < ymin:
continue
ax.plot(t, plot_data[s] * norm, lw = ps.lw, label=s, ls = ls[s], color = colors[s])
ax.set_xlabel(r'Time (Myr)')
if fraction is not None:
ax.set_ylabel('Mass Fraction of ' + element + ' relative to ' + fraction)
else:
ax.set_ylabel(element + r' Mass (M$_{\odot}$)')
ax.set_ylim(ymin, ymax)
ax.semilogy()
plt.minorticks_on()
fig.set_size_inches(8,8)
plt.tight_layout()
ax.legend(loc='best', ncol = 2)
outname = output_dir + '/' + element + '_sequestering.png'
if fraction is not None:
outname = output_dir + '/' + element + '_' + fraction + '_fraction_sequestering.png'
fig.savefig(outname)
plt.close(fig)
del(plot_data)
return
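# Usage sketch (assumes DD*.h5 analysis outputs exist in the directory):
#
#   plot_sequestering('./', elements=['Fe'], fraction='FullBox')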
def plot_surface_density_profiles(directory = './', normalize = False):
output_dir = directory + '/radial_profiles/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir = output_dir + 'surface_density/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
all_output = np.sort(glob.glob(directory + '/DD*.h5'))
da = dd.io.load(all_output[-1])
fields = [k[1] for k in da['gas_profiles']['surface_density']['disk'].keys() if k != 'xbins']
ls = ['-','--',':']
color = ['black',ps.purple,ps.blue,ps.orange]
for ele in fields:
lsi = 0
ci = 0
norm = 1.0
ymin = 1.0E99
ymax = -1.0E99
k1 = 'gas'
try:
temp = da['gas_profiles']['surface_density']['disk'][(k1,ele)]
except KeyError:
k1 = 'enzo'
if normalize:
da = dd.io.load(all_output[0])
# Divide by the initial profile so later outputs plot relative to it.
norm = 1.0 / da['gas_profiles']['surface_density']['disk'][(k1,ele)]
fig, ax = plt.subplots()
for i in np.floor(np.linspace(0, len(all_output) - 1, 12)):
i = int(i)
da = dd.io.load( all_output[i])
t = da['meta_data']['Time']
y = da['gas_profiles']['surface_density']['disk'][(k1,ele)]
x = da['gas_profiles']['surface_density']['disk']['xbins']
x = (x[1:] + x[:-1])*0.5
ax.plot(x, y * norm, lw = 3, ls = ls[lsi], color = color[ci], label = "t = %0.1f Myr"%(t))
ymax = np.max( [ymax, np.max(y * norm)])
ymin = np.min( [ymin, np.min(y * norm)])
lsi = lsi + 1
if lsi >= len(ls):
lsi = 0
ci = ci + 1
ax.set_xlabel(r'Radius (pc)')
ax.set_ylabel(r'$\Sigma_{\rm ' + ele + '}$ (M$_{\odot}$ pc$^{-2}$)')
ax.semilogy()
# ax.semilogx()
ax.set_xlim(0.0, 2000.0)
ymin = np.max( [ymin, 1.0E-5*ymax])
ax.set_ylim(ymin, ymax)
plt.minorticks_on()
fig.set_size_inches(8,8)
plt.tight_layout()
ax.legend(loc='best', ncol = 2)
outname = output_dir + ele
if normalize:
outname = outname + '_norm'
outname = outname + '_radial_profile.png'
fig.savefig(outname)
plt.close(fig)
return
def plot_mass_profiles(directory = './', normalize = False):
"""
Plot cumulative radial profiles at 12 different times (evenly spaced)
throughout the simulation.
"""
output_dir = directory + '/radial_profiles/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
all_output = np.sort(glob.glob(directory + '/DD*.h5'))
da = dd.io.load(all_output[-1])
fields = [k[1] for k in da['gas_profiles']['accumulation']['sphere'].keys() if k != 'xbins']
ls = ['-','--',':']
color = ['black',ps.purple,ps.blue,ps.orange]
for ele in fields:
lsi = 0
ci = 0
norm = 1.0
if normalize:
da = dd.io.load(all_output[0])
# Divide by the initial cumulative profile so later outputs plot
# relative to it.
norm = 1.0 / np.cumsum(da['gas_profiles']['accumulation']['sphere'][('gas',ele)])
fig, ax = plt.subplots()
for i in np.floor(np.linspace(0, len(all_output) - 1, 12)):
i = int(i)
da = dd.io.load( all_output[i])
t = da['meta_data']['Time']
y = da['gas_profiles']['accumulation']['sphere'][('gas',ele)]
x = da['gas_profiles']['accumulation']['sphere']['xbins']
x = (x[1:] + x[:-1])*0.5
ax.plot(x, np.cumsum(y) * norm, lw = 3, ls = ls[lsi], color = color[ci], label = "t = %0.1f Myr"%(t))
lsi = lsi + 1
if lsi >= len(ls):
lsi = 0
ci = ci + 1
ax.set_xlabel(r'Radius (kpc)')
ax.set_ylabel(r'Mass (M$_{\odot}$)')
ax.semilogy()
# ax.semilogx()
ax.set_xlim(0.0, 15.0)
plt.minorticks_on()
fig.set_size_inches(8,8)
plt.tight_layout()
ax.legend(loc='best', ncol = 2)
outname = output_dir + ele
if normalize:
outname = outname + '_norm_'
outname = outname + 'radial_profile.png'
fig.savefig(outname)
plt.close(fig)
return
if __name__ == "__main__":
directory = './'
if len(sys.argv) == 2:
directory = sys.argv[1]
plot_sequestering(directory = directory)
print("completed total sequestering")
plot_surface_density_profiles(directory = directory)
print("completed surface density profiles")
plot_mass_profiles(directory = directory)
print("completed mass profiles")
# Disk and FullBox are likely the only two fractions that make
# sense given the way the data is constructed. Other fractions are
# certainly possible but will be misleading without recomputing
# analysis (i.e. GravBound isn't useful unless I'm doing
# fraction of a given field that is GravBound, which would
# require re-computing, not just dividing by GravBound)
plot_sequestering(directory = directory, fraction = 'Disk')
plot_sequestering(directory = directory, fraction = 'FullBox')
|
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python test reporter that generates test reports in JUnit XML format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import re
import sys
import threading
import time
import traceback
import unittest
from xml.sax import saxutils
from absl.testing import _pretty_print_reporter
from absl.third_party import unittest3_backport
import six
# See http://www.w3.org/TR/REC-xml/#NT-Char
_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD}
_control_character_conversions = {
chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes}
_escape_xml_attr_conversions = {
'"': '&quot;',
"'": '&apos;',
'\n': '&#xA;',
'\t': '&#x9;',
'\r': '&#xD;',
' ': '&#x20;'}
_escape_xml_attr_conversions.update(_control_character_conversions)
# When class or module level function fails, unittest/suite.py adds a
# _ErrorHolder instance instead of a real TestCase, and it has a description
# like "setUpClass (__main__.MyTestCase)".
_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$')
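# e.g. 'setUpClass (__main__.MyTestCase)' matches with groups
# ('setUpClass', '__main__.MyTestCase').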
# NOTE: while saxutils.quoteattr() theoretically does the same thing, it
# often ends up being too smart for its own good and does not escape
# properly. This function is much more reliable.
def _escape_xml_attr(content):
"""Escapes xml attributes."""
# Note: saxutils doesn't escape the quotes.
return saxutils.escape(content, _escape_xml_attr_conversions)
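# Example: _escape_xml_attr('a"b\nc') returns 'a&quot;b&#xA;c'.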
def _escape_cdata(s):
"""Escapes a string to be used as XML CDATA.
CDATA characters are treated strictly as character data, not as XML markup,
but there are still certain restrictions on them.
Args:
s: the string to be escaped.
Returns:
An escaped version of the input string.
"""
for char, escaped in six.iteritems(_control_character_conversions):
s = s.replace(char, escaped)
return s.replace(']]>', ']] >')
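# Example: _escape_cdata('x\x07]]>') returns 'x\\x07]] >' -- the control
# character is spelled out and the CDATA terminator is broken up.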
def _iso8601_timestamp(timestamp):
"""Produces an ISO8601 datetime.
Args:
timestamp: an Epoch based timestamp in seconds.
Returns:
An ISO8601-format timestamp if the input is a valid timestamp, None otherwise.
"""
if timestamp is None or timestamp < 0:
return None
# Use utcfromtimestamp in PY2 because it doesn't have a built-in UTC object
if six.PY2:
return '%s+00:00' % datetime.datetime.utcfromtimestamp(
timestamp).isoformat()
else:
return datetime.datetime.fromtimestamp(
timestamp, tz=datetime.timezone.utc).isoformat()
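# Example: _iso8601_timestamp(0) returns '1970-01-01T00:00:00+00:00';
# _iso8601_timestamp(-1) and _iso8601_timestamp(None) return None.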
def _print_xml_element_header(element, attributes, stream, indentation=''):
"""Prints an XML header of an arbitrary element.
Args:
element: element name (testsuites, testsuite, testcase)
attributes: 2-tuple list with (attributes, values) already escaped
stream: output stream to write test report XML to
indentation: indentation added to the element header
"""
stream.write('%s<%s' % (indentation, element))
for attribute in attributes:
if len(attribute) == 2 \
and attribute[0] is not None and attribute[1] is not None:
stream.write(' %s="%s"' % (attribute[0], attribute[1]))
stream.write('>\n')
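# Example: _print_xml_element_header('testsuite', [('name', 'Foo'),
# ('tests', '2')], stream, ' ') writes ' <testsuite name="Foo" tests="2">\n'.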
# Copy time.time which ensures the real time is used internally.
# This prevents bad interactions with tests that stub out time.
_time_copy = time.time
if hasattr(traceback, '_some_str'):
# Use the traceback module str function to format safely.
_safe_str = traceback._some_str
else:
_safe_str = str # pylint: disable=invalid-name
class _TestCaseResult(object):
"""Private helper for _TextAndXMLTestResult that represents a test result.
Attributes:
test: A TestCase instance of an individual test method.
name: The name of the individual test method.
full_class_name: The full name of the test class.
run_time: The duration (in seconds) it took to run the test.
start_time: Epoch relative timestamp of when test started (in seconds)
errors: A list of error 4-tuples. Error tuple entries are
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: A string explaining why the test was skipped.
"""
def __init__(self, test):
self.run_time = -1
self.start_time = -1
self.skip_reason = None
self.errors = []
self.test = test
# Parse the test id to get its test name and full class path.
# Unfortunately there is no better way of knowing the test and class.
# Worse, unittest uses _ErrorHolder instances to represent class / module
# level failures.
test_desc = test.id() or str(test)
# Check if it's something like "setUpClass (__main__.TestCase)".
match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
if match:
name = match.group(1)
full_class_name = match.group(2)
else:
class_name = unittest.util.strclass(test.__class__)
if ((six.PY3 and isinstance(test, unittest.case._SubTest)) or
(six.PY2 and isinstance(test, unittest3_backport.case._SubTest))):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
class_name = unittest.util.strclass(test.test_case.__class__)
if test_desc.startswith(class_name + '.'):
# In a typical unittest.TestCase scenario, test.id() returns with
# a class name formatted using unittest.util.strclass.
name = test_desc[len(class_name)+1:]
full_class_name = class_name
else:
# Otherwise make a best effort to guess the test name and full class
# path.
parts = test_desc.rsplit('.', 1)
name = parts[-1]
full_class_name = parts[0] if len(parts) == 2 else ''
self.name = _escape_xml_attr(name)
self.full_class_name = _escape_xml_attr(full_class_name)
def set_run_time(self, time_in_secs):
self.run_time = time_in_secs
def set_start_time(self, time_in_secs):
self.start_time = time_in_secs
def print_xml_summary(self, stream):
"""Prints an XML Summary of a TestCase.
Status and result are populated as per JUnit XML test result reporter.
A test that has been skipped will always have a skip reason,
as every skip method in Python's unittest requires the reason arg to be
passed.
Args:
stream: output stream to write test report XML to
"""
if self.skip_reason is None:
status = 'run'
result = 'completed'
else:
status = 'notrun'
result = 'suppressed'
test_case_attributes = [
('name', '%s' % self.name),
('status', '%s' % status),
('result', '%s' % result),
('time', '%.1f' % self.run_time),
('classname', self.full_class_name),
('timestamp', _iso8601_timestamp(self.start_time)),
]
_print_xml_element_header('testcase', test_case_attributes, stream, ' ')
self._print_testcase_details(stream)
stream.write(' </testcase>\n')
def _print_testcase_details(self, stream):
for error in self.errors:
outcome, exception_type, message, error_msg = error # pylint: disable=unpacking-non-sequence
message = _escape_xml_attr(_safe_str(message))
exception_type = _escape_xml_attr(str(exception_type))
error_msg = _escape_cdata(error_msg)
stream.write(' <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
% (outcome, message, exception_type, error_msg, outcome))
class _TestSuiteResult(object):
"""Private helper for _TextAndXMLTestResult."""
def __init__(self):
self.suites = {}
self.failure_counts = {}
self.error_counts = {}
self.overall_start_time = -1
self.overall_end_time = -1
self._testsuites_properties = {}
def add_test_case_result(self, test_case_result):
suite_name = type(test_case_result.test).__name__
if suite_name == '_ErrorHolder':
# _ErrorHolder is a special case created by unittest for class / module
# level functions.
suite_name = test_case_result.full_class_name.rsplit('.')[-1]
if ((six.PY3 and
isinstance(test_case_result.test, unittest.case._SubTest)) or
(six.PY2 and
isinstance(test_case_result.test, unittest3_backport.case._SubTest))):
# If the test case is a _SubTest, the real TestCase instance is
# available as _SubTest.test_case.
suite_name = type(test_case_result.test.test_case).__name__
self._setup_test_suite(suite_name)
self.suites[suite_name].append(test_case_result)
for error in test_case_result.errors:
# Only count the first failure or error so that the sum is equal to the
# total number of *testcases* that have failures or errors.
if error[0] == 'failure':
self.failure_counts[suite_name] += 1
break
elif error[0] == 'error':
self.error_counts[suite_name] += 1
break
def print_xml_summary(self, stream):
overall_test_count = sum(len(x) for x in self.suites.values())
overall_failures = sum(self.failure_counts.values())
overall_errors = sum(self.error_counts.values())
overall_attributes = [
('name', ''),
('tests', '%d' % overall_test_count),
('failures', '%d' % overall_failures),
('errors', '%d' % overall_errors),
('time', '%.1f' % (self.overall_end_time - self.overall_start_time)),
('timestamp', _iso8601_timestamp(self.overall_start_time)),
]
_print_xml_element_header('testsuites', overall_attributes, stream)
if self._testsuites_properties:
stream.write(' <properties>\n')
for name, value in sorted(six.iteritems(self._testsuites_properties)):
stream.write(' <property name="%s" value="%s"></property>\n' %
(_escape_xml_attr(name), _escape_xml_attr(str(value))))
stream.write(' </properties>\n')
for suite_name in self.suites:
suite = self.suites[suite_name]
suite_end_time = max(x.start_time + x.run_time for x in suite)
suite_start_time = min(x.start_time for x in suite)
failures = self.failure_counts[suite_name]
errors = self.error_counts[suite_name]
suite_attributes = [
('name', '%s' % suite_name),
('tests', '%d' % len(suite)),
('failures', '%d' % failures),
('errors', '%d' % errors),
('time', '%.1f' % (suite_end_time - suite_start_time)),
('timestamp', _iso8601_timestamp(suite_start_time)),
]
_print_xml_element_header('testsuite', suite_attributes, stream)
for test_case_result in suite:
test_case_result.print_xml_summary(stream)
stream.write('</testsuite>\n')
stream.write('</testsuites>\n')
def _setup_test_suite(self, suite_name):
"""Adds a test suite to the set of suites tracked by this test run.
Args:
suite_name: string, The name of the test suite being initialized.
"""
if suite_name in self.suites:
return
self.suites[suite_name] = []
self.failure_counts[suite_name] = 0
self.error_counts[suite_name] = 0
def set_end_time(self, timestamp_in_secs):
"""Sets the start timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_end_time = timestamp_in_secs
def set_start_time(self, timestamp_in_secs):
"""Sets the end timestamp of this test suite.
Args:
timestamp_in_secs: timestamp in seconds since epoch
"""
self.overall_start_time = timestamp_in_secs
class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
"""Private TestResult class that produces both formatted text results and XML.
Used by TextAndXMLTestRunner.
"""
_TEST_SUITE_RESULT_CLASS = _TestSuiteResult
_TEST_CASE_RESULT_CLASS = _TestCaseResult
def __init__(self, xml_stream, stream, descriptions, verbosity,
time_getter=_time_copy, testsuites_properties=None):
super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
self.xml_stream = xml_stream
self.pending_test_case_results = {}
self.suite = self._TEST_SUITE_RESULT_CLASS()
if testsuites_properties:
self.suite._testsuites_properties = testsuites_properties
self.time_getter = time_getter
# This lock guards any mutations on pending_test_case_results.
self._pending_test_case_results_lock = threading.RLock()
def startTest(self, test):
self.start_time = self.time_getter()
super(_TextAndXMLTestResult, self).startTest(test)
def stopTest(self, test):
# Grabbing the write lock to avoid conflicting with stopTestRun.
with self._pending_test_case_results_lock:
super(_TextAndXMLTestResult, self).stopTest(test)
result = self.get_pending_test_case_result(test)
if not result:
test_name = test.id() or str(test)
sys.stderr.write('No pending test case: %s\n' % test_name)
return
test_id = id(test)
run_time = self.time_getter() - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
del self.pending_test_case_results[test_id]
def startTestRun(self):
self.suite.set_start_time(self.time_getter())
super(_TextAndXMLTestResult, self).startTestRun()
def stopTestRun(self):
self.suite.set_end_time(self.time_getter())
# All pending_test_case_results will be added to the suite and removed from
# the pending_test_case_results dictionary. Grabbing the write lock to
# prevent results from being added during this process, which could lead
# to duplicated adds or accidentally erased pending results.
with self._pending_test_case_results_lock:
# Errors in the test fixture (setUpModule, tearDownModule,
# setUpClass, tearDownClass) can leave a pending result which
# never gets added to the suite. The runner calls stopTestRun
# which gives us an opportunity to add these errors for
# reporting here.
for test_id in self.pending_test_case_results:
result = self.pending_test_case_results[test_id]
if hasattr(self, 'start_time'):
run_time = self.suite.overall_end_time - self.start_time
result.set_run_time(run_time)
result.set_start_time(self.start_time)
self.suite.add_test_case_result(result)
self.pending_test_case_results.clear()
def _exc_info_to_string(self, err, test=None):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method must be overridden because the method signature in
unittest.TestResult changed between Python 2.2 and 2.4.
Args:
err: A sys.exc_info() tuple of values for an error.
test: The test method.
Returns:
A formatted exception string.
"""
if test:
return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test)
return ''.join(traceback.format_exception(*err))
def add_pending_test_case_result(self, test, error_summary=None,
skip_reason=None):
"""Adds result information to a test case result which may still be running.
If a result entry for the test already exists, add_pending_test_case_result
will add error summary tuples and/or overwrite skip_reason for the result.
If it does not yet exist, a result entry will be created.
Note that a test result is considered to have been run and passed
only if there are no errors or skip_reason.
Args:
test: A test method as defined by unittest
error_summary: A 4-tuple with the following entries:
1) a string identifier of either "failure" or "error"
2) an exception_type
3) an exception_message
4) a string version of a sys.exc_info()-style tuple of values
('error', err[0], err[1], self._exc_info_to_string(err))
If the length of errors is 0, then the test is either passed or
skipped.
skip_reason: a string explaining why the test was skipped
"""
with self._pending_test_case_results_lock:
test_id = id(test)
if test_id not in self.pending_test_case_results:
self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
test)
if error_summary:
self.pending_test_case_results[test_id].errors.append(error_summary)
if skip_reason:
self.pending_test_case_results[test_id].skip_reason = skip_reason
def delete_pending_test_case_result(self, test):
with self._pending_test_case_results_lock:
test_id = id(test)
del self.pending_test_case_results[test_id]
def get_pending_test_case_result(self, test):
test_id = id(test)
return self.pending_test_case_results.get(test_id, None)
def addSuccess(self, test):
super(_TextAndXMLTestResult, self).addSuccess(test)
self.add_pending_test_case_result(test)
def addError(self, test, err):
super(_TextAndXMLTestResult, self).addError(test, err)
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addFailure(self, test, err):
super(_TextAndXMLTestResult, self).addFailure(test, err)
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSkip(self, test, reason):
super(_TextAndXMLTestResult, self).addSkip(test, reason)
self.add_pending_test_case_result(test, skip_reason=reason)
def addExpectedFailure(self, test, err):
super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
if callable(getattr(test, 'recordProperty', None)):
test.recordProperty('EXPECTED_FAILURE',
self._exc_info_to_string(err, test=test))
self.add_pending_test_case_result(test)
def addUnexpectedSuccess(self, test):
super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
test_name = test.id() or str(test)
error_summary = ('error', '', '',
'Test case %s should have failed, but passed.'
% (test_name))
self.add_pending_test_case_result(test, error_summary=error_summary)
def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name
super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err)
if err is not None:
if issubclass(err[0], test.failureException):
error_summary = ('failure', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = ('error', err[0], err[1],
self._exc_info_to_string(err, test=test))
else:
error_summary = None
self.add_pending_test_case_result(subtest, error_summary=error_summary)
def printErrors(self):
super(_TextAndXMLTestResult, self).printErrors()
self.xml_stream.write('<?xml version="1.0"?>\n')
self.suite.print_xml_summary(self.xml_stream)
class TextAndXMLTestRunner(unittest.TextTestRunner):
"""A test runner that produces both formatted text results and XML.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
_TEST_RESULT_CLASS = _TextAndXMLTestResult
_xml_stream = None
_testsuites_properties = {}
def __init__(self, xml_stream=None, *args, **kwargs):
"""Initialize a TextAndXMLTestRunner.
Args:
xml_stream: file-like or None; XML-formatted test results are output
via this object's write() method. If None (the default), the
new instance behaves as described in the set_default_xml_stream method
documentation below.
*args: passed unmodified to unittest.TextTestRunner.__init__.
**kwargs: passed unmodified to unittest.TextTestRunner.__init__.
"""
super(TextAndXMLTestRunner, self).__init__(*args, **kwargs)
if xml_stream is not None:
self._xml_stream = xml_stream
# else, do not set self._xml_stream to None -- this allows implicit fallback
# to the class attribute's value.
@classmethod
def set_default_xml_stream(cls, xml_stream):
"""Sets the default XML stream for the class.
Args:
xml_stream: file-like or None; used for instances when xml_stream is None
or not passed to their constructors. If None is passed, instances
created with xml_stream=None will act as ordinary TextTestRunner
instances; this is the default state before any calls to this method
have been made.
"""
cls._xml_stream = xml_stream
def _makeResult(self):
if self._xml_stream is None:
return super(TextAndXMLTestRunner, self)._makeResult()
else:
return self._TEST_RESULT_CLASS(
self._xml_stream, self.stream, self.descriptions, self.verbosity,
testsuites_properties=self._testsuites_properties)
@classmethod
def set_testsuites_property(cls, key, value):
cls._testsuites_properties[key] = value
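# Usage sketch (illustrative only; the demo test case and output file name
# are assumptions, not part of this module): run a small suite while teeing
# an XML summary to a file alongside the usual text report.
if __name__ == '__main__':
  import unittest

  class _DemoTest(unittest.TestCase):

    def test_addition(self):
      self.assertEqual(1 + 1, 2)

  with open('results.xml', 'w') as xml_stream:
    runner = TextAndXMLTestRunner(xml_stream=xml_stream, verbosity=2)
    runner.run(unittest.TestLoader().loadTestsFromTestCase(_DemoTest))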
|
|
"""Support for LG webOS Smart TV."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import json
import logging
import os
from pickle import loads
from typing import Any
from aiowebostv import WebOsClient, WebOsTvPairError
import sqlalchemy as db
import voluptuous as vol
from homeassistant.components import notify as hass_notify
from homeassistant.components.automation import AutomationActionType
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
CONF_CLIENT_SECRET,
CONF_CUSTOMIZE,
CONF_HOST,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import (
Context,
Event,
HassJob,
HomeAssistant,
ServiceCall,
callback,
)
from homeassistant.helpers import config_validation as cv, discovery, entity_registry
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_BUTTON,
ATTR_CONFIG_ENTRY_ID,
ATTR_PAYLOAD,
ATTR_SOUND_OUTPUT,
CONF_ON_ACTION,
CONF_SOURCES,
DATA_CONFIG_ENTRY,
DATA_HASS_CONFIG,
DEFAULT_NAME,
DOMAIN,
PLATFORMS,
SERVICE_BUTTON,
SERVICE_COMMAND,
SERVICE_SELECT_SOUND_OUTPUT,
WEBOSTV_CONFIG_FILE,
WEBOSTV_EXCEPTIONS,
)
CUSTOMIZE_SCHEMA = vol.Schema(
{vol.Optional(CONF_SOURCES, default=[]): vol.All(cv.ensure_list, [cv.string])}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(CONF_CUSTOMIZE, default={}): CUSTOMIZE_SCHEMA,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_ICON): cv.string,
}
)
],
)
},
),
extra=vol.ALLOW_EXTRA,
)
CALL_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids})
BUTTON_SCHEMA = CALL_SCHEMA.extend({vol.Required(ATTR_BUTTON): cv.string})
COMMAND_SCHEMA = CALL_SCHEMA.extend(
{vol.Required(ATTR_COMMAND): cv.string, vol.Optional(ATTR_PAYLOAD): dict}
)
SOUND_OUTPUT_SCHEMA = CALL_SCHEMA.extend({vol.Required(ATTR_SOUND_OUTPUT): cv.string})
SERVICE_TO_METHOD = {
SERVICE_BUTTON: {"method": "async_button", "schema": BUTTON_SCHEMA},
SERVICE_COMMAND: {"method": "async_command", "schema": COMMAND_SCHEMA},
SERVICE_SELECT_SOUND_OUTPUT: {
"method": "async_select_sound_output",
"schema": SOUND_OUTPUT_SCHEMA,
},
}
_LOGGER = logging.getLogger(__name__)
def read_client_keys(config_file: str) -> dict[str, str]:
"""Read legacy client keys from file."""
if not os.path.isfile(config_file):
return {}
# Try to parse the file as being JSON
with open(config_file, encoding="utf8") as json_file:
try:
client_keys = json.load(json_file)
if isinstance(client_keys, dict):
return client_keys
return {}
except (json.JSONDecodeError, UnicodeDecodeError):
pass
# If the file is not JSON, read it as Sqlite DB
engine = db.create_engine(f"sqlite:///{config_file}")
table = db.Table("unnamed", db.MetaData(), autoload=True, autoload_with=engine)
results = engine.connect().execute(db.select([table])).fetchall()
db_client_keys = {k: loads(v) for k, v in results}
return db_client_keys
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the LG WebOS TV platform."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(DATA_CONFIG_ENTRY, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN not in config:
return True
config_file = hass.config.path(WEBOSTV_CONFIG_FILE)
if not (
client_keys := await hass.async_add_executor_job(read_client_keys, config_file)
):
_LOGGER.debug("No pairing keys, Not importing webOS Smart TV YAML config")
return True
async def async_migrate_task(
entity_id: str, conf: dict[str, str], key: str
) -> None:
_LOGGER.debug("Migrating webOS Smart TV entity %s unique_id", entity_id)
client = WebOsClient(conf[CONF_HOST], key)
tries = 0
while not client.is_connected():
try:
await client.connect()
except WEBOSTV_EXCEPTIONS:
if tries == 0:
_LOGGER.warning(
"Please make sure webOS TV %s is turned on to complete "
"the migration of configuration.yaml to the UI",
entity_id,
)
wait_time = 2 ** min(tries, 4) * 5
tries += 1
await asyncio.sleep(wait_time)
except WebOsTvPairError:
return
ent_reg = entity_registry.async_get(hass)
if not (
new_entity_id := ent_reg.async_get_entity_id(
Platform.MEDIA_PLAYER, DOMAIN, key
)
):
_LOGGER.debug(
"Not updating webOSTV Smart TV entity %s unique_id, entity missing",
entity_id,
)
return
uuid = client.hello_info["deviceUUID"]
ent_reg.async_update_entity(new_entity_id, new_unique_id=uuid)
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
**conf,
CONF_CLIENT_SECRET: key,
CONF_UNIQUE_ID: uuid,
},
)
ent_reg = entity_registry.async_get(hass)
tasks = []
for conf in config[DOMAIN]:
host = conf[CONF_HOST]
if (key := client_keys.get(host)) is None:
_LOGGER.debug(
"Not importing webOS Smart TV host %s without pairing key", host
)
continue
if entity_id := ent_reg.async_get_entity_id(Platform.MEDIA_PLAYER, DOMAIN, key):
tasks.append(asyncio.create_task(async_migrate_task(entity_id, conf, key)))
async def async_tasks_cancel(_event: Event) -> None:
"""Cancel config flow import tasks."""
for task in tasks:
if not task.done():
task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_tasks_cancel)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set the config entry up."""
host = entry.data[CONF_HOST]
key = entry.data[CONF_CLIENT_SECRET]
wrapper = WebOsClientWrapper(host, client_key=key)
await wrapper.connect()
async def async_service_handler(service: ServiceCall) -> None:
method = SERVICE_TO_METHOD[service.service]
data = service.data.copy()
data["method"] = method["method"]
async_dispatcher_send(hass, DOMAIN, data)
for service, method in SERVICE_TO_METHOD.items():
schema = method["schema"]
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=schema
)
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = wrapper
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    # Set up the notify platform; the notify component has no config entry
    # support yet, so we have to use discovery to load the platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
"notify",
DOMAIN,
{
CONF_NAME: entry.title,
ATTR_CONFIG_ENTRY_ID: entry.entry_id,
},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
if not entry.update_listeners:
entry.async_on_unload(entry.add_update_listener(async_update_options))
async def async_on_stop(_event: Event) -> None:
"""Unregister callbacks and disconnect."""
await wrapper.shutdown()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_on_stop)
)
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_control_connect(host: str, key: str | None) -> WebOsClient:
"""LG Connection."""
client = WebOsClient(host, key)
try:
await client.connect()
except WebOsTvPairError:
_LOGGER.warning("Connected to LG webOS TV %s but not paired", host)
raise
return client
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
client = hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
await hass_notify.async_reload(hass, DOMAIN)
await client.shutdown()
# unregister service calls, check if this is the last entry to unload
if unload_ok and not hass.data[DOMAIN][DATA_CONFIG_ENTRY]:
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class PluggableAction:
"""A pluggable action handler."""
def __init__(self) -> None:
"""Initialize."""
self._actions: dict[Callable[[], None], tuple[HassJob, dict[str, Any]]] = {}
def __bool__(self) -> bool:
"""Return if we have something attached."""
return bool(self._actions)
@callback
def async_attach(
self, action: AutomationActionType, variables: dict[str, Any]
) -> Callable[[], None]:
"""Attach a device trigger for turn on."""
@callback
def _remove() -> None:
del self._actions[_remove]
job = HassJob(action)
self._actions[_remove] = (job, variables)
return _remove
@callback
def async_run(self, hass: HomeAssistant, context: Context | None = None) -> None:
"""Run all turn on triggers."""
for job, variables in self._actions.values():
hass.async_run_hass_job(job, variables, context)
class WebOsClientWrapper:
"""Wrapper for a WebOS TV client with Home Assistant specific functions."""
def __init__(self, host: str, client_key: str) -> None:
"""Set up the client."""
self.host = host
self.client_key = client_key
self.turn_on = PluggableAction()
self.client: WebOsClient | None = None
async def connect(self) -> None:
"""Attempt a connection, but fail gracefully if tv is off for example."""
self.client = WebOsClient(self.host, self.client_key)
with suppress(*WEBOSTV_EXCEPTIONS, WebOsTvPairError):
await self.client.connect()
async def shutdown(self) -> None:
"""Unregister callbacks and disconnect."""
assert self.client
self.client.clear_state_update_callbacks()
await self.client.disconnect()
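# Note on the reconnect loop in async_migrate_task above: the wait between
# attempts follows a capped exponential backoff, 2 ** min(tries, 4) * 5
# seconds, i.e. 5, 10, 20, 40 and then a steady 80 seconds per retry.
# A minimal standalone sketch of that schedule (illustrative only):
#
#     def backoff_schedule(n_tries: int) -> list[int]:
#         return [2 ** min(t, 4) * 5 for t in range(n_tries)]
#
#     assert backoff_schedule(6) == [5, 10, 20, 40, 80, 80]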
|
|
#!/usr/bin/env python
# deltafy
# - a simple library that keeps track of modified/created/removed files and directories in a file tree
#
# Author: Marshall Culpepper
# Licensed under the Apache Public License v2 (see LICENSE.txt)
import os, sys, platform, sqlite3, time, stat
from datetime import datetime, timedelta
class DeltaList(list):
def has_path(self, path):
for delta in self:
if delta.get_path() == path: return True
return False
def is_updated(self, path):
for delta in self:
if delta.get_path() == path:
return delta.get_status() == Delta.MODIFIED or \
delta.get_status() == Delta.CREATED
return False
class Delta:
CREATED = 0
MODIFIED = 1
DELETED = 2
def __init__(self, path, timestamp, status):
self.path = path
self.timestamp = timestamp
self.status = status
def __str__(self):
return "%s [%s] @ %s" % (self.get_status_str(), self.get_path(), self.get_timestamp())
def get_path(self):
return self.path
def get_status(self):
return self.status
def get_status_str(self):
if self.status == self.CREATED: return "CREATED"
elif self.status == self.MODIFIED: return "MODIFIED"
else: return "DELETED"
def get_timestamp(self):
return self.timestamp
home = os.path.expanduser('~')
if platform.system() == 'Windows':
home = os.environ['USERPROFILE']
class Deltafy:
db_home = os.path.join(home, '.deltafy')
db_path = os.path.join(db_home, 'deltas')
@classmethod
def get_database_path(cls):
return cls.db_path
@classmethod
def set_database_path(cls, path):
cls.db_path = path
@classmethod
def get_modified_datetime(cls, path):
return datetime.fromtimestamp(os.stat(path).st_mtime)
@classmethod
def compare_datetime(cls, dt1, dt2, mindelta=None):
delta = dt1 - dt2
if mindelta is None:
mindelta = timedelta(microseconds=0)
if delta < mindelta: return -1
elif delta > mindelta: return 1
else: return 0
@classmethod
def compare_paths(cls, path1, path2, mindelta=None):
time1 = datetime.fromtimestamp(os.stat(path1).st_mtime)
time2 = datetime.fromtimestamp(os.stat(path2).st_mtime)
return cls.compare_datetime(time1, time2, mindelta)
@classmethod
def needs_update(cls, src_path, dest_path, mindelta=None):
"checks if dest_path needs to be updated by src_path with a default minimum delta of 1 second"
if mindelta is None:
mindelta = timedelta(seconds=1)
return not os.path.exists(dest_path) or \
(os.path.exists(src_path) and \
Deltafy.compare_paths(src_path, dest_path, mindelta) > 0)
@classmethod
def needs_update_timestamp(cls, src_path, dest_ts, mindelta=None):
"checks if dest_ts needs to be updated by src_path with a default minimum delta of 1 second"
return os.path.exists(src_path) and \
cls.compare_datetime(cls.get_modified_datetime(src_path), dest_ts, mindelta) > 0
def __init__(self, dir, include_callback=None):
self.dir = dir
self.include_callback = include_callback
if not os.path.exists(self.db_home):
os.makedirs(self.db_home)
self.conn = sqlite3.connect(self.db_path, detect_types=sqlite3.PARSE_DECLTYPES)
self.conn.execute('create table if not exists timestamps (path text, modified timestamp)')
def clear_state(self):
self.conn.execute('delete from timestamps')
self.conn.commit()
def get_timestamp(self, path):
c = self.conn.cursor()
c.execute('select modified from timestamps where path = ?', (path,))
row = c.fetchone()
timestamp = None
if row is not None and len(row) == 1:
timestamp = row[0]
c.close()
return timestamp
def insert_timestamp(self, path, path_stat):
timestamp = datetime.fromtimestamp(path_stat.st_mtime)
self.conn.execute('insert into timestamps(path, modified) values (?, ?)', (path, timestamp))
self.conn.commit()
return timestamp
def update_timestamp(self, path, timestamp):
self.conn.execute('update timestamps set modified = ? where path = ?', (timestamp, path))
self.conn.commit()
def delete_timestamp(self, path):
self.conn.execute('delete from timestamps where path = ?', (path,))
self.conn.commit()
def get_paths(self):
c = self.conn.cursor()
c.execute('select path from timestamps')
rows = c.fetchall()
paths = [row[0] for row in rows]
c.close()
return paths
def check_delta(self, path, path_stat):
timestamp = self.get_timestamp(path)
modified_time = datetime.fromtimestamp(path_stat.st_mtime)
if timestamp is None:
timestamp = self.insert_timestamp(path, path_stat)
return Delta(path, timestamp, Delta.CREATED)
elif modified_time - timestamp >= timedelta(seconds=1):
# this needs to be a little fuzzy.
# windows loses a few microseconds in precision
self.update_timestamp(path, modified_time)
return Delta(path, modified_time, Delta.MODIFIED)
return None
def scan(self):
deltas = DeltaList()
# first pass against the filesystem
self.scan_path(self.dir, deltas)
        # second pass: check against paths in the db
        # to find paths deleted from the filesystem
for path in self.get_paths():
if path.startswith(self.dir):
include_path = True
if self.include_callback is not None:
include_path = self.include_callback(path, True)
if not include_path:
continue
if not os.path.exists(path):
self.delete_timestamp(path)
deltas.append(Delta(path, 0, Delta.DELETED))
return deltas
def scan_single_file(self, file):
return self.check_delta(file, os.stat(file))
def scan_path(self, path, deltas):
for file in os.listdir(path):
absolute_path = os.path.join(path, file)
# reduce to just one stat, major speed up in windows
path_stat = os.stat(absolute_path)
if stat.S_ISDIR(path_stat.st_mode):
include_dir = True
if self.include_callback is not None:
include_dir = self.include_callback(absolute_path, False)
if not include_dir:
continue
self.scan_path(absolute_path, deltas)
else:
include_file = True
if self.include_callback is not None:
include_file = self.include_callback(absolute_path, True)
if not include_file:
continue
file_delta = self.check_delta(absolute_path, path_stat)
if file_delta is not None:
deltas.append(file_delta)
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Usage: %s <dir>" % sys.argv[0])
        sys.exit(-1)
    deltafy = Deltafy(sys.argv[1])
    sys.stdout.write("Initial scan...")
    deltafy.scan()
    print("done\nScanning for changes (Ctrl+C to stop)...")
    while True:
        try:
            time.sleep(1)
            deltas = deltafy.scan()
            for delta in deltas:
                print(str(delta))
        except KeyboardInterrupt:
            print("Killed.")
            break
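# Usage sketch (illustrative; the watched path is hypothetical):
#
#     deltafy = Deltafy('/path/to/watched/tree')
#     deltafy.scan()                 # first scan records baseline timestamps
#     # ... files change on disk ...
#     for delta in deltafy.scan():   # later scans yield Delta objects
#         if delta.get_status() in (Delta.CREATED, Delta.MODIFIED):
#             print(delta)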
|
|
import mock
import pytest
import urllib
from django.core.exceptions import MultipleObjectsReturned
from osf.models import Guid, NodeLicenseRecord, OSFUser
from osf_tests.factories import AuthUserFactory, UserFactory, NodeFactory, NodeLicenseRecordFactory, \
RegistrationFactory, PreprintFactory, PreprintProviderFactory
from tests.base import OsfTestCase
from tests.test_websitefiles import TestFile
from website.settings import MFR_SERVER_URL, WATERBUTLER_URL
@pytest.mark.django_db
class TestGuid:
def test_long_id_gets_generated_on_creation(self):
obj = NodeLicenseRecordFactory()
assert obj._id
assert len(obj._id) > 5
def test_loading_by_object_id(self):
obj = NodeLicenseRecordFactory()
assert NodeLicenseRecord.load(obj._id) == obj
def test_loading_by_short_guid(self):
obj = UserFactory()
assert OSFUser.load(obj._id) == obj
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory,
RegistrationFactory,
])
def test_short_guid_gets_generated_on_creation(self, Factory):
obj = Factory()
assert obj._id
assert len(obj._id) == 5
@pytest.mark.django_db
class TestReferent:
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_referent(self, Factory):
obj = Factory()
guid = Guid.objects.get(_id=obj._id)
assert guid.referent == obj
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_referent_can_be_set(self, Factory):
obj = Factory()
obj1 = Factory()
guid = Guid.load(obj._id)
assert guid.referent == obj # sanity check
guid.referent = obj1
assert guid.referent == obj1
@pytest.mark.skip('I don\'t actually think we do this anywhere')
def test_swapping_guids(self):
user = UserFactory()
node = NodeFactory()
user_guid = user.guids[0]
node_guid = node.guids[0]
user._id = node_guid._id
node._id = user_guid._id
assert node_guid._id == user._id
assert user_guid._id == node._id
def test_id_matches(self):
user = UserFactory()
guid = Guid.objects.get(_id=user._id)
assert user._id == guid._id
@pytest.mark.skip('I don\'t actually think we do this anywhere')
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_nulling_out_guid(self, Factory):
obj = Factory()
guid = Guid.load(obj._id)
obj.guid = None
obj.save()
obj.refresh_from_db()
# queryset cache returns the old version
guid.refresh_from_db()
assert obj.guid != guid
assert guid.guid != obj.guid.guid
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory,
])
def test_querying_with_multiple_guids(self, Factory):
obj = Factory()
guids = [obj.guids.first()]
for i in range(0, 16):
guids.append(Guid.objects.create(referent=obj))
try:
Factory._meta.model.objects.get(id=obj.id)
except MultipleObjectsReturned as ex:
pytest.fail('Multiple objects returned for {} with multiple guids. {}'.format(Factory._meta.model, ex))
@pytest.mark.enable_bookmark_creation
class TestResolveGuid(OsfTestCase):
def setUp(self):
super(TestResolveGuid, self).setUp()
self.node = NodeFactory()
def test_resolve_guid(self):
res_guid = self.app.get(self.node.web_url_for('node_setting', _guid=True), auth=self.node.creator.auth)
res_full = self.app.get(self.node.web_url_for('node_setting'), auth=self.node.creator.auth)
assert res_guid.text == res_full.text
def test_resolve_guid_no_referent(self):
guid = Guid.load(self.node._id)
guid.referent = None
guid.save()
res = self.app.get(
self.node.web_url_for('node_setting', _guid=True),
auth=self.node.creator.auth,
expect_errors=True,
)
assert res.status_code == 404
@mock.patch('osf.models.node.Node.deep_url', None)
def test_resolve_guid_no_url(self):
res = self.app.get(
self.node.web_url_for('node_setting', _guid=True),
auth=self.node.creator.auth,
expect_errors=True,
)
assert res.status_code == 404
def test_resolve_guid_download_file(self):
pp = PreprintFactory(finish=True)
res = self.app.get(pp.url + 'download')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
pp.primary_file.create_version(
creator=pp.node.creator,
location={u'folder': u'osf', u'object': u'deadbe', u'service': u'cloud'},
metadata={u'contentType': u'img/png', u'size': 9001}
)
pp.primary_file.save()
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=2&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/?version=1')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
unpub_pp = PreprintFactory(project=self.node, is_published=False)
res = self.app.get(unpub_pp.url + 'download/?version=1', auth=self.node.creator.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, unpub_pp.node._id, unpub_pp.primary_file.provider, unpub_pp.primary_file.path) in res.location
@mock.patch('website.settings.USE_EXTERNAL_EMBER', True)
@mock.patch('website.settings.EXTERNAL_EMBER_APPS', {
'preprints': {
'server': 'http://localhost:4200',
'path': '/preprints/'
},
})
def test_resolve_guid_download_file_from_emberapp_preprints(self):
provider = PreprintProviderFactory(_id='sockarxiv', name='Sockarxiv')
pp = PreprintFactory(finish=True, provider=provider)
assert pp.url.startswith('/preprints/sockarxiv')
res = self.app.get(pp.url + 'download')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
@mock.patch('website.settings.USE_EXTERNAL_EMBER', True)
@mock.patch('website.settings.EXTERNAL_EMBER_APPS', {
'preprints': {
'server': 'http://localhost:4200',
'path': '/preprints/'
},
})
def test_resolve_guid_download_file_from_emberapp_preprints_unpublished(self):
# non-branded domains
provider = PreprintProviderFactory(_id='sockarxiv', name='Sockarxiv', reviews_workflow='pre-moderation')
# branded domains
branded_provider = PreprintProviderFactory(_id='spot', name='Spotarxiv', reviews_workflow='pre-moderation')
branded_provider.allow_submissions = False
branded_provider.domain = 'https://www.spotarxiv.com'
branded_provider.description = 'spots not dots'
branded_provider.domain_redirect_enabled = True
branded_provider.share_publish_type = 'Thesis'
branded_provider.save()
# test_provider_submitter_can_download_unpublished
submitter = AuthUserFactory()
project = NodeFactory(creator=submitter)
pp = PreprintFactory(finish=True, provider=provider, is_published=False, project=project)
pp.run_submit(submitter)
pp_branded = PreprintFactory(finish=True, provider=branded_provider, is_published=False, project=project, filename='preprint_file_two.txt')
pp_branded.run_submit(submitter)
res = self.app.get('{}download'.format(pp.url), auth=submitter.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('{}download'.format(pp_branded.url), auth=submitter.auth)
assert res.status_code == 302
# test_provider_super_user_can_download_unpublished
super_user = AuthUserFactory()
super_user.is_superuser = True
super_user.save()
res = self.app.get('{}download'.format(pp.url), auth=super_user.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('{}download'.format(pp_branded.url), auth=super_user.auth)
assert res.status_code == 302
# test_provider_moderator_can_download_unpublished
moderator = AuthUserFactory()
provider.add_to_group(moderator, 'moderator')
provider.save()
res = self.app.get('{}download'.format(pp.url), auth=moderator.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
branded_provider.add_to_group(moderator, 'moderator')
branded_provider.save()
res = self.app.get('{}download'.format(pp_branded.url), auth=moderator.auth)
assert res.status_code == 302
# test_provider_admin_can_download_unpublished
admin = AuthUserFactory()
provider.add_to_group(admin, 'admin')
provider.save()
res = self.app.get('{}download'.format(pp.url), auth=admin.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
branded_provider.add_to_group(admin, 'admin')
branded_provider.save()
res = self.app.get('{}download'.format(pp_branded.url), auth=admin.auth)
assert res.status_code == 302
def test_resolve_guid_download_file_export(self):
pp = PreprintFactory(finish=True)
res = self.app.get(pp.url + 'download?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Faction%3Ddownload'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Faction%3Ddownload'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download?format=asdf'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Faction%3Ddownload'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download/?format=asdf'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Faction%3Ddownload'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
pp.primary_file.create_version(
creator=pp.node.creator,
location={u'folder': u'osf', u'object': u'deadbe', u'service': u'cloud'},
metadata={u'contentType': u'img/png', u'size': 9001}
)
pp.primary_file.save()
res = self.app.get(pp.url + 'download/?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
        query_params = res.location.split('%3F')[1].split('%26')
        assert 'action%3Ddownload' in query_params
        assert 'version%3D2' in query_params
        assert 'direct' in query_params
res = self.app.get(pp.url + 'download/?format=asdf&version=1')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(urllib.quote(WATERBUTLER_URL), pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
        query_params = res.location.split('%3F')[1].split('%26')
        assert 'action%3Ddownload' in query_params
        assert 'version%3D1' in query_params
        assert 'direct' in query_params
unpub_pp = PreprintFactory(project=self.node, is_published=False)
res = self.app.get(unpub_pp.url + 'download?format=asdf', auth=unpub_pp.node.creator.auth)
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(urllib.quote(WATERBUTLER_URL), unpub_pp.node._id, unpub_pp.primary_file.provider, unpub_pp.primary_file.path) in res.location
        query_params = res.location.split('%3F')[1].split('%26')
        assert 'action%3Ddownload' in query_params
        assert 'version%3D1' in query_params
        assert 'direct' in query_params
def test_resolve_guid_download_file_export_same_format_optimization(self):
pp = PreprintFactory(filename='test.pdf', finish=True)
res = self.app.get(pp.url + 'download/?format=pdf')
assert res.status_code == 302
assert '{}/export?'.format(MFR_SERVER_URL) not in res.location
assert '{}/v1/resources/{}/providers/{}{}?action=download&version=1&direct'.format(WATERBUTLER_URL, pp.node._id, pp.primary_file.provider, pp.primary_file.path) in res.location
def test_resolve_guid_download_errors(self):
testfile = TestFile.get_or_create(self.node, 'folder/path')
testfile.name = 'asdf'
testfile.materialized_path = '/folder/path'
guid = testfile.get_guid(create=True)
testfile.save()
testfile.delete()
res = self.app.get('/{}/download'.format(guid), expect_errors=True)
assert res.status_code == 404
pp = PreprintFactory(is_published=False)
res = self.app.get(pp.url + 'download', expect_errors=True)
assert res.status_code == 404
pp.is_published = True
pp.save()
pp.node.is_public = False
pp.node.save()
non_contrib = AuthUserFactory()
res = self.app.get(pp.url + 'download', auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
pp.node.is_deleted = True
pp.node.save()
res = self.app.get(pp.url + 'download', auth=non_contrib.auth, expect_errors=True)
# assert res.status_code == 410
# This will throw an unauthorized error before it reaches deleted
assert res.status_code == 403
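# The download redirects asserted above all share one WaterButler URL shape.
# A small helper capturing it (illustrative only, not part of the suite):
#
#     def expected_wb_download_url(node_id, provider, path, version=1):
#         return '{}/v1/resources/{}/providers/{}{}?action=download&version={}&direct'.format(
#             WATERBUTLER_URL, node_id, provider, path, version)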
|
|
from pyrep.robots.mobiles.mobile_base import MobileBase
from pyrep.robots.configuration_paths.holonomic_configuration_path import (
HolonomicConfigurationPath)
from pyrep.backend import utils
from pyrep.const import PYREP_SCRIPT_TYPE
from pyrep.const import ConfigurationPathAlgorithms as Algos
from pyrep.errors import ConfigurationPathError
from typing import List
from pyrep.objects.joint import Joint
from math import pi, sqrt
class HolonomicBase(MobileBase):
def __init__(self,
count: int,
num_wheels: int,
distance_from_target: float,
name: str,
max_velocity: float = 4,
max_velocity_rotation: float = 6,
max_acceleration: float = 0.035):
"""Init.
:param count: used for multiple copies of robots.
:param num_wheels: number of actuated wheels.
:param distance_from_target: offset from target.
:param name: string with robot name (same as base in vrep model).
:param max_velocity: bounds x,y velocity for motion planning.
:param max_velocity_rotation: bounds yaw velocity for motion planning.
:param max_acceleration: bounds acceleration for motion planning.
"""
super().__init__(count, num_wheels, name)
suffix = '' if count == 0 else '#%d' % (count - 1)
self.paramP = 20
self.paramO = 10
# self.paramO = 30
self.previous_forw_back_vel = 0
self.previous_left_right_vel = 0
self.previous_rot_vel = 0
self.accelF = max_acceleration
self.maxV = max_velocity
self.max_v_rot = max_velocity_rotation
self.dist1 = distance_from_target
joint_slipping_names = [
'%s_slipping_m_joint%s%s' % (name, str(i + 1), suffix) for i in
range(self.num_wheels)]
self.joints_slipping = [Joint(jsname)
for jsname in joint_slipping_names]
def set_base_angular_velocites(self, velocity: List[float]):
"""Calls required functions to achieve desired omnidirectional effect.
:param velocity: A List with forwardBackward, leftRight and rotation
            velocity (in radians/s)
"""
# self._reset_wheel()
fBVel = velocity[0]
lRVel = velocity[1]
rVel = velocity[2]
self.set_joint_target_velocities(
[-fBVel - lRVel - rVel, -fBVel + lRVel - rVel,
-fBVel - lRVel + rVel, -fBVel + lRVel + rVel])
def get_linear_path(self, position: List[float],
angle=0) -> HolonomicConfigurationPath:
"""Initialize linear path and check for collision along it.
Must specify either rotation in euler or quaternions, but not both!
:param position: The x, y position of the target.
:param angle: The z orientation of the target (in radians).
:raises: ConfigurationPathError if no path could be created.
:return: A linear path in the 2d space.
"""
position_base = self.get_position()
angle_base = self.get_orientation()[-1]
self.target_base.set_position(
[position[0], position[1], self.target_z])
self.target_base.set_orientation([0, 0, angle])
handle_base = self.get_handle()
handle_target_base = self.target_base.get_handle()
_, ret_floats, _, _ = utils.script_call(
'getBoxAdjustedMatrixAndFacingAngle@PyRep', PYREP_SCRIPT_TYPE,
ints=[handle_base, handle_target_base])
m = ret_floats[:-1]
angle = ret_floats[-1]
self.intermediate_target_base.set_position(
[m[3] - m[0] * self.dist1, m[7] - m[4] * self.dist1,
self.target_z])
self.intermediate_target_base.set_orientation([0, 0, angle])
self.target_base.set_orientation([0, 0, angle])
path = [[position_base[0], position_base[1], angle_base],
[position[0], position[1], angle]]
if self._check_collision_linear_path(path):
raise ConfigurationPathError(
'Could not create path. '
'An object was detected on the linear path.')
return HolonomicConfigurationPath(self, path)
def get_nonlinear_path(self, position: List[float],
angle=0,
boundaries=2,
path_pts=600,
ignore_collisions=False,
algorithm=Algos.RRTConnect
) -> HolonomicConfigurationPath:
"""Gets a non-linear (planned) configuration path given a target pose.
:param position: The x, y, z position of the target.
:param angle: The z orientation of the target (in radians).
:param boundaries: A float defining the path search in x and y direction
[[-boundaries,boundaries],[-boundaries,boundaries]].
:param path_pts: The number of sampled points returned from the
computed path
:param ignore_collisions: If collision checking should be disabled.
:param algorithm: Algorithm used to compute path
:raises: ConfigurationPathError if no path could be created.
:return: A non-linear path (x,y,angle) in the xy configuration space.
"""
path = self._get_nonlinear_path_points(
position, angle, boundaries, path_pts, ignore_collisions, algorithm)
return HolonomicConfigurationPath(self, path)
def get_base_actuation(self):
"""Proportional controller.
        :return: A list with forward/backward, left/right and rotational
            velocity, and a bool representing whether the target is reached.
"""
handleBase = self.get_handle()
handle_inter_target_base = self.intermediate_target_base.get_handle()
pos_v = self.target_base.get_position(relative_to=self)
or_v = self.target_base.get_orientation(relative_to=self)
pos_inter = self.intermediate_target_base.get_position(
relative_to=self)
or_inter = self.intermediate_target_base.get_orientation(
relative_to=self)
if (sqrt((pos_v[0]) ** 2 + (
pos_v[1]) ** 2) - self.dist1) < 0.001 and or_v[-1] < 0.1 * pi / 180:
return [self.previous_forw_back_vel, self.previous_left_right_vel,
self.previous_rot_vel], True
forw_back_vel = pos_inter[1] * self.paramP
left_right_vel = pos_inter[0] * self.paramP
rot_vel = - or_inter[2] * self.paramO
v = sqrt(forw_back_vel * forw_back_vel +
left_right_vel * left_right_vel)
if v > self.maxV:
forw_back_vel = forw_back_vel * self.maxV / v
left_right_vel = left_right_vel * self.maxV / v
if (abs(rot_vel) > self.max_v_rot):
rot_vel = self.max_v_rot * rot_vel / abs(rot_vel)
df = forw_back_vel - self.previous_forw_back_vel
ds = left_right_vel - self.previous_left_right_vel
dr = rot_vel - self.previous_rot_vel
if abs(df) > self.maxV * self.accelF:
df = abs(df) * (self.maxV * self.accelF) / df
if abs(ds) > self.maxV * self.accelF:
ds = abs(ds) * (self.maxV * self.accelF) / ds
if abs(dr) > self.max_v_rot * self.accelF:
dr = abs(dr) * (self.max_v_rot * self.accelF) / dr
forw_back_vel = self.previous_forw_back_vel + df
left_right_vel = self.previous_left_right_vel + ds
rot_vel = self.previous_rot_vel + dr
self.previous_forw_back_vel = forw_back_vel
self.previous_left_right_vel = left_right_vel
self.previous_rot_vel = rot_vel
return [forw_back_vel, left_right_vel, rot_vel], False
def _reset_wheel(self):
"""Required to achieve desired omnidirectional wheel effect.
"""
[j.reset_dynamic_object() for j in self.wheels]
p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]
for i in range(self.num_wheels):
self.joints_slipping[i].set_position([0, 0, 0],
relative_to=self.joints[i],
reset_dynamics=False)
self.joints_slipping[i].set_orientation(p[i],
relative_to=self.joints[i],
reset_dynamics=False)
self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],
reset_dynamics=False)
self.wheels[i].set_orientation([0, 0, 0],
relative_to=self.joints[i],
reset_dynamics=False)
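# Usage sketch (illustrative; the scene file and concrete robot class are
# assumptions, not part of PyRep): plan a linear path to a target pose and
# drive the base with the proportional controller until it reports arrival.
#
#     from pyrep import PyRep
#
#     pr = PyRep()
#     pr.launch('scene_with_holonomic_robot.ttt', headless=True)
#     pr.start()
#     base = MyHolonomicRobot()  # a concrete HolonomicBase subclass
#     base.get_linear_path(position=[1.0, 1.0], angle=0.0)
#     done = False
#     while not done:
#         velocities, done = base.get_base_actuation()
#         base.set_base_angular_velocites(velocities)
#         pr.step()
#     pr.stop()
#     pr.shutdown()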
|
|
import os
import numpy as np
from discretize.utils import mkvc
from discretize.utils.code_utils import deprecate_method
import warnings
try:
from discretize.mixins.vtk_mod import InterfaceTensorread_vtk
except ImportError:
InterfaceTensorread_vtk = object
class TensorMeshIO(InterfaceTensorread_vtk):
"""Class for managing the input/output of tensor meshes and models.
The ``TensorMeshIO`` class contains a set of class methods specifically
for the :class:`~discretize.TensorMesh` class. These include:
- Read/write tensor meshes to file
- Read/write models defined on tensor meshes
"""
@classmethod
def _readUBC_3DMesh(cls, file_name):
"""Read 3D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
        # Internal function to read cell size lines for the UBC mesh files.
def readCellLine(line):
line_list = []
for seg in line.split():
if "*" in seg:
sp = seg.split("*")
seg_arr = np.ones((int(sp[0]),)) * float(sp[1])
else:
seg_arr = np.array([float(seg)], float)
line_list.append(seg_arr)
return np.concatenate(line_list)
# Read the file as line strings, remove lines with comment = !
        msh = np.genfromtxt(file_name, delimiter="\n", dtype=str, comments="!")
        # First line is the size of the model
sizeM = np.array(msh[0].split(), dtype=float)
# Second line is the South-West-Top corner coordinates.
origin = np.array(msh[1].split(), dtype=float)
# Read the cell sizes
h1 = readCellLine(msh[2])
h2 = readCellLine(msh[3])
h3temp = readCellLine(msh[4])
# Invert the indexing of the vector to start from the bottom.
h3 = h3temp[::-1]
# Adjust the reference point to the bottom south west corner
origin[2] = origin[2] - np.sum(h3)
# Make the mesh
tensMsh = cls([h1, h2, h3], origin=origin)
return tensMsh
@classmethod
def _readUBC_2DMesh(cls, file_name):
"""Read 2D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
fopen = open(file_name, "r")
# Read down the file and unpack dx vector
def unpackdx(fid, nrows):
for ii in range(nrows):
line = fid.readline()
var = np.array(line.split(), dtype=float)
if ii == 0:
x0 = var[0]
xvec = np.ones(int(var[2])) * (var[1] - var[0]) / int(var[2])
xend = var[1]
else:
xvec = np.hstack(
(xvec, np.ones(int(var[1])) * (var[0] - xend) / int(var[1]))
)
xend = var[0]
return x0, xvec
# Start with dx block
# First line specifies the number of rows for x-cells
line = fopen.readline()
# Strip comments lines
while line.startswith("!"):
line = fopen.readline()
nl = np.array(line.split(), dtype=int)
[x0, dx] = unpackdx(fopen, nl[0])
# Move down the file until reaching the z-block
line = fopen.readline()
if not line:
line = fopen.readline()
# End with dz block
# First line specifies the number of rows for z-cells
line = fopen.readline()
nl = np.array(line.split(), dtype=int)
[z0, dz] = unpackdx(fopen, nl[0])
# Flip z0 to be the bottom of the mesh for SimPEG
z0 = -(z0 + sum(dz))
dz = dz[::-1]
# Make the mesh
tensMsh = cls([dx, dz], origin=(x0, z0))
fopen.close()
return tensMsh
@classmethod
def read_UBC(cls, file_name, directory=""):
"""Read 2D or 3D tensor mesh from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file or just its name if directory is specified
directory : str, optional
directory where the UBC-GIF file lives
Returns
-------
discretize.TensorMesh
The tensor mesh
"""
# Check the expected mesh dimensions
fname = os.path.join(directory, file_name)
# Read the file as line strings, remove lines with comment = !
msh = np.genfromtxt(
            fname, delimiter="\n", dtype=str, comments="!", max_rows=1
)
        # First line is the size of the model
sizeM = np.array(msh.ravel()[0].split(), dtype=float)
# Check if the mesh is a UBC 2D mesh
if sizeM.shape[0] == 1:
Tnsmsh = cls._readUBC_2DMesh(fname)
# Check if the mesh is a UBC 3D mesh
elif sizeM.shape[0] == 3:
Tnsmsh = cls._readUBC_3DMesh(fname)
else:
raise Exception("File format not recognized")
return Tnsmsh
def _readModelUBC_2D(mesh, file_name):
"""Read UBC-GIF formatted model file for 2D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file
Returns
-------
(n_cells) numpy.ndarray
The model defined on the 2D tensor mesh
"""
        # Open file and skip header... assume that we know the mesh already
        obsfile = np.genfromtxt(file_name, delimiter=" \n", dtype=str, comments="!")
dim = tuple(np.array(obsfile[0].split(), dtype=int))
if mesh.shape_cells != dim:
raise Exception("Dimension of the model and mesh mismatch")
model = []
for line in obsfile[1:]:
model.extend([float(val) for val in line.split()])
model = np.asarray(model)
if not len(model) == mesh.nC:
raise Exception(
"""Something is not right, expected size is {:d}
but unwrap vector is size {:d}""".format(
mesh.nC, len(model)
)
)
return model.reshape(mesh.vnC, order="F")[:, ::-1].reshape(-1, order="F")
def _readModelUBC_3D(mesh, file_name):
"""Read UBC-GIF formatted model file for 3D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file
Returns
-------
(n_cells) numpy.ndarray
The model defined on the 3D tensor mesh
"""
f = open(file_name, "r")
model = np.array(list(map(float, f.readlines())))
f.close()
nCx, nCy, nCz = mesh.shape_cells
model = np.reshape(model, (nCz, nCx, nCy), order="F")
model = model[::-1, :, :]
model = np.transpose(model, (1, 2, 0))
model = mkvc(model)
return model
def read_model_UBC(mesh, file_name, directory=""):
"""Read UBC-GIF formatted model file for 2D or 3D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file or just its name if directory is specified
directory : str, optional
directory where the UBC-GIF file lives
Returns
-------
(n_cells) numpy.ndarray
The model defined on the mesh
"""
fname = os.path.join(directory, file_name)
if mesh.dim == 3:
model = mesh._readModelUBC_3D(fname)
elif mesh.dim == 2:
model = mesh._readModelUBC_2D(fname)
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
return model
def write_model_UBC(mesh, file_name, model, directory=""):
"""Write 2D or 3D tensor model to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file or just its name if directory is specified
model : (n_cells) numpy.ndarray
directory : str, optional
output directory
"""
fname = os.path.join(directory, file_name)
if mesh.dim == 3:
# Reshape model to a matrix
modelMat = mesh.reshape(model, "CC", "CC", "M")
# Transpose the axes
modelMatT = modelMat.transpose((2, 0, 1))
# Flip z to positive down
modelMatTR = mkvc(modelMatT[::-1, :, :])
np.savetxt(fname, modelMatTR.ravel())
elif mesh.dim == 2:
modelMat = mesh.reshape(model, "CC", "CC", "M").T[::-1]
f = open(fname, "w")
f.write("{:d} {:d}\n".format(*mesh.shape_cells))
f.close()
f = open(fname, "ab")
np.savetxt(f, modelMat)
f.close()
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
def _writeUBC_3DMesh(mesh, file_name, comment_lines=""):
"""Write 3D tensor mesh to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file
comment_lines : str, optional
            comment lines are preceded with '!'
"""
if not mesh.dim == 3:
raise Exception("Mesh must be 3D")
s = comment_lines
s += "{0:d} {1:d} {2:d}\n".format(*tuple(mesh.vnC))
        # Have to do it in the same operation or use mesh.origin.copy(),
        # otherwise mesh.origin is updated.
origin = mesh.origin + np.array([0, 0, mesh.h[2].sum()])
nCx, nCy, nCz = mesh.shape_cells
s += "{0:.6f} {1:.6f} {2:.6f}\n".format(*tuple(origin))
s += ("%.6f " * nCx + "\n") % tuple(mesh.h[0])
s += ("%.6f " * nCy + "\n") % tuple(mesh.h[1])
s += ("%.6f " * nCz + "\n") % tuple(mesh.h[2][::-1])
f = open(file_name, "w")
f.write(s)
f.close()
def _writeUBC_2DMesh(mesh, file_name, comment_lines=""):
"""Write 2D tensor mesh to UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path for the output mesh file
comment_lines : str, optional
            comment lines are preceded with '!'
"""
if not mesh.dim == 2:
raise Exception("Mesh must be 2D")
def writeF(fx, outStr=""):
# Init
i = 0
origin = True
x0 = fx[i]
f = fx[i]
number_segment = 0
auxStr = ""
while True:
i = i + 1
if i >= fx.size:
break
dx = -f + fx[i]
f = fx[i]
n = 1
for j in range(i + 1, fx.size):
if -f + fx[j] == dx:
n += 1
i += 1
f = fx[j]
else:
break
number_segment += 1
if origin:
auxStr += "{:.10f} {:.10f} {:d} \n".format(x0, f, n)
origin = False
else:
auxStr += "{:.10f} {:d} \n".format(f, n)
auxStr = "{:d}\n".format(number_segment) + auxStr
outStr += auxStr
return outStr
# Grab face coordinates
fx = mesh.nodes_x
fz = -mesh.nodes_y[::-1]
# Create the string
outStr = comment_lines
outStr = writeF(fx, outStr=outStr)
outStr += "\n"
outStr = writeF(fz, outStr=outStr)
# Write file
f = open(file_name, "w")
f.write(outStr)
f.close()
def write_UBC(mesh, file_name, models=None, directory="", comment_lines=""):
"""Write 2D or 3D tensor mesh (and models) to UBC-GIF formatted file(s).
Parameters
----------
file_name : str or file name
full path for the output mesh file or just its name if directory is specified
models : dict of [str, (n_cells) numpy.ndarray], optional
The dictionary key is a string representing the model's name. Each model
is an (n_cells) array.
directory : str, optional
output directory
comment_lines : str, optional
            comment lines are preceded with '!'
"""
fname = os.path.join(directory, file_name)
if mesh.dim == 3:
mesh._writeUBC_3DMesh(fname, comment_lines=comment_lines)
elif mesh.dim == 2:
mesh._writeUBC_2DMesh(fname, comment_lines=comment_lines)
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
if models is None:
return
if not isinstance(models, dict):
raise TypeError("models must be a dict")
for key in models:
if not isinstance(key, str):
raise TypeError(
"The dict key must be a string representing the file name"
)
mesh.write_model_UBC(key, models[key], directory=directory)
# DEPRECATED
@classmethod
def readUBC(TensorMesh, file_name, directory=""):
"""*readUBC* has been deprecated and replaced by *read_UBC*"""
warnings.warn(
"TensorMesh.readUBC has been deprecated and will be removed in"
"discretize 1.0.0. please use TensorMesh.read_UBC",
DeprecationWarning,
)
return TensorMesh.read_UBC(file_name, directory)
readModelUBC = deprecate_method(
"read_model_UBC", "readModelUBC", removal_version="1.0.0", future_warn=False
)
writeUBC = deprecate_method("write_UBC", "writeUBC", removal_version="1.0.0", future_warn=False)
writeModelUBC = deprecate_method(
"write_model_UBC", "writeModelUBC", removal_version="1.0.0", future_warn=False
)
class TreeMeshIO(object):
"""Class for managing the input/output of tree meshes and models.
The ``TreeMeshIO`` class contains a set of class methods specifically
for the :class:`~discretize.TreeMesh` class. These include:
- Read/write tree meshes to file
- Read/write models defined on tree meshes
"""
@classmethod
def read_UBC(TreeMesh, meshFile, directory=""):
"""Read 3D tree mesh (OcTree mesh) from UBC-GIF formatted file.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted mesh file or just its name if directory is specified
directory : str, optional
directory where the UBC-GIF file lives
Returns
-------
discretize.TreeMesh
The tree mesh
"""
fname = os.path.join(directory, meshFile)
fileLines = np.genfromtxt(fname, dtype=str, delimiter="\n", comments="!")
nCunderMesh = np.array(fileLines[0].split("!")[0].split(), dtype=int)
tswCorn = np.array(fileLines[1].split("!")[0].split(), dtype=float)
smallCell = np.array(fileLines[2].split("!")[0].split(), dtype=float)
# Read the index array
indArr = np.genfromtxt(
(line.encode("utf8") for line in fileLines[4::]), dtype=np.int
)
nCunderMesh = nCunderMesh[: len(tswCorn)] # remove information related to core
hs = [np.ones(nr) * sz for nr, sz in zip(nCunderMesh, smallCell)]
origin = tswCorn
origin[-1] -= np.sum(hs[-1])
ls = np.log2(nCunderMesh).astype(int)
# if all ls are equal
if min(ls) == max(ls):
max_level = ls[0]
else:
max_level = min(ls) + 1
mesh = TreeMesh(hs, origin=origin)
levels = indArr[:, -1]
indArr = indArr[:, :-1]
        indArr -= 1  # shift from 1-based to 0-based indexing
indArr = 2 * indArr + levels[:, None] # get cell center index
indArr[:, -1] = 2 * nCunderMesh[-1] - indArr[:, -1] # switch direction of iz
levels = max_level - np.log2(levels) # calculate level
mesh.__setstate__((indArr, levels))
return mesh
def read_model_UBC(mesh, file_name):
"""Read UBC-GIF formatted file model file for 3D tree mesh (OcTree).
Parameters
----------
        file_name : str or list of str
            full path to the UBC-GIF formatted model file, or a list of such
            file paths
Returns
-------
(n_cells) numpy.ndarray or dict of [str, (n_cells) numpy.ndarray]
            The model defined on the mesh. If **file_name** is a ``list``, a
            dictionary of models indexed by file name is returned.
"""
if type(file_name) is list:
out = {}
for f in file_name:
out[f] = mesh.read_model_UBC(f)
return out
modArr = np.loadtxt(file_name)
ubc_order = mesh._ubc_order
# order_ubc will re-order from treemesh ordering to UBC ordering
# need the opposite operation
un_order = np.empty_like(ubc_order)
un_order[ubc_order] = np.arange(len(ubc_order))
model = modArr[un_order].copy() # ensure a contiguous array
return model
def write_UBC(mesh, file_name, models=None, directory=""):
"""Write OcTree mesh (and models) to UBC-GIF formatted files.
Parameters
----------
file_name : str
full path for the output mesh file or just its name if directory is specified
models : dict of [str, (n_cells) numpy.ndarray], optional
The dictionary key is a string representing the model's name.
Each model is a 1D numpy array of size (n_cells).
directory : str, optional
output directory (optional)
"""
uniform_hs = np.array([np.allclose(h, h[0]) for h in mesh.h])
if np.any(~uniform_hs):
raise Exception("UBC form does not support variable cell widths")
nCunderMesh = np.array([h.size for h in mesh.h], dtype=np.int64)
tswCorn = mesh.origin.copy()
tswCorn[-1] += np.sum(mesh.h[-1])
smallCell = np.array([h[0] for h in mesh.h])
nrCells = mesh.nC
indArr, levels = mesh._ubc_indArr
ubc_order = mesh._ubc_order
indArr = indArr[ubc_order]
levels = levels[ubc_order]
# Write the UBC octree mesh file
head = " ".join([f"{int(n)}" for n in nCunderMesh]) + " \n"
head += " ".join([f"{v:.4f}" for v in tswCorn]) + " \n"
head += " ".join([f"{v:.3f}" for v in smallCell]) + " \n"
head += f"{int(nrCells)}"
np.savetxt(file_name, np.c_[indArr, levels], fmt="%i", header=head, comments="")
# Print the models
if models is None:
return
if not isinstance(models, dict):
raise TypeError("models must be a dict")
for key in models:
if not isinstance(key, str):
raise TypeError(
"The dict key must be a string representing the file name"
)
mesh.write_model_UBC(key, models[key], directory=directory)
def write_model_UBC(mesh, file_name, model, directory=""):
"""Write 3D tree model (OcTree) to UBC-GIF formatted file.
Parameters
----------
file_name : str
full path for the output mesh file or just its name if directory is specified
model : (n_cells) numpy.ndarray
model values defined for each cell
directory : str
output directory (optional)
"""
if type(file_name) is list:
for f, m in zip(file_name, model):
mesh.write_model_UBC(f, m)
else:
ubc_order = mesh._ubc_order
fname = os.path.join(directory, file_name)
m = model[ubc_order]
np.savetxt(fname, m)
# DEPRECATED
@classmethod
def readUBC(TreeMesh, file_name, directory=""):
"""*readUBC* has been deprecated and replaced by *read_UBC*"""
warnings.warn(
"TensorMesh.readUBC has been deprecated and will be removed in"
"discretize 1.0.0. please use TensorMesh.read_UBC",
DeprecationWarning,
)
return TreeMesh.read_UBC(file_name, directory)
readModelUBC = deprecate_method(
"read_model_UBC", "readModelUBC", removal_version="1.0.0", future_warn=False
)
writeUBC = deprecate_method("write_UBC", "writeUBC", removal_version="1.0.0", future_warn=False)
writeModelUBC = deprecate_method(
"write_model_UBC", "writeModelUBC", removal_version="1.0.0", future_warn=False
)
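# Usage sketch (illustrative; file names are hypothetical): round-trip a
# tensor mesh and a model through the UBC-GIF format using the IO mixins
# defined above.
#
#     import numpy as np
#     import discretize
#
#     mesh = discretize.TensorMesh([np.ones(4), np.ones(5), np.ones(6)])
#     model = np.random.rand(mesh.nC)
#     mesh.write_UBC('mesh.msh', models={'model.mod': model})
#     mesh2 = discretize.TensorMesh.read_UBC('mesh.msh')
#     model2 = mesh2.read_model_UBC('model.mod')
#     assert np.allclose(model, model2)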
|
|
#!/usr/bin/env impala-python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from string import Template
import os
import shutil
import filecmp
import tempfile
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--noclean", action="store_true", default=False,
help="If specified, does not remove existing files and only replaces "
"them with freshly generated ones if they have changed.")
options, args = parser.parse_args()
# This script will generate two headers that describe all of the clang
# cross-compiled functions.
# The script outputs (run 'impala/common/function-registry/gen_functions.py'):
# - be/src/generated-sources/impala-ir/impala-ir-functions.h
# This file contains enums for all of the cross compiled functions
# - be/src/generated-sources/impala-ir/impala-ir-function-names.h
# This file contains a mapping of <string, enum>
# Mapping of enum to compiled function name. The compiled function name only has to
# be a substring of the actual, mangled compiler generated name.
# TODO: should we work out the mangling rules?
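# A minimal sketch of the matching rule described above (illustrative only):
# an entry's second field matches any mangled symbol that contains it as a
# substring.
#
#     def matches(registry_substring, mangled_symbol):
#         return registry_substring in mangled_symbol
#
#     assert matches("HllMerge", "prefixHllMergesuffix")  # hypothetical symbol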
ir_functions = [
["AGG_NODE_PROCESS_ROW_BATCH_WITH_GROUPING", "ProcessRowBatchWithGrouping"],
["AGG_NODE_PROCESS_ROW_BATCH_NO_GROUPING", "ProcessRowBatchNoGrouping"],
["PART_AGG_NODE_PROCESS_BATCH_TRUE", "ProcessBatch_true"],
["PART_AGG_NODE_PROCESS_BATCH_FALSE", "ProcessBatch_false"],
["PART_AGG_NODE_PROCESS_BATCH_NO_GROUPING", "ProcessBatchNoGrouping"],
["AVG_UPDATE_BIGINT", "9AvgUpdateIN10impala_udf9BigIntVal"],
["AVG_UPDATE_DOUBLE", "9AvgUpdateIN10impala_udf9DoubleVal"],
["AVG_UPDATE_TIMESTAMP", "TimestampAvgUpdate"],
["AVG_UPDATE_DECIMAL", "DecimalAvgUpdate"],
["AVG_MERGE", "8AvgMerge"],
["AVG_MERGE_DECIMAL", "DecimalAvgMerge"],
["CODEGEN_ANYVAL_STRING_VAL_EQ", "StringValEq"],
["CODEGEN_ANYVAL_STRING_VALUE_EQ", "StringValueEq"],
["CODEGEN_ANYVAL_TIMESTAMP_VAL_EQ", "TimestampValEq"],
["CODEGEN_ANYVAL_TIMESTAMP_VALUE_EQ", "TimestampValueEq"],
["EXPR_GET_BOOLEAN_VAL", "4Expr13GetBooleanVal"],
["EXPR_GET_TINYINT_VAL", "4Expr13GetTinyIntVal"],
["EXPR_GET_SMALLINT_VAL", "4Expr14GetSmallIntVal"],
["EXPR_GET_INT_VAL", "4Expr9GetIntVal"],
["EXPR_GET_BIGINT_VAL", "4Expr12GetBigIntVal"],
["EXPR_GET_FLOAT_VAL", "4Expr11GetFloatVal"],
["EXPR_GET_DOUBLE_VAL", "4Expr12GetDoubleVal"],
["EXPR_GET_STRING_VAL", "4Expr12GetStringVal"],
["EXPR_GET_TIMESTAMP_VAL", "4Expr15GetTimestampVal"],
["EXPR_GET_DECIMAL_VAL", "4Expr13GetDecimalVal"],
["HASH_CRC", "IrCrcHash"],
["HASH_FNV", "IrFnvHash"],
["HASH_MURMUR", "IrMurmurHash"],
["HASH_JOIN_PROCESS_BUILD_BATCH", "12HashJoinNode17ProcessBuildBatch"],
["HASH_JOIN_PROCESS_PROBE_BATCH", "12HashJoinNode17ProcessProbeBatch"],
["PHJ_PROCESS_BUILD_BATCH", "23PartitionedHashJoinNode17ProcessBuildBatch"],
["PHJ_PROCESS_PROBE_BATCH_INNER_JOIN", "ProcessProbeBatchILi0"],
["PHJ_PROCESS_PROBE_BATCH_LEFT_OUTER_JOIN", "ProcessProbeBatchILi1"],
["PHJ_PROCESS_PROBE_BATCH_LEFT_SEMI_JOIN", "ProcessProbeBatchILi2"],
["PHJ_PROCESS_PROBE_BATCH_LEFT_ANTI_JOIN", "ProcessProbeBatchILi3"],
["PHJ_PROCESS_PROBE_BATCH_NULL_AWARE_LEFT_ANTI_JOIN", "ProcessProbeBatchILi4"],
["PHJ_PROCESS_PROBE_BATCH_RIGHT_OUTER_JOIN", "ProcessProbeBatchILi5"],
["PHJ_PROCESS_PROBE_BATCH_RIGHT_SEMI_JOIN", "ProcessProbeBatchILi6"],
["PHJ_PROCESS_PROBE_BATCH_RIGHT_ANTI_JOIN", "ProcessProbeBatchILi7"],
["PHJ_PROCESS_PROBE_BATCH_FULL_OUTER_JOIN", "ProcessProbeBatchILi8"],
["HASH_TABLE_GET_HASH_SEED", "GetHashSeed"],
["HLL_UPDATE_BOOLEAN", "HllUpdateIN10impala_udf10BooleanVal"],
["HLL_UPDATE_TINYINT", "HllUpdateIN10impala_udf10TinyIntVal"],
["HLL_UPDATE_SMALLINT", "HllUpdateIN10impala_udf11SmallIntVal"],
["HLL_UPDATE_INT", "HllUpdateIN10impala_udf6IntVal"],
["HLL_UPDATE_BIGINT", "HllUpdateIN10impala_udf9BigIntVal"],
["HLL_UPDATE_FLOAT", "HllUpdateIN10impala_udf8FloatVal"],
["HLL_UPDATE_DOUBLE", "HllUpdateIN10impala_udf9DoubleVal"],
["HLL_UPDATE_STRING", "HllUpdateIN10impala_udf9StringVal"],
["HLL_UPDATE_TIMESTAMP", "HllUpdateIN10impala_udf12TimestampVal"],
["HLL_UPDATE_DECIMAL", "HllUpdateIN10impala_udf10DecimalVal"],
["HLL_MERGE", "HllMerge"],
["DECODE_AVRO_DATA", "DecodeAvroData"],
["READ_UNION_TYPE", "ReadUnionType"],
["READ_AVRO_BOOLEAN", "ReadAvroBoolean"],
["READ_AVRO_INT32", "ReadAvroInt32"],
["READ_AVRO_INT64", "ReadAvroInt64"],
["READ_AVRO_FLOAT", "ReadAvroFloat"],
["READ_AVRO_DOUBLE", "ReadAvroDouble"],
["READ_AVRO_STRING", "ReadAvroString"],
["READ_AVRO_VARCHAR", "ReadAvroVarchar"],
["READ_AVRO_CHAR", "ReadAvroChar"],
["HDFS_SCANNER_WRITE_ALIGNED_TUPLES", "WriteAlignedTuples"],
["HDFS_SCANNER_GET_CONJUNCT_CTX", "GetConjunctCtx"],
["STRING_TO_BOOL", "IrStringToBool"],
["STRING_TO_INT8", "IrStringToInt8"],
["STRING_TO_INT16", "IrStringToInt16"],
["STRING_TO_INT32", "IrStringToInt32"],
["STRING_TO_INT64", "IrStringToInt64"],
["STRING_TO_FLOAT", "IrStringToFloat"],
["STRING_TO_DOUBLE", "IrStringToDouble"],
["IS_NULL_STRING", "IrIsNullString"],
["GENERIC_IS_NULL_STRING", "IrGenericIsNullString"],
]
enums_preamble = '\
// Copyright 2012 Cloudera Inc.\n\
//\n\
// Licensed under the Apache License, Version 2.0 (the "License");\n\
// you may not use this file except in compliance with the License.\n\
// You may obtain a copy of the License at\n\
//\n\
// http://www.apache.org/licenses/LICENSE-2.0\n\
//\n\
// Unless required by applicable law or agreed to in writing, software\n\
// distributed under the License is distributed on an "AS IS" BASIS,\n\
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
// See the License for the specific language governing permissions and\n\
// limitations under the License.\n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef IMPALA_IR_FUNCTIONS_H\n\
#define IMPALA_IR_FUNCTIONS_H\n\
\n\
namespace impala {\n\
\n\
class IRFunction {\n\
public:\n\
enum Type {\n'
enums_epilogue = '\
};\n\
};\n\
\n\
}\n\
\n\
#endif\n'
names_preamble = '\
// Copyright 2012 Cloudera Inc.\n\
//\n\
// Licensed under the Apache License, Version 2.0 (the "License");\n\
// you may not use this file except in compliance with the License.\n\
// You may obtain a copy of the License at\n\
//\n\
// http://www.apache.org/licenses/LICENSE-2.0\n\
//\n\
// Unless required by applicable law or agreed to in writing, software\n\
// distributed under the License is distributed on an "AS IS" BASIS,\n\
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
// See the License for the specific language governing permissions and\n\
// limitations under the License.\n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef IMPALA_IR_FUNCTION_NAMES_H\n\
#define IMPALA_IR_FUNCTION_NAMES_H\n\
\n\
#include "impala-ir/impala-ir-functions.h"\n\
\n\
namespace impala {\n\
\n\
static struct {\n\
std::string fn_name; \n\
IRFunction::Type fn; \n\
} FN_MAPPINGS[] = {\n'
names_epilogue = '\
};\n\
\n\
}\n\
\n\
#endif\n'
def move_if_different(src_file, dest_file):
"""Moves src_file to dest_file if dest_file does not exist, or if
the contents of src_file and dest_file differ. Assumes that src_file exists."""
if not os.path.isfile(dest_file) or not filecmp.cmp(src_file, dest_file):
shutil.move(src_file, dest_file)
else:
print 'Retaining existing file: %s' % (dest_file)
BE_PATH = os.path.join(os.environ['IMPALA_HOME'], 'be/generated-sources/impala-ir/')
IR_FUNCTIONS_FILE = 'impala-ir-functions.h'
IR_NAMES_FILE = 'impala-ir-names.h'
IR_FUNCTIONS_PATH = os.path.join(BE_PATH, IR_FUNCTIONS_FILE)
IR_NAMES_PATH = os.path.join(BE_PATH, IR_NAMES_FILE)
TMP_IR_FUNCTIONS_PATH = os.path.join(tempfile.gettempdir(), IR_FUNCTIONS_FILE)
TMP_IR_NAMES_PATH = os.path.join(tempfile.gettempdir(), IR_NAMES_FILE)
if not os.path.exists(BE_PATH):
os.makedirs(BE_PATH)
if __name__ == "__main__":
print "Generating IR description files"
enums_file = open(TMP_IR_FUNCTIONS_PATH, 'w')
enums_file.write(enums_preamble)
names_file = open(TMP_IR_NAMES_PATH, 'w')
  names_file.write(names_preamble)
  idx = 0
enums_file.write(" FN_START = " + str(idx) + ",\n")
for fn in ir_functions:
enum = fn[0]
fn_name = fn[1]
enums_file.write(" " + enum + " = " + str(idx) + ",\n")
names_file.write(" { \"" + fn_name + "\", IRFunction::" + enum + " },\n")
    idx += 1
enums_file.write(" FN_END = " + str(idx) + "\n")
enums_file.write(enums_epilogue)
enums_file.close()
names_file.write(names_epilogue)
names_file.close()
# Conditionally move files from tmp to BE.
if options.noclean:
move_if_different(TMP_IR_FUNCTIONS_PATH, IR_FUNCTIONS_PATH)
move_if_different(TMP_IR_NAMES_PATH, IR_NAMES_PATH)
else:
shutil.move(TMP_IR_FUNCTIONS_PATH, IR_FUNCTIONS_PATH)
shutil.move(TMP_IR_NAMES_PATH, IR_NAMES_PATH)
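# Illustrative (abridged, hypothetical) excerpt of the generated headers:
#
#   // impala-ir-functions.h
#   enum Type {
#     FN_START = 0,
#     AGG_NODE_PROCESS_ROW_BATCH_WITH_GROUPING = 0,
#     AGG_NODE_PROCESS_ROW_BATCH_NO_GROUPING = 1,
#     ...
#     FN_END = <count>
#   };
#
#   // impala-ir-names.h
#   static struct { ... } FN_MAPPINGS[] = {
#     { "ProcessRowBatchWithGrouping", IRFunction::AGG_NODE_PROCESS_ROW_BATCH_WITH_GROUPING },
#     ...
#   };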
|
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pywintypes
import re
from win32com import client
from xml.etree import ElementTree
from xml.sax import saxutils
CBT_HARDENING_LEVEL_NONE = "none"
CBT_HARDENING_LEVEL_RELAXED = "relaxed"
CBT_HARDENING_LEVEL_STRICT = "strict"
LISTENER_PROTOCOL_HTTP = "HTTP"
LISTENER_PROTOCOL_HTTPS = "HTTPS"
class WinRMConfig(object):
_SERVICE_AUTH_URI = 'winrm/Config/Service/Auth'
_SERVICE_LISTENER_URI = ('winrm/Config/Listener?Address='
'%(address)s+Transport=%(protocol)s')
_SERVICE_CERTMAPPING_URI = ('winrm/Config/Service/certmapping?Issuer='
'%(issuer)s+Subject=%(subject)s+Uri=%(uri)s')
def _get_wsman_session(self):
wsman = client.Dispatch('WSMan.Automation')
return wsman.CreateSession()
    def _get_node_tag(self, tag):
        return re.match("^{.*}(.*)$", tag).group(1)
def _parse_listener_xml(self, data_xml):
if not data_xml:
return None
listening_on = []
data = {"ListeningOn": listening_on}
tree = ElementTree.fromstring(data_xml)
for node in tree:
tag = self._get_node_tag(node.tag)
if tag == "ListeningOn":
listening_on.append(node.text)
elif tag == "Enabled":
if node.text == "true":
value = True
else:
value = False
data[tag] = value
elif tag == "Port":
data[tag] = int(node.text)
else:
data[tag] = node.text
return data
def _parse_cert_mapping_xml(self, data_xml):
if not data_xml:
return None
data = {}
tree = ElementTree.fromstring(data_xml)
for node in tree:
tag = self._get_node_tag(node.tag)
if tag == "Enabled":
if node.text == "true":
value = True
else:
value = False
data[tag] = value
else:
data[tag] = node.text
return data
def _get_xml_bool(self, value):
if value:
return "true"
else:
return "false"
def _get_resource(self, resource_uri):
session = self._get_wsman_session()
try:
return session.Get(resource_uri)
except pywintypes.com_error as ex:
if len(ex.excepinfo) > 5 and ex.excepinfo[5] == -2144108544:
return None
else:
raise
def _delete_resource(self, resource_uri):
session = self._get_wsman_session()
session.Delete(resource_uri)
def _create_resource(self, resource_uri, data_xml):
session = self._get_wsman_session()
session.Create(resource_uri, data_xml)
def get_cert_mapping(self, issuer, subject, uri="*"):
resource_uri = self._SERVICE_CERTMAPPING_URI % {'issuer': issuer,
'subject': subject,
'uri': uri}
return self._parse_cert_mapping_xml(self._get_resource(resource_uri))
def delete_cert_mapping(self, issuer, subject, uri="*"):
resource_uri = self._SERVICE_CERTMAPPING_URI % {'issuer': issuer,
'subject': subject,
'uri': uri}
self._delete_resource(resource_uri)
def create_cert_mapping(self, issuer, subject, username, password,
uri="*", enabled=True):
resource_uri = self._SERVICE_CERTMAPPING_URI % {'issuer': issuer,
'subject': subject,
'uri': uri}
escaped_password = saxutils.escape(password)
escaped_username = saxutils.escape(username)
self._create_resource(
resource_uri,
'<p:certmapping xmlns:p="http://schemas.microsoft.com/wbem/wsman/'
'1/config/service/certmapping.xsd">'
'<p:Enabled>%(enabled)s</p:Enabled>'
'<p:Password>%(password)s</p:Password>'
'<p:UserName>%(username)s</p:UserName>'
'</p:certmapping>' % {'enabled': self._get_xml_bool(enabled),
'username': escaped_username,
'password': escaped_password})
def get_listener(self, protocol=LISTENER_PROTOCOL_HTTPS, address="*"):
resource_uri = self._SERVICE_LISTENER_URI % {'protocol': protocol,
'address': address}
return self._parse_listener_xml(self._get_resource(resource_uri))
def delete_listener(self, protocol=LISTENER_PROTOCOL_HTTPS, address="*"):
resource_uri = self._SERVICE_LISTENER_URI % {'protocol': protocol,
'address': address}
self._delete_resource(resource_uri)
def create_listener(self, protocol=LISTENER_PROTOCOL_HTTPS,
cert_thumbprint=None, address="*", enabled=True):
resource_uri = self._SERVICE_LISTENER_URI % {'protocol': protocol,
'address': address}
self._create_resource(
resource_uri,
'<p:Listener xmlns:p="http://schemas.microsoft.com/'
'wbem/wsman/1/config/listener.xsd">'
'<p:Enabled>%(enabled)s</p:Enabled>'
'<p:CertificateThumbPrint>%(cert_thumbprint)s'
'</p:CertificateThumbPrint>'
'<p:URLPrefix>wsman</p:URLPrefix>'
'</p:Listener>' % {"enabled": self._get_xml_bool(enabled),
"cert_thumbprint": cert_thumbprint})
def get_auth_config(self):
data = {}
data_xml = self._get_resource(self._SERVICE_AUTH_URI)
tree = ElementTree.fromstring(data_xml)
for node in tree:
tag = self._get_node_tag(node.tag)
value_str = node.text.lower()
if value_str == "true":
value = True
elif value_str == "false":
value = False
else:
value = value_str
data[tag] = value
return data
def set_auth_config(self, basic=None, kerberos=None, negotiate=None,
certificate=None, credSSP=None,
cbt_hardening_level=None):
tag_map = {'Basic': basic,
'Kerberos': kerberos,
'Negotiate': negotiate,
'Certificate': certificate,
'CredSSP': credSSP,
'CbtHardeningLevel': cbt_hardening_level}
session = self._get_wsman_session()
data_xml = session.Get(self._SERVICE_AUTH_URI)
ns = {'cfg':
'http://schemas.microsoft.com/wbem/wsman/1/config/service/auth'}
tree = ElementTree.fromstring(data_xml)
for (tag, value) in tag_map.items():
if value is not None:
node = tree.find('.//cfg:%s' % tag, namespaces=ns)
new_value = self._get_xml_bool(value)
if node.text.lower() != new_value:
node.text = new_value
data_xml = ElementTree.tostring(tree)
session.Put(self._SERVICE_AUTH_URI, data_xml)
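# Minimal usage sketch (hypothetical values; assumes pywin32 in an elevated
# Windows session, and is not part of the original module):
#
#   config = WinRMConfig()
#   if not config.get_listener(protocol=LISTENER_PROTOCOL_HTTPS):
#       config.create_listener(protocol=LISTENER_PROTOCOL_HTTPS,
#                              cert_thumbprint="<certificate thumbprint>")
#   config.set_auth_config(basic=False, certificate=True,
#                          cbt_hardening_level=CBT_HARDENING_LEVEL_RELAXED)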
|
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from pylab import *
from numpy import fft
from numpy import linalg
from scipy import integrate
from scipy import interpolate
from numpy.polynomial import chebyshev
from mpl_toolkits.mplot3d import Axes3D
import os
from matplotlib import rc
rc("text", usetex=True)
from mpltools import style
style.use('ggplot')
fig = figure(figsize=(5, 4))
ax = fig.add_subplot(111, projection='3d')
# -----------------------------------------------------------------------------
# Plotting
# --------
SHOW_PLT = True
SAVE_PLT = False
PLT_NAME = ""
PLOT_NPTS = 1E2
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"paper/figures", PLT_NAME) + ".pdf"
# -----------------------------------------------------------------------------
# Simulations
# -----------
T = 0.5
SIM_NSTEPS = T/1E-2
SIM_NPTS = 10
METHOD = ["remove_lowest"][0]
SHAPE = ["sphere"][0]
FFT_NDIM = 2
FFT_AXES = (0, 1)
# -----------------------------------------------------------------------------
# Methods
# -------
def remove_lowest(shape, dim=1):
    x_min = amin(real(fft.ifft2(change_npts(shape.x_hat, 1E2 * ones(FFT_NDIM)),
                                axes=FFT_AXES))[...,dim])
g = 1/(5*absolute(shape.x[...,dim] - x_min) + 0.5) - 0.5
g[g<0] = 0
return g
method = {"remove_lowest": remove_lowest}[METHOD]
def sphere(s):
    # standard spherical parametrization, so that x**2 + y**2 + z**2 == 1
    x = ( cos(s[0]) * sin(s[1]) )[...,newaxis]
    y = ( sin(s[0]) * sin(s[1]) )[...,newaxis]
    z = ( cos(s[1]) )[...,newaxis]
return SpectralShape(concatenate([x, y, z], axis=-1))
shape_func = {"sphere": sphere}[SHAPE]
# -----------------------------------------------------------------------------
def insert_dims(mat, index, n):
for _ in arange(n):
mat = expand_dims(mat, index)
return mat
def vec_to_mgrid(vec, dim, n):
vec = insert_dims(vec, 0, dim-1)
return insert_dims(vec, -1, n - dim)
def vnorm(a):
return sqrt(sum(a**2, axis=-1))
def vdot(a, b):
return sum(a * b, axis=-1)
def vcross(a, b):
return cross(a, b)
def e_vec(i, npts):
vec = zeros(npts)
vec[i] = 1
return vec
def change_npts(x_hat, npts):
npts_old = x_hat.shape[:-1]
for i in arange(FFT_NDIM):
if npts[i] > npts_old[i]:
            x_hat = insert(x_hat, int(npts_old[i] / 2) * ones(int(npts[i] - npts_old[i]), dtype=int), 0, axis=i)
else:
x_hat = take(x_hat, indices=fft_k(npts[i]), axis=i)
return (prod(npts) / prod(npts_old)) * x_hat
def fft_k(npts):
if size(npts) == 1:
return hstack([arange(npts/2+1, dtype=int), arange(-npts/2+1, 0, dtype=int)])
else:
k = [fft_k(npts_i) for npts_i in npts]
return meshgrid(*k)
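# e.g. fft_k(8) returns [0, 1, 2, 3, 4, -3, -2, -1]; note the Nyquist mode is
# kept positive here, unlike numpy's usual [0, 1, 2, 3, -4, -3, -2, -1] order.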
def fft_s(npts):
if size(npts) == 1:
s = linspace(0, 2*pi, npts, endpoint=False)
return s + (s[1] - s[0])/2
else:
s = [fft_s(npts_i) for npts_i in npts]
return meshgrid(*s)
def spectral_derivative(x_hat, p):
npts = x_hat.shape[:-1]
for i in arange(FFT_NDIM):
k = fft_k(npts[i])
        w = (1j*k)**p[i]
        if p[i] % 2 == 1:
            w[npts[i] // 2] = 0
        # multiply out-of-place so the caller's spectrum is not mutated
        x_hat = x_hat * vec_to_mgrid(w, i, FFT_NDIM)
return x_hat
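# Sketch of the intent (assumed from the code above): for a signal sampled on
# fft_s(n), multiplying the FFT by (1j*k)**p and inverting gives the p-th
# spectral derivative, e.g. sin(s) -> cos(s) for p = 1; odd derivatives zero
# the Nyquist coefficient so that the inverse transform stays real.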
def plot_spectral(x_hat):
x_fine = real(fft.ifft2(change_npts(x_hat, PLOT_NPTS * ones(FFT_NDIM)), axes=FFT_AXES))
s_fine = fft_s(x_fine.shape)
    contour(s_fine[...,0], s_fine[...,1], x_fine)
class SpectralShape(object):
def __init__(self, x):
self.x = x
def shape(self):
return self.x_hat.shape
@property
def x(self):
return real(fft.ifft2(self.x_hat, axes=FFT_AXES))
@x.setter
def x(self, value):
self.x_hat = fft.fft2(value, axes=FFT_AXES)
def x_dot(self, p=1):
return concatenate([real(fft.ifft2(spectral_derivative(self.x_hat,
p=e_vec(i, FFT_NDIM)), axes=FFT_AXES))[...,newaxis] for i in arange(FFT_NDIM)], axis=-1)
def surface_normal(self):
x_dot = self.x_dot(p=1)
x_dot_n = vcross(x_dot[...,0], x_dot[...,1])
print(vnorm(x_dot_n))
exit()
x_dot_n /= vnorm(x_dot_n)[...,newaxis]
return x_dot_n
# def surface_tangent(self):
# x_dot = real(fft.ifft2(spectral_derivative(self.x_hat, n=1)))
# x_dot /= vnorm(x_dot)[:,newaxis]
# return x_dot
# def centroid(self):
# x_dot = irdft(spectral_derivative(self.x_hat, n=1))
# area_hat = rdft(-sign(x_dot[:,0])*self.x[:,1])
# xy_hat = rdft(-sign(x_dot[:,0])*self.x[:,0]*self.x[:,1])
# yx_hat = rdft(sign(x_dot[:,1])*self.x[:,0]*self.x[:,1])
# def curvature(self):
# x_dot = irdft(spectral_derivative(self.x_hat, n=1))
# x_ddot = irdft(spectral_derivative(self.x_hat, n=2))
# k = vcross(x_dot, x_ddot) / vnorm(x_dot)**3
# return k
# def dxdt(self, method):
# g = method(self)
# dx_hatdt = rdft(g[:,newaxis] * self.surface_normal())
# x_ddot = irdft(spectral_derivative(self.x_hat, n=2))
# a_t = vdot(x_ddot, self.surface_tangent())
# a_t *= norm(g) / norm(a_t)
# dx_hatdt += rdft(a_t[:,newaxis] * self.surface_tangent())
    def dxdt(self, method):
        return self.surface_normal()
def plot(self, label=None):
x_fine = real(fft.ifft2(change_npts(self.x_hat, PLOT_NPTS * ones(FFT_NDIM)), axes=FFT_AXES))
ax.plot_surface(x_fine[...,0], x_fine[...,1], x_fine[...,2], rstride=int(PLOT_NPTS/10), cstride=int(PLOT_NPTS/10))
# ax.scatter(x_fine[...,0], x_fine[...,1], x_fine[...,2], c='k')
axis('equal')
# -----------------------------------------------------------------------------
def run_simulation(shape, t_steps, method):
def func(x, t):
shape.x = x.reshape(-1,2)
return shape.dxdt(method).flatten()
x_simulation = integrate.odeint(func, shape.x.flatten(), t_steps)
x_simulation = x_simulation.reshape(size(t_steps), -1, 2)
for i in arange(SIM_NSTEPS, step=int(SIM_NSTEPS/4), dtype=int):
shape.x = x_simulation[i]
shape.plot(label="t = {:.2f}".format(t_steps[i]))
legend()
savefig(filename)
show()
s = fft_s(SIM_NPTS * ones(FFT_NDIM))
shape = shape_func(s)
# shape.surface_normal()
shape.plot()
show()
# t = linspace(0, T, SIM_NSTEPS)
# run_simulation(shape, t, method)
|
|
import itertools
import time
from math import floor
from level import FakeChunk, MCLevel
import logging
from materials import pocketMaterials
import os
from mclevelbase import ChunkNotPresent, ChunkMalformed
import nbt
import numpy
import struct
from infiniteworld import ChunkedLevelMixin, SessionLockLost, AnvilChunkData
from level import LightedChunk
from contextlib import contextmanager
from pymclevel import entity, BoundingBox, Entity, TileEntity
logger = logging.getLogger(__name__)
leveldb_available = True
leveldb_mcpe = None
try:
import leveldb_mcpe
leveldb_mcpe.Options()
except Exception as e:
leveldb_available = False
logger.info("Error while trying to import leveldb_mcpe, starting without PE support ({0})".format(e))
leveldb_mcpe = None
def loadNBTCompoundList(data, littleEndian=True):
"""
Loads a list of NBT Compound tags from a bunch of data.
Uses sep to determine where the next Compound tag starts.
:param data: str, the NBT to load from
:param littleEndian: bool. Determines endianness
:return: list of TAG_Compounds
"""
if type(data) is unicode:
data = str(data)
def load(_data):
sep = "\x00\x00\x00\x00\n"
sep_data = _data.split(sep)
compounds = []
for d in sep_data:
if len(d) != 0:
if not d.startswith("\n"):
d = "\n" + d
tag = (nbt.load(buf=(d + '\x00\x00\x00\x00')))
compounds.append(tag)
return compounds
if littleEndian:
with nbt.littleEndianNBT():
return load(data)
else:
return load(data)
def TagProperty(tagName, tagType, default_or_func=None):
"""
Copied from infiniteworld.py. Custom property object to handle NBT-tag properties.
:param tagName: str, Name of the NBT-tag
:param tagType: int, (nbt.TAG_TYPE) Type of the NBT-tag
:param default_or_func: function or default value. If function, function should return the default.
:return: property
"""
def getter(self):
if tagName not in self.root_tag:
if hasattr(default_or_func, "__call__"):
default = default_or_func(self)
else:
default = default_or_func
self.root_tag[tagName] = tagType(default)
return self.root_tag[tagName].value
def setter(self, val):
self.root_tag[tagName] = tagType(value=val)
return property(getter, setter)
class PocketLeveldbDatabase(object):
"""
Not to be confused with leveldb_mcpe.DB
A PocketLeveldbDatabase is an interface around leveldb_mcpe.DB, providing various functions
to load/write chunk data, and access the level.dat file.
The leveldb_mcpe.DB object handles the actual leveldb database.
To access the actual database, world_db() should be called.
"""
holdDatabaseOpen = True
_world_db = None
@contextmanager
def world_db(self):
"""
Opens a leveldb and keeps it open until editing finished.
:yield: DB
"""
if PocketLeveldbDatabase.holdDatabaseOpen:
if self._world_db is None:
self._world_db = leveldb_mcpe.DB(self.options, os.path.join(str(self.path), 'db'))
yield self._world_db
pass
else:
db = leveldb_mcpe.DB(self.options, os.path.join(str(self.path), 'db'))
yield db
del db
def __init__(self, path, create=False):
"""
:param path: string, path to file
:return: None
"""
self.path = path
if not os.path.exists(path):
file(path, 'w').close()
self.options = leveldb_mcpe.Options()
self.writeOptions = leveldb_mcpe.WriteOptions()
self.readOptions = leveldb_mcpe.ReadOptions()
if create:
            self.options.create_if_missing = True  # The database will be created when first needed.
return
needsRepair = False
try:
with self.world_db() as db:
it = db.NewIterator(self.readOptions)
it.SeekToFirst()
if not db.Get(self.readOptions, it.key()) == it.value():
needsRepair = True
it.status()
del it
except RuntimeError as err:
logger.error("Error while opening world database from %s (%s)" % path, err)
needsRepair = True
if needsRepair:
logger.info("Trying to repair world %s", path)
try:
leveldb_mcpe.RepairWrapper(os.path.join(path, 'db'))
except RuntimeError as err:
logger.error("Error while repairing world %s %s" % path, err)
def close(self):
"""
Should be called before deleting this instance of the level.
Not calling this method may result in corrupted worlds
:return: None
"""
if PocketLeveldbDatabase.holdDatabaseOpen:
if self._world_db is not None:
del self._world_db
self._world_db = None
def _readChunk(self, cx, cz, readOptions=None):
"""
:param cx, cz: int Coordinates of the chunk
:param readOptions: ReadOptions
:return: None
"""
key = struct.pack('<i', cx) + struct.pack('<i', cz)
with self.world_db() as db:
rop = self.readOptions if readOptions is None else readOptions
# Only way to see if value exists is by failing db.Get()
try:
terrain = db.Get(rop, key + "0")
except RuntimeError:
return None
try:
tile_entities = db.Get(rop, key + "1")
except RuntimeError:
tile_entities = None
try:
entities = db.Get(rop, key + "2")
except RuntimeError:
entities = None
if len(terrain) != 83200:
raise ChunkMalformed(str(len(terrain)))
logger.debug("CHUNK LOAD %s %s", cx, cz)
return terrain, tile_entities, entities
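    # Key layout sketch (derived from _readChunk above): chunk (1, -1) uses the
    # 8-byte little-endian prefix struct.pack('<i', 1) + struct.pack('<i', -1),
    # i.e. '\x01\x00\x00\x00\xff\xff\xff\xff', plus a trailing tag byte:
    # "0" = terrain, "1" = tile entities, "2" = entities.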
def saveChunk(self, chunk, batch=None, writeOptions=None):
"""
:param chunk: PocketLeveldbChunk
:param batch: WriteBatch
:param writeOptions: WriteOptions
:return: None
"""
cx, cz = chunk.chunkPosition
data = chunk.savedData()
key = struct.pack('<i', cx) + struct.pack('<i', cz)
if batch is None:
with self.world_db() as db:
wop = self.writeOptions if writeOptions is None else writeOptions
db.Put(wop, key + "0", data[0])
if data[1] is not None:
db.Put(wop, key + "1", data[1])
if data[2] is not None:
db.Put(wop, key + "2", data[2])
else:
batch.Put(key + "0", data[0])
if data[1] is not None:
batch.Put(key + "1", data[1])
if data[2] is not None:
batch.Put(key + "2", data[2])
def loadChunk(self, cx, cz, world):
"""
:param cx, cz: int Coordinates of the chunk
:param world: PocketLeveldbWorld
:return: PocketLeveldbChunk
"""
data = self._readChunk(cx, cz)
if data is None:
raise ChunkNotPresent((cx, cz, self))
chunk = PocketLeveldbChunk(cx, cz, world, data)
return chunk
_allChunks = None
def deleteChunk(self, cx, cz, batch=None, writeOptions=None):
if batch is None:
with self.world_db() as db:
key = struct.pack('<i', cx) + struct.pack('<i', cz) + "0"
wop = self.writeOptions if writeOptions is None else writeOptions
db.Delete(wop, key)
else:
key = struct.pack('<i', cx) + struct.pack('<i', cz) + "0"
batch.Delete(key)
logger.debug("DELETED CHUNK %s %s", cx, cz)
def getAllChunks(self, readOptions=None):
"""
Returns a list of all chunks that have terrain data in the database.
Chunks with only Entities or TileEntities are ignored.
:param readOptions: ReadOptions
:return: list
"""
with self.world_db() as db:
allChunks = []
rop = self.readOptions if readOptions is None else readOptions
it = db.NewIterator(rop)
it.SeekToFirst()
while it.Valid():
key = it.key()
raw_x = key[0:4]
raw_z = key[4:8]
t = key[8]
if t == "0":
cx, cz = struct.unpack('<i', raw_x), struct.unpack('<i', raw_z)
allChunks.append((cx[0], cz[0]))
it.Next()
it.status() # All this does is cause an exception if something went wrong. Might be unneeded?
del it
return allChunks
def getAllPlayerData(self, readOptions=None):
"""
Returns the raw NBT data of all players in the database.
Every player is stored as player_<player-id>. The single-player player is stored as ~local_player
:param readOptions:
        :return: dictionary mapping player-id keys to raw player NBT data (str)
"""
with self.world_db() as db:
allPlayers = {}
rop = self.readOptions if readOptions is None else readOptions
it = db.NewIterator(rop)
it.SeekToFirst()
while it.Valid():
key = it.key()
if key == "~local_player": # Singleplayer
allPlayers[key] = it.value()
elif key.startswith('player_'): # Multiplayer player
allPlayers[key] = it.value()
it.Next()
it.status()
del it
return allPlayers
def savePlayer(self, player, playerData, batch=None, writeOptions=None):
if writeOptions is None:
writeOptions = self.writeOptions
if batch is None:
with self.world_db() as db:
db.Put(writeOptions, player, playerData)
else:
batch.Put(player, playerData)
class InvalidPocketLevelDBWorldException(Exception):
pass
class PocketLeveldbWorld(ChunkedLevelMixin, MCLevel):
Height = 128
Width = 0
Length = 0
isInfinite = True
materials = pocketMaterials
noTileTicks = True
_bounds = None
oldPlayerFolderFormat = False
_allChunks = None # An array of cx, cz pairs.
_loadedChunks = {} # A dictionary of actual PocketLeveldbChunk objects mapped by (cx, cz)
_playerData = None
playerTagCache = {}
_playerList = None
@property
def LevelName(self):
if "LevelName" not in self.root_tag:
with open(os.path.join(self.worldFile.path, "levelname.txt"), 'r') as f:
name = f.read()
            if not name:
name = os.path.basename(self.worldFile.path)
self.root_tag["LevelName"] = name
return self.root_tag["LevelName"]
@LevelName.setter
def LevelName(self, name):
self.root_tag["LevelName"] = nbt.TAG_String(value=name)
with open(os.path.join(self.worldFile.path, "levelname.txt"), 'w') as f:
f.write(name)
@property
def allChunks(self):
"""
:return: list with all chunks in the world.
"""
if self._allChunks is None:
self._allChunks = self.worldFile.getAllChunks()
return self._allChunks
@property
def players(self):
if self._playerList is None:
self._playerList = []
for key in self.playerData.keys():
self._playerList.append(key)
return self._playerList
@property
def playerData(self):
if self._playerData is None:
self._playerData = self.worldFile.getAllPlayerData()
return self._playerData
@staticmethod
def getPlayerPath(player, dim=0):
"""
player.py loads players from files, but PE caches them differently. This is necessary to make it work.
:param player: str
:param dim: int
:return: str
"""
if dim == 0:
return player
def __init__(self, filename=None, create=False, random_seed=None, last_played=None, readonly=False):
"""
:param filename: path to the root dir of the level
:return:
"""
if not os.path.isdir(filename):
filename = os.path.dirname(filename)
self.filename = filename
self.worldFile = PocketLeveldbDatabase(filename, create=create)
self.readonly = readonly
self.loadLevelDat(create, random_seed, last_played)
def _createLevelDat(self, random_seed, last_played):
"""
Creates a new level.dat root_tag, and puts it in self.root_tag.
To write it to the disk, self.save() should be called.
:param random_seed: long
:param last_played: long
:return: None
"""
with nbt.littleEndianNBT():
root_tag = nbt.TAG_Compound()
root_tag["SpawnX"] = nbt.TAG_Int(0)
root_tag["SpawnY"] = nbt.TAG_Int(2)
root_tag["SpawnZ"] = nbt.TAG_Int(0)
if last_played is None:
                last_played = long(time.time() * 1000)  # milliseconds, matching the LastPlayed TagProperty default
if random_seed is None:
random_seed = long(numpy.random.random() * 0xffffffffffffffffL) - 0x8000000000000000L
self.root_tag = root_tag
self.LastPlayed = long(last_played)
self.RandomSeed = long(random_seed)
self.SizeOnDisk = 0
self.Time = 1
self.LevelName = os.path.basename(self.worldFile.path)
def loadLevelDat(self, create=False, random_seed=None, last_played=None):
"""
Loads the level.dat from the worldfolder.
:param create: bool. If it's True, a fresh level.dat will be created instead.
:param random_seed: long
:param last_played: long
:return: None
"""
def _loadLevelDat(filename):
root_tag_buf = open(filename, 'rb').read()
magic, length, root_tag_buf = root_tag_buf[:4], root_tag_buf[4:8], root_tag_buf[8:]
if struct.Struct('<i').unpack(magic)[0] < 3:
logger.info("Found an old level.dat file. Aborting world load")
raise InvalidPocketLevelDBWorldException() # Maybe try convert/load old PE world?
if len(root_tag_buf) != struct.Struct('<i').unpack(length)[0]:
raise nbt.NBTFormatError()
self.root_tag = nbt.load(buf=root_tag_buf)
if create:
self._createLevelDat(random_seed, last_played)
return
try:
with nbt.littleEndianNBT():
_loadLevelDat(os.path.join(self.worldFile.path, "level.dat"))
return
except (nbt.NBTFormatError, IOError) as err:
logger.info("Failed to load level.dat, trying to load level.dat_old ({0})".format(err))
try:
with nbt.littleEndianNBT():
_loadLevelDat(os.path.join(self.worldFile.path, "level.dat_old"))
return
except (nbt.NBTFormatError, IOError) as err:
logger.info("Failed to load level.dat_old, creating new level.dat ({0})".format(err))
self._createLevelDat(random_seed, last_played)
# --- NBT Tag variables ---
SizeOnDisk = TagProperty('SizeOnDisk', nbt.TAG_Int, 0)
RandomSeed = TagProperty('RandomSeed', nbt.TAG_Long, 0)
# TODO PE worlds have a different day length, this has to be changed to that.
Time = TagProperty('Time', nbt.TAG_Long, 0)
LastPlayed = TagProperty('LastPlayed', nbt.TAG_Long, lambda self: long(time.time() * 1000))
GeneratorName = TagProperty('Generator', nbt.TAG_String, 'Infinite')
GameType = TagProperty('GameType', nbt.TAG_Int, 0)
def defaultDisplayName(self):
return os.path.basename(os.path.dirname(self.filename))
def __str__(self):
"""
How to represent this level
:return: str
"""
return "PocketLeveldbWorld(\"%s\")" % os.path.basename(os.path.dirname(self.worldFile.path))
def getChunk(self, cx, cz):
"""
Used to obtain a chunk from the database.
:param cx, cz: cx, cz coordinates of the chunk
:return: PocketLeveldbChunk
"""
c = self._loadedChunks.get((cx, cz))
if c is None:
c = self.worldFile.loadChunk(cx, cz, self)
self._loadedChunks[(cx, cz)] = c
return c
def unload(self):
"""
Unload all chunks and close all open file-handlers.
"""
self._loadedChunks.clear()
self._allChunks = None
self.worldFile.close()
def close(self):
"""
Unload all chunks and close all open file-handlers. Discard any unsaved data.
"""
self.unload()
try:
pass # Setup a way to close a work-folder?
except SessionLockLost:
pass
def deleteChunk(self, cx, cz, batch=None):
"""
Deletes a chunk at given cx, cz. Deletes using the batch if batch is given, uses world_db() otherwise.
:param cx, cz Coordinates of the chunk
:param batch WriteBatch
:return: None
"""
self.worldFile.deleteChunk(cx, cz, batch=batch)
if self._loadedChunks is not None and (cx, cz) in self._loadedChunks: # Unnecessary check?
del self._loadedChunks[(cx, cz)]
self.allChunks.remove((cx, cz))
def deleteChunksInBox(self, box):
"""
Deletes all chunks in a given box.
:param box pymclevel.box.BoundingBox
:return: None
"""
logger.info(u"Deleting {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz),
((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
i = 0
ret = []
batch = leveldb_mcpe.WriteBatch()
for cx, cz in itertools.product(xrange(box.mincx, box.maxcx), xrange(box.mincz, box.maxcz)):
i += 1
if self.containsChunk(cx, cz):
self.deleteChunk(cx, cz, batch=batch)
ret.append((cx, cz))
assert not self.containsChunk(cx, cz), "Just deleted {0} but it didn't take".format((cx, cz))
if i % 100 == 0:
logger.info(u"Chunk {0}...".format(i))
with self.worldFile.world_db() as db:
wop = self.worldFile.writeOptions
db.Write(wop, batch)
del batch
return ret
@property
def bounds(self):
"""
Returns a boundingbox containing the entire level
:return: pymclevel.box.BoundingBox
"""
if self._bounds is None:
self._bounds = self._getWorldBounds()
return self._bounds
@property
def size(self):
return self.bounds.size
def _getWorldBounds(self):
if len(self.allChunks) == 0:
return BoundingBox((0, 0, 0), (0, 0, 0))
allChunks = numpy.array(list(self.allChunks))
min_cx = (allChunks[:, 0]).min()
max_cx = (allChunks[:, 0]).max()
min_cz = (allChunks[:, 1]).min()
max_cz = (allChunks[:, 1]).max()
origin = (min_cx << 4, 0, min_cz << 4)
size = ((max_cx - min_cx + 1) << 4, self.Height, (max_cz - min_cz + 1) << 4)
return BoundingBox(origin, size)
@classmethod
def _isLevel(cls, filename):
"""
        Determines whether the path in filename contains a Pocket Edition 0.9.0 or later world.
        :param filename: string, path to the level root directory.
"""
clp = ("db", "level.dat")
if not os.path.isdir(filename):
f = os.path.basename(filename)
if f not in clp:
return False
filename = os.path.dirname(filename)
return all([os.path.exists(os.path.join(filename, fl)) for fl in clp])
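    # e.g. both ".../myworld/db" and ".../myworld/level.dat" resolve to the
    # world root ".../myworld", which must contain both a "db" entry and a
    # "level.dat" file for this to return True (paths illustrative).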
def saveInPlaceGen(self):
"""
Save all chunks to the database, and write the root_tag back to level.dat.
"""
self.saving = True
batch = leveldb_mcpe.WriteBatch()
dirtyChunkCount = 0
for chunk in self._loadedChunks.itervalues():
if chunk.dirty:
dirtyChunkCount += 1
self.worldFile.saveChunk(chunk, batch=batch)
chunk.dirty = False
yield
with nbt.littleEndianNBT():
for p in self.players:
playerData = self.playerTagCache[p]
if playerData is not None:
playerData = playerData.save(compressed=False) # It will get compressed in the DB itself
self.worldFile.savePlayer(p, playerData, batch=batch)
with self.worldFile.world_db() as db:
wop = self.worldFile.writeOptions
db.Write(wop, batch)
self.saving = False
logger.info(u"Saved {0} chunks to the database".format(dirtyChunkCount))
path = os.path.join(self.worldFile.path, 'level.dat')
with nbt.littleEndianNBT():
rootTagData = self.root_tag.save(compressed=False)
rootTagData = struct.Struct('<i').pack(4) + struct.Struct('<i').pack(len(rootTagData)) + rootTagData
with open(path, 'w') as f:
f.write(rootTagData)
def containsChunk(self, cx, cz):
"""
Determines if the chunk exist in this world.
:param cx, cz: int, Coordinates of the chunk
:return: bool (if chunk exists)
"""
return (cx, cz) in self.allChunks
def createChunk(self, cx, cz):
"""
Creates an empty chunk at given cx, cz coordinates, and stores it in self._loadedChunks
:param cx, cz: int, Coordinates of the chunk
:return:
"""
if self.containsChunk(cx, cz):
raise ValueError("{0}:Chunk {1} already present!".format(self, (cx, cz)))
if self.allChunks is not None:
self.allChunks.append((cx, cz))
self._loadedChunks[(cx, cz)] = PocketLeveldbChunk(cx, cz, self, create=True)
self._bounds = None
def saveGeneratedChunk(self, cx, cz, tempChunkBytes):
"""
Chunks get generated using Anvil generation. This is a (slow) way of importing anvil chunk bytes
and converting them to MCPE chunk data. Could definitely use some improvements, but at least it works.
:param cx, cx: Coordinates of the chunk
:param tempChunkBytes: str. Raw MCRegion chunk data.
:return:
"""
loaded_data = nbt.load(buf=tempChunkBytes)
class fake:
def __init__(self):
self.Height = 128
tempChunk = AnvilChunkData(fake(), (0, 0), loaded_data)
if not self.containsChunk(cx, cz):
self.createChunk(cx, cz)
chunk = self.getChunk(cx, cz)
chunk.Blocks = numpy.array(tempChunk.Blocks, dtype='uint16')
chunk.Data = numpy.array(tempChunk.Data, dtype='uint8')
chunk.SkyLight = numpy.array(tempChunk.SkyLight, dtype='uint8')
chunk.BlockLight = numpy.array(tempChunk.BlockLight, dtype='uint8')
chunk.dirty = True
self.worldFile.saveChunk(chunk)
else:
logger.info("Tried to import generated chunk at %s, %s but the chunk already existed." % cx, cz)
@property
def chunksNeedingLighting(self):
"""
Generator containing all chunks that need lighting.
:yield: int (cx, cz) Coordinates of the chunk
"""
for chunk in self._loadedChunks.itervalues():
if chunk.needsLighting:
yield chunk.chunkPosition
# -- Entity Stuff --
# A lot of this code got copy-pasted from MCInfDevOldLevel
# Slight modifications to make it work with MCPE
def getTileEntitiesInBox(self, box):
"""
Returns the Tile Entities in given box.
:param box: pymclevel.box.BoundingBox
:return: list of nbt.TAG_Compound
"""
tileEntites = []
for chunk, slices, point in self.getChunkSlices(box):
tileEntites += chunk.getTileEntitiesInBox(box)
return tileEntites
def getEntitiesInBox(self, box):
"""
Returns the Entities in given box.
:param box: pymclevel.box.BoundingBox
:return: list of nbt.TAG_Compound
"""
entities = []
for chunk, slices, point in self.getChunkSlices(box):
entities += chunk.getEntitiesInBox(box)
return entities
def getTileTicksInBox(self, box):
"""
Always returns None, as MCPE has no TileTicks.
:param box: pymclevel.box.BoundingBox
:return: list
"""
return []
def addEntity(self, entityTag):
"""
Adds an entity to the level.
:param entityTag: nbt.TAG_Compound containing the entity's data.
:return:
"""
assert isinstance(entityTag, nbt.TAG_Compound)
x, y, z = map(lambda p: int(floor(p)), Entity.pos(entityTag))
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return
chunk.addEntity(entityTag)
chunk.dirty = True
def addTileEntity(self, tileEntityTag):
"""
Adds an entity to the level.
:param tileEntityTag: nbt.TAG_Compound containing the Tile entity's data.
:return:
"""
assert isinstance(tileEntityTag, nbt.TAG_Compound)
if 'x' not in tileEntityTag:
return
x, y, z = TileEntity.pos(tileEntityTag)
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return
chunk.addTileEntity(tileEntityTag)
chunk.dirty = True
def addTileTick(self, tickTag):
"""
MCPE doesn't have Tile Ticks, so this can't be added.
:param tickTag: nbt.TAG_Compound
:return: None
"""
return
def tileEntityAt(self, x, y, z):
"""
Retrieves a tile tick at given x, y, z coordinates
:param x: int
:param y: int
:param z: int
:return: nbt.TAG_Compound or None
"""
chunk = self.getChunk(x >> 4, z >> 4)
return chunk.tileEntityAt(x, y, z)
def removeEntitiesInBox(self, box):
"""
Removes all entities in given box
:param box: pymclevel.box.BoundingBox
:return: int, count of entities removed
"""
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeEntitiesInBox(box)
logger.info("Removed {0} entities".format(count))
return count
def removeTileEntitiesInBox(self, box):
"""
Removes all tile entities in given box
:param box: pymclevel.box.BoundingBox
:return: int, count of tile entities removed
"""
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeTileEntitiesInBox(box)
logger.info("Removed {0} tile entities".format(count))
return count
def removeTileTicksInBox(self, box):
"""
MCPE doesn't have TileTicks, so this does nothing.
:param box: pymclevel.box.BoundingBox
:return: int, count of TileTicks removed.
"""
return 0
# -- Player and spawn stuff
def playerSpawnPosition(self, player=None):
"""
        Returns the default spawn position for the world. If player is given, the player's spawn is returned instead.
:param player: nbt.TAG_Compound, root tag of the player.
:return: tuple int (x, y, z), coordinates of the spawn.
"""
dataTag = self.root_tag
if player is None:
playerSpawnTag = dataTag
else:
playerSpawnTag = self.getPlayerTag(player)
return [playerSpawnTag.get(i, dataTag[i]).value for i in ("SpawnX", "SpawnY", "SpawnZ")]
def setPlayerSpawnPosition(self, pos, player=None):
"""
        Sets the world's spawn point to pos. If player is given, sets that player's spawn point instead.
:param pos: tuple int (x, y, z)
:param player: nbt.TAG_Compound, root tag of the player
:return: None
"""
if player is None:
playerSpawnTag = self.root_tag
else:
playerSpawnTag = self.getPlayerTag(player)
for name, val in zip(("SpawnX", "SpawnY", "SpawnZ"), pos):
playerSpawnTag[name] = nbt.TAG_Int(val)
def getPlayerTag(self, player='Player'):
"""
Obtains a player from the world.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: nbt.TAG_Compound, root tag of the player.
"""
if player == '[No players]': # Apparently this is being called somewhere?
return None
if player == 'Player':
player = '~local_player'
_player = self.playerTagCache.get(player)
if _player is not None:
return _player
playerData = self.playerData[player]
with nbt.littleEndianNBT():
_player = nbt.load(buf=playerData)
self.playerTagCache[player] = _player
return _player
def getPlayerDimension(self, player="Player"):
"""
Always returns 0, as MCPE only has the overworld dimension.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: int
"""
return 0
def setPlayerPosition(self, (x, y, z), player="Player"):
"""
        Sets the player's position to x, y, z
:param (x, y, z): tuple of the coordinates of the player
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return:
"""
posList = nbt.TAG_List([nbt.TAG_Double(p) for p in (x, y - 1.75, z)])
playerTag = self.getPlayerTag(player)
playerTag["Pos"] = posList
def getPlayerPosition(self, player="Player"):
"""
        Gets the player's position
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: tuple int (x, y, z): Coordinates of the player.
"""
playerTag = self.getPlayerTag(player)
posList = playerTag["Pos"]
x, y, z = map(lambda c: c.value, posList)
return x, y + 1.75, z
def setPlayerOrientation(self, yp, player="Player"):
"""
        Sets the player's orientation.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:param yp: int tuple (yaw, pitch)
:return: None
"""
self.getPlayerTag(player)["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])
def getPlayerOrientation(self, player="Player"):
"""
        Gets the player's orientation.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: tuple int (yaw, pitch)
"""
yp = map(lambda x: x.value, self.getPlayerTag(player)["Rotation"])
y, p = yp
if p == 0:
p = 0.000000001
if p == 180.0:
p -= 0.000000001
yp = y, p
return numpy.array(yp)
    @staticmethod  # Editor keeps putting this in. Probably unnecessary
def setPlayerAbilities(gametype, player="Player"):
"""
        This method overrides the standard one, as MCPE appears to have no abilities.
        :param gametype: int, gamemode the player gets set to.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
"""
pass
def setPlayerGameType(self, gametype, player="Player"):
"""
Sets the game type for player
:param gametype: int (0=survival, 1=creative, 2=adventure, 3=spectator)
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: None
"""
# This annoyingly works differently between single- and multi-player.
if player == "Player":
self.GameType = gametype
self.setPlayerAbilities(gametype, player)
else:
playerTag = self.getPlayerTag(player)
playerTag['playerGameType'] = nbt.TAG_Int(gametype)
self.setPlayerAbilities(gametype, player)
def getPlayerGameType(self, player="Player"):
"""
        Obtains the player's gametype.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: int (0=survival, 1=creative, 2=adventure, 3=spectator)
"""
if player == "Player":
return self.GameType
else:
playerTag = self.getPlayerTag(player)
return playerTag["playerGameType"].value
class PocketLeveldbChunk(LightedChunk):
HeightMap = FakeChunk.HeightMap
# _Entities = _TileEntities = nbt.TAG_List()
_Entities = nbt.TAG_List()
_TileEntities = nbt.TAG_List()
dirty = False
def __init__(self, cx, cz, world, data=None, create=False):
"""
:param cx, cz int, int Coordinates of the chunk
:param data List of 3 strings. (83200 bytes of terrain data, tile-entity data, entity data)
        :param world PocketLeveldbWorld, instance of the world the chunk belongs to
"""
self.chunkPosition = (cx, cz)
self.world = world
if create:
self.Blocks = numpy.zeros(32768, 'uint16')
self.Data = numpy.zeros(16384, 'uint8')
self.SkyLight = numpy.zeros(16384, 'uint8')
self.BlockLight = numpy.zeros(16384, 'uint8')
self.DirtyColumns = numpy.zeros(256, 'uint8')
self.GrassColors = numpy.zeros(1024, 'uint8')
self.TileEntities = nbt.TAG_List()
self.Entities = nbt.TAG_List()
else:
terrain = numpy.fromstring(data[0], dtype='uint8')
if data[1] is not None:
TileEntities = loadNBTCompoundList(data[1])
self.TileEntities = nbt.TAG_List(TileEntities, list_type=nbt.TAG_COMPOUND)
if data[2] is not None:
Entities = loadNBTCompoundList(data[2])
# PE saves entities with their int ID instead of string name. We swap them to make it work in mcedit.
# Whenever we save an entity, we need to make sure to swap back.
invertEntities = {v: k for k, v in entity.PocketEntity.entityList.items()}
for ent in Entities:
ent["id"] = nbt.TAG_String(invertEntities[ent["id"].value])
self.Entities = nbt.TAG_List(Entities, list_type=nbt.TAG_COMPOUND)
self.Blocks, terrain = terrain[:32768], terrain[32768:]
self.Data, terrain = terrain[:16384], terrain[16384:]
self.SkyLight, terrain = terrain[:16384], terrain[16384:]
self.BlockLight, terrain = terrain[:16384], terrain[16384:]
self.DirtyColumns, terrain = terrain[:256], terrain[256:]
# Unused at the moment. Might need a special editor? Maybe hooked up to biomes?
self.GrassColors = terrain[:1024]
self.unpackChunkData()
self.shapeChunkData()
def unpackChunkData(self):
"""
Unpacks the terrain data to match mcedit's formatting.
"""
for key in ('SkyLight', 'BlockLight', 'Data'):
dataArray = getattr(self, key)
dataArray.shape = (16, 16, 64)
s = dataArray.shape
unpackedData = numpy.zeros((s[0], s[1], s[2] * 2), dtype='uint8')
unpackedData[:, :, ::2] = dataArray
unpackedData[:, :, ::2] &= 0xf
unpackedData[:, :, 1::2] = dataArray
unpackedData[:, :, 1::2] >>= 4
setattr(self, key, unpackedData)
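    # Nibble layout sketch (derived from the loop above): a packed byte 0x2F
    # expands to 0xF at the even index (low nibble) and 0x2 at the odd index
    # (high nibble); packData() in savedData() below performs the inverse.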
def shapeChunkData(self):
"""
Determines the shape of the terrain data.
:return:
"""
chunkSize = 16
self.Blocks.shape = (chunkSize, chunkSize, self.world.Height)
self.SkyLight.shape = (chunkSize, chunkSize, self.world.Height)
self.BlockLight.shape = (chunkSize, chunkSize, self.world.Height)
self.Data.shape = (chunkSize, chunkSize, self.world.Height)
self.DirtyColumns.shape = chunkSize, chunkSize
def savedData(self):
"""
Returns the data of the chunk to save to the database.
:return: str of 83200 bytes of chunk data.
"""
def packData(dataArray):
"""
Repacks the terrain data to Mojang's leveldb library's format.
"""
assert dataArray.shape[2] == self.world.Height
data = numpy.array(dataArray).reshape(16, 16, self.world.Height / 2, 2)
data[..., 1] <<= 4
data[..., 1] |= data[..., 0]
return numpy.array(data[:, :, :, 1])
if self.dirty:
# elements of DirtyColumns are bitfields. Each bit corresponds to a
# 16-block segment of the column. We set all of the bits because
# we only track modifications at the chunk level.
self.DirtyColumns[:] = 255
with nbt.littleEndianNBT():
entityData = ""
tileEntityData = ""
for ent in self.TileEntities:
tileEntityData += ent.save(compressed=False)
for ent in self.Entities:
v = ent["id"].value
ent["id"] = nbt.TAG_Int(entity.PocketEntity.entityList[v])
entityData += ent.save(compressed=False)
# We have to re-invert after saving otherwise the next save will fail.
ent["id"] = nbt.TAG_String(v)
terrain = ''.join([self.Blocks.tostring(),
packData(self.Data).tostring(),
packData(self.SkyLight).tostring(),
packData(self.BlockLight).tostring(),
self.DirtyColumns.tostring(),
self.GrassColors.tostring(),
])
return terrain, tileEntityData, entityData
# -- Entities and TileEntities
@property
def Entities(self):
return self._Entities
@Entities.setter
def Entities(self, Entities):
"""
:param Entities: list
:return:
"""
self._Entities = Entities
@property
def TileEntities(self):
return self._TileEntities
@TileEntities.setter
def TileEntities(self, TileEntities):
"""
:param TileEntities: list
:return:
"""
self._TileEntities = TileEntities
|
|
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <nick.moffitt@canonical.com>
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os
import re
import pwd
import glob
import grp
import random
import string
import subprocess
import hashlib
import functools
import itertools
import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.host_factory.ubuntu import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'
def service_start(service_name, **kwargs):
"""Start a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be started. The follow-
    ing example starts the ceph-osd service for instance id=4:
    service_start('ceph-osd', id=4)
    :param service_name: the name of the service to start
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
return service('start', service_name, **kwargs)
def service_stop(service_name, **kwargs):
"""Stop a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be stopped. The follow-
ing example stops the ceph-osd service for instance id=4:
service_stop('ceph-osd', id=4)
:param service_name: the name of the service to stop
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
return service('stop', service_name, **kwargs)
def service_restart(service_name, **kwargs):
"""Restart a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be restarted. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_restart('ceph-osd', id=4)
:param service_name: the name of the service to restart
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
    return service('restart', service_name, **kwargs)
def service_reload(service_name, restart_on_failure=False, **kwargs):
"""Reload a system service, optionally falling back to restart if
reload fails.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
    ing example reloads the ceph-osd service for instance id=4:
service_reload('ceph-osd', id=4)
:param service_name: the name of the service to reload
:param restart_on_failure: boolean indicating whether to fallback to a
restart if the reload fails.
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
service_result = service('reload', service_name, **kwargs)
if not service_result and restart_on_failure:
service_result = service('restart', service_name, **kwargs)
return service_result
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
**kwargs):
"""Pause a system service.
Stop it, and prevent it from starting again at boot.
:param service_name: the name of the service to pause
:param init_dir: path to the upstart init directory
:param initd_dir: path to the sysv init directory
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems which do not support
key=value arguments via the commandline.
"""
stopped = True
if service_running(service_name, **kwargs):
stopped = service_stop(service_name, **kwargs)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d", **kwargs):
"""Resume a system service.
    Re-enable the service to start at boot, then start the service.
    :param service_name: the name of the service to resume
    :param init_dir: the path to the init dir
    :param initd_dir: the path to the initd dir
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name, **kwargs)
if not started:
started = service_start(service_name, **kwargs)
return started
def service(action, service_name, **kwargs):
"""Control a system service.
:param action: the action to take on the service
    :param service_name: the name of the service to perform the action on
:param **kwargs: additional params to be passed to the service command in
the form of key=value.
"""
if init_is_systemd():
cmd = ['systemctl', action, service_name]
else:
cmd = ['service', service_name, action]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
return subprocess.call(cmd) == 0
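# Illustrative sketch of the commands service() builds (not executed here):
#   service('restart', 'ceph-osd', id=4)
# runs `service ceph-osd restart id=4` on upstart/sysv hosts, while on a
# systemd host the kwargs are dropped and `systemctl restart ceph-osd` runs.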
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name, **kwargs):
"""Determine whether a system service is running.
:param service_name: the name of the service
:param **kwargs: additional args to pass to the service command. This is
used to pass additional key=value arguments to the
service command line for managing specific instance
units (e.g. service ceph-osd status id=2). The kwargs
are ignored in systemd services.
"""
if init_is_systemd():
return service('is-active', service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
cmd = ['status', service_name]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running
# 'start/running'
if ("start/running" in output or
"is running" in output or
"up and running" in output):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
return service('status', service_name)
return False
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
"""Return True if the host system uses systemd, False otherwise."""
if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
return False
return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash',
system_user=False, primary_group=None,
secondary_groups=None, uid=None, home_dir=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
:param str username: Username to create
:param str password: Password for user; if ``None``, create a system user
:param str shell: The default shell for the user
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:param str home_dir: Home directory for user
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if home_dir:
cmd.extend(['--home', str(home_dir)])
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def user_exists(username):
"""Check if a user exists"""
try:
pwd.getpwnam(username)
user_exists = True
except KeyError:
user_exists = False
return user_exists
def uid_exists(uid):
"""Check if a uid exists"""
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
"""Check if a group exists"""
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
"""Check if a gid exists"""
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for user being created
:returns: The password database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = ['gpasswd', '-a', username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
if timeout:
cmd = ['timeout', str(timeout)] + cmd
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms)
def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a byte string."""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(content)
def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file"""
return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
return fstab_add(device, mountpoint, filesystem, options=options)
return True
def umount(mountpoint, persist=False):
"""Unmount a filesystem"""
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
return fstab_remove(mountpoint)
return True
def mounts():
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
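# Example of the return shape (device names are illustrative):
#   >>> mounts()
#   [['/', '/dev/sda1'], ['/boot', '/dev/sda2'], ...]
# i.e. each entry is a [mountpoint, device] pair parsed from /proc/mounts.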
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
"""Generate a hash checksum of the contents of 'path' or None if not found.
    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
def path_hash(path):
"""Generate a hash checksum of all files matching 'path'. Standard
wildcards like '*' and '?' are supported, see documentation for the 'glob'
module for more information.
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
"""
return {
filename: file_hash(filename)
for filename in glob.iglob(path)
}
def check_hash(path, checksum, hash_type='md5'):
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
"""A class derived from Value error to indicate the checksum failed."""
pass
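# Usage sketch (the digest below is hypothetical):
#   expected = 'd41d8cd98f00b204e9800998ecf8427e'
#   check_hash('/tmp/payload.tar.gz', expected, hash_type='md5')
# check_hash() returns None on success and raises ChecksumError on mismatch,
# including when the file is missing (file_hash() then returns None).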
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
    This function is used as a decorator, for example::
        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
            '/etc/apache/sites-enabled/*': [ 'apache2' ]
        })
def config_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. The apache2 service would be
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
    @param restart_map: {path_file_name: [service_name, ...]}
    @param stopstart: default False; whether to stop and start the service
        instead of restarting it
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
restart_functions=None):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
in the restart_map have changed after an invocation of lambda_f().
@param lambda_f: function to call.
@param restart_map: {file: [service, ...]}
@param stopstart: whether to stop, start or restart a service
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result of lambda_f()
"""
if restart_functions is None:
restart_functions = {}
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
else:
for action in actions:
service(action, service_name)
return r
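# Usage sketch without the decorator (file and service names hypothetical):
#   result = restart_on_change_helper(
#       lambda: write_file('/etc/myapp.conf', b'key=value\n'),
#       {'/etc/myapp.conf': ['myapp']})
# 'myapp' is restarted only if the file's hash changed across the call.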
def pwgen(length=None):
"""Generate a random pasword."""
if length is None:
# A random length is ok to use a weak PRNG
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
# Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
# actual password
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
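# Usage sketch:
#   pwgen()    # 35-44 character password from the reduced alphabet
#   pwgen(16)  # fixed-length password; lookalikes such as 'l', '0', '1'
#              # and vowels are excluded by the filter above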
def is_phy_iface(interface):
"""Returns True if interface is not virtual, otherwise False."""
if interface:
sys_net = '/sys/class/net'
if os.path.isdir(sys_net):
for iface in glob.glob(os.path.join(sys_net, '*')):
if '/virtual/' in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
return True
return False
def get_bond_master(interface):
"""Returns bond master if interface is bond slave otherwise None.
NOTE: the provided interface is expected to be physical
"""
if interface:
iface_path = '/sys/class/net/%s' % (interface)
if os.path.exists(iface_path):
if '/virtual/' in os.path.realpath(iface_path):
return None
master = os.path.join(iface_path, 'master')
if os.path.exists(master):
master = os.path.realpath(master)
# make sure it is a bond master
if os.path.exists(os.path.join(master, 'bonding')):
return os.path.basename(master)
return None
def list_nics(nic_type=None):
"""Return a list of nics of given type(s)"""
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
if nic_type:
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8')
ip_output = ip_output.split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type +
r'[0-9]+\.[0-9]+)@.*', line)
if matched:
iface = matched.groups()[0]
else:
iface = line.split()[1].replace(":", "")
if iface not in interfaces:
interfaces.append(iface)
else:
cmd = ['ip', 'a']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line.strip() for line in ip_output if line)
        key = re.compile(r'^[0-9]+:\s+(.+):')
for line in ip_output:
matched = re.search(key, line)
if matched:
iface = matched.group(1)
iface = iface.partition("@")[0]
if iface not in interfaces:
interfaces.append(iface)
return interfaces
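# Usage sketch (interface names are illustrative):
#   list_nics()                  # every interface, e.g. ['lo', 'eth0']
#   list_nics('eth')             # only interfaces labelled eth*
#   list_nics(['eth', 'bond'])   # several type prefixes at once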
def set_nic_mtu(nic, mtu):
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
"""Return the Media Access Control (MAC) for a network interface."""
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
@contextmanager
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
run commands from a specificed directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(directory)
finally:
os.chdir(cur)
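# Usage sketch:
#   with chdir('/tmp'):
#       subprocess.check_call(['make'])   # runs with cwd=/tmp
# The previous working directory is restored even if the block raises.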
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
"""Recursively change user and group ownership of files and directories
in given path. Doesn't chown path itself by default, only its children.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
:param bool follow_links: Also follow and chown links if True
:param bool chowntopdir: Also chown path itself if True
"""
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
chown = os.chown
else:
chown = os.lchown
if chowntopdir:
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
if not broken_symlink:
chown(path, uid, gid)
for root, dirs, files in os.walk(path, followlinks=follow_links):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
chown(full, uid, gid)
def lchownr(path, owner, group):
"""Recursively change user and group ownership of files and directories
in a given path, not following symbolic links. See the documentation for
'os.lchown' for more information.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
"""
chownr(path, owner, group, follow_links=False)
def owner(path):
"""Returns a tuple containing the username & groupname owning the path.
:param str path: the string path to retrieve the ownership
:return tuple(str, str): A (username, groupname) tuple containing the
name of the user and group owning the path.
:raises OSError: if the specified path does not exist
"""
stat = os.stat(path)
username = pwd.getpwuid(stat.st_uid)[0]
groupname = grp.getgrgid(stat.st_gid)[0]
return username, groupname
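# Usage sketch:
#   user, group = owner('/etc/passwd')   # typically ('root', 'root')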
def get_total_ram():
"""The total amount of system RAM in bytes.
This is what is reported by the OS, and may be overcommitted when
there are multiple containers hosted on the same machine.
"""
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
if key == 'MemTotal:':
assert unit == 'kB', 'Unknown unit'
                    return int(value) * 1024  # 'kB' here means 1024 bytes
raise NotImplementedError()
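# Example: convert the byte count to GiB (illustrative):
#   ram_gib = get_total_ram() / (1024 ** 3)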
UPSTART_CONTAINER_TYPE = '/run/container_type'
def is_container():
"""Determine whether unit is running in a container
@return: boolean indicating if unit is in a container
"""
if init_is_systemd():
# Detect using systemd-detect-virt
return subprocess.call(['systemd-detect-virt',
'--container']) == 0
else:
# Detect using upstart container file marker
return os.path.exists(UPSTART_CONTAINER_TYPE)
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
with open(updatedb_path, 'r+') as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
f_id.seek(0)
f_id.write(output)
f_id.truncate()
def updatedb(updatedb_text, new_path):
lines = [line for line in updatedb_text.split("\n")]
for i, line in enumerate(lines):
if line.startswith("PRUNEPATHS="):
paths_line = line.split("=")[1].replace('"', '')
paths = paths_line.split(" ")
if new_path not in paths:
paths.append(new_path)
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
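# Worked example of this pure helper:
#   >>> updatedb('PRUNEPATHS="/tmp /var/spool"', '/srv/data')
#   'PRUNEPATHS="/tmp /var/spool /srv/data"'
# Paths already present are left alone; other lines pass through unchanged.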
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import random
import glob
import os
import sys
import numpy
import pylab
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from svmutil import *
N_MONTH = 4
N_DAY_PER_MONTH = 31
BASE_MONTH = 4
TYPE_LENGTH = 4
# Don't use a Python long here, as that doesn't work on 32-bit machines.
numpy.random.seed(0xbeef)
rng = RandomStreams(seed=numpy.random.randint(1 << 30))
theano.config.warn.subtensor_merge_bug = False
class User(object):
def __init__(self, id, info):
        self.id = id
self.bands = info.keys()
self.data = numpy.zeros((len(info), N_MONTH * 3 * TYPE_LENGTH * 8), dtype=int)
self.label = []
for idx, brandID in enumerate(self.bands):
band = info[brandID]
row = [0 for n in range(48)]
label = 0
for month, day, action in band:
p = (month - BASE_MONTH) * 12
                # Bucket the day into thirds of the month:
                # days 1-10 -> +0, 11-20 -> +4, 21-31 -> +8
                if day > 20:
                    p += 8
                elif day > 10:
                    p += 4
row[p + action] = min(255, row[p + action] + 1)
if month == BASE_MONTH + N_MONTH - 1 and action == 1:
label = 1
self.label.append(label)
self.data[idx, :] = numpy.mat([list(format(num, '08b')) for num in row]).flatten()
self.data = self.data.astype(numpy.float32)
def __str__(self):
return str(self.id) + ' ' + str(len(self.bands))
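# Feature layout sketch: each of the 48 (month, third-of-month, action)
# counters above is clipped to 255 and unpacked into 8 binary digits, so a
# row becomes a 4 * 3 * 4 * 8 = 384-bit visible vector, e.g.:
#   >>> list(format(5, '08b'))
#   ['0', '0', '0', '0', '0', '1', '0', '1']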
def build_rbm(v, W, bv, bh, k):
'''Construct a k-step Gibbs chain starting at v for an RBM.
v : Theano vector or matrix
If a matrix, multiple chains will be run in parallel (batch).
W : Theano matrix
Weight matrix of the RBM.
bv : Theano vector
Visible bias vector of the RBM.
bh : Theano vector
Hidden bias vector of the RBM.
k : scalar or Theano scalar
Length of the Gibbs chain.
Return a (v_sample, cost, monitor, updates) tuple:
v_sample : Theano vector or matrix with the same shape as `v`
Corresponds to the generated sample(s).
cost : Theano scalar
Expression whose gradient with respect to W, bv, bh is the CD-k approximation
to the log-likelihood of `v` (training example) under the RBM.
The cost is averaged in the batch case.
monitor: Theano scalar
Pseudo log-likelihood (also averaged in the batch case).
updates: dictionary of Theano variable -> Theano variable
The `updates` object returned by scan.'''
def gibbs_step(v):
mean_h = T.nnet.sigmoid(T.dot(v, W) + bh)
h = rng.binomial(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
mean_v = T.nnet.sigmoid(T.dot(h, W.T) + bv)
v = rng.binomial(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
return mean_v, v
chain, updates = theano.scan(lambda v: gibbs_step(v)[1], outputs_info=[v],
n_steps=k)
v_sample = chain[-1]
mean_v = gibbs_step(v_sample)[0]
monitor = T.xlogx.xlogy0(v, mean_v) + T.xlogx.xlogy0(1 - v, 1 - mean_v)
monitor = monitor.sum() / v.shape[0]
def free_energy(v):
return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]
return v_sample, cost, monitor, updates
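# Hedged compile sketch (shapes are illustrative; shared_normal/shared_zeros
# are defined just below):
#   v = T.matrix()
#   W = shared_normal(96, 150, 0.01)
#   bv, bh = shared_zeros(96), shared_zeros(150)
#   v_sample, cost, monitor, updates = build_rbm(v, W, bv, bh, k=15)
#   sample_fn = theano.function([v], v_sample, updates=updates)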
def shared_normal(num_rows, num_cols, scale=1):
'''Initialize a matrix shared variable with normally distributed
elements.'''
return theano.shared(numpy.random.normal(
scale=scale, size=(num_rows, num_cols)).astype(theano.config.floatX))
def shared_zeros(*shape):
'''Initialize a vector shared variable with zero elements.'''
return theano.shared(numpy.zeros(shape, dtype=theano.config.floatX))
def build_rnnrbm(n_visible, n_hidden, n_hidden_recurrent):
'''Construct a symbolic RNN-RBM and initialize parameters.
n_visible : integer
Number of visible units.
n_hidden : integer
Number of hidden units of the conditional RBMs.
n_hidden_recurrent : integer
Number of hidden units of the RNN.
Return a (v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate) tuple:
v : Theano matrix
Symbolic variable holding an input sequence (used during training)
v_sample : Theano matrix
Symbolic variable holding the negative particles for CD log-likelihood
gradient estimation (used during training)
cost : Theano scalar
Expression whose gradient (considering v_sample constant) corresponds to the
LL gradient of the RNN-RBM (used during training)
monitor : Theano scalar
Frame-level pseudo-likelihood (useful for monitoring during training)
params : tuple of Theano shared variables
The parameters of the model to be optimized during training.
updates_train : dictionary of Theano variable -> Theano variable
Update object that should be passed to theano.function when compiling the
training function.
v_t : Theano matrix
Symbolic variable holding a generated sequence (used during sampling)
updates_generate : dictionary of Theano variable -> Theano variable
Update object that should be passed to theano.function when compiling the
generation function.'''
W = shared_normal(n_visible, n_hidden, 0.01)
bv = shared_zeros(n_visible)
bh = shared_zeros(n_hidden)
Wuh = shared_normal(n_hidden_recurrent, n_hidden, 0.0001)
Wuv = shared_normal(n_hidden_recurrent, n_visible, 0.0001)
Wvu = shared_normal(n_visible, n_hidden_recurrent, 0.0001)
Wuu = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
bu = shared_zeros(n_hidden_recurrent)
params = W, bv, bh, Wuh, Wuv, Wvu, Wuu, bu # learned parameters as shared
# variables
v = T.matrix() # a training sequence
u0 = T.zeros((n_hidden_recurrent,)) # initial value for the RNN hidden
# units
# If `v_t` is given, deterministic recurrence to compute the variable
# biases bv_t, bh_t at each time step. If `v_t` is None, same recurrence
# but with a separate Gibbs chain at each time step to sample (generate)
# from the RNN-RBM. The resulting sample v_t is returned in order to be
# passed down to the sequence history.
def recurrence(v_t, u_tm1, generate=False):
bv_t = bv + T.dot(u_tm1, Wuv)
bh_t = bh + T.dot(u_tm1, Wuh)
if generate:
v_t, _, _, updates = build_rbm(v_t, W, bv_t,
bh_t, k=25)
u_t = T.tanh(bu + T.dot(v_t, Wvu) + T.dot(u_tm1, Wuu))
return ([v_t, u_t], updates) if generate else [u_t, bv_t, bh_t]
# For training, the deterministic recurrence is used to compute all the
# {bv_t, bh_t, 1 <= t <= T} given v. Conditional RBMs can then be trained
# in batches using those parameters.
(u_t, bv_t, bh_t), updates_train = theano.scan(
lambda v_t, u_tm1, *_: recurrence(v_t, u_tm1),
sequences=v, outputs_info=[u0, None, None], non_sequences=params)
v_sample, cost, monitor, updates_rbm = build_rbm(v, W, bv_t[:], bh_t[:],
k=15)
updates_train.update(updates_rbm)
# symbolic loop for sequence generation
(v_t, u_t), updates_generate = theano.scan(
lambda v_t, u_tm1, *_: recurrence(v_t, u_tm1, True),
sequences=v, outputs_info=[None, u0], non_sequences=params, n_steps=1)
return (v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate)
class RnnRbm:
    '''Simple class to train an RNN-RBM (adapted from the Theano tutorial's
    MIDI example) on per-user action sequences and to generate samples.'''
def __init__(self, n_visible=96, n_hidden=150, n_hidden_recurrent=100, lr=0.001, dt=0.3):
'''Constructs and compiles Theano functions for training and sequence
generation.
n_hidden : integer
Number of hidden units of the conditional RBMs.
n_hidden_recurrent : integer
Number of hidden units of the RNN.
lr : float
Learning rate
        n_visible : integer
            Number of visible units of the conditional RBMs at each time
            step (here each 384-bit user/brand row is reshaped to 4 steps
            of 96 visible units).
dt : float
Sampling period when converting the MIDI files into piano-rolls, or
equivalently the time difference between consecutive time steps.'''
self.dt = dt
(v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate) = build_rnnrbm(n_visible, n_hidden,
n_hidden_recurrent)
gradient = T.grad(cost, params, consider_constant=[v_sample])
updates_train.update(((p, p - lr * g) for p, g in zip(params,
gradient)))
self.train_function = theano.function([v], monitor,
updates=updates_train)
self.generate_function = theano.function([v], v_t,
updates=updates_generate)
def train(self, userTrain, userVal, batch_size=100, num_epochs=200):
        '''Train the RNN-RBM via stochastic gradient descent (SGD) on the
        per-user feature sequences.
        userTrain, userVal : lists of User
            Users whose sequences are used for training and validation.
batch_size : integer
Training sequences will be split into subsequences of at most this size
before applying the SGD updates.
num_epochs : integer
Number of epochs (pass over the training set) performed. The user can
safely interrupt training with Ctrl+C at any time.'''
dataTrain = numpy.vstack([user.data for user in userTrain])
labelTrain = []
for user in userTrain:
labelTrain.extend(user.label)
try:
for epoch in xrange(num_epochs):
                combine = zip(dataTrain, labelTrain)
                numpy.random.shuffle(combine)
                # materialise the shuffled order back into data/labels
                dataTrain = numpy.array([row for row, _ in combine])
                labelTrain = [label for _, label in combine]
costs = []
for s, sequence in enumerate(dataTrain):
cost = self.train_function(numpy.reshape(sequence, (4, 96)))
costs.append(cost)
print 'Epoch %i/%i' % (epoch + 1, num_epochs),
print numpy.mean(costs)
sys.stdout.flush()
generateData = []
for s, sequence in enumerate(dataTrain):
sample = self.generate_function(numpy.reshape(sequence, (4, 96))[0:3, :])
generateData.append(sample.flatten().tolist())
self.svm = svm_train(labelTrain, generateData, '-t 0 -q')
print 'Training: %.02f%% (Precision) %.02f%% (Recall) %.02f%% (F1)' % self.eval(userTrain)
                print 'Validate: %.02f%% (Precision) %.02f%% (Recall) %.02f%% (F1)' % self.eval(userVal)
print ''
sys.stdout.flush()
except KeyboardInterrupt:
print 'Interrupted by user.'
def eval(self, evalUser):
pBands = []
bBands = []
hitBands = []
for user in evalUser:
bBands.append(sum(user.label))
hit = 0
total = 0
for idx, label in enumerate(user.label):
data = self.generate_function(numpy.reshape(user.data[idx, :], (4, 96))[0:3, :])
predict, acc, prob = svm_predict([label], [data.flatten().tolist()], self.svm, '-q')
                if predict[0] == 1:
total += 1
if label == 1:
hit += 1
hitBands.append(hit)
pBands.append(total)
precision = float(sum(hitBands)) / sum(pBands) if not sum(pBands) == 0 else 0
recall = float(sum(hitBands)) / sum(bBands) if not sum(bBands) == 0 else 0
f1 = (2 * precision * recall) / (precision + recall) if not precision + recall == 0 else 0
return precision, recall, f1
def generate(self, filename, show=True):
'''Generate a sample sequence, plot the resulting piano-roll and save
it as a MIDI file.
filename : string
A MIDI file will be created at this location.
show : boolean
If True, a piano-roll of the generated sequence will be shown.'''
        # Retained from the Theano tutorial; assumes a `midiwrite` helper and
        # a pitch range `self.r` are available (neither is defined in this
        # adaptation).
        piano_roll = self.generate_function()
midiwrite(filename, piano_roll, self.r, self.dt)
if show:
extent = (0, self.dt * len(piano_roll)) + self.r
pylab.figure()
pylab.imshow(piano_roll.T, origin='lower', aspect='auto',
interpolation='nearest', cmap=pylab.cm.gray_r,
extent=extent)
pylab.xlabel('time (s)')
pylab.ylabel('MIDI note number')
pylab.title('generated piano-roll')
def test_rnnrbm(userTrain, userVal, batch_size=100, num_epochs=200):
model = RnnRbm()
model.train(userTrain, userVal,
batch_size=batch_size, num_epochs=num_epochs)
return model
if __name__ == '__main__':
userInfo = dict()
with open('/home/pumpkin/Documents/project/tmall/dataset/t_alibaba_data.csv', 'rb') as csvfile:
# with open('/home/pumpkin/Documents/project/tmall/dataset/demo.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
userID, brandID, actionType, month, day = [int(field) for field in row]
            if userID not in userInfo:
userInfo[userID] = dict()
user = userInfo[userID]
if brandID not in user:
user[brandID] = []
if month in (4, 5, 6):
day = day - 14
else:
day = day - 15
if day <= 0:
month -= 1
day += 31
band = user[brandID]
band.append((month, day, actionType))
users = []
for (userID, info) in userInfo.iteritems():
users.append(User(userID, info))
nUsers = len(users)
nTrain = int(0.6 * nUsers)
nVal = int(0.2 * nUsers)
nTest = nUsers - nTrain - nVal
print 'Num of users: ', len(users)
print 'Num for train: ', nTrain
    print 'Num for validate: ', nVal
print 'Num for test: ', nTest
random.shuffle(users)
userTrain = users[0: nTrain]
userVal = users[nTrain: nTrain+nVal]
userTest = users[nTrain+nVal: nUsers]
model = test_rnnrbm(userTrain, userVal, batch_size=4)
# numpy.savetxt("foo.csv", users[0].data, delimiter=",", fmt='%d')
|
|
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import os
import re
import shutil
import jinja2
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
import json
from FSComponentUtil import crypt
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron.services.vpn.common import topics
from neutron.services.vpn import device_drivers
from neutron.services.firewall import ngfw_plugin
from neutron.services.firewall.agents.ngfw import ngfw_api
from neutron.services.firewall.agents.ngfw import ngfw_utils
from neutron.services.vpn.device_drivers.template.ngfw import template as ngfw_template
import xmltodict
from netaddr import IPNetwork
LOG = logging.getLogger(__name__)
TEMPLATE_PATH = os.path.dirname(__file__)
ipsec_opts = [
cfg.IntOpt('ipsec_status_check_interval',
default=60,
help=_("Interval for checking ipsec status"))
]
cfg.CONF.register_opts(ipsec_opts, 'ipsec')
JINJA_ENV = None
STATUS_MAP = {
'negotiating': constants.DOWN,
'waiting': constants.PENDING_CREATE,
'succeed': constants.ACTIVE,
'failure': constants.DOWN,
'error': constants.ERROR,
'pending_delete': constants.PENDING_DELETE
}
IPSEC_CONNS = 'ipsec_site_connections'
FAIL_CODE = 400
SUCCESS_CODE = 200
VPN_ACL_ID_START = 3000
NOCONTENT = 204
VPN_ACL_ID_END = 4000
VPN_ACL_NUMBERS = set(range(VPN_ACL_ID_START, VPN_ACL_ID_END))
def _get_template(template_file):
global JINJA_ENV
if not JINJA_ENV:
templateLoader = jinja2.FileSystemLoader(searchpath="/")
JINJA_ENV = jinja2.Environment(loader=templateLoader)
return JINJA_ENV.get_template(template_file)
@six.add_metaclass(abc.ABCMeta)
class BaseSwanProcess():
"""Swan Family Process Manager
This class manages start/restart/stop ipsec process.
This class create/delete config template
"""
binary = "ipsec"
CONFIG_DIRS = [
'var/run',
'log',
'etc',
'etc/ipsec.d/aacerts',
'etc/ipsec.d/acerts',
'etc/ipsec.d/cacerts',
'etc/ipsec.d/certs',
'etc/ipsec.d/crls',
'etc/ipsec.d/ocspcerts',
'etc/ipsec.d/policies',
'etc/ipsec.d/private',
'etc/ipsec.d/reqs',
'etc/pki/nssdb/'
]
DIALECT_MAP = {
"3des": "3des",
"aes-128": "aes128",
"aes-256": "aes256",
"aes-192": "aes192",
"group2": "modp1024",
"group5": "modp1536",
"group14": "modp2048",
"group15": "modp3072",
"bi-directional": "start",
"response-only": "add",
"v2": "insist",
"v1": "never"
}
def __init__(self, conf, root_helper, process_id,
vpnservice, namespace):
self.conf = conf
self.id = process_id
self.root_helper = root_helper
self.updated_pending_status = False
self.namespace = namespace
self.connection_status = {}
self.update_vpnservice(vpnservice)
self.update_vpnservice_cache(vpnservice)
def update_vpnservice_cache(self, vpnservice):
self.vpnservice_cache = vpnservice
def update_vpnservice(self, vpnservice):
self.vpnservice = vpnservice
@abc.abstractmethod
def ensure_configs(self,conn_id):
pass
@abc.abstractmethod
def delete_ngfw_config(self):
pass
@abc.abstractmethod
def get_status(self,conn_id):
pass
@property
def status(self):
if self.active:
return constants.ACTIVE
return constants.DOWN
@property
def active(self):
"""Check if the process is active or not."""
LOG.debug("report active")
try:
flag = True
for i in range(len(self.vpnservice['ipsec_site_connections'])):
conn_id = self.vpnservice['ipsec_site_connections'][i]['id']
service_id = self.vpnservice['id']
status = self.get_status(conn_id)
if status != 'succeed':
flag = False
LOG.debug(_('_update_connection_status %s'),status)
self._update_connection_status(status,conn_id)
return flag
except RuntimeError:
return False
def update(self):
"""Update Status based on vpnservice configuration."""
if self.vpnservice and not self.vpnservice['admin_state_up']:
self.disable()
else:
self.enable()
if plugin_utils.in_pending_status(self.vpnservice['status']):
self.updated_pending_status = True
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
if plugin_utils.in_pending_status(ipsec_site_conn['status']):
conn_id = ipsec_site_conn['id']
conn_status = self.connection_status.get(conn_id)
if not conn_status:
continue
conn_status['updated_pending_status'] = True
ipsec_site_conn['status'] = conn_status['status']
def enable(self):
"""Enabling the process."""
try:
if self.active:
self.restart()
else:
self.start()
except RuntimeError:
LOG.exception(
_("Failed to enable vpn process on router %s"),
self.id)
def disable(self):
"""Disabling the process."""
try:
self.delete_ngfw_config()
except RuntimeError:
LOG.exception(
_("Failed to disable vpn process on router %s"),
self.id)
@abc.abstractmethod
def restart(self):
"""Restart process."""
@abc.abstractmethod
def start(self):
"""Start process."""
@abc.abstractmethod
def stop(self):
"""Stop process."""
def _update_connection_status(self, status_output,connection_id):
#status{negotiating/ waiting/ succeed / failure}
status = status_output
if not self.connection_status.get(connection_id):
self.connection_status[connection_id] = {
'status': None,
'updated_pending_status': False
}
self.connection_status[
connection_id]['status'] = STATUS_MAP[status]
class NGFWProcess(BaseSwanProcess):
"""OpenSwan Process manager class.
This process class uses three commands
(1) ipsec pluto: IPsec IKE keying daemon
(2) ipsec addconn: Adds new ipsec addconn
(3) ipsec whack: control interface for IPSEC keying daemon
"""
def __init__(self, conf, root_helper, process_id,
vpnservice, namespace,kwargs):
super(NGFWProcess, self).__init__(
conf, root_helper, process_id,
vpnservice, namespace)
self.ike_peer = {}
self.ike_proposal = {}
self.ipsec_proposal = {}
self.ipsec_policy = {}
self.ipsec_acl = {}
self.ipsec_static_routes = []
self.ipsec_static_routes_with_vpn = []
self.rest = ngfw_api.ngfwRestAPI()
self.common_conf = kwargs
def ensure_ike_peer(self,vpnservice,vsys_num,ike_proposal_id,connection_id):
ipsec_site_connections = vpnservice['ipsec_site_connections'][connection_id]
ikepolicy = ipsec_site_connections['ikepolicy']
self.ike_peer = {
'ike_peer_name' : self.get_format_name(ipsec_site_connections['id'][0:15]),
'vsys_num' : vsys_num,
'pre_shared_key' : crypt.decrypt(ipsec_site_connections['psk']),
'ike_version' : ikepolicy['ike_version'],
'ike_proposal' : ike_proposal_id,
'peer_address' : ipsec_site_connections['peer_address'], # peer-address type
'phase1_mode' : ikepolicy['phase1_negotiation_mode'],
'vpn_instance' : vsys_num
}
def ensure_ike_proposal(self,vpnservice,connection_id):
ipsec_site_connections = vpnservice['ipsec_site_connections'][connection_id]
ikepolicy = ipsec_site_connections['ikepolicy']
# TODO id ?
self.ike_proposal = {
'id' : self.get_ike_proposal_id(), #
'auth_algorithm' : ikepolicy['auth_algorithm'],
'integrity_algorithm' : '',
'encryption_algorithm' : ikepolicy['encryption_algorithm'],
'auth_mode' : 'pre-share',
'dh' : ikepolicy['pfs'],
'lifetime' : ikepolicy['lifetime_value']
}
def ensure_acl(self,vpnservice,connection_id,acl_id,index):
if not acl_id:
acl_id = self.alloc_acl_id_from_ngfw()
if not acl_id:
return False
ipsec_site_connections = vpnservice['ipsec_site_connections'][connection_id]
access_list_entries = []
rule_name = 0
for i in range(len(ipsec_site_connections['peer_cidrs'])):
for subnet in vpnservice['local_subnets']:
rule_name += 1
access_list_entry = [{
'rule_name': rule_name,
'destination_ipv4_network': ipsec_site_connections['peer_cidrs'][i],
'source_ipv4_network': subnet['cidr'],
'protocol': 0
}]
access_list_entries = access_list_entries + access_list_entry
self.ipsec_acl = {
'access_control_list_name' : acl_id,
'access_list_entries' : access_list_entries,
'vsys':"vpn" + str(index)
}
return True
def ensure_ipsec_policy(self,vpnservice,ike_peer_name,ipsec_proposal_name,acl_id ,interface_name,connection_id):
ipsec_site_connections = vpnservice['ipsec_site_connections'][connection_id]
ipsec_alias = ipsec_site_connections['id']
ipsecpolicy = ipsec_site_connections['ipsecpolicy']
self.ipsec_policy = {
'alias' : self.get_format_name(ipsec_alias),
'name' : self.get_format_name(self.vpnservice['id'][0:15]),
'sequence' : self.vpnservice["sequences"][self.id],
'scenario' : 'p2p',
'acl' : acl_id,
'ike_peer_name' : ike_peer_name,
'ipsec_proposal_name' : ipsec_proposal_name,
'pfs' : self.get_ngfw_pfs(ipsecpolicy['pfs']),
'interface_name' : interface_name,
'local_address' : ipsec_site_connections['description']
}
def ensure_ipsec_proposal(self,vpnservice,connection_id):
ipsec_site_connections = vpnservice['ipsec_site_connections'][connection_id]
ipsecpolicy = ipsec_site_connections['ipsecpolicy']
#TODO ipsec_proposal_name
if ipsecpolicy['transform_protocol'] == 'esp':
self.ipsec_proposal = {
'ipsec_proposal_name' : self.get_format_name(ipsec_site_connections['id'][0:15]),
'transform_protocol' : ipsecpolicy['transform_protocol'],
'esp_auth_algorithm' : ' ' + ipsecpolicy['auth_algorithm'],
'esp_encryption_algorithm' : ' ' + ipsecpolicy['encryption_algorithm'],
'ah_auth_algorithm' : '',
'encapsulation_mode' : ipsecpolicy['encapsulation_mode']
}
else:
self.ipsec_proposal = {
'ipsec_proposal_name' : self.get_format_name(ipsec_site_connections['id'][0:15]),
'transform_protocol' : ipsecpolicy['transform_protocol'],
'esp_auth_algorithm' : '',
'esp_encryption_algorithm' : '',
'ah_auth_algorithm' : ' ' + ipsecpolicy['auth_algorithm'],
'encapsulation_mode' : ipsecpolicy['encapsulation_mode']
}
def ensure_ipsec_static_route(self,vpnservice,connection_id,index):
self.ipsec_static_routes = []
if len(vpnservice['ipsec_site_connections']) > 1:
return
outgoing_interface = self.common_conf.get('vpn_ngfw_private_interface') + '.' + str(index)
for subnet in vpnservice['local_subnets']:
ipsec_static_route = [{
'description':'',
'ip_address':str(IPNetwork(subnet['cidr']).ip),
'mask_length':str(IPNetwork(subnet['cidr']).prefixlen),
'next_hop_address':vpnservice['virtual_ip'],
'outgoing_interface':outgoing_interface,
'priority': self.common_conf.get('static_route_priority'),
'name': 'vpn' + str(index)
}]
self.ipsec_static_routes = ipsec_static_route + self.ipsec_static_routes
def ensure_ipsec_static_route_with_vpn(self,vpnservice,connection_id,index):
self.ipsec_static_routes_with_vpn = []
outgoing_interface = self.common_conf.get('vpn_ngfw_public_interface') + '.' + str(index)
peer_cidrs = vpnservice['ipsec_site_connections'][connection_id]['peer_cidrs']
for subnet in peer_cidrs:
ipsec_static_route = [{
'description':'',
'ip_address':str(IPNetwork(subnet).ip),
'mask_length':str(IPNetwork(subnet).prefixlen),
'next_hop_address':self.common_conf.get('vpn_nexthop'),
'outgoing_interface':outgoing_interface,
'priority': self.common_conf.get('static_route_priority'),
'name': 'vpn' + str(index)
}]
self.ipsec_static_routes_with_vpn = ipsec_static_route + self.ipsec_static_routes_with_vpn
def get_ngfw_ipsec_proposal_name(self):
return self.ipsec_proposal.get('ipsec_proposal_name')
def get_ngfw_pfs(self,pfs):
return "dh-" + pfs
def get_ngfw_ipsec_policy_name(self):
return self.ipsec_policy.get('name')
def _parse_xml_to_dict(self,data, parent_attr, son_attr):
ret = []
if not data:
return ret
try:
tmp = "<response>" + data + "</response>"
parse = xmltodict.parse(tmp)
parent = []
if type(parse['response'][parent_attr]) is list:
parent = parse['response'][parent_attr]
else:
parent.append(parse['response'][parent_attr])
if type(parent[0][son_attr]) is list:
ret = parent[0][son_attr]
else:
ret.append(parent[0][son_attr])
for i in range(1, len(parent)):
if type(parent[i][son_attr]) is list:
ret.extend(parent[i][son_attr])
else:
ret.append(parent[i][son_attr])
except:
LOG.debug("xml parse error")
ret = []
return ret
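    # Illustrative round-trip (element names match the NGFW responses used
    # below; the value is hypothetical):
    #   data = ('<access-lists><access-list>'
    #           '<access-control-list-name>3000</access-control-list-name>'
    #           '</access-list></access-lists>')
    #   self._parse_xml_to_dict(data, 'access-lists', 'access-list')
    #   # -> [OrderedDict([('access-control-list-name', '3000')])]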
def alloc_acl_id_from_ngfw(self):
"""
allocate a free acl number for a vpn site connection
The VPN_ACL_NUMBERS is set(range(3000, 3999)
:return: None, that means there happened a error, or the available acl number have used up.
return the a available acl number for the vpn site connection
"""
response = self.rest.rest_api("GET", ngfw_utils.NGFW_URL_VPN_IPSEC_ACL)
        if response['status'] == NOCONTENT:
            # No ACLs configured on the device yet, so the first ID is free.
            LOG.debug("No ACLs configured on the NGFW yet")
            return VPN_ACL_ID_START
if response['status'] != SUCCESS_CODE:
return None
body = response['body']
acls = self._parse_xml_to_dict(body,'access-lists','access-list')
used_acl_number = set()
for acl in acls:
used_acl_number.add(int(acl['access-control-list-name']))
ret = VPN_ACL_NUMBERS - used_acl_number
if not ret:
return None
return ret.pop()
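    # Allocation sketch: if {3000, 3001} are already configured on the
    # device, the free pool is VPN_ACL_NUMBERS - {3000, 3001}, and .pop()
    # returns an arbitrary free ACL number in [3000, 4000).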
def get_acl_id(self):
return self.ipsec_acl.get('access_control_list_name')
def get_ike_peer_name(self):
return self.ike_peer.get('ike_peer_name')
def get_ike_proposal_id(self):
return int(self.ipsec_acl.get('access_control_list_name')) - 2999
def get_interface_name(self):
return self.common_conf.get('vpn_ngfw_public_interface')
    def ensure_configs(self, conn_index, acl_id=None):
        """Build the NGFW configuration for one ipsec site connection.
        Populates the ACL, IPsec/IKE proposal, IKE peer, static route and
        IPsec policy structures that start() pushes to the device.
        """
vpn_ip = self.vpnservice['ipsec_site_connections'][conn_index]['description']
index = ngfw_utils.get_index_from_value("ip_pool",vpn_ip, self.common_conf.get('vpn_ip_pool'))
if not self.ensure_acl(self.vpnservice,conn_index,acl_id,index):
return False
self.ensure_ipsec_proposal(self.vpnservice,conn_index)
self.ensure_ike_proposal(self.vpnservice,conn_index)
self.ensure_ike_peer(self.vpnservice,"vpn" + str(index) ,self.get_ike_proposal_id(),conn_index)
self.ensure_ipsec_static_route(self.vpnservice,conn_index,index)
self.ensure_ipsec_static_route_with_vpn(self.vpnservice,conn_index,index)
interface_name = self.get_interface_name() + "." + str(index)
self.ensure_ipsec_policy(self.vpnservice, self.get_ike_peer_name(),
self.get_ngfw_ipsec_proposal_name(),
self.get_acl_id(), interface_name ,conn_index)
return True
def get_format_name(self,name):
return name.replace('-','_')
def parse(self,xml):
return xmltodict.parse(xml)
def unparse(self,dict):
return xmltodict.unparse(dict)
def get_delete_acl_body(self,ngfw_ipsec_policy, index=0):
return """
<access-lists>
<access-list>
<access-control-list-name>""" + ngfw_ipsec_policy[
'acl'][index] +"""</access-control-list-name>
</access-list>
</access-lists>
"""
def filter_xml_summary(self,xml,filter):
if xml:
index = xml.find(filter)
if index == -1:
return None
bodyinfo = xml[index:]
return bodyinfo
return None
def get_delete_ipsec_policy_body(self,ipsec_xml_body):
return self.filter_xml_summary(ipsec_xml_body,'<ipsec-policy>')
def _gen_delete_ipsec_policy_body(self, ipsec_policy_dict, index):
if not ipsec_policy_dict:
return None
ipsec_policy = ipsec_policy_dict['ipsec-policy']['ipsec-policy']
cols = ['alias', 'name', 'sequence', 'acl', 'ike-peer-name',
'ipsec-proposal-name', 'status', 'scenario']
for col in cols:
attr_list = ipsec_policy.get(col)
if not isinstance(attr_list, list):
ipsec_policy[col] = [ipsec_policy.get(col)]
attr_list = [attr_list]
if not attr_list[index]:
ipsec_policy[col][index] = ''
if not isinstance((ipsec_policy['local-information']), list):
ipsec_policy['local-information'] = [ipsec_policy['local-information']]
if not ipsec_policy['local-information'][index]['interface-name']:
ipsec_policy['local-information'][index]['interface-name'] = ''
ipsec_policy_body = "<ipsec-policy><ipsec-policy>" \
"<alias>%s</alias><name>%s</name>" \
"<sequence>%s</sequence><acl>%s</acl>" \
"<ike-peer-name>%s</ike-peer-name>" \
"<ipsec-proposal-name>%s</ipsec-proposal-name>" \
"<status>%s</status><scenario>%s</scenario>" \
"<local-information>" \
"<interface-name>%s</interface-name>" \
"</local-information>" \
"</ipsec-policy></ipsec-policy>" % (
ipsec_policy['alias'][index],
ipsec_policy['name'][index],
ipsec_policy['sequence'][index],
ipsec_policy['acl'][index],
ipsec_policy['ike-peer-name'][index],
ipsec_policy['ipsec-proposal-name'][index],
ipsec_policy['status'][index],
ipsec_policy['scenario'][index],
ipsec_policy['local-information'][index]['interface-name'])
return ipsec_policy_body
def delete_ngfw_config(self):
try:
            if not self.vpnservice:
                return
for conn_index in range(len(self.vpnservice['ipsec_site_connections'])):
ipsec_site_connections = self.vpnservice['ipsec_site_connections'][conn_index]
conn_id = self.get_format_name(ipsec_site_connections['id'])
LOG.debug(_('start to delete ipsec site connection :(%s)'),
conn_id)
if conn_id != self.get_format_name(self.id):
continue
# get ipsec policy
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY_GET + \
'&name=' + conn_id)
LOG.debug(_('delete vpn_ipsec_policy body:(%s)'), response['body'])
# index for specific connection in the ngfw response body
index = self._get_connection_index_in_ngfw(response, conn_id)
if index is not None:
ngfw_ipsec_policy = self.parse(self.filter_xml_summary(response['body'],'<ipsec-policy>'))
#delete ipsec policy
delete_ipsec_policy_xml = \
self._gen_delete_ipsec_policy_body(ngfw_ipsec_policy,
index)
if not delete_ipsec_policy_xml:
LOG.error(_('get delete_ipsec_policy_xml fail!'))
continue
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY,delete_ipsec_policy_xml)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('delete ipsec policy success!'))
else:
LOG.error(_('delete ipsec policy fail!'))
continue
# delete ipsec acl
delete_acl_body = self.get_delete_acl_body(
ngfw_ipsec_policy['ipsec-policy']['ipsec-policy'], index)
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_VPN_IPSEC_ACL,delete_acl_body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('delete ipsec acl success!'))
else:
                        LOG.error(_('delete ipsec acl fail!'))
continue
#delete ipsec proposal
ipsec_proposal_names = ngfw_ipsec_policy['ipsec-policy']['ipsec-policy']['ipsec-proposal-name']
if not isinstance(ipsec_proposal_names, list):
ipsec_proposal_names = [ipsec_proposal_names]
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_PROPOSAL_GET +
'&name=' +ipsec_proposal_names[index])
if self.is_exist(response['body'],'<ipsec-proposal>'):
delete_ipsec_proposal_body = self.filter_xml_summary(response['body'],'<ipsec-proposal>')
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_VPN_IPSEC_PROPOSAL,
delete_ipsec_proposal_body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('delete ipsec proposal success!'))
else:
LOG.error(_('delete ipsec proposal fail!'))
continue
#delete ike peer
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IKE_PEER_GET + \
'&name=' + self.get_format_name(ipsec_site_connections['id'][0:15]))
if self.is_exist(response['body'],'<ike-peer>'):
delete_ike_proposal_body = self.filter_xml_summary(response['body'],'<ike-peer>')
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_VPN_IKE_PEER,delete_ike_proposal_body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('delete ike peer success!'))
else:
LOG.error(_('delete ike peer fail!'))
continue
#delete ike proposal
delete_ike_peer_body = self.parse(delete_ike_proposal_body)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IKE_PROPOSAL_GET +
'&name=' +delete_ike_peer_body['ike-peer']['ike-peer']['ike-proposal'])
if self.is_exist(response['body'],'<ike-proposal>'):
delete_ike_proposal_body = self.filter_xml_summary(response['body'],'<ike-proposal>')
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_VPN_IKE_PROPOSAL,delete_ike_proposal_body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('delete ike proposal success!'))
else:
LOG.error(_('delete ike proposal fail!'))
continue
# static route
for i in range(len(self.ipsec_static_routes)):
static_route = self.ipsec_static_routes[i]
self._clear_static_route(static_route)
for i in range(len(self.ipsec_static_routes_with_vpn)):
static_route = self.ipsec_static_routes_with_vpn[i]
self._clear_static_route(static_route)
except Exception as e:
LOG.error(_("delete ngfw config has exception %s"),e)
def _clear_static_route(self, static_route):
LOG.debug(_('enter _clear_static_route.'))
ret = self.check_static_route("delete", static_route)
if not ret:
LOG.error(_('static_route is invalid.'))
return False
bodyinfo = ngfw_utils.get_static_route(static_route)
LOG.debug(_('_clear_static_route xml body is: %s' % bodyinfo))
response = self.rest.rest_api('DELETE', ngfw_utils.NGFW_URL_STATIC_ROUTE, bodyinfo)
if response['status'] >= 200 and response['status'] < 300:
return True
return False
def _make_static_route(self, static_route):
'''
{"ip_address":"172.28.0.0",
"mask_length":"24",
"next_hop_address":"172.28.0.1",
"outgoing_interface":"eth3",
"priority":"63",
"description":"aaaaaaaaaaa"
}
'''
LOG.debug(_('enter _make_static_route.'))
ret = self.check_static_route("add", static_route)
if not ret:
LOG.error(_('static_route is invalid.'))
return False
bodyinfo = ngfw_utils.get_static_route(static_route)
LOG.debug(_('_make_static_route xml body is: %s' % bodyinfo))
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_STATIC_ROUTE, bodyinfo)
if response['status'] >= 400:
LOG.error(_('_make_static_route failed.'))
self._clear_static_route(static_route)
return False
LOG.debug(_('_make_static_route success.'))
return True
    def check_static_route(self, action, static_route):
        LOG.debug(_('static_route is: %s.' % static_route))
        if not static_route.get("ip_address"):
            LOG.error(_('static_route ip_address is invalid.'))
            return False
        if not static_route.get("mask_length"):
            LOG.error(_('static_route mask_length is invalid.'))
            return False
        if "add" == action:
            # at least one of outgoing_interface / next_hop_address is needed
            if (not static_route.get("outgoing_interface") and
                    not static_route.get("next_hop_address")):
                LOG.error(_('static_route outgoing_interface and '
                            'next_hop_address are invalid.'))
                return False
        return True
def get_status(self,conn_id):
"""
call get_status of ngfw
"""
#status {negotiating/ waiting/ succeed / failure}
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY_GET + \
'&name=' + self.get_format_name(conn_id))
try:
for connection in self.vpnservice['ipsec_site_connections']:
if connection['id'] == conn_id:
if connection['status'] == constants.PENDING_DELETE:
return 'pending_delete'
LOG.debug(_("get_status response is :%s "),response)
if response['status'] == SUCCESS_CODE and response['body']:
ipsec_policy = self.parse(self.filter_xml_summary(response['body'],'<ipsec-policy>'))
index = self._get_connection_index_in_ngfw(response, conn_id)
if not isinstance(ipsec_policy['ipsec-policy']
['ipsec-policy']['status'], list):
ipsec_policy['ipsec-policy']['ipsec-policy']['status'] =\
[ipsec_policy['ipsec-policy']['ipsec-policy']['status']]
status = ipsec_policy['ipsec-policy']['ipsec-policy']['status'][index]
if status:
return status
else:
return 'failure'
elif response['status'] == NOCONTENT and self.id == conn_id:
self.start()
return 'failure'
else:
return 'error'
except Exception as e:
LOG.debug(_("get_status exception is :%s "),e)
return 'failure'
def restart(self):
"""Restart the process."""
self.stop()
self.start()
def _virtual_privates(self):
"""Returns line of virtual_privates.
virtual_private contains the networks
that are allowed as subnet for the remote client.
"""
virtual_privates = []
nets = [self.vpnservice['subnet']['cidr']]
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
nets += ipsec_site_conn['peer_cidrs']
for net in nets:
version = netaddr.IPNetwork(net).version
virtual_privates.append('%%v%s:%s' % (version, net))
return ','.join(virtual_privates)
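    # For illustration, assuming a subnet cidr of 10.0.0.0/24 and a single
    # connection whose peer_cidrs is ['192.168.1.0/24'], this would return:
    #
    #   '%v4:10.0.0.0/24,%v4:192.168.1.0/24'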
    def is_exist(self, body, key):
        try:
            return bool(self.filter_xml_summary(body, key))
        except Exception:
            return False
    def _is_connection_exist(self, ngfw_ipsec_response, conn_id):
        conn_tag = "<alias>" + conn_id + "</alias>"
        return conn_tag in ngfw_ipsec_response
def _get_connection_index_in_ngfw(self, ngfw_ipsec_response, conn_id):
conn_id = self.get_format_name(conn_id)
xml_body = self.filter_xml_summary(ngfw_ipsec_response['body'], '<ipsec-policy>')
if not xml_body:
return None
ngfw_ipsec_policy = self.parse(xml_body)
connection_ids = ngfw_ipsec_policy['ipsec-policy']['ipsec-policy']['alias']
if not isinstance(connection_ids, list):
connection_ids = [connection_ids]
for index, connection_id in enumerate(connection_ids):
if conn_id == connection_id:
return index
else:
return None
def start(self):
"""Start the process.
Note: if there is not namespace yet,
just do nothing, and wait next event.
"""
try:
self.update_vpnservice_cache(self.vpnservice)
            for conn_index, connection in enumerate(
                    self.vpnservice['ipsec_site_connections']):
conn_id = self.get_format_name(connection['id'])
if conn_id != self.get_format_name(self.id):
continue
response = self.vpnservice["ngfw_infos"][self.id]
if response:
ngfw_ipsec_policy = self.parse(self.filter_xml_summary(response['body'],'<ipsec-policy>'))
acl_id = ngfw_ipsec_policy['ipsec-policy']['ipsec-policy']['acl']
if not self.ensure_configs(conn_index,acl_id):
continue
else:
if not self.ensure_configs(conn_index):
continue
# ipsec acl
body = ngfw_utils.get_vpn_ipsec_acl_list(self.ipsec_acl)
LOG.debug(_('_config_vpn_ipsec_acl body:(%s)'), body)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_ACL + "?acl=" + str(self.ipsec_acl['access_control_list_name']))
if self.is_exist(response['body'],'<access-lists>'):
response = self.rest.rest_api('PUT', ngfw_utils.NGFW_URL_VPN_IPSEC_ACL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('update ipsec acl success!'))
                    else:
LOG.debug(_('update ipsec acl fail!'))
continue
else:
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_VPN_IPSEC_ACL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('create ipsec acl success!'))
else:
LOG.debug(_('create ipsec acl fail!'))
continue
# ike proposal
body = ngfw_utils.get_vpn_ike_proposal(self.ike_proposal)
LOG.debug(_('_config_vpn_ike_proposal body:(%s)'), body)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IKE_PROPOSAL_GET + \
'&name=' + str(self.get_ike_proposal_id()))
if self.is_exist(response['body'],'<ike-proposal>'):
response = self.rest.rest_api('PUT', ngfw_utils.NGFW_URL_VPN_IKE_PROPOSAL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('update ike proposal success!'))
else:
LOG.debug(_('update ike proposal fail!'))
continue
else:
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_VPN_IKE_PROPOSAL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('create ike proposal success!'))
else:
LOG.debug(_('create ike proposal fail!'))
continue
# ike peer
body = ngfw_utils.get_vpn_ike_peer(self.ike_peer)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IKE_PEER_GET + \
'&name=' + self.get_ike_peer_name())
if self.is_exist(response['body'],'<ike-peer>'):
response = self.rest.rest_api('PUT', ngfw_utils.NGFW_URL_VPN_IKE_PEER, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('update ike peer success!'))
else:
LOG.debug(_('update ike peer fail!'))
continue
else:
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_VPN_IKE_PEER, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('create ike peer success!'))
else:
LOG.debug(_('create ike peer fail!'))
continue
# ipsec proposal
body = ngfw_utils.get_vpn_ipsec_proposal(self.ipsec_proposal)
LOG.debug(_('_config_vpn_ipsec_proposal body:(%s)'), body)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_PROPOSAL_GET + \
'&name=' + self.get_ngfw_ipsec_proposal_name())
if self.is_exist(response['body'],'<ipsec-proposal>'):
response = self.rest.rest_api('PUT', ngfw_utils.NGFW_URL_VPN_IPSEC_PROPOSAL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('update ipsec proposal success!'))
else:
LOG.debug(_('update ipsec proposal fail!'))
continue
else:
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_VPN_IPSEC_PROPOSAL, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('create ipsec proposal success!'))
else:
LOG.debug(_('create ipsec proposal fail!'))
continue
# ipsec policy
body = ngfw_utils.get_vpn_ipsec_policy(self.ipsec_policy)
LOG.debug(_('_config_vpn_ipsec_policy body:(%s)'), body)
response = self.rest.rest_api('GET', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY_GET + \
'&name=' + conn_id)
if self._is_connection_exist(response['body'], conn_id):
response = self.rest.rest_api('PUT', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('update ipsec policy success!'))
else:
LOG.debug(_('update ipsec policy fail!'))
continue
else:
response = self.rest.rest_api('POST', ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY, body)
if response['status'] == SUCCESS_CODE:
LOG.debug(_('create ipsec policy success!'))
else:
LOG.debug(_('create ipsec policy fail!'))
continue
# static route
                for static_route in self.ipsec_static_routes:
                    self._make_static_route(static_route)
                for static_route in self.ipsec_static_routes_with_vpn:
                    self._make_static_route(static_route)
except Exception as e:
            LOG.error(_('start raised exception %s'), e)
def disconnect(self):
if not self.vpnservice:
return
def stop(self):
#Stop process using whack
#Note this will also stop pluto
self.disconnect()
#clean connection_status info
self.connection_status = {}
class IPsecVpnDriverApi(n_rpc.RpcProxy):
"""IPSecVpnDriver RPC api."""
IPSEC_PLUGIN_VERSION = '1.0'
def get_vpn_services_on_host(self, context, host):
"""Get list of vpnservices.
The vpnservices including related ipsec_site_connection,
ikepolicy and ipsecpolicy on this host
"""
return self.call(context,
self.make_msg('get_vpn_services_on_host',
host=host),
version=self.IPSEC_PLUGIN_VERSION)
def update_status(self, context, status):
"""Update local status.
This method call updates status attribute of
VPNServices.
"""
return self.cast(context,
self.make_msg('update_status',
status=status),
version=self.IPSEC_PLUGIN_VERSION)
def delete_connections(self, context, conn_ids):
return self.call(context,
self.make_msg('delete_connections',
conn_ids=conn_ids),
version=self.IPSEC_PLUGIN_VERSION)
def update_router(self, context, id, router):
"""update router routes
This method call updates status attribute of
VPNServices
"""
return self.call(context,
self.make_msg('update_router',
id=id, router=router),
version=self.IPSEC_PLUGIN_VERSION)
def get_networks(self, context, filters=None, fields=None):
return self.call(context, self.make_msg("get_networks",
filters=filters, fields=fields),
version=self.IPSEC_PLUGIN_VERSION)
def get_router(self, context, id):
"""
get router information by router id
:param context:
:param id:
:return:
"""
return self.call(context, self.make_msg("get_router",
id=id),
version=self.IPSEC_PLUGIN_VERSION)
def get_agent_by_router_id(self, context, id):
"""
get router information by router id
:param context:
:param id:
:return:
"""
return self.call(context, self.make_msg("get_agent_by_router_id",
router_id=id),
version=self.IPSEC_PLUGIN_VERSION)
@six.add_metaclass(abc.ABCMeta)
class IPsecDriver(device_drivers.DeviceDriver):
"""VPN Device Driver for IPSec.
    This class is designed for use with L3-agent now.
    However this driver will be used with another agent in future,
    so the use of "Router" is kept minimal for now.
    Instead of router_id, we are using process_id in this code.
"""
# history
# 1.0 Initial version
RPC_API_VERSION = '1.0'
# TODO(ihrachys): we can't use RpcCallback here due to inheritance
# issues
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, agent, host):
self.agent = agent
self.conf = self.agent.conf
self.root_helper = self.agent.root_helper
self.host = host
self.conn = n_rpc.create_connection(new=True)
self.context = context.get_admin_context_without_session()
self.topic = topics.IPSEC_AGENT_TOPIC
node_topic = '%s.%s' % (self.topic, self.host)
self.processes = {}
self.process_status_cache = {}
self.rest = ngfw_api.ngfwRestAPI()
self.ngfw_agent_utils = ngfw_plugin.NGFWAgentUtils()
self.plugutil = ngfw_plugin.NGFWPluginUtils()
self.endpoints = [self]
self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC, '1.0')
self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
self.report_status, self.context)
self.process_status_cache_check.start(
interval=self.conf.ipsec.ipsec_status_check_interval)
def vpnservice_updated(self, context, **kwargs):
"""Vpnservice updated rpc handler
VPN Service Driver will call this method
when vpnservices updated.
Then this method start sync with server.
"""
self.sync(context, [])
@abc.abstractmethod
def create_process(self, process_id, vpnservice, namespace, kwargs):
pass
def ensure_process(self, process_id, vpnservice=None):
"""Ensuring process.
If the process doesn't exist, it will create process
and store it in self.processs
"""
kwargs = {
'vpn_ip_pool':self.conf.ngfw.vpn_ip_pool,
'vpn_ngfw_private_interface':self.conf.ngfw.vpn_ngfw_private_interface,
'vpn_ngfw_public_interface':self.conf.ngfw.vpn_ngfw_public_interface,
'vsys_ranges':self.conf.ngfw.vsys_ranges,
'static_route_priority':self.conf.ngfw.static_route_priority,
'vpn_nexthop':self.conf.ngfw.vpn_nexthop
}
process = self.processes.get(process_id)
if not process:
process = self.create_process(
process_id,
vpnservice,
"",
kwargs)
self.processes[process_id] = process
elif vpnservice:
process.update_vpnservice(vpnservice)
return process
def create_router(self, process_id):
"""Handling create router event.
Agent calls this method, when the process namespace
is ready.
"""
if process_id in self.processes:
            # In case the vpnservice is created
            # before the router's namespace
process = self.processes[process_id]
self._update_router(self.context, process.vpnservice, 'add')
process.enable()
def destroy_router(self, process_id):
"""Handling destroy_router event.
Agent calls this method, when the process namespace
is deleted.
"""
if process_id in self.processes:
process = self.processes[process_id]
process.disable()
if process:
self._update_router(self.context, process, "remove")
del self.processes[process_id]
def get_process_status_cache(self, process):
if not self.process_status_cache.get(process.id):
self.process_status_cache[process.id] = {
'status': None,
'id': process.vpnservice['id'],
'updated_pending_status': False,
'ipsec_site_connections': {}}
return self.process_status_cache[process.id]
def is_status_updated(self, process, previous_status):
if process.updated_pending_status:
return True
if process.status != previous_status['status']:
return True
if (process.connection_status !=
previous_status['ipsec_site_connections']):
return True
def unset_updated_pending_status(self, process):
process.updated_pending_status = False
for connection_status in process.connection_status.values():
connection_status['updated_pending_status'] = False
def copy_process_status(self, process):
return {
'id': process.vpnservice['id'],
'status': process.status,
'updated_pending_status': copy.deepcopy(process.updated_pending_status),
'ipsec_site_connections': copy.deepcopy(process.connection_status)
}
def update_downed_connections(self, process_id, new_status):
"""Update info to be reported, if connections just went down.
If there is no longer any information for a connection, because it
has been removed (e.g. due to an admin down of VPN service or IPSec
connection), but there was previous status information for the
connection, mark the connection as down for reporting purposes.
"""
if process_id in self.process_status_cache:
for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
if conn not in new_status[IPSEC_CONNS]:
new_status[IPSEC_CONNS][conn] = {
'status': constants.DOWN,
'updated_pending_status': True
}
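    # For illustration (hypothetical connection id): if the cache still holds
    # an entry for 'conn-1' but 'conn-1' is absent from new_status, the method
    # adds
    #   new_status[IPSEC_CONNS]['conn-1'] = {
    #       'status': constants.DOWN, 'updated_pending_status': True}
    # so the server still learns that the connection went down.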
def report_status(self, context):
status_changed_vpn_services = []
for process in self.processes.values():
previous_status = self.get_process_status_cache(process)
if self.is_status_updated(process, previous_status):
new_status = self.copy_process_status(process)
self.update_downed_connections(process.id, new_status)
status_changed_vpn_services.append(new_status)
self.process_status_cache[process.id] = (
self.copy_process_status(process))
                # We need to unset updated_pending_status after it
                # is reported to the server side
self.unset_updated_pending_status(process)
LOG.debug(_("report status %s"),status_changed_vpn_services)
if status_changed_vpn_services:
self.agent_rpc.update_status(
context,
status_changed_vpn_services)
def delete_connection_in_db(self, context, conn_ids):
self.agent_rpc.delete_connections(
context,
conn_ids)
    def _whether_my_vpnservice(self, vpnservice):
        if 'ipsec_site_connections' in vpnservice:
            result = self.plugutil._check_ip_in_ip_pool(vpnservice['ipsec_site_connections'][0]['description'],
                                                        self.conf.ngfw.vpn_ip_pool)
            if result:
                LOG.info(_("the vpnservice belongs to this agent."))
                return True
        LOG.info(_("the vpnservice does not belong to this agent."))
        return False
def get_index_of_public_ip(self, vpn_ip_pool, public_ip):
try:
p_ip = IPNetwork(public_ip)
for index in range(len(vpn_ip_pool)):
cidr = IPNetwork(vpn_ip_pool[index])
if p_ip in cidr:
return index
except:
LOG.error("Public ip is invalid or not in the range of vpn_ip_pool")
return None
return None
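    # A quick sketch with made-up values: given
    #   vpn_ip_pool = ['10.1.0.0/24', '10.2.0.0/24']
    # get_index_of_public_ip(vpn_ip_pool, '10.2.0.5') returns 1 (the pool that
    # contains the address), while an address outside both pools returns None.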
def get_tenant_router_info(self, context, vpnservice):
try:
description = json.loads(vpnservice['description'])
tenant_router_id = description['tenant_router_id']
tenant_router_info = self.agent_rpc.get_router(context, tenant_router_id)
return tenant_router_info
        except Exception as e:
            LOG.error("get tenant_router_info error: %s", e)
return None
def _update_router(self, context, process, action=None):
"""
Update the tennat router
update the routes for destination is the peer cidrs, and the next hop is the vrrp of ngfw
set the gateway of tenant router
:param vpnservice:
:return:
"""
try:
vpnservice = process.vpnservice
process_id = process.id
if not vpnservice:
return
nexthop = self.conf.ngfw.ngfw_vrrp_ip
if not nexthop:
LOG.error("ngfw vrrp do not set!")
return False
tenant_router_info = self.get_tenant_router_info(context, vpnservice)
if not tenant_router_info:
return False
tenant_ext_network_prefix = self.conf.ngfw.tenant_ext_net_prefix
if not tenant_ext_network_prefix:
LOG.error("the ext network prefix do not set!")
return False
vpn_ip_pool = self.conf.ngfw.vpn_ip_pool
if not vpn_ip_pool:
LOG.error("the vlan ip pool do no set")
return False
vlan_ranges = self.conf.ngfw.vlan_ranges
if not vlan_ranges:
LOG.error("The VLAN range do not set!")
return False
old_routes = tenant_router_info.get("routes", [])
old_external_gateway = tenant_router_info.get("external_gateway_info", {})
old_external_network_id = ""
if old_external_gateway:
old_external_network_id = old_external_gateway.get("network_id", None)
tenant_router_id = tenant_router_info.get('id', None)
if not tenant_router_id:
LOG.error("get tenant router id failed")
return False
new_routes = []
routes = old_routes
for ipsec_site_conn in vpnservice['ipsec_site_connections']:
if process_id == ipsec_site_conn['id']:
for peer_cidr in ipsec_site_conn['peer_cidrs']:
new_routes.append({
"nexthop": nexthop,
"destination": peer_cidr
})
local_interface_ip = ipsec_site_conn['description']
index = self.get_index_of_public_ip(vpn_ip_pool, local_interface_ip)
if index is None:
return False
vlan_split = str(vlan_ranges[index]).split(":")
vlans = range(int(vlan_split[0]), int(vlan_split[1]))
if local_interface_ip:
i = list(IPNetwork(vpn_ip_pool[index])).index(IPNetwork(local_interface_ip).ip)
tenant_ext_network_name = tenant_ext_network_prefix + str(vlans[i])
filters = {'name': [tenant_ext_network_name]}
fields = ['id']
network_ids = self.agent_rpc.get_networks(context, filters=filters, fields=fields)
if not network_ids:
LOG.error("can not filter the network name %s." % tenant_ext_network_name)
return False
external_network_id = network_ids[0].get('id', None)
if not external_network_id:
LOG.error("the external network did not create.")
return False
if not old_external_network_id or old_external_network_id != \
external_network_id:
router = {
"router": {
"external_gateway_info": {"network_id": external_network_id}
}
}
self.agent_rpc.update_router(context, tenant_router_id, router)
added, removed = common_utils.diff_list_of_dict(old_routes, new_routes)
if action == "add":
routes = old_routes + added
elif action == "remove":
routes = removed
router = {
"router": {
"routes": routes
}
}
self.agent_rpc.update_router(context, tenant_router_id, router)
        except Exception as e:
            LOG.debug("_update_router exception: %s", e)
return False
return True
def append_virtual_ip(self, context, vpnservice):
agents = []
try:
tenant_router_id = vpnservice['tenant_router_info']['id']
agents = self.agent_rpc.get_agent_by_router_id(context, tenant_router_id)
for agent in agents['agents']:
if agent['configurations']['agent_mode'] == 'dvr_snat':
virtual_ip = agent['configurations']['virtual_ip']
if virtual_ip:
vpnservice['virtual_ip'] = netaddr.IPNetwork(virtual_ip).ip
        except Exception:
LOG.error(_("Get virtual ip from agents error %s"), agents)
return False
LOG.debug(_("Get virtual ip success, the ip addr is %s"), vpnservice['virtual_ip'])
return True
@lockutils.synchronized('vpn-agent', 'neutron-')
def sync(self, context, routers):
"""Sync status with server side.
:param context: context object for RPC call
:param routers: Router objects which is created in this sync event
        There are many failure cases that should be
        considered, including the following:
1) Agent class restarted
2) Failure on process creation
3) VpnService is deleted during agent down
4) RPC failure
        In order to handle these failure cases,
        this driver takes simple sync strategies.
"""
vpnservices = self.agent_rpc.get_vpn_services_on_host(
context, self.host)
pending_delete_connections = []
# Ensure the ipsec process is enabled
for vpnservice in vpnservices:
            connections = list(vpnservice.get("ipsec_site_connections"))
vpnservice["ngfw_infos"] = {}
vpnservice["sequences"] = {}
sequences = []
for connection in connections:
conn_id = connection.get("id")
if connection.get("status") == constants.PENDING_DELETE:
pending_delete_connections.append(conn_id)
                response = self.rest.rest_api('GET',
                    ngfw_utils.NGFW_URL_VPN_IPSEC_POLICY_GET + '&name=' + conn_id.replace('-', '_'))
vpnservice["ngfw_infos"][conn_id] = response
sequence = self._get_sequence_index(response)
if sequence:
sequences.append(int(sequence))
vpnservice["sequences"][conn_id] = int(sequence)
else:
vpnservice["sequences"][conn_id] = -1
vpnservice["ngfw_infos"][conn_id] = None
for connection in connections:
conn_id = connection.get("id")
if -1 == vpnservice["sequences"][conn_id]:
se = self._get_available_sequence(sequences)
vpnservice["sequences"][conn_id] = se
sequences.append(se)
process = self.ensure_process(conn_id,
vpnservice=vpnservice)
ret = self._update_router(context, process, action='add')
if not ret:
LOG.debug("update router for connection: %s failed!" %
connection.get("id"))
continue
ret = self.append_virtual_ip(context, process.vpnservice)
if not ret:
continue
if connection.get("status") == constants.PENDING_DELETE:
continue
process.update()
        # Clean up the IPSec processes for connections
        # that are pending delete.
deleted_connections = []
for process_id in pending_delete_connections:
process = self.processes.get(process_id)
if not process:
deleted_connections.append(process_id)
continue
latest_vpnservice = process.vpnservice
self.ensure_process(process_id, vpnservice=latest_vpnservice)
self.destroy_router(process_id)
if process_id not in self.processes:
# already delete connection successfully
deleted_connections.append(process_id)
# notify neutron server to delete connection in db
self.delete_connection_in_db(context, deleted_connections)
self.report_status(context)
    def filter_xml_summary(self, xml, tag):
        if xml:
            index = xml.find(tag)
            if index == -1:
                return None
            return xml[index:]
return None
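    # For example (hypothetical payload), filtering on '<ipsec-policy>' drops
    # any response preamble before that tag:
    #
    #   filter_xml_summary('<rsp><ipsec-policy>...</ipsec-policy>', '<ipsec-policy>')
    #   -> '<ipsec-policy>...</ipsec-policy>'
    #
    # and returns None when the tag is absent.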
def _get_sequence_index(self, response):
if not response:
return None
        # Only one ipsec-policy precisely matches conn_id in the response
        # body, so we don't need to treat sequence as a list any more
filter_xml = self.filter_xml_summary(response['body'],
'<ipsec-policy>')
if not filter_xml:
return None
ngfw_ipsec_policy = xmltodict.parse(filter_xml)
try:
sequence = ngfw_ipsec_policy['ipsec-policy']['ipsec-policy'][
'sequence']
        except Exception:
LOG.error(_("Ipsec policy response from ngfw doesn't have "
"sequence info"))
return None
return sequence
def _get_available_sequence(self, sequences):
if not sequences:
return 1
for i in range(1, max(sequences)):
if i not in sequences:
return i
else:
return max(sequences) + 1
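    # Behavior sketch: the smallest positive integer not yet in use is
    # returned, e.g. [] -> 1, [1, 2, 4] -> 3, and [1, 2, 3] -> 4.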
class NGFWDriver(IPsecDriver):
    def create_process(self, process_id, vpnservice, namespace, kwargs):
return NGFWProcess(
self.conf,
self.root_helper,
process_id,
vpnservice,
namespace,
kwargs)
|
|
import kivy
from clueless.help import help
kivy.require('1.1.3')
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen, WipeTransition
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.graphics import Ellipse, Color
from clueless.client import errors
from clueless.client import game_play
from clueless.model import game_state
from clueless import log
_LOG = log.get_logger(__name__)
class DisableButton(Button):
def __init__(self, **kwargs):
self.disabled = False
super(DisableButton, self).__init__(**kwargs)
    def on_touch_down(self, touch):
        if not self.disabled:
            super(DisableButton, self).on_touch_down(touch)
    def on_touch_move(self, touch):
        if not self.disabled:
            super(DisableButton, self).on_touch_move(touch)
    def on_touch_up(self, touch):
        if not self.disabled:
            super(DisableButton, self).on_touch_up(touch)
    def on_press(self):
        if not self.disabled:
            super(DisableButton, self).on_press()
    def on_release(self):
        if not self.disabled:
            super(DisableButton, self).on_release()
    def trigger_action(self, duration=0.1):
        if not self.disabled:
            super(DisableButton, self).trigger_action(duration)
class GameTile(DisableButton, Widget):
pass
class StartScreen(Screen):
username = ObjectProperty(None)
suspect = ObjectProperty(None)
def __init__(self, client, **kwargs):
super(StartScreen, self).__init__(**kwargs)
self.client = client
def register_player(self):
try:
self.client.register_player(self.username.text)
try:
self.client.choose_suspect(self.username.text, self.suspect.text)
self.manager.get_screen('game').start_game(self.username.text)
self.manager.current = self.manager.next()
except errors.GameClientException:
p = ErrorPopup(message="Suspect unavailable. Please select a different Suspect.")
p.open()
except errors.GameClientException:
p = ErrorPopup(message="Invalid Username. Please provide a valid Username.")
p.open()
class GameScreen(Screen):
gameboard = ObjectProperty(0)
controls = ObjectProperty(0)
user = ObjectProperty(0)
def __init__(self, client, **kwargs):
super(GameScreen, self).__init__(**kwargs)
self.client = client
self.state = None
Clock.schedule_interval(self.update, 1 / 30.)
def start_game(self, username):
self.username = username
try:
self.state = self.client.start_new_game()
for card in self.state.case_file:
print card.item
self.game_id = self.state.game_id
self.suspect = self.client.get_player(self.username).suspect
self.user.text = "You are " + self.username + ": " + self.suspect
except errors.GameClientException:
p = ErrorPopup(message="Unable to start a new game. Please try again.")
p.open()
def update(self, dt):
        if self.state is not None:
            try:
                self.state = self.client.get_game_state(self.game_id)
            except errors.GameClientException:
                print "ERROR: Could not get the game state."
                self.state = None
        if self.state is not None:
            if self.state.game_winner is not None:
                if self.state.game_winner.username == self.username:
                    message = "Accusation correct. Congratulations, you won!"
                else:
                    message = "Player " + self.state.game_winner.username + " won the game!"
                self.state = None
                self.manager.current = self.manager.previous()
                p = ErrorPopup(message=message)
p.open()
else:
self.gameboard.update(self.client,
self.state,
self.username)
self.controls.update(self.client,
self.state,
self.username)
else:
self.manager.current = self.manager.previous()
def display_help(self):
try:
help.open()
except Exception as ex:
_LOG.exception(ex.message)
def quit_game(self):
try:
self.client.destroy_game(self.game_id)
            self.state = None
self.manager.current = self.manager.previous()
except errors.GameClientException:
p = ErrorPopup(message="Unable to end game. Please try again.")
p.open()
class Gameboard(FloatLayout):
study = ObjectProperty(None)
study_hall = ObjectProperty(None)
hall = ObjectProperty(None)
hall_lounge = ObjectProperty(None)
lounge = ObjectProperty(None)
study_library = ObjectProperty(None)
study_billiard = ObjectProperty(None)
hall_billiard = ObjectProperty(None)
lounge_billiard = ObjectProperty(None)
lounge_dining = ObjectProperty(None)
library = ObjectProperty(None)
library_billiard = ObjectProperty(None)
billiard_room = ObjectProperty(None)
billiard_dining = ObjectProperty(None)
dining_room = ObjectProperty(None)
library_conservatory = ObjectProperty(None)
conservatory_billiard = ObjectProperty(None)
billiard_ballroom = ObjectProperty(None)
kitchen_billiard = ObjectProperty(None)
dining_kitchen = ObjectProperty(None)
conservatory = ObjectProperty(None)
conservatory_ballroom = ObjectProperty(None)
ballroom = ObjectProperty(None)
ballroom_kitchen = ObjectProperty(None)
kitchen = ObjectProperty(None)
COLORS = {game_state.SCARLET: (1.,0,0),
game_state.PEACOCK: (0,0,1.),
game_state.PLUM: (1.,0,1.),
game_state.GREEN: (0,1.,0),
game_state.WHITE: (1.,1.,1.),
game_state.MUSTARD: (1.,1.,0)}
def __init__(self, **kwargs):
super(Gameboard, self).__init__(**kwargs)
self.state = None
    def _set_tiles(self, disabled, opacity):
        tiles = (self.study, self.study_hall, self.hall, self.hall_lounge,
                 self.lounge, self.study_library, self.hall_billiard,
                 self.lounge_dining, self.library, self.library_billiard,
                 self.billiard_room, self.billiard_dining, self.dining_room,
                 self.library_conservatory, self.billiard_ballroom,
                 self.dining_kitchen, self.conservatory,
                 self.conservatory_ballroom, self.ballroom,
                 self.ballroom_kitchen, self.kitchen)
        for tile in tiles:
            tile.disabled = disabled
            tile.canvas.opacity = opacity
    def disable_tiles(self):
        self._set_tiles(disabled=True, opacity=.5)
    def enable_tiles(self):
        self._set_tiles(disabled=False, opacity=1)
def update(self, client, state, username):
self.client = client
self.state = state
self.username = username
TILES = {game_state.STUDY: self.study,
game_state.STUDY_HALL: self.study_hall,
game_state.HALL: self.hall,
game_state.HALL_LOUNGE: self.hall_lounge,
game_state.LOUNGE: self.lounge,
game_state.STUDY_LIBRARY: self.study_library,
game_state.HALL_BILLIARD: self.hall_billiard,
game_state.LOUNGE_DINING: self.lounge_dining,
game_state.LIBRARY: self.library,
game_state.LIBRARY_BILLIARD: self.library_billiard,
game_state.BILLIARD_ROOM: self.billiard_room,
game_state.BILLIARD_DINING: self.billiard_dining,
game_state.DINING_ROOM: self.dining_room,
game_state.LIBRARY_CONSERVATORY: self.library_conservatory,
game_state.BILLIARD_BALLROOM: self.billiard_ballroom,
game_state.DINING_KITCHEN: self.dining_kitchen,
game_state.CONSERVATORY: self.conservatory,
game_state.CONSERVATORY_BALLROOM: self.conservatory_ballroom,
game_state.BALLROOM: self.ballroom,
game_state.BALLROOM_KITCHEN: self.ballroom_kitchen,
game_state.KITCHEN: self.kitchen,
game_state.SCARLET: self.hall_lounge,
game_state.PEACOCK: self.library_conservatory,
game_state.PLUM: self.study_library,
game_state.GREEN: self.conservatory_ballroom,
game_state.WHITE: self.ballroom_kitchen,
game_state.MUSTARD: self.lounge_dining}
self.hall_lounge.canvas.after.clear()
self.library_conservatory.canvas.after.clear()
self.study_library.canvas.after.clear()
self.conservatory_ballroom.canvas.after.clear()
self.ballroom_kitchen.canvas.after.clear()
self.lounge_dining.canvas.after.clear()
for name, room in self.state.game_board.iteritems():
tile = TILES[room.name]
if tile != self.hall_lounge and \
tile != self.library_conservatory and \
tile != self.study_library and \
tile != self.conservatory_ballroom and \
tile != self.ballroom_kitchen and \
tile != self.lounge_dining:
tile.canvas.after.clear()
if name in game_state.SUSPECTS:
if game_state.SCARLET in room.suspects:
y = self.hall_lounge.top-self.hall_lounge.height/4
x = self.hall_lounge.right-self.hall_lounge.width/2
with self.hall_lounge.canvas.after:
Color(*self.COLORS[game_state.SCARLET])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
if game_state.PEACOCK in room.suspects:
y = self.library_conservatory.top-self.library_conservatory.height/2
x = self.library_conservatory.right-3*(self.library_conservatory.width/4)
with self.library_conservatory.canvas.after:
Color(*self.COLORS[game_state.PEACOCK])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
if game_state.PLUM in room.suspects:
y = self.study_library.top-self.study_library.height/2
x = self.study_library.right-3*(self.study_library.width/4)
with self.study_library.canvas.after:
Color(*self.COLORS[game_state.PLUM])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
if game_state.GREEN in room.suspects:
y = self.conservatory_ballroom.top-3*(self.conservatory_ballroom.height/4)
x = self.conservatory_ballroom.right-self.conservatory_ballroom.width/2
with self.conservatory_ballroom.canvas.after:
Color(*self.COLORS[game_state.GREEN])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
if game_state.WHITE in room.suspects:
y = self.ballroom_kitchen.top-3*(self.ballroom_kitchen.height/4)
x = self.ballroom_kitchen.right-self.ballroom_kitchen.width/2
with self.ballroom_kitchen.canvas.after:
Color(*self.COLORS[game_state.WHITE])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
if game_state.MUSTARD in room.suspects:
y = self.lounge_dining.top-self.lounge_dining.height/2
x = self.lounge_dining.right-self.lounge_dining.width/4
with self.lounge_dining.canvas.after:
Color(*self.COLORS[game_state.MUSTARD])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
else:
num_suspects = 0
for suspect in room.suspects:
if ('hallway' in name):
y = tile.top-(tile.height/2)
x = tile.right-(tile.width/2)
else:
y = tile.top-(num_suspects/2+1)*(tile.height/4)
if num_suspects%2 == 0:
x = tile.right-2*(tile.width/3)
else:
x = tile.right-tile.width/3
with tile.canvas.after:
Color(*self.COLORS[suspect])
Ellipse(pos=(x-7.5, y-7.5), size=(15,15))
num_suspects += 1
# enable the game tiles if it's the user's turn, otherwise disable them
if self.username == self.state.current_player.username:
self.enable_tiles()
else:
self.disable_tiles()
def make_move(self, room):
try:
self.suspect = self.client.get_player(self.username).suspect
try:
self.client.move_player(self.username, self.suspect, room)
self.disable_tiles()
except errors.GameClientException:
p = ErrorPopup(message="Invalid move. Please select a valid Room or Hallway\nor end your turn.")
p.open()
except errors.GameClientException:
p = ErrorPopup(message="Unable to obtain Player information from the Game Server. Please try again.")
p.open()
class ControlPanel(FloatLayout):
notifications = ObjectProperty(None)
notepad = ObjectProperty(None)
suggest_button = ObjectProperty(None)
accuse_button = ObjectProperty(None)
end_turn_button = ObjectProperty(None)
def __init__(self, **kwargs):
super(ControlPanel, self).__init__(**kwargs)
self.state = None
self.player = None
self.disproving = False
def update(self, client, state, username):
self.client = client
self.state = state
self.username = username
try:
self.player = self.client.get_player(self.username)
except errors.GameClientException:
print "ERROR: Could not get Player."
# enable the game tiles if it's the user's turn, otherwise disable them
if username == self.state.current_player.username:
self.enable_buttons()
else:
self.disable_buttons()
        notifications = ''
        for message in reversed(self.state.player_messages):
            notifications = notifications + message + '\n'
        self.notifications.text = notifications
notes = ''
for card in self.player.game_cards:
notes += card['item'] + " : " + card['item_type'] + '\n'
for card in self.player.card_items_seen:
notes += card['item'] + " : " + card['item_type'] + '\n'
self.notepad.text = notes
        if self.state.suggestion_response_player is None or \
                self.state.suggestion_response_player.username != self.username:
            self.disproving = False
        if self.state.suggestion_response_player is not None and \
                self.state.suggestion_response_player.username == self.username and \
                not self.disproving:
self.disproving = True
self.disprove_suggestion_popup()
    def disable_buttons(self):
        for button in (self.suggest_button, self.accuse_button,
                       self.end_turn_button):
            button.disabled = True
            button.canvas.opacity = .5
    def enable_buttons(self):
        for button in (self.suggest_button, self.accuse_button,
                       self.end_turn_button):
            button.disabled = False
            button.canvas.opacity = 1
def disprove_suggestion_popup(self):
cards = []
try:
self.player = self.client.get_player(self.username)
except errors.GameClientException:
print "ERROR: Could not get Player."
for card in self.player.game_cards:
if card['item'] == self.state.current_suggestion.suspect or \
card['item'] == self.state.current_suggestion.weapon or \
card['item'] == self.state.current_suggestion.room:
cards += [card['item']]
p = SuggestionResponsePopup(client=self.client, state=self.state,
username=self.username, cards=cards)
p.open()
def suggest_popup(self):
p = SuggestionPopup(client=self.client, state=self.state)
p.open()
def accuse_popup(self):
p = AccusationPopup(client=self.client, state=self.state)
p.open()
def end_turn(self):
try:
self.client.end_turn(self.username)
except errors.GameClientException:
p = ErrorPopup(message="Unable to end your turn. Please move your Suspect, \nmake a Suggestion, or try to end your turn again.")
p.open()
class SuggestionResponsePopup(Popup):
card = ObjectProperty(None)
def __init__(self, client, state, username, cards, **kwargs):
super(SuggestionResponsePopup, self).__init__(**kwargs)
self.username = username
self.client = client
self.state = state
self.card.values = cards
def disprove_suggestion(self):
try:
self.client.make_suggestion_response(self.username, self.card.text)
self.dismiss()
except errors.GameClientException:
p = ErrorPopup(message="Suggestion Response invalid. \nPlease select a valid Card and try again.")
p.open()
class AccusationPopup(Popup):
suspect = ObjectProperty(None)
weapon = ObjectProperty(None)
room = ObjectProperty(None)
def __init__(self, client, state, **kwargs):
super(AccusationPopup, self).__init__(**kwargs)
self.client = client
self.state = state
def confirm_popup(self):
p = AccusationConfirmPopup(client=self.client, state=self.state,
suspect=self.suspect, weapon=self.weapon,
room=self.room)
p.open()
self.dismiss()
class AccusationConfirmPopup(Popup):
def __init__(self, client, state, suspect, weapon, room, **kwargs):
super(AccusationConfirmPopup, self).__init__(**kwargs)
self.client = client
self.state = state
self.suspect = suspect
self.weapon = weapon
self.room = room
def make_accusation(self):
try:
self.client.make_accusation(self.state.current_player.username,
self.suspect.text,
self.weapon.text,
self.room.text)
except errors.GameClientException:
p = ErrorPopup(message="Accusation invalid. Please select a valid Suspect, \nRoom, and Weapon, and try again.")
p.open()
self.dismiss()
class SuggestionPopup(Popup):
suspect = ObjectProperty(None)
weapon = ObjectProperty(None)
def __init__(self, client, state, **kwargs):
super(SuggestionPopup, self).__init__(**kwargs)
self.client = client
self.state = state
def make_suggestion(self):
username = self.state.current_player.username
for room in self.state.game_board.values():
if self.state.current_player.suspect in room.suspects:
break
try:
self.client.make_suggestion(username,
self.suspect.text,
self.weapon.text,
room.name)
except errors.GameClientException:
p = ErrorPopup(message="You cannot make a Suggestion right now, or the Suspect \nor Weapon you chose for the Suggestion is invalid.\nPlease try again.")
p.open()
self.dismiss()
class ErrorPopup(Popup):
message = ObjectProperty(None)
def __init__(self, message, **kwargs):
super(ErrorPopup, self).__init__(**kwargs)
self.message.text = message
class CluelessApp(App):
def build(self):
client = game_play.GameClient(host="127.0.0.1", port="5000")
root = ScreenManager()
root.transition = WipeTransition()
root.add_widget(StartScreen(client, name="start"))
root.add_widget(GameScreen(client, name="game"))
return root
if __name__ == '__main__':
CluelessApp().run()
|
|
# Copyright 2021 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Legacy ImageNet preprocessing for EfficientNetV1."""
from absl import logging
import tensorflow.compat.v1 as tf
import autoaugment
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
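# A minimal usage sketch (assumes `image_bytes` holds an encoded JPEG): crop
# anywhere in the whole image with the default constraints; after
# `max_attempts` failed samples the full frame is returned instead.
#
#   bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
#   image = distorted_bounding_box_crop(image_bytes, bbox)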
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _resize_image(image, image_size, method=None):
  if method is not None:
    logging.info('Use customized resize method %s', method)
    return tf.image.resize([image], [image_size, image_size], method)[0]
  logging.info('Use default resize_bicubic.')
  return tf.image.resize_bicubic([image], [image_size, image_size])[0]
def _decode_and_random_crop(image_bytes, image_size, resize_method=None):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: _resize_image(image, image_size, resize_method))
return image
def _decode_and_center_crop(image_bytes, image_size, resize_method=None):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = _resize_image(image, image_size, resize_method)
return image
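# Worked example of the crop-size formula above: for image_size=224 the
# fraction is 224 / (224 + 32) = 0.875, so an image whose shorter side is
# 512 px gets a 448 x 448 center crop before being resized to 224 x 224.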
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes,
image_size,
augment_name=None,
randaug_num_layers=None,
randaug_magnitude=None,
resize_method=None):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image size.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaug` if AutoAugment is to be used or
      `randaug` if RandAugment is to be used. If the value is `None`, no
      augmentation method will be applied. See autoaugment.py for more
      details.
randaug_num_layers: 'int', if RandAug is used, what should the number of
layers be. See autoaugment.py for detailed description.
randaug_magnitude: 'int', if RandAug is used, what should the magnitude
be. See autoaugment.py for detailed description.
resize_method: resize method. If none, use bicubic.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size, resize_method)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
  if augment_name:
    logging.info('Apply augmentation policy %s', augment_name)
    input_image_type = image.dtype
    image = tf.clip_by_value(image, 0.0, 255.0)
    image = tf.cast(image, dtype=tf.uint8)
    if augment_name == 'autoaug':
      image = autoaugment.distort_image_with_autoaugment(image, 'v0')
    elif augment_name == 'randaug':
      image = autoaugment.distort_image_with_randaugment(
          image, randaug_num_layers, randaug_magnitude)
    else:
      raise ValueError('Invalid value for augment_name: %s' % augment_name)
    image = tf.cast(image, dtype=input_image_type)
return image
def preprocess_for_eval(image_bytes,
image_size,
resize_method=None):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image size.
resize_method: if None, use bicubic.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size, resize_method)
image = tf.reshape(image, [image_size, image_size, 3])
return image
def preprocess_image(image_bytes,
image_size,
is_training=False,
image_dtype=None,
augment_name=None,
randaug_num_layers=None,
randaug_magnitude=None,
resize_method=None):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image size.
is_training: `bool` for whether the preprocessing is for training.
image_dtype: The dtype of image. If None, default to tf.float32.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaug` if AutoAugment is to be used or
      `randaug` if RandAugment is to be used. If the value is `None`, no
      augmentation method will be applied. See autoaugment.py for more
      details.
randaug_num_layers: 'int', if RandAug is used, what should the number of
layers be. See autoaugment.py for detailed description.
randaug_magnitude: 'int', if RandAug is used, what should the magnitude
be. See autoaugment.py for detailed description.
resize_method: 'string' or None. Use resize_bicubic in default.
Returns:
A preprocessed image `Tensor` with value range of [0, 255].
"""
if is_training:
image = preprocess_for_train(image_bytes, image_size, augment_name,
randaug_num_layers, randaug_magnitude,
resize_method)
else:
image = preprocess_for_eval(image_bytes, image_size, resize_method)
# Normalize images.
image = tf.image.convert_image_dtype(image, dtype=image_dtype or tf.float32)
mean_rgb = [0.485 * 255, 0.456 * 255, 0.406 * 255]
stddev_rgb = [0.229 * 255, 0.224 * 255, 0.225 * 255]
image -= tf.constant(mean_rgb, shape=(1, 1, 3), dtype=image.dtype)
image /= tf.constant(stddev_rgb, shape=(1, 1, 3), dtype=image.dtype)
return image
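# A minimal end-to-end sketch (hypothetical filename; tf.compat.v1 style):
#
#   image_bytes = tf.read_file('example.jpg')
#   train_image = preprocess_image(image_bytes, 224, is_training=True)
#   eval_image = preprocess_image(image_bytes, 224)  # center crop + normalize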
|
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from sphinx import addnodes, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    app.add_crossref_type(
        directivename="setting",
        rolename="setting",
        indextemplate="pair: %s; setting",
    )
    app.add_crossref_type(
        directivename="templatetag",
        rolename="ttag",
        indextemplate="pair: %s; template tag",
    )
    app.add_crossref_type(
        directivename="templatefilter",
        rolename="tfilter",
        indextemplate="pair: %s; template filter",
    )
    app.add_crossref_type(
        directivename="fieldlookup",
        rolename="lookup",
        indextemplate="pair: %s; field lookup type",
    )
    app.add_description_unit(
        directivename="django-admin",
        rolename="djadmin",
        indextemplate="pair: %s; django-admin command",
        parse_node=parse_django_admin_node,
    )
    app.add_description_unit(
        directivename="django-admin-option",
        rolename="djadminopt",
        indextemplate="pair: %s; django-admin command-line option",
        parse_node=parse_django_adminopt_node,
    )
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
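# For reference, a sketch of how the cross-reference types registered above
# are used in the docs' reST source (illustrative snippet, not from this file):
#
#   .. setting:: DEBUG
#
# and, elsewhere: see the :setting:`DEBUG` setting, the :ttag:`for` template
# tag, or the :tfilter:`date` filter.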
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
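# Usage sketch: with django_next_version set to, say, '1.6', the directive
#
#   .. versionadded:: 1.6
#
# is labeled "Development version", while any other argument keeps its literal
# version string; extra detail belongs in the directive content, not as a
# second argument.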
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
# <big>? Really?
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': 'Deprecated in Django %s',
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
len(node) and ":" or "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
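# Illustrative behavior (hypothetical signature): for sig = '--verbosity <level>'
# the option_desc_re pass emits desc_name('--verbosity') plus the argument text
# and returns '--verbosity'; signatures without a dash or slash prefix fall
# through to simple_option_desc_re above.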
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
|
|
import networkx
import logging
from ..errors import AngrCFGError
l = logging.getLogger(name="angr.cfg_base")
class CFGBase(object):
"""
The base class for control flow graphs.
"""
def __init__(self, project, context_sensitivity_level):
self._project = project
# Initialization
self._graph = None
self._nodes = None
self._edge_map = None
self._loop_back_edges = None
self._overlapped_loop_headers = None
self._thumb_addrs = set()
        if context_sensitivity_level < 0:
            raise ValueError("Unsupported context sensitivity level %d" % context_sensitivity_level)
        self._context_sensitivity_level = context_sensitivity_level
def __contains__(self, cfg_node):
return cfg_node in self._graph
@property
def context_sensitivity_level(self):
return self._context_sensitivity_level
def _initialize_cfg(self):
"""
Re-create the DiGraph
"""
self._graph = networkx.DiGraph()
# pylint: disable=no-self-use
    def copy(self):
        raise NotImplementedError()
    def _construct(self):
        raise NotImplementedError()
    def output(self):
        raise NotImplementedError()
# TODO: Mark as deprecated
def get_bbl_dict(self):
return self._nodes
def get_predecessors(self, cfgnode, excluding_fakeret=True):
"""
Get predecessors of a node on the control flow graph.
        :param CFGNode cfgnode: The node
        :param bool excluding_fakeret: True if you want to exclude all predecessors that are connected to the node
                                       with a fakeret edge.
:return: A list of predecessors
:rtype: list
"""
if not excluding_fakeret:
if cfgnode in self._graph:
return self._graph.predecessors(cfgnode)
else:
return []
else:
predecessors = []
for pred, _, data in self._graph.in_edges_iter([cfgnode], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
predecessors.append(pred)
return predecessors
def get_successors(self, basic_block, excluding_fakeret=True):
if not excluding_fakeret:
if basic_block in self._graph:
return self._graph.successors(basic_block)
else:
return []
else:
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
successors.append(suc)
return successors
def get_successors_and_jumpkind(self, basic_block, excluding_fakeret=True):
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
if not excluding_fakeret or data['jumpkind'] != 'Ijk_FakeRet':
successors.append((suc, data['jumpkind']))
return successors
def get_all_predecessors(self, cfgnode):
"""
Get all predecessors of a specific node on the control flow graph.
:param CFGNode cfgnode: The CFGNode object
        :return: A dict of predecessors, as returned by networkx's dfs_predecessors
        :rtype: dict
"""
return networkx.dfs_predecessors(self._graph, cfgnode)
def get_all_successors(self, basic_block):
return networkx.dfs_successors(self._graph, basic_block)
    def get_node(self, addr_tuple):
        """
        Get a single node from its node key.
        :param addr_tuple: The node key
        :return: The CFGNode if it exists, None otherwise
        """
        return self._nodes.get(addr_tuple, None)
def nodes(self):
return self._graph.nodes()
def get_any_node(self, addr, is_syscall=None, anyaddr=False):
"""
        Get an arbitrary CFGNode (without considering its context) from our graph.
        :param addr: Address of the beginning of the basic block. Set anyaddr to True to support an arbitrary address.
        :param is_syscall: Whether you want to get the syscall node or any other node. This is due to the fact that
                           syscall SimProcedures have the same address as the target they return to.
                           None means get either, True means get a syscall node, False means get something that isn't
                           a syscall node.
        :param anyaddr: If anyaddr is True, then addr doesn't have to be the beginning address of a basic block.
                        `anyaddr=True` makes more sense after the CFG is normalized.
        :return: A CFGNode if there is any that satisfies given conditions, or None otherwise
"""
# TODO: Loop though self._nodes instead of self.graph.nodes()
# TODO: Of course, I should first fix the issue that .normalize() doesn't update self._nodes
for n in self.graph.nodes_iter():
cond = n.looping_times == 0
if anyaddr and n.size is not None:
                cond = cond and (n.addr <= addr < n.addr + n.size)
else:
cond = cond and (addr == n.addr)
if cond:
if is_syscall is None:
return n
if n.is_syscall == is_syscall:
return n
return None
def _get_irsb(self, cfg_node):
if cfg_node is None:
return None
if cfg_node.input_state is None:
raise AngrCFGError(
'You should save the input state when generating the CFG if you want to retrieve the SimIRSB later.')
# Recreate the SimIRSB
return self._project.factory.sim_run(cfg_node.input_state)
def irsb_from_node(self, cfg_node):
"""
Create SimRun from a CFGNode object.
"""
return self._get_irsb(cfg_node)
def get_any_irsb(self, addr):
"""
        Returns a SimRun at the given address. If multiple SimRuns in the CFG share that address,
        an arbitrary one is returned.
        You should never assume this method returns a specific one.
"""
cfg_node = self.get_any_node(addr)
return self._get_irsb(cfg_node)
def get_all_nodes(self, addr, is_syscall=None):
"""
        Get all CFGNodes whose address is the specified one.
:param addr: Address of the node
:param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both
:return: all CFGNodes
"""
        results = [ ]
        for cfg_node in self._graph.nodes_iter():
            if cfg_node.addr == addr:
                if is_syscall is None:
                    results.append(cfg_node)
                elif cfg_node.is_syscall == is_syscall:
                    results.append(cfg_node)
        return results
def get_all_irsbs(self, addr):
"""
Returns all SimRuns of a certain address, without considering contexts.
"""
nodes = self.get_all_nodes(addr)
results = [ ]
for n in nodes:
results.append(self._get_irsb(n))
return results
def get_loop_back_edges(self):
return self._loop_back_edges
    def get_irsb_addr_set(self):
        irsb_addr_set = set()
        for addr_tuple in self._nodes:
            irsb_addr_set.add(addr_tuple[-1])  # the IRSB address is the last element of the node key
        return irsb_addr_set
def get_branching_nodes(self):
"""
        Returns all nodes that have an out-degree of 2 or more.
"""
nodes = set()
for n in self._graph.nodes():
if self._graph.out_degree(n) >= 2:
nodes.add(n)
return nodes
def get_exit_stmt_idx(self, src_block, dst_block):
"""
        Get the corresponding exit statement ID for control flow to reach the destination block from the source
        block. The exit statement ID was put on the edge when creating the CFG.
        Note that there must be a direct edge between the two blocks, otherwise an exception will be raised.
:return: The exit statement ID
"""
if not self.graph.has_edge(src_block, dst_block):
raise AngrCFGError('Edge (%s, %s) does not exist in CFG' % (src_block, dst_block))
return self.graph[src_block][dst_block]['exit_stmt_idx']
@property
def graph(self):
return self._graph
    def remove_edge(self, simrun_from, simrun_to):
        if self._graph.has_edge(simrun_from, simrun_to):
            self._graph.remove_edge(simrun_from, simrun_to)
def is_thumb_addr(self, addr):
return addr in self._thumb_addrs
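# A minimal usage sketch (illustrative only; assumes an angr project whose CFG
# analysis subclasses CFGBase, using the API names of this angr era):
#
#     import angr
#     p = angr.Project('/bin/true')
#     cfg = p.analyses.CFG(keep_state=True)   # keep_state so _get_irsb() works
#     node = cfg.get_any_node(p.entry)
#     preds = cfg.get_predecessors(node)               # fakeret edges excluded
#     succs = cfg.get_successors_and_jumpkind(node, excluding_fakeret=False)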
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import coverageeligibilityresponse
from .fhirdate import FHIRDate
class CoverageEligibilityResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("CoverageEligibilityResponse", js["resourceType"])
return coverageeligibilityresponse.CoverageEligibilityResponse(js)
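    # These tests read the official FHIR example resources from disk; point
    # FHIR_UNITTEST_DATADIR at a directory containing files such as
    # "coverageeligibilityresponse-example.json" before running, e.g.:
    #
    #     FHIR_UNITTEST_DATADIR=/path/to/examples python -m unittest discover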
def testCoverageEligibilityResponse1(self):
inst = self.instantiate_from("coverageeligibilityresponse-example.json")
self.assertIsNotNone(inst, "Must have instantiated a CoverageEligibilityResponse instance")
self.implCoverageEligibilityResponse1(inst)
js = inst.as_json()
self.assertEqual("CoverageEligibilityResponse", js["resourceType"])
inst2 = coverageeligibilityresponse.CoverageEligibilityResponse(js)
self.implCoverageEligibilityResponse1(inst2)
def implCoverageEligibilityResponse1(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.id, "E2500")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/coverageeligibilityresponse")
self.assertEqual(inst.identifier[0].value, "881234")
self.assertTrue(inst.insurance[0].inforce)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.outcome, "complete")
self.assertEqual(inst.purpose[0], "validation")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testCoverageEligibilityResponse2(self):
inst = self.instantiate_from("coverageeligibilityresponse-example-error.json")
self.assertIsNotNone(inst, "Must have instantiated a CoverageEligibilityResponse instance")
self.implCoverageEligibilityResponse2(inst)
js = inst.as_json()
self.assertEqual("CoverageEligibilityResponse", js["resourceType"])
inst2 = coverageeligibilityresponse.CoverageEligibilityResponse(js)
self.implCoverageEligibilityResponse2(inst2)
def implCoverageEligibilityResponse2(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-09-16").date)
self.assertEqual(inst.created.as_json(), "2014-09-16")
self.assertEqual(inst.disposition, "Eligibiliy request could not be processed, please address errors before submitting.")
self.assertEqual(inst.error[0].code.coding[0].code, "a001")
self.assertEqual(inst.error[0].code.coding[0].system, "http://terminology.hl7.org/CodeSystem/adjudication-error")
self.assertEqual(inst.form.coding[0].code, "ELRSP/2017/01")
self.assertEqual(inst.form.coding[0].system, "http://national.org/form")
self.assertEqual(inst.id, "E2503")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/coverageeligibilityresponse")
self.assertEqual(inst.identifier[0].value, "8812343")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.outcome, "error")
self.assertEqual(inst.purpose[0], "validation")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testCoverageEligibilityResponse3(self):
inst = self.instantiate_from("coverageeligibilityresponse-example-benefits-2.json")
self.assertIsNotNone(inst, "Must have instantiated a CoverageEligibilityResponse instance")
self.implCoverageEligibilityResponse3(inst)
js = inst.as_json()
self.assertEqual("CoverageEligibilityResponse", js["resourceType"])
inst2 = coverageeligibilityresponse.CoverageEligibilityResponse(js)
self.implCoverageEligibilityResponse3(inst2)
def implCoverageEligibilityResponse3(self, inst):
self.assertEqual(inst.contained[0].id, "coverage-1")
self.assertEqual(inst.created.date, FHIRDate("2014-09-16").date)
self.assertEqual(inst.created.as_json(), "2014-09-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.form.coding[0].code, "ELRSP/2017/01")
self.assertEqual(inst.form.coding[0].system, "http://national.org/form")
self.assertEqual(inst.id, "E2502")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/coverageeligibilityresponse")
self.assertEqual(inst.identifier[0].value, "8812342")
self.assertTrue(inst.insurance[0].inforce)
self.assertEqual(inst.insurance[0].item[0].benefit[0].allowedMoney.currency, "USD")
self.assertEqual(inst.insurance[0].item[0].benefit[0].allowedMoney.value, 500000)
self.assertEqual(inst.insurance[0].item[0].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[0].benefit[0].usedMoney.currency, "USD")
self.assertEqual(inst.insurance[0].item[0].benefit[0].usedMoney.value, 3748.0)
self.assertEqual(inst.insurance[0].item[0].benefit[1].allowedMoney.currency, "USD")
self.assertEqual(inst.insurance[0].item[0].benefit[1].allowedMoney.value, 100)
self.assertEqual(inst.insurance[0].item[0].benefit[1].type.coding[0].code, "copay-maximum")
self.assertEqual(inst.insurance[0].item[0].benefit[2].allowedUnsignedInt, 20)
self.assertEqual(inst.insurance[0].item[0].benefit[2].type.coding[0].code, "copay-percent")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].code, "30")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].display, "Health Benefit Plan Coverage")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[0].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[0].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[0].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[0].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[0].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[0].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[1].benefit[0].allowedMoney.currency, "USD")
self.assertEqual(inst.insurance[0].item[1].benefit[0].allowedMoney.value, 15000)
self.assertEqual(inst.insurance[0].item[1].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].code, "69")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].display, "Maternity")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[1].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[1].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[1].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[1].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[1].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[1].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[2].benefit[0].allowedMoney.currency, "USD")
self.assertEqual(inst.insurance[0].item[2].benefit[0].allowedMoney.value, 2000)
self.assertEqual(inst.insurance[0].item[2].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].code, "F3")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].display, "Dental Coverage")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[2].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[2].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[2].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[2].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[2].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[2].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].code, "F6")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[3].description, "Vision products and services such as exams, glasses and contact lenses.")
self.assertTrue(inst.insurance[0].item[3].excluded)
self.assertEqual(inst.insurance[0].item[3].name, "Vision")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.outcome, "complete")
self.assertEqual(inst.purpose[0], "validation")
self.assertEqual(inst.purpose[1], "benefits")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testCoverageEligibilityResponse4(self):
inst = self.instantiate_from("coverageeligibilityresponse-example-benefits.json")
self.assertIsNotNone(inst, "Must have instantiated a CoverageEligibilityResponse instance")
self.implCoverageEligibilityResponse4(inst)
js = inst.as_json()
self.assertEqual("CoverageEligibilityResponse", js["resourceType"])
inst2 = coverageeligibilityresponse.CoverageEligibilityResponse(js)
self.implCoverageEligibilityResponse4(inst2)
def implCoverageEligibilityResponse4(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.id, "E2501")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/coverageeligibilityresponse")
self.assertEqual(inst.identifier[0].value, "881234")
self.assertTrue(inst.insurance[0].inforce)
self.assertEqual(inst.insurance[0].item[0].benefit[0].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[0].benefit[0].allowedMoney.value, 500000)
self.assertEqual(inst.insurance[0].item[0].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[0].benefit[1].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[0].benefit[1].allowedMoney.value, 100)
self.assertEqual(inst.insurance[0].item[0].benefit[1].type.coding[0].code, "copay-maximum")
self.assertEqual(inst.insurance[0].item[0].benefit[2].allowedUnsignedInt, 20)
self.assertEqual(inst.insurance[0].item[0].benefit[2].type.coding[0].code, "copay-percent")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].code, "30")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].display, "Health Benefit Plan Coverage")
self.assertEqual(inst.insurance[0].item[0].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[0].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[0].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[0].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[0].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[0].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[0].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[1].benefit[0].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[1].benefit[0].allowedMoney.value, 15000)
self.assertEqual(inst.insurance[0].item[1].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].code, "69")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].display, "Maternity")
self.assertEqual(inst.insurance[0].item[1].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[1].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[1].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[1].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[1].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[1].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[1].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[2].benefit[0].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[2].benefit[0].allowedMoney.value, 2000)
self.assertEqual(inst.insurance[0].item[2].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].code, "F3")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].display, "Dental Coverage")
self.assertEqual(inst.insurance[0].item[2].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[2].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[2].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[2].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[2].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[2].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[2].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[3].benefit[0].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[3].benefit[0].allowedMoney.value, 400)
self.assertEqual(inst.insurance[0].item[3].benefit[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].code, "F6")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.insurance[0].item[3].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[3].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[3].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[3].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].item[3].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[3].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[3].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.insurance[0].item[4].benefit[0].allowedString, "shared")
self.assertEqual(inst.insurance[0].item[4].benefit[0].type.coding[0].code, "room")
self.assertEqual(inst.insurance[0].item[4].benefit[1].allowedMoney.currency, "SAR")
self.assertEqual(inst.insurance[0].item[4].benefit[1].allowedMoney.value, 600)
self.assertEqual(inst.insurance[0].item[4].benefit[1].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].item[4].category.coding[0].code, "49")
self.assertEqual(inst.insurance[0].item[4].category.coding[0].display, "Hospital Room and Board")
self.assertEqual(inst.insurance[0].item[4].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-benefitcategory")
self.assertEqual(inst.insurance[0].item[4].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].item[4].network.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-network")
self.assertEqual(inst.insurance[0].item[4].term.coding[0].code, "day")
self.assertEqual(inst.insurance[0].item[4].term.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-term")
self.assertEqual(inst.insurance[0].item[4].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].item[4].unit.coding[0].system, "http://terminology.hl7.org/CodeSystem/benefit-unit")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.outcome, "complete")
self.assertEqual(inst.purpose[0], "validation")
self.assertEqual(inst.purpose[1], "benefits")
self.assertEqual(inst.servicedDate.date, FHIRDate("2014-09-17").date)
self.assertEqual(inst.servicedDate.as_json(), "2014-09-17")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _, _LW
from nova import objects
from nova.objects import base as objects_base
from nova import rpc
from nova import utils
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='The topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from havana to icehouse, you should '
'set this option to "icehouse-compat" before beginning the live '
'upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
LOG = logging.getLogger(__name__)
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
    :param instance: If an explicit host was not specified, use
        instance.host
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance.host:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance.uuid)
return instance.host
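# Resolution order, for example:
#     _compute_host('node-1', instance)  ->  'node-1'
#     _compute_host(None, instance)      ->  instance.host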
class ComputeAPI(object):
'''Client side of the compute rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_host_uptime()
* 1.2 - Adds check_can_live_migrate_[destination|source]
* 1.3 - Adds change_instance_metadata()
* 1.4 - Remove instance_uuid, add instance argument to
reboot_instance()
* 1.5 - Remove instance_uuid, add instance argument to
pause_instance(), unpause_instance()
* 1.6 - Remove instance_uuid, add instance argument to
suspend_instance()
* 1.7 - Remove instance_uuid, add instance argument to
get_console_output()
* 1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
* 1.9 - Remove instance_uuid, add instance argument to attach_volume()
* 1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
* 1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
* 1.12 - Remove instance_uuid, add instance argument to
confirm_resize()
* 1.13 - Remove instance_uuid, add instance argument to detach_volume()
* 1.14 - Remove instance_uuid, add instance argument to finish_resize()
* 1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
* 1.16 - Remove instance_uuid, add instance argument to
get_diagnostics()
* 1.17 - Remove instance_uuid, add instance argument to
get_vnc_console()
* 1.18 - Remove instance_uuid, add instance argument to inject_file()
* 1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
* 1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
* 1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
* 1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
* 1.23 - Remove instance_id, add instance argument to
pre_live_migration()
* 1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
* 1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
* 1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
* 1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
* 1.28 - Remove instance_uuid, add instance argument to reset_network()
* 1.29 - Remove instance_uuid, add instance argument to
resize_instance()
* 1.30 - Remove instance_uuid, add instance argument to
resume_instance()
* 1.31 - Remove instance_uuid, add instance argument to revert_resize()
* 1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
* 1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
* 1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
* 1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
* 1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
* 1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
* 1.38 - Changes to prep_resize():
* remove instance_uuid, add instance
* remove instance_type_id, add instance_type
* remove topic, it was unused
* 1.39 - Remove instance_uuid, add instance argument to run_instance()
* 1.40 - Remove instance_id, add instance argument to live_migration()
* 1.41 - Adds refresh_instance_security_rules()
* 1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
* 1.43 - Add migrate_data to live_migration()
* 1.44 - Adds reserve_block_device_name()
* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
* 2.5 - Add block device and network info to reboot_instance
* 2.6 - Remove migration_id, add migration to resize_instance
* 2.7 - Remove migration_id, add migration to confirm_resize
* 2.8 - Remove migration_id, add migration to finish_resize
* 2.9 - Add publish_service_capabilities()
* 2.10 - Adds filter_properties and request_spec to prep_resize()
* 2.11 - Adds soft_delete_instance() and restore_instance()
* 2.12 - Remove migration_id, add migration to revert_resize
* 2.13 - Remove migration_id, add migration to finish_revert_resize
* 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
* 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
* 2.16 - Add instance_type to resize_instance
* 2.17 - Add get_backdoor_port()
* 2.18 - Add bdms to rebuild_instance
* 2.19 - Add node to run_instance
* 2.20 - Add node to prep_resize
* 2.21 - Add migrate_data dict param to pre_live_migration()
* 2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
* 2.23 - Remove network_info from reboot_instance
* 2.24 - Added get_spice_console method
* 2.25 - Add attach_interface() and detach_interface()
* 2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
* 2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
* 2.28 - Adds check_instance_shared_storage()
* 2.29 - Made start_instance() and stop_instance() take new-world
instance objects
* 2.30 - Adds live_snapshot_instance()
* 2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
* 2.32 - Make reboot_instance take a new world instance object
* 2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
* 2.34 - Added swap_volume()
* 2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
* 2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
* 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
* 2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
* 2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
* 2.40 - Made reset_network() take new-world instance object
* 2.41 - Make inject_network_info take new-world instance object
* 2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
* 2.43 - Made prep_resize() take new-world instance object
* 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
* 2.45 - Made resize_instance() take new-world objects
* 2.46 - Made finish_resize() take new-world objects
* 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
* 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
new-world objects
* ... - Remove live_snapshot() that was never actually used
* 3.0 - Remove 2.x compatibility
* 3.1 - Update get_spice_console() to take an instance object
* 3.2 - Update get_vnc_console() to take an instance object
* 3.3 - Update validate_console_port() to take an instance object
* 3.4 - Update rebuild_instance() to take an instance object
* 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
* 3.6 - Make volume_snapshot_{create,delete} use new-world objects
* 3.7 - Update change_instance_metadata() to take an instance object
* 3.8 - Update set_admin_password() to take an instance object
* 3.9 - Update rescue_instance() to take an instance object
* 3.10 - Added get_rdp_console method
* 3.11 - Update unrescue_instance() to take an object
* 3.12 - Update add_fixed_ip_to_instance() to take an object
* 3.13 - Update remove_fixed_ip_from_instance() to take an object
* 3.14 - Update post_live_migration_at_destination() to take an object
* 3.15 - Adds filter_properties and node to unshelve_instance()
* 3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
* 3.17 - Update attach_interface and detach_interface to take an object
* 3.18 - Update get_diagnostics() to take an instance object
* Removed inject_file(), as it was unused.
* 3.19 - Update pre_live_migration to take instance object
* 3.20 - Make restore_instance take an instance object
* 3.21 - Made rebuild take new-world BDM objects
* 3.22 - Made terminate_instance take new-world BDM objects
* 3.23 - Added external_instance_event()
* build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
* 3.24 - Update rescue_instance() to take optional rescue_image_ref
* 3.25 - Make detach_volume take an object
* 3.26 - Make live_migration() and
rollback_live_migration_at_destination() take an object
* ... Removed run_instance()
* 3.27 - Make run_instance() accept a new-world object
* 3.28 - Update get_console_output() to accept a new-world object
* 3.29 - Make check_instance_shared_storage accept a new-world object
* 3.30 - Make remove_volume_connection() accept a new-world object
* 3.31 - Add get_instance_diagnostics
* 3.32 - Add destroy_disks and migrate_data optional parameters to
rollback_live_migration_at_destination()
* 3.33 - Make build_and_run_instance() take a NetworkRequestList object
* 3.34 - Add get_serial_console method
* 3.35 - Make reserve_block_device_name return a BDM object
... Juno supports message version 3.35. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.35.
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
* 3.38 - Add clean_shutdown to prep_resize
* 3.39 - Add quiesce_instance and unquiesce_instance methods
* 3.40 - Make build_and_run_instance() take a new-world topology
limits object
'''
VERSION_ALIASES = {
'icehouse': '3.23',
'juno': '3.35',
}
def __init__(self):
super(ComputeAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
CONF.upgrade_levels.compute)
serializer = objects_base.NovaObjectSerializer()
self.client = self.get_client(target, version_cap, serializer)
# Cells overrides this
def get_client(self, target, version_cap, serializer):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
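    # The recurring pattern in the methods below: pick the newest message
    # version the deployment-wide cap allows, degrading the arguments to
    # match. A minimal sketch with a hypothetical method and version:
    #
    #     def frob_instance(self, ctxt, instance, shiny_arg=None):
    #         msg_args = {'instance': instance}
    #         if self.client.can_send_version('3.41'):
    #             version = '3.41'
    #             msg_args['shiny_arg'] = shiny_arg  # newer computes only
    #         else:
    #             version = '3.0'
    #         cctxt = self.client.prepare(
    #             server=_compute_host(None, instance), version=version)
    #         cctxt.cast(ctxt, 'frob_instance', **msg_args)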
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
        :param aggregate: the aggregate to add the host to
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '3.12'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface',
instance=instance, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):
# NOTE(ndipanov): Remove volume_id and mountpoint on the next major
# version bump - they are not needed when using bdm objects.
version = '3.16'
kw = {'instance': instance, 'volume_id': volume_id,
'mountpoint': mountpoint, 'bdm': bdm}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume', **kw)
def change_instance_metadata(self, ctxt, instance, diff):
version = '3.7'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance, diff=diff)
def _warn_buggy_live_migrations(self, data=None):
# NOTE(danms): We know that libvirt live migration with shared block
# storage was buggy (potential loss of data) before version 3.32.
# Since we need to support live migration with older clients, we need
# to warn the operator of this possibility. The logic below tries to
# decide if a warning should be emitted, assuming the positive if
# not sure. This can be removed when we bump to RPC API version 4.0.
if data:
if data.get('is_shared_block_storage') is not False:
# Shared block storage, or unknown
should_warn = True
else:
# Specifically not shared block storage
should_warn = False
else:
# Unknown, so warn to be safe
should_warn = True
if should_warn:
LOG.warning(_LW('Live migration with clients before RPC version '
'3.32 is known to be buggy with shared block '
'storage. See '
'https://bugs.launchpad.net/nova/+bug/1250751 for '
'more information!'))
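    # Decision summary for the logic above:
    #     data is None                                  -> warn (unknown)
    #     data['is_shared_block_storage'] missing/None  -> warn (unknown)
    #     data['is_shared_block_storage'] is True       -> warn
    #     data['is_shared_block_storage'] is False      -> no warning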
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
cctxt = self.client.prepare(server=destination, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data, host=None):
if self.client.can_send_version('3.29'):
version = '3.29'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
if self.client.can_send_version('3.25'):
version = '3.25'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
if self.client.can_send_version('3.28'):
version = '3.28'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '3.18'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
kwargs = {'instance': instance_p}
version = '3.31'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)
def get_vnc_console(self, ctxt, instance, console_type):
version = '3.2'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '3.1'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '3.10'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '3.34'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '3.3'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'validate_console_port',
instance=instance, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
:param ctxt: request context
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
        :param mode: the maintenance mode to set on the host
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, action, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migrate_data=None):
if self.client.can_send_version('3.26'):
version = '3.26'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '3.14'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'post_live_migration_at_destination',
instance=instance, block_migration=block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
version = '3.19'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type_p,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
version = '3.38'
if not self.client.can_send_version(version):
del msg_args['clean_shutdown']
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, kwargs=None):
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral}
version = '3.21'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
def refresh_provider_fw_rules(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
        :param aggregate: the aggregate to remove the host from
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '3.13'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
if self.client.can_send_version('3.30'):
version = '3.30'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
msg_args = {'rescue_password': rescue_password}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
msg_args['rescue_image_ref'] = rescue_image_ref
elif self.client.can_send_version('3.24'):
version = '3.24'
msg_args['rescue_image_ref'] = rescue_image_ref
else:
version = '3.9'
msg_args['instance'] = instance
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
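    # rescue_instance above illustrates tiered degradation across three
    # versions: 3.37 added clean_shutdown, 3.24 added rescue_image_ref, and
    # 3.9 (this method's baseline) takes only instance and rescue_password.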
def reset_network(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None, clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
msg_args = {'instance': instance, 'migration': migration,
'image': image, 'reservations': reservations,
'instance_type': instance_type_p}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
if self.client.can_send_version('3.32'):
version = '3.32'
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
else:
version = '3.0'
extra = {}
self._warn_buggy_live_migrations(migrate_data)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, **extra)
# NOTE(alaski): Remove this method when the scheduler rpc interface is
# bumped to 4.x as the only callers of this method will be removed.
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node=None, legacy_bdm_in_spec=True):
if self.client.can_send_version('3.27'):
version = '3.27'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
msg_kwargs = {'instance': instance, 'request_spec': request_spec,
'filter_properties': filter_properties,
'requested_networks': requested_networks,
'injected_files': injected_files,
'admin_password': admin_password,
'is_first_time': is_first_time, 'node': node,
'legacy_bdm_in_spec': legacy_bdm_in_spec}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
version = '3.8'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type, 'return_bdm_object': True}
if self.client.can_send_version('3.35'):
version = '3.35'
else:
del kw['return_bdm_object']
version = '3.16'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
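        # Computes older than 3.35 return a primitive rather than a BDM
        # object, so look the newly created BDM up by volume in that case.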
if not isinstance(volume_bdm, objects.BlockDeviceMapping):
volume_bdm = objects.BlockDeviceMapping.get_by_volume_id(
ctxt, volume_id)
return volume_bdm
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
def suspend_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
version = '3.22'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '3.11'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
version = '3.20'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
msg_args = {'instance': instance, 'image_id': image_id}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance', **msg_args)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args)
def unshelve_instance(self, ctxt, instance, host, image=None,
filter_properties=None, node=None):
version = '3.15'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def external_instance_event(self, ctxt, instances, events):
cctxt = self.client.prepare(
server=_compute_host(None, instances[0]),
version='3.23')
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
version = '3.40'
if not self.client.can_send_version(version):
version = '3.36'
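            # Version 3.36 cannot accept NUMATopologyLimits objects, so
            # serialize the limit to its legacy JSON form against the
            # target host's reported NUMA topology.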
if 'numa_topology' in limits and limits['numa_topology']:
topology_limits = limits['numa_topology']
if node is not None:
cnode = objects.ComputeNode.get_by_host_and_nodename(
ctxt, host, node)
else:
cnode = (
objects.ComputeNode.
get_first_node_by_host_for_old_compat(
ctxt, host))
host_topology = objects.NUMATopology.obj_from_db_obj(
cnode.numa_topology)
limits['numa_topology'] = jsonutils.dumps(
topology_limits.to_dict_legacy(host_topology))
if not self.client.can_send_version(version):
version = '3.33'
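            # Version 3.33 expects the flavor in filter_properties as a
            # primitive dict rather than an object.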
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
if not self.client.can_send_version(version):
version = '3.23'
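            # Version 3.23 expects requested networks as plain tuples;
            # convert from the object form via as_tuples().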
if requested_networks is not None:
if utils.is_neutron():
requested_networks = [(network_id, address, port_id)
for (network_id, address, port_id, _) in
requested_networks.as_tuples()]
else:
requested_networks = [(network_id, address)
for (network_id, address) in
requested_networks.as_tuples()]
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
def quiesce_instance(self, ctxt, instance):
version = '3.39'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'quiesce_instance', instance=instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
version = '3.39'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unquiesce_instance', instance=instance,
mapping=mapping)
class SecurityGroupAPI(object):
'''Client side of the security group rpc API.
API version history:
1.0 - Initial version.
1.41 - Adds refresh_instance_security_rules()
2.0 - Remove 1.x backwards compat
3.0 - Identical to 2.x, but has to be bumped at the same time as the
compute API since it's all together on the server side.
'''
def __init__(self):
super(SecurityGroupAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = ComputeAPI.VERSION_ALIASES.get(
CONF.upgrade_levels.compute, CONF.upgrade_levels.compute)
self.client = rpc.get_client(target, version_cap)
def refresh_security_group_rules(self, ctxt, security_group_id, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_rules',
security_group_id=security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id,
host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_members',
security_group_id=security_group_id)
def refresh_instance_security_rules(self, ctxt, host, instance):
version = '3.0'
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance_p)
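# A minimal sketch (hypothetical helper, not part of the real module) of the
# version-negotiation pattern used by the proxies above: the client built in
# __init__ is capped via CONF.upgrade_levels.compute (resolved through
# ComputeAPI.VERSION_ALIASES), and each caller degrades its message version
# and argument set until can_send_version() accepts it, e.g.
# _pick_version(self.client, [('3.37', {'clean_shutdown': True})]).
def _pick_version(client, candidates, fallback='3.0'):
    # ``candidates`` is a list of (version, optional_kwargs) pairs, newest
    # first; return the first pair the capped client may send.
    for version, extra_args in candidates:
        if client.can_send_version(version):
            return version, extra_args
    return fallback, {}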
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import urllib2
import mock
from oslo.config import cfg
import testtools
import webob
import webob.exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as exception
from neutron.tests import base
from neutron import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
def open_no_proxy(*args, **kwargs):
# NOTE(jamespage):
    # Deal with the more secure certificate chain verification
    # introduced in Python 2.7.9 under PEP-0476
# https://github.com/python/peps/blob/master/pep-0476.txt
if hasattr(ssl, "_create_unverified_context"):
opener = urllib2.build_opener(
urllib2.ProxyHandler({}),
urllib2.HTTPSHandler(context=ssl._create_unverified_context())
)
else:
opener = urllib2.build_opener(urllib2.ProxyHandler({}))
return opener.open(*args, **kwargs)
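# A small usage sketch (hypothetical helper) mirroring the tests below: fetch
# the root resource of a throwaway server started on an ephemeral port,
# bypassing any proxy configuration and, on Python >= 2.7.9, the PEP-476
# certificate verification that would otherwise reject the self-signed
# certificates under TEST_VAR_DIR.
def _fetch_root(server, scheme='http'):
    response = open_no_proxy('%s://127.0.0.1:%d/' % (scheme, server.port))
    return response.read()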
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@mock.patch('neutron.openstack.common.service.ProcessLauncher')
def test_start_multiple_workers(self, ProcessLauncher):
launcher = ProcessLauncher.return_value
server = wsgi.Server("test_multiple_processes")
server.start(None, 0, host="127.0.0.1", workers=2)
launcher.launch_service.assert_called_once_with(mock.ANY, workers=2)
server.stop()
launcher.stop.assert_called_once_with()
server.wait()
launcher.wait.assert_called_once_with()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value)
])
def test_app(self):
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer(default_xmlns="fake")
result = serializer.serialize(input_data, content_type)
self.assertEqual('{"servers": ["test=pass"]}', result)
def test_serialize_content_type_xml(self):
"""Test serialize with content type xml."""
input_data = {'servers': ['test=pass']}
content_type = 'application/xml'
serializer = wsgi.Serializer(default_xmlns="fake")
result = serializer.serialize(input_data, content_type)
expected = (
'<?xml version=\'1.0\''
' encoding=\'UTF-8\'?>\n'
'<servers xmlns="http://openstack.org/quantum/api/v2.0" '
'xmlns:quantum="http://openstack.org/quantum/api/v2.0" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<server>test=pass</server></servers>'
)
self.assertEqual(expected, result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer(default_xmlns="fake")
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer(default_xmlns="fake")
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {u'servers': [u'test=pass']}}, result)
def test_deserialize_xml_content_type(self):
"""Test deserialize with content type xml."""
content_type = 'application/xml'
data_string = (
'<servers xmlns="fake">'
'<server>test=pass</server>'
'</servers>'
)
serializer = wsgi.Serializer(
default_xmlns="fake", metadata={'xmlns': 'fake'})
result = serializer.deserialize(data_string, content_type)
expected = {'body': {'servers': {'server': 'test=pass'}}}
self.assertEqual(expected, result)
def test_deserialize_xml_content_type_with_meta(self):
"""Test deserialize with content type xml with meta."""
content_type = 'application/xml'
data_string = (
'<servers>'
'<server name="s1">'
'<test test="a">passed</test>'
'</server>'
'</servers>'
)
metadata = {'plurals': {'servers': 'server'}, 'xmlns': 'fake'}
serializer = wsgi.Serializer(
default_xmlns="fake", metadata=metadata)
result = serializer.deserialize(data_string, content_type)
expected = {'body': {'servers': [{'name': 's1', 'test': 'passed'}]}}
self.assertEqual(expected, result)
def test_serialize_xml_root_key_is_dict(self):
"""Test Serializer.serialize with content type xml with meta dict."""
content_type = 'application/xml'
data = {'servers': {'network': (2, 3)}}
metadata = {'xmlns': 'fake'}
serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata)
result = serializer.serialize(data, content_type)
result = result.replace('\n', '')
expected = (
'<?xml version=\'1.0\' encoding=\'UTF-8\'?>'
'<servers xmlns="fake" xmlns:quantum="fake" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<network>(2, 3)</network></servers>'
)
self.assertEqual(result, expected)
def test_serialize_xml_root_key_is_list(self):
"""Test serialize with content type xml with meta list."""
input_dict = {'servers': ['test=pass']}
content_type = 'application/xml'
metadata = {'application/xml': {
'xmlns': 'fake'}}
serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata)
result = serializer.serialize(input_dict, content_type)
result = result.replace('\n', '').replace(' ', '')
expected = (
'<?xmlversion=\'1.0\''
'encoding=\'UTF-8\'?>'
'<serversxmlns="http://openstack.org/quantum/api/v2.0"'
'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<server>test=pass</server></servers>'
)
self.assertEqual(result, expected)
def test_serialize_xml_root_is_None(self):
input_dict = {'test': 'pass'}
content_type = 'application/xml'
serializer = wsgi.Serializer(default_xmlns="fake")
result = serializer.serialize(input_dict, content_type)
result = result.replace('\n', '').replace(' ', '')
expected = (
'<?xmlversion=\'1.0\''
'encoding=\'UTF-8\'?>'
'<testxmlns="http://openstack.org/quantum/api/v2.0"'
'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'pass</test>'
)
self.assertEqual(result, expected)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
class XMLDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_xml'
self.body_deserializers = {
'application/json': JSONDeserializer(),
'application/xml': XMLDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
expected_xml_serializer = self.deserializer.get_body_deserializer(
'application/xml')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
self.assertEqual(
expected_xml_serializer,
self.body_deserializers['application/xml'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual(
self.deserializer.get_expected_content_type(request),
'application/json')
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(
self.deserializer.get_action_args(env), expected)
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/xml'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/xml')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return 'pew_json'
class XMLSerializer(object):
def serialize(self, data, action='default'):
return 'pew_xml'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {
'application/json': JSONSerializer(),
'application/xml': XMLSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(
self.serializer.get_body_serializer(content_type),
self.body_serializers[content_type])
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.body, 'pew_json')
self.assertEqual(response.status_int, 404)
def test_serialize_xml_response(self):
response = self.serializer.serialize({}, 'application/xml')
self.assertEqual(response.headers['Content-Type'], 'application/xml')
self.assertEqual(response.body, 'pew_xml')
self.assertEqual(response.status_int, 404)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.body, '')
self.assertEqual(response.status_int, 404)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual(result, 'application/json')
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual(
serializer.dispatch('pants', action='create'),
'pants')
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual(
serializer.dispatch('Two', action=None),
'Two trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual(
serializer.dispatch('Two', action='update'),
'Two trousers')
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(response.status_int, 200)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(response.status_int, 404)
self.assertEqual(response.headers['X-Custom-Header'], '123')
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(
serializer.serialize({}, 'NonExistentAction'), '')
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
def test_json_with_utf8(self):
input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c')))
expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc')))
expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual(
deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(
deserializer.deserialize(data), as_dict)
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = '{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(
deserializer.deserialize(data), as_dict)
def test_json_with_unicode(self):
data = '{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(
deserializer.deserialize(data), as_dict)
class XMLDeserializerTest(base.BaseTestCase):
def test_xml_empty(self):
xml = '<a></a>'
as_dict = {'body': {'a': ''}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(
deserializer.deserialize(xml), as_dict)
def test_initialization(self):
xml = '<a><b>test</b></a>'
deserializer = wsgi.XMLDeserializer()
self.assertEqual(
{'body': {u'a': {u'b': u'test'}}}, deserializer(xml))
def test_default_raise_Malformed_Exception(self):
"""Verify that exception MalformedRequestBody is raised."""
data_string = ""
deserializer = wsgi.XMLDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_xml_with_utf8(self):
xml = '<a>\xe7\xbd\x91\xe7\xbb\x9c</a>'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(
deserializer.deserialize(xml), as_dict)
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual(
deserializer.deserialize(req, 'nonExistent'), {})
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual(
deserializer.deserialize(req, 'update'), {'a': 'b'})
class ResourceTest(base.BaseTestCase):
def test_dispatch(self):
class Controller(object):
def index(self, request, index=None):
return index
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(Controller(), my_fault_body_function)
actual = resource.dispatch(
resource.controller, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_dispatch_unknown_controller_action(self):
class Controller(object):
def index(self, request, pants=None):
return pants
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(Controller(), my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(None, my_fault_body_function)
request = wsgi.Request.blank(
"/", body="{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(response.status_int, 400)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(None, my_fault_body_function)
request = wsgi.Request.blank(
"/", body="{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(response.status_int, 400)
def test_wrong_content_type_server_error(self):
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(None, my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(response.status_int, 500)
def test_call_resource_class_bad_request(self):
class Controller(object):
def index(self, request, index=None):
return index
def my_fault_body_function():
return 'off'
class FakeRequest():
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(Controller(), my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
def test_type_error(self):
class Controller(object):
def index(self, request, index=None):
return index
def my_fault_body_function():
return 'off'
resource = wsgi.Resource(Controller(), my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "xml"})
response = resource.dispatch(
request, action='index', action_args='test')
self.assertEqual(400, response.status_int)
def test_call_resource_class_internal_error(self):
class Controller(object):
def index(self, request, index=None):
return index
def my_fault_body_function():
return 'off'
class FakeRequest():
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "xml"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(Controller(), my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(500, result.status_int)
class MiddlewareTest(base.BaseTestCase):
def test_process_response(self):
def application(environ, start_response):
response = 'Success'
return response
response = application('test', 'fake')
result = wsgi.Middleware(application).process_response(response)
self.assertEqual('Success', result)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
status_int = 415
explanation = 'test'
my_exceptions = MyException()
my_fault = wsgi.Fault(exception=my_exceptions)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = my_fault(request)
self.assertEqual(415, response.status_int)
class XMLDictSerializerTest(base.BaseTestCase):
def test_xml(self):
NETWORK = {'network': {'test': None,
'tenant_id': 'test-tenant',
'name': 'net1',
'admin_state_up': True,
'subnets': [],
'dict': {},
'int': 3,
'long': 4L,
'float': 5.0,
'prefix:external': True,
'tests': [{'test1': 'value1'},
{'test2': 2, 'test3': 3}]}}
# XML is:
# <network xmlns="http://openstack.org/quantum/api/v2.0"
# xmlns:prefix="http://xxxx.yy.com"
# xmlns:quantum="http://openstack.org/quantum/api/v2.0"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# <subnets quantum:type="list" /> # Empty List
# <int quantum:type="int">3</int> # Integer text
# <int quantum:type="long">4</int> # Long text
# <int quantum:type="float">5.0</int> # Float text
# <dict quantum:type="dict" /> # Empty Dict
# <name>net1</name>
# <admin_state_up quantum:type="bool">True</admin_state_up> # Bool
# <test xsi:nil="true" /> # None
# <tenant_id>test-tenant</tenant_id>
# # We must have a namespace defined in root for prefix:external
# <prefix:external quantum:type="bool">True</prefix:external>
# <tests> # List
# <test><test1>value1</test1></test>
# <test><test3 quantum:type="int">3</test3>
# <test2 quantum:type="int">2</test2>
# </test></tests>
# </network>
metadata = attributes.get_attr_metadata()
ns = {'prefix': 'http://xxxx.yy.com'}
metadata[constants.EXT_NS] = ns
metadata['plurals'] = {'tests': 'test'}
serializer = wsgi.XMLDictSerializer(metadata)
result = serializer.serialize(NETWORK)
deserializer = wsgi.XMLDeserializer(metadata)
new_net = deserializer.deserialize(result)['body']
self.assertEqual(NETWORK, new_net)
def test_None(self):
data = None
# Since it is None, we use xsi:nil='true'.
        # In addition, we use a
# virtual XML root _v_root to wrap the XML doc.
# XML is:
# <_v_root xsi:nil="true"
# xmlns="http://openstack.org/quantum/api/v2.0"
# xmlns:quantum="http://openstack.org/quantum/api/v2.0"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" />
serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
result = serializer.serialize(data)
deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
new_data = deserializer.deserialize(result)['body']
self.assertIsNone(new_data)
def test_empty_dic_xml(self):
data = {}
# Since it is an empty dict, we use quantum:type='dict' and
        # an empty XML element to represent it. In addition, we use a
# virtual XML root _v_root to wrap the XML doc.
# XML is:
# <_v_root quantum:type="dict"
# xmlns="http://openstack.org/quantum/api/v2.0"
# xmlns:quantum="http://openstack.org/quantum/api/v2.0"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" />
serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
result = serializer.serialize(data)
deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
new_data = deserializer.deserialize(result)['body']
self.assertEqual(data, new_data)
def test_non_root_one_item_dic_xml(self):
data = {'test1': 1}
# We have a key in this dict, and its value is an integer.
# XML is:
# <test1 quantum:type="int"
# xmlns="http://openstack.org/quantum/api/v2.0"
# xmlns:quantum="http://openstack.org/quantum/api/v2.0"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# 1</test1>
serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
result = serializer.serialize(data)
deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
new_data = deserializer.deserialize(result)['body']
self.assertEqual(data, new_data)
def test_non_root_two_items_dic_xml(self):
data = {'test1': 1, 'test2': '2'}
        # We have no root element in this data, so we will use a virtual
        # root element _v_root to wrap the dict.
# The XML is:
# <_v_root xmlns="http://openstack.org/quantum/api/v2.0"
# xmlns:quantum="http://openstack.org/quantum/api/v2.0"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# <test1 quantum:type="int">1</test1><test2>2</test2>
# </_v_root>
serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
result = serializer.serialize(data)
deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
new_data = deserializer.deserialize(result)['body']
self.assertEqual(data, new_data)
def test_xml_root_key_is_list(self):
input_dict = {'servers': ['test-pass']}
serializer = wsgi.XMLDictSerializer(xmlns="fake")
result = serializer.default(input_dict)
result = result.replace('\n', '').replace(' ', '')
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="fake"xmlns:quantum="fake"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<server>test-pass</server></servers>'
)
self.assertEqual(result, expected)
def test_xml_meta_contains_node_name_list(self):
input_dict = {'servers': ['test-pass']}
servers = {'nodename': 'test',
'item_name': 'test',
'item_key': 'test'}
metadata = {'list_collections': {'servers': servers}}
serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata)
result = serializer.default(input_dict)
result = result.replace('\n', '').replace(' ', '')
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="fake"xmlns:quantum="fake"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<server>test-pass</server></servers>'
)
self.assertEqual(result, expected)
def test_xml_meta_contains_node_name_dict(self):
input_dict = {'servers': {'a': {'2': '3'}}}
servers = {'servers': {
'nodename': 'test',
'item_name': 'test',
'item_key': 'test'}}
metadata = {'dict_collections': servers}
serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata)
result = serializer.default(input_dict)
result = result.replace('\n', '').replace(' ', '')
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="fake"xmlns:quantum="fake"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<a><2>3</2></a></servers>'
)
self.assertEqual(result, expected)
def test_call(self):
data = {'servers': {'a': {'2': '3'}}}
serializer = wsgi.XMLDictSerializer()
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="http://openstack.org/quantum/api/v2.0"'
'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'<a><2>3</2></a></servers>'
)
result = serializer(data)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected, result)
def test_xml_with_utf8(self):
data = {'servers': '\xe7\xbd\x91\xe7\xbb\x9c'}
serializer = wsgi.XMLDictSerializer()
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="http://openstack.org/quantum/api/v2.0"'
'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'\xe7\xbd\x91\xe7\xbb\x9c</servers>'
)
result = serializer(data)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected, result)
def test_xml_with_unicode(self):
data = {'servers': u'\u7f51\u7edc'}
serializer = wsgi.XMLDictSerializer()
expected = (
'<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
'<serversxmlns="http://openstack.org/quantum/api/v2.0"'
'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
'\xe7\xbd\x91\xe7\xbb\x9c</servers>'
)
result = serializer(data)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected, result)
class TestWSGIServerWithSSL(base.BaseTestCase):
"""WSGI server tests."""
def test_app_using_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl_combined_cert_and_key(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certandkey.pem'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ipv6_and_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="::1")
response = open_no_proxy('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any, Dict, List, Optional
from zipfile import ZipFile
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from marshmallow import ValidationError
from sqlalchemy.exc import NoSuchTableError, OperationalError, SQLAlchemyError
from superset import app, event_logger
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.commands.create import CreateDatabaseCommand
from superset.databases.commands.delete import DeleteDatabaseCommand
from superset.databases.commands.exceptions import (
DatabaseConnectionFailedError,
DatabaseCreateFailedError,
DatabaseDeleteDatasetsExistFailedError,
DatabaseDeleteFailedError,
DatabaseInvalidError,
DatabaseNotFoundError,
DatabaseUpdateFailedError,
InvalidParametersError,
)
from superset.databases.commands.export import ExportDatabasesCommand
from superset.databases.commands.importers.dispatcher import ImportDatabasesCommand
from superset.databases.commands.test_connection import TestConnectionDatabaseCommand
from superset.databases.commands.update import UpdateDatabaseCommand
from superset.databases.commands.validate import ValidateDatabaseParametersCommand
from superset.databases.dao import DatabaseDAO
from superset.databases.decorators import check_datasource_access
from superset.databases.filters import DatabaseFilter
from superset.databases.schemas import (
database_schemas_query_schema,
DatabaseFunctionNamesResponse,
DatabasePostSchema,
DatabasePutSchema,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
get_export_ids_schema,
SchemasResponseSchema,
SelectStarResponseSchema,
TableMetadataResponseSchema,
)
from superset.databases.utils import get_table_metadata
from superset.db_engine_specs import get_available_engine_specs
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import InvalidPayloadFormatError
from superset.extensions import security_manager
from superset.models.core import Database
from superset.typing import FlaskResponse
from superset.utils.core import error_msg_from_exception
from superset.views.base_api import BaseSupersetModelRestApi, statsd_metrics
logger = logging.getLogger(__name__)
class DatabaseRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(Database)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
"table_metadata",
"select_star",
"schemas",
"test_connection",
"related_objects",
"function_names",
"available",
"validate_parameters",
}
resource_name = "database"
class_permission_name = "Database"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
allow_browser_login = True
base_filters = [["id", DatabaseFilter, lambda: []]]
show_columns = [
"id",
"database_name",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"configuration_method",
"allow_ctas",
"allow_cvas",
"allow_dml",
"backend",
"force_ctas_schema",
"allow_multi_schema_metadata_fetch",
"impersonate_user",
"encrypted_extra",
"extra",
"parameters",
"server_cert",
"sqlalchemy_uri",
]
list_columns = [
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_cost_estimate",
"allows_subquery",
"allows_virtual_table_explore",
"backend",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.last_name",
"database_name",
"explore_database_id",
"expose_in_sqllab",
"extra",
"force_ctas_schema",
"id",
]
add_columns = [
"database_name",
"sqlalchemy_uri",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"configuration_method",
"force_ctas_schema",
"impersonate_user",
"allow_multi_schema_metadata_fetch",
"extra",
"encrypted_extra",
"server_cert",
]
edit_columns = add_columns
list_select_columns = list_columns + ["extra", "sqlalchemy_uri", "password"]
order_columns = [
"allow_csv_upload",
"allow_dml",
"allow_run_async",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"database_name",
"expose_in_sqllab",
]
# Removes the local limit for the page size
max_page_size = -1
add_model_schema = DatabasePostSchema()
edit_model_schema = DatabasePutSchema()
apispec_parameter_schemas = {
"database_schemas_query_schema": database_schemas_query_schema,
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_tag = "Database"
openapi_spec_component_schemas = (
DatabaseFunctionNamesResponse,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
TableMetadataResponseSchema,
SelectStarResponseSchema,
SchemasResponseSchema,
)
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
def post(self) -> Response:
"""Creates a new Database
---
post:
description: >-
Create a new Database.
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Database added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
302:
description: Redirects to the current digest
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDatabaseCommand(g.user, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = new_model.sqlalchemy_uri
item["expose_in_sqllab"] = new_model.expose_in_sqllab
# If parameters are available return them in the payload
if new_model.parameters:
item["parameters"] = new_model.parameters
return self.response(201, id=new_model.id, result=item)
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
def put( # pylint: disable=too-many-return-statements, arguments-differ
self, pk: int
) -> Response:
"""Changes a Database
---
put:
description: >-
Changes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Database changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDatabaseCommand(g.user, pk, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = changed_model.sqlalchemy_uri
return self.response(200, id=changed_model.id, result=item)
except DatabaseNotFoundError:
return self.response_404()
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response: # pylint: disable=arguments-differ
"""Deletes a Database
---
delete:
description: >-
Deletes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Database deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDatabaseCommand(g.user, pk).run()
return self.response(200, message="OK")
except DatabaseNotFoundError:
return self.response_404()
except DatabaseDeleteDatasetsExistFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>/schemas/")
@protect()
@safe
@rison(database_schemas_query_schema)
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".schemas",
log_to_statsd=False,
)
def schemas(self, pk: int, **kwargs: Any) -> FlaskResponse:
"""Get all schemas from a database
---
get:
description: Get all schemas from a database
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/database_schemas_query_schema'
responses:
200:
description: A List of all schemas from the database
content:
application/json:
schema:
$ref: "#/components/schemas/SchemasResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = self.datamodel.get(pk, self._base_filters)
if not database:
return self.response_404()
try:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=kwargs["rison"].get("force", False),
)
schemas = security_manager.get_schemas_accessible_by_user(database, schemas)
return self.response(200, result=schemas)
except OperationalError:
return self.response(
500, message="There was an error connecting to the database"
)
@expose("/<int:pk>/table/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".table_metadata",
log_to_statsd=False,
)
def table_metadata(
self, database: Database, table_name: str, schema_name: str
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database table metadata
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: Table metadata information
content:
application/json:
schema:
$ref: "#/components/schemas/TableMetadataResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.table_metadata.__name__)
try:
table_info = get_table_metadata(database, table_name, schema_name)
except SQLAlchemyError as ex:
self.incr_stats("error", self.table_metadata.__name__)
return self.response_422(error_msg_from_exception(ex))
self.incr_stats("success", self.table_metadata.__name__)
return self.response(200, **table_info)
@expose("/<int:pk>/select_star/<table_name>/", methods=["GET"])
@expose("/<int:pk>/select_star/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.select_star",
log_to_statsd=False,
)
def select_star(
self, database: Database, table_name: str, schema_name: Optional[str] = None
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database select star for table
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: SQL statement for a select star for table
content:
application/json:
schema:
$ref: "#/components/schemas/SelectStarResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.select_star.__name__)
try:
result = database.select_star(
table_name, schema_name, latest_partition=True, show_cols=True
)
except NoSuchTableError:
self.incr_stats("error", self.select_star.__name__)
return self.response(404, message="Table not found on the database")
self.incr_stats("success", self.select_star.__name__)
return self.response(200, result=result)
@expose("/test_connection", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".test_connection",
log_to_statsd=False,
)
def test_connection( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""Tests a database connection
---
post:
description: >-
Tests a database connection
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseTestConnectionSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = DatabaseTestConnectionSchema().load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
TestConnectionDatabaseCommand(g.user, item).run()
return self.response(200, message="OK")
@expose("/<int:pk>/related_objects/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".related_objects",
log_to_statsd=False,
)
def related_objects(self, pk: int) -> Response:
"""Get charts and dashboards count associated to a database
---
get:
description:
              Get charts and dashboards count associated with a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
          200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseRelatedObjectsResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
data = DatabaseDAO.get_related_objects(pk)
charts = [
{
"id": chart.id,
"slice_name": chart.slice_name,
"viz_type": chart.viz_type,
}
for chart in data["charts"]
]
dashboards = [
{
"id": dashboard.id,
"json_metadata": dashboard.json_metadata,
"slug": dashboard.slug,
"title": dashboard.dashboard_title,
}
for dashboard in data["dashboards"]
]
return self.response(
200,
charts={"count": len(charts), "result": charts},
dashboards={"count": len(dashboards), "result": dashboards},
)
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self, **kwargs: Any) -> Response:
"""Export database(s) with associated datasets
---
get:
description: Download database(s) and associated dataset(s) as a zip file
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: A zip file with database(s) and dataset(s) as YAML
content:
application/zip:
schema:
type: string
format: binary
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
token = request.args.get("token")
requested_ids = kwargs["rison"]
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"database_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportDatabasesCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except DatabaseNotFoundError:
return self.response_404()
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
if token:
response.set_cookie(token, "done", max_age=600)
return response
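    # Client-side sketch (hypothetical; assumes the default Flask-AppBuilder
    # ``/api/v1`` prefix and an authenticated requests session): the ids are
    # Rison-encoded in the ``q`` query argument, and a 200 response body is
    # the zip bundle itself:
    #
    #     resp = session.get("/api/v1/database/export/",
    #                        params={"q": "!(1,2)"})
    #     open("databases.zip", "wb").write(resp.content)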
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
def import_(self) -> Response:
"""Import database(s) with associated datasets
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP)
type: string
format: binary
passwords:
description: JSON map of passwords for each file
type: string
overwrite:
description: overwrite existing databases?
type: boolean
responses:
200:
description: Database import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDatabasesCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
@expose("/<int:pk>/function_names/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".function_names",
log_to_statsd=False,
)
def function_names(self, pk: int) -> Response:
"""Get function names supported by a database
---
get:
description:
Get function names supported by a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseFunctionNamesResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
return self.response(200, function_names=database.function_names)
@expose("/available/", methods=["GET"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".available",
log_to_statsd=False,
)
def available(self) -> Response:
"""Return names of databases currently available
---
get:
description:
Get names of databases currently available
responses:
200:
description: Database names
content:
application/json:
schema:
type: array
items:
type: object
properties:
name:
description: Name of the database
type: string
engine:
description: Name of the SQLAlchemy engine
type: string
available_drivers:
description: Installed drivers for the engine
type: array
items:
type: string
default_driver:
description: Default driver for the engine
type: string
preferred:
description: Is the database preferred?
type: boolean
sqlalchemy_uri_placeholder:
description: Example placeholder for the SQLAlchemy URI
type: string
parameters:
description: JSON schema defining the needed parameters
type: object
400:
$ref: '#/components/responses/400'
500:
$ref: '#/components/responses/500'
"""
preferred_databases: List[str] = app.config.get("PREFERRED_DATABASES", [])
available_databases = []
for engine_spec, drivers in get_available_engine_specs().items():
if not drivers:
continue
payload: Dict[str, Any] = {
"name": engine_spec.engine_name,
"engine": engine_spec.engine,
"available_drivers": sorted(drivers),
"preferred": engine_spec.engine_name in preferred_databases,
}
if hasattr(engine_spec, "default_driver"):
payload["default_driver"] = engine_spec.default_driver # type: ignore
# show configuration parameters for DBs that support it
if (
hasattr(engine_spec, "parameters_json_schema")
and hasattr(engine_spec, "sqlalchemy_uri_placeholder")
and getattr(engine_spec, "default_driver", None) in drivers
):
payload[
"parameters"
] = engine_spec.parameters_json_schema() # type: ignore
payload[
"sqlalchemy_uri_placeholder"
] = engine_spec.sqlalchemy_uri_placeholder # type: ignore
available_databases.append(payload)
# sort preferred first
response = sorted(
(payload for payload in available_databases if payload["preferred"]),
key=lambda payload: preferred_databases.index(payload["name"]),
)
# add others
response.extend(
sorted(
(
payload
for payload in available_databases
if not payload["preferred"]
),
key=lambda payload: payload["name"],
)
)
return self.response(200, databases=response)
@expose("/validate_parameters", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".validate_parameters",
log_to_statsd=False,
)
def validate_parameters( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""validates database connection parameters
---
post:
description: >-
Validates parameters used to connect to a database
requestBody:
description: DB-specific parameters
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseValidateParametersSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
raise InvalidPayloadFormatError("Request is not JSON")
try:
payload = DatabaseValidateParametersSchema().load(request.json)
except ValidationError as error:
errors = [
SupersetError(
message="\n".join(messages),
error_type=SupersetErrorType.INVALID_PAYLOAD_SCHEMA_ERROR,
level=ErrorLevel.ERROR,
extra={"invalid": [attribute]},
)
for attribute, messages in error.messages.items()
]
raise InvalidParametersError(errors)
command = ValidateDatabaseParametersCommand(g.user, payload)
command.run()
return self.response(200, message="OK")
|
|
"""
Utilities
==============
Rate limiter classes.
These are callables that, when invoked, register that a request was issued.
Depending on how they are configured, they may pause or raise an exception
once a rate limit has been exceeded. It is up to the calling code to ensure
that these callables are invoked with every (successful?) call to the backend
API. (There is probably a better way to hook these into the requests library
directly ... TBD.)
From the Strava docs:
Strava API usage is limited on a per-application basis using a short term,
15 minute, limit and a long term, daily, limit. The default rate limit allows
600 requests every 15 minutes, with up to 30,000 requests per day.
This limit allows applications to make 40 requests per minute for about
half the day.
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import collections
import logging
import time
from datetime import datetime, timedelta
import arrow
from stravalib import exc
def total_seconds(td):
"""Alternative to datetime.timedelta.total_seconds
total_seconds() only available since Python 2.7
https://docs.python.org/2/library/datetime.html#datetime.timedelta.total_seconds
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
RequestRate = collections.namedtuple('RequestRate', ['short_usage', 'long_usage', 'short_limit', 'long_limit'])
def get_rates_from_response_headers(headers):
"""
Returns a namedtuple with the short- and long-term usage and limit rates found in the provided HTTP response headers.
:param headers: HTTP response headers
:type headers: dict
:return: namedtuple with request rates or None if no rate-limit headers present in response.
:rtype: Optional[RequestRate]
"""
try:
usage_rates = [int(v) for v in headers['X-RateLimit-Usage'].split(',')]
limit_rates = [int(v) for v in headers['X-RateLimit-Limit'].split(',')]
return RequestRate(short_usage=usage_rates[0], long_usage=usage_rates[1],
short_limit=limit_rates[0], long_limit=limit_rates[1])
except KeyError:
return None
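# Example (assumed header shape, matching what this function parses):
#   get_rates_from_response_headers({'X-RateLimit-Usage': '314,27536',
#                                    'X-RateLimit-Limit': '600,30000'})
#   -> RequestRate(short_usage=314, long_usage=27536,
#                  short_limit=600, long_limit=30000)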
def get_seconds_until_next_quarter(now=None):
"""
Returns the number of seconds until the next quarter of an hour. This is the short-term rate-limit window used by Strava.
:param now: A (utc) timestamp
:type now: arrow.arrow.Arrow
:return: the number of seconds until the next quarter, as int
"""
if now is None:
now = arrow.utcnow()
return 899 - (now - now.replace(minute=(now.minute // 15) * 15, second=0, microsecond=0)).seconds
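# Worked example: at 10:07:00 UTC the current quarter started at 10:00:00,
# so the elapsed-seconds term is 420 and the function returns 899 - 420 = 479,
# one second short of the 480 seconds remaining until the 10:15:00 reset.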
def get_seconds_until_next_day(now=None):
"""
Returns the number of seconds until the next day (utc midnight). This is the long-term rate-limit window used by Strava.
:param now: A (utc) timestamp
:type now: arrow.arrow.Arrow
:return: the number of seconds until next day, as int
"""
if now is None:
now = arrow.utcnow()
return (now.ceil('day') - now).seconds
class XRateLimitRule(object):
def __init__(self, limits, force_limits=False):
"""
:param limits: The limits structure.
:param force_limits: If False (default), this rule will set/update its limits based on what the Strava API
tells it. If True, the provided limits will be enforced, i.e. ignoring the limits given by the API.
"""
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.rate_limits = limits
# should limit args be validated?
self.limit_time_invalid = 0
self.force_limits = force_limits
@property
def limit_timeout(self):
return self.limit_time_invalid
def __call__(self, response_headers):
self._update_usage(response_headers)
for limit in self.rate_limits.values():
self._check_limit_time_invalid(limit)
self._check_limit_rates(limit)
def _update_usage(self, response_headers):
rates = get_rates_from_response_headers(response_headers)
if rates:
self.log.debug("Updating rate-limit limits and usage from headers: {}".format(rates))
self.rate_limits['short']['usage'] = rates.short_usage
self.rate_limits['long']['usage'] = rates.long_usage
if not self.force_limits:
self.rate_limits['short']['limit'] = rates.short_limit
self.rate_limits['long']['limit'] = rates.long_limit
def _check_limit_rates(self, limit):
if limit['usage'] >= limit['limit']:
self.log.debug("Rate limit of {0} reached.".format(limit['limit']))
limit['lastExceeded'] = datetime.now()
self._raise_rate_limit_exception(limit['limit'], limit['time'])
def _check_limit_time_invalid(self, limit):
self.limit_time_invalid = 0
if limit['lastExceeded'] is not None:
delta = (datetime.now() - limit['lastExceeded']).total_seconds()
if delta < limit['time']:
self.limit_time_invalid = limit['time'] - delta
self.log.debug("Rate limit invalid duration {0} seconds."
.format(self.limit_time_invalid))
self._raise_rate_limit_timeout(self.limit_timeout, limit['limit'])
def _raise_rate_limit_exception(self, timeout, limit_rate):
raise exc.RateLimitExceeded("Rate limit of {0} exceeded. "
"Try again in {1} seconds.".format(limit_rate, timeout),
limit=limit_rate, timeout=timeout)
def _raise_rate_limit_timeout(self, timeout, limit_rate):
raise exc.RateLimitTimeout("Rate limit of {0} exceeded. "
"Try again in {1} seconds.".format(limit_rate, timeout),
limit=limit_rate, timeout=timeout)
class SleepingRateLimitRule(object):
"""
A rate limit rule that can be prioritized and can dynamically adapt its limits based on API responses.
Given its priority, it will enforce a variable "cool-down" period after each response. When rate limits
are reached within their period, this limiter will wait until the end of that period. It will NOT raise
any kind of exception in this case.
"""
def __init__(self, priority='high', short_limit=10000, long_limit=1000000, force_limits=False):
"""
Constructs a new SleepingRateLimitRule.
:param priority: The priority for this rule. When 'low', the cool-down period after each request will be such
that the long-term limits will not be exceeded. When 'medium', the cool-down period will be such that the
short-term limits will not be exceeded. When 'high', there will be no cool-down period.
:type priority: str
:param short_limit: (Optional) explicit short-term limit
:type short_limit: int
:param long_limit: (Optional) explicit long-term limit
:type long_limit: int
:param force_limits: If False (default), this rule will set/update its limits based on what the Strava API
tells it. If True, the provided limits will be enforced, i.e. ignoring the limits given by the API.
"""
if priority not in ['low', 'medium', 'high']:
raise ValueError('Invalid priority "{0}", expecting one of "low", "medium" or "high"'.format(priority))
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.priority = priority
self.short_limit = short_limit
self.long_limit = long_limit
self.force_limits = force_limits
def _get_wait_time(self, short_usage, long_usage, seconds_until_short_limit, seconds_until_long_limit):
if long_usage >= self.long_limit:
self.log.warning('Long term API rate limit exceeded')
return seconds_until_long_limit
elif short_usage >= self.short_limit:
self.log.warning('Short term API rate limit exceeded')
return seconds_until_short_limit
if self.priority == 'high':
return 0
elif self.priority == 'medium':
return seconds_until_short_limit / (self.short_limit - short_usage)
elif self.priority == 'low':
return seconds_until_long_limit / (self.long_limit - long_usage)
def __call__(self, response_headers):
rates = get_rates_from_response_headers(response_headers)
if rates:
time.sleep(self._get_wait_time(rates.short_usage, rates.long_usage,
get_seconds_until_next_quarter(), get_seconds_until_next_day()))
if not self.force_limits:
self.short_limit = rates.short_limit
self.long_limit = rates.long_limit
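# Worked example ('medium' priority): with short_limit=600 and short_usage=100
# there are 500 requests left in the current quarter; if 1000 seconds remain,
# _get_wait_time yields 1000 / 500 = 2.0 seconds of cool-down per request,
# spreading the remaining short-term budget evenly over the window.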
class RateLimitRule(object):
def __init__(self, requests, seconds, raise_exc=False):
"""
:param requests: The number of requests allowed within the timeframe.
:param seconds: The length of the timeframe in seconds (may be float).
:param raise_exc: Whether to raise an exception when the limit is reached (as opposed to pausing).
"""
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.timeframe = timedelta(seconds=seconds)
self.requests = requests
self.tab = collections.deque(maxlen=self.requests)
self.raise_exc = raise_exc
def __call__(self, args):
"""
Register that another request is being issued.
Depending on the rule's configuration, this will pause if the rate limit
has been reached, raise an exception, etc.
"""
# First check if the deque is full; that indicates that we'd better check whether
# we need to pause.
if len(self.tab) == self.requests:
# Grab the oldest (leftmost) timestamp and check whether it falls within the configured timeframe
delta = datetime.now() - self.tab[0]
if delta < self.timeframe: # Has it been less than configured timeframe since oldest request?
if self.raise_exc:
raise exc.RateLimitExceeded("Rate limit exceeded (can try again in {0})".format(self.timeframe - delta))
else:
# Wait the difference between timeframe and the oldest request.
td = self.timeframe - delta
sleeptime = td.total_seconds() if hasattr(td, 'total_seconds') else total_seconds(td)
self.log.debug("Rate limit triggered; sleeping for {0}".format(sleeptime))
time.sleep(sleeptime)
self.tab.append(datetime.now())
class RateLimiter(object):
def __init__(self):
self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
self.rules = []
def __call__(self, args):
"""
Register that another request is being issued.
"""
for r in self.rules:
r(args)
class DefaultRateLimiter(RateLimiter):
"""
Implements something similar to the default rate limit for Strava apps.
To do this correctly we would actually need to change our logic to reset
the limit at midnight, etc. Will make this more complex in the future.
Strava API usage is limited on a per-application basis using a short term,
15 minute, limit and a long term, daily, limit. The default rate limit allows
600 requests every 15 minutes, with up to 30,000 requests per day.
"""
def __init__(self):
"""
Strava API usage is limited on a per-application basis using a short term,
15 minute, limit and a long term, daily, limit. The default rate limit
allows 600 requests every 15 minutes, with up to 30,000 requests per day.
This limit allows applications to make 40 requests per minute for about half the day.
"""
super(DefaultRateLimiter, self).__init__()
self.rules.append(XRateLimitRule(
{'short': {'usageFieldIndex': 0, 'usage': 0,
# 60s * 15 = 15 min
'limit': 600, 'time': (60*15),
'lastExceeded': None},
'long': {'usageFieldIndex': 1, 'usage': 0,
# 60s * 60m * 24 = 1 day
'limit': 30000, 'time': (60*60*24),
'lastExceeded': None}}))
# XRateLimitRule used instead of timer based RateLimitRule
# self.rules.append(RateLimitRule(requests=40, seconds=60, raise_exc=False))
# self.rules.append(RateLimitRule(requests=30000, seconds=(3600 * 24), raise_exc=True))
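# A minimal usage sketch, assuming response headers shaped like the Strava
# ones parsed by get_rates_from_response_headers above; per the module
# docstring, the calling code is expected to invoke the limiter with the
# headers of every backend API response.
def _example_default_rate_limiter():
    limiter = DefaultRateLimiter()
    headers = {'X-RateLimit-Usage': '42,1000',
               'X-RateLimit-Limit': '600,30000'}
    limiter(headers)  # updates usage; raises once a limit is exceeded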
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from flask import current_app, g
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from sqlalchemy import and_, or_
from airflow import models
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.www.utils import CustomSQLAInterface
EXISTING_ROLES = {
'Admin',
'Viewer',
'User',
'Op',
'Public',
}
class AirflowSecurityManager(SecurityManager, LoggingMixin):
###########################################################################
# VIEW MENUS
###########################################################################
# [START security_viewer_vms]
VIEWER_VMS = {
'Airflow',
'DagModelView',
'Browse',
'DAG Runs',
'DagRunModelView',
'Task Instances',
'TaskInstanceModelView',
'SLA Misses',
'SlaMissModelView',
'Jobs',
'JobModelView',
'Logs',
'LogModelView',
'Docs',
'Documentation',
'Github',
'About',
'Version',
'VersionView',
}
# [END security_viewer_vms]
USER_VMS = VIEWER_VMS
# [START security_op_vms]
OP_VMS = {
'Admin',
'Configurations',
'ConfigurationView',
'Connections',
'ConnectionModelView',
'Pools',
'PoolModelView',
'Variables',
'VariableModelView',
'XComs',
'XComModelView',
}
# [END security_op_vms]
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMS = {
'menu_access',
'can_index',
'can_list',
'can_show',
'can_chart',
'can_dag_stats',
'can_dag_details',
'can_task_stats',
'can_code',
'can_log',
'can_get_logs_with_metadata',
'can_tries',
'can_graph',
'can_tree',
'can_task',
'can_task_instances',
'can_xcom',
'can_gantt',
'can_landing_times',
'can_duration',
'can_blocked',
'can_rendered',
'can_version',
}
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMS = {
'can_dagrun_clear',
'can_run',
'can_trigger',
'can_add',
'can_edit',
'can_delete',
'can_paused',
'can_refresh',
'can_success',
'muldelete',
'set_failed',
'set_running',
'set_success',
'clear',
'can_clear',
}
# [END security_user_perms]
# [START security_op_perms]
OP_PERMS = {
'can_conf',
'can_varimport',
}
# [END security_op_perms]
# global view-menu for dag-level access
DAG_VMS = {
'all_dags'
}
WRITE_DAG_PERMS = {
'can_dag_edit',
}
READ_DAG_PERMS = {
'can_dag_read',
}
DAG_PERMS = WRITE_DAG_PERMS | READ_DAG_PERMS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS = [
{
'role': 'Viewer',
'perms': VIEWER_PERMS | READ_DAG_PERMS,
'vms': VIEWER_VMS | DAG_VMS
},
{
'role': 'User',
'perms': VIEWER_PERMS | USER_PERMS | DAG_PERMS,
'vms': VIEWER_VMS | DAG_VMS | USER_VMS,
},
{
'role': 'Op',
'perms': VIEWER_PERMS | USER_PERMS | OP_PERMS | DAG_PERMS,
'vms': VIEWER_VMS | DAG_VMS | USER_VMS | OP_VMS,
},
]
def __init__(self, appbuilder):
super().__init__(appbuilder)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith('view'):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, 'datamodel', None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
def init_role(self, role_name, role_vms, role_perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name: Name of the role to initialize.
:param role_vms: Set of view-menu names to grant to the role.
:param role_perms: Set of permission names to grant to the role.
:return: None.
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.find_role(role_name)
if not role:
role = self.add_role(role_name)
if len(role.permissions) == 0:
self.log.info('Initializing permissions for role:%s in the database.', role_name)
role_pvms = set()
for pvm in pvms:
if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms:
role_pvms.add(pvm)
role.permissions = list(role_pvms)
self.get_session.merge(role)
self.get_session.commit()
else:
self.log.debug('Existing permissions for the role:%s '
'within the database will persist.', role_name)
def delete_role(self, role_name):
"""Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role)\
.filter(sqla_models.Role.name == role_name)\
.first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException("Role named '{}' does not exist".format(
role_name))
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.config.get('AUTH_ROLE_PUBLIC')
return [current_app.appbuilder.security_manager.find_role(public_role)] \
if public_role else []
return user.roles
def get_all_permissions_views(self):
"""
Returns a set of tuples with the perm name and view menu name
"""
perms_views = set()
for role in self.get_user_roles():
perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)
for perm_view in role.permissions})
return perms_views
def get_accessible_dag_ids(self, username=None):
"""
Return the set of dags that the user has access to (either read or write).
:param username: Name of the user.
:return: A set of dag ids that the user could access.
"""
if not username:
username = g.user
if username.is_anonymous or 'Public' in username.roles:
# return an empty set if the role is public
return set()
roles = {role.name for role in username.roles}
if {'Admin', 'Viewer', 'User', 'Op'} & roles:
return self.DAG_VMS
user_perms_views = self.get_all_permissions_views()
# return a set of all dags that the user could access
return {view for perm, view in user_perms_views if perm in self.DAG_PERMS}
def has_access(self, permission, view_name, user=None) -> bool:
"""
Verify whether a given user can perform a certain permission
(e.g. can_read, can_write) on the given dag_id.
:param permission: permission on dag_id (e.g. can_read, can_edit).
:type permission: str
:param view_name: name of view-menu(e.g dag id is a view-menu as well).
:type view_name: str
:param user: user name
:type user: str
:return: a bool indicating whether the user can perform the given permission on the dag_id.
:rtype: bool
"""
if not user:
user = g.user
if user.is_anonymous:
return self.is_item_public(permission, view_name)
return self._has_view_access(user, permission, view_name)
def _get_and_cache_perms(self):
"""
Cache permissions-views
"""
self.perms = self.get_all_permissions_views()
def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()])
def _has_perm(self, permission_name, view_menu_name):
"""
Whether the user has this perm
"""
if hasattr(self, 'perms'):
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms
def has_all_dags_access(self):
"""
Has all the dag access in any of the 3 cases:
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_dag_read permission on all_dags view.
3. Has can_dag_edit permission on all_dags view.
"""
return (
self._has_role(['Admin', 'Viewer', 'Op', 'User']) or
self._has_perm('can_dag_read', 'all_dags') or
self._has_perm('can_dag_edit', 'all_dags'))
def clean_perms(self):
"""
FAB leaves faulty permissions that need to be cleaned up
"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(sqla_models.PermissionView)
.filter(or_(
sqla_models.PermissionView.permission == None, # noqa pylint: disable=singleton-comparison
sqla_models.PermissionView.view_menu == None, # noqa pylint: disable=singleton-comparison
))
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for pvm in pvms:
sesh.delete(pvm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, permission_name, view_menu_name):
"""
Add the new (permission, view_menu) pair to ab_permission_view if it does
not already exist. The related entries are added to the ab_permission
and ab_view_menu meta tables as well.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu
:type view_menu_name: str
:return:
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if permission and view_menu:
pv = self.get_session.query(self.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
if not pv and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name)
@provide_session
def create_custom_dag_permission_view(self, session=None):
"""
Workflow:
1. Fetch all the existing (permissions, view-menu) from Airflow DB.
2. Fetch all the existing dag models that are either active or paused.
3. Create both read and write permission view-menu relations for every dag from step 2.
4. Find all the dag-specific roles (excluding Public, Admin, Viewer, Op, User).
5. Get all the permission-vms owned by the User role.
6. Grant the dag roles every permission-vm of the User role except the all-dags view-menus.
7. Commit the updated permission-vm-roles to the DB.
:return: None.
"""
self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table')
def merge_pv(perm, view_menu):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self._merge_perm(perm, view_menu)
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
# Get all the active / paused dags and insert them into a set
all_dags_models = session.query(models.DagModel)\
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused)).all()
# create can_dag_edit and can_dag_read permissions for every dag(vm)
for dag in all_dags_models:
for perm in self.DAG_PERMS:
merge_pv(perm, dag.dag_id)
# for all the dag-level role, add the permission of viewer
# with the dag view to ab_permission_view
all_roles = self.get_all_roles()
user_role = self.find_role('User')
dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES]
update_perm_views = []
# need to remove all_dag vm from all the existing view-menus
dag_vm = self.find_view_menu('all_dags')
ab_perm_view_role = sqla_models.assoc_permissionview_role
perm_view = self.permissionview_model
view_menu = self.viewmenu_model
all_perm_view_by_user = session.query(ab_perm_view_role)\
.join(perm_view, perm_view.id == ab_perm_view_role
.columns.permission_view_id)\
.filter(ab_perm_view_role.columns.role_id == user_role.id)\
.join(view_menu)\
.filter(perm_view.view_menu_id != dag_vm.id)
all_perm_views = {role.permission_view_id for role in all_perm_view_by_user}
for role in dag_role:
# Get all the perm-view of the role
existing_perm_view_by_user = self.get_session.query(ab_perm_view_role)\
.filter(ab_perm_view_role.columns.role_id == role.id)
existing_perms_views = {pv.permission_view_id for pv in existing_perm_view_by_user}
missing_perm_views = all_perm_views - existing_perms_views
for perm_view_id in missing_perm_views:
update_perm_views.append({'permission_view_id': perm_view_id,
'role_id': role.id})
if update_perm_views:
self.get_session.execute(ab_perm_view_role.insert(), update_perm_views)
self.get_session.commit()
def update_admin_perm_view(self):
"""
Admin should have all the permission-views except the per-dag ones,
because Admin already has the all_dags permission.
Add the missing ones to the table for Admin.
:return: None.
"""
all_dag_view = self.find_view_menu('all_dags')
dag_perm_ids = [self.find_permission('can_dag_edit').id, self.find_permission('can_dag_read').id]
pvms = self.get_session.query(sqla_models.PermissionView).filter(~and_(
sqla_models.PermissionView.permission_id.in_(dag_perm_ids),
sqla_models.PermissionView.view_menu_id != all_dag_view.id)
).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(pvms))
self.get_session.commit()
def sync_roles(self):
"""
1. Init the default roles (Admin, Viewer, User, Op, Public)
with related permissions.
2. Init the custom roles (dag-user) with related permissions.
:return: None.
"""
self.log.debug('Start syncing user roles.')
# Create global all-dag VM
self.create_perm_vm_for_all_dag()
# Create default user role.
for config in self.ROLE_CONFIGS:
role = config['role']
vms = config['vms']
perms = config['perms']
self.init_role(role, vms, perms)
self.create_custom_dag_permission_view()
# init existing roles; the remaining roles can be created through the UI.
self.update_admin_perm_view()
self.clean_perms()
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for the given dag id. The dag id is assumed to exist in our
dag bag, since only the /refresh button or cli.sync_perm call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'})
:type access_control: dict
:return:
"""
for dag_perm in self.DAG_PERMS:
perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)
if perm_on_dag is None:
self.add_permission_view_menu(dag_perm, dag_id)
if access_control:
self._sync_dag_view_permissions(dag_id, access_control)
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm)
def create_perm_vm_for_all_dag(self):
"""
Create perm-vm if not exist and insert into FAB security model for all-dags.
"""
# create perm for global logical dag
for dag_vm in self.DAG_VMS:
for perm in self.DAG_PERMS:
self._merge_perm(permission_name=perm,
view_menu_name=dag_vm)
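# A minimal usage sketch, assuming an initialized AirflowSecurityManager
# instance and an existing custom role named 'analyst' (both hypothetical);
# the access_control shape matches the docstrings above.
def _example_sync_perm_for_dag(security_manager):
    security_manager.sync_perm_for_dag(
        'example_dag_id',  # hypothetical DAG id
        access_control={'analyst': {'can_dag_read'}},
    )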
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2014 - Ronan Delacroix
FTP utils
:author: Ronan Delacroix
"""
import sys
try:
import logging
import pyftpdlib.servers
import pyftpdlib.handlers
from . import text
try:
from pyftpdlib.handlers import TLS_FTPHandler as SFTPHandler
except ImportError:
from pyftpdlib.handlers import FTPHandler as SFTPHandler  # TLS handler unavailable; install pyopenssl to enable FTPS
class FTPEventLogger:
def on_connect(self):
self.log("Connection received.")
def on_disconnect(self):
self.log("Disconnection.")
def log(self, msg, logfun=None, error=False):
raise Exception('Subclasses should override this function.')
def on_login(self, username):
self.log("User %s logged in." % username)
def on_login_failed(self, username, password):
self.log("Login failed with credentials %s/%s." % (username, password))
def on_logout(self, username):
self.log("User %s logged out." % username)
def on_file_sent(self, filepath):
filepath = text.convert_to_unicode(filepath)
self.log(u"File %s has been successfully sent (User %s)." % (filepath, self.username))
def on_file_received(self, filepath):
filepath = text.convert_to_unicode(filepath)
self.log(u"User %s has uploaded a new file : %s" % (self.username, filepath))
def on_incomplete_file_sent(self, filepath):
filepath = text.convert_to_unicode(filepath)
self.log("""File %s has been Incompletely sent by user %s...
Waiting for the user to resume his download.""" % (filepath, self.username), error=True)
def on_incomplete_file_received(self, filepath):
filepath = text.convert_to_unicode(filepath)
self.log("""A new file %s has been uploaded but is incomplete by user %s...
Waiting for the user to resume his upload.""" % (filepath, self.username), error=True)
class FTPHandler(FTPEventLogger, pyftpdlib.handlers.FTPHandler):
def log(self, msg, logfun=None, error=False):
if error:
logging.error("[FTP] %s" % msg)
else:
logging.info("[FTP] %s" % msg)
class SecureFTPHandler(FTPEventLogger, SFTPHandler):
def log(self, msg, logfun=None, error=False):
if error:
logging.error("[FTPS] %s" % msg)
else:
logging.info("[FTPS] %s" % msg)
class DummyDictFTPAuthorizer(pyftpdlib.handlers.DummyAuthorizer):
"""
Dummy Dict FTP Authorizer class.
Provide authentication through FTP for users stored in a dict.
About permissions:
Read permissions:
- "e" = change directory (CWD command)
- "l" = list files (LIST, NLST, STAT, MLSD, MLST, SIZE, MDTM commands)
- "r" = retrieve file from the server (RETR command)
Write permissions:
- "a" = append data to an existing file (APPE command)
- "d" = delete file or directory (DELE, RMD commands)
- "f" = rename file or directory (RNFR, RNTO commands)
- "m" = create directory (MKD command)
- "w" = store a file to the server (STOR, STOU commands)
"""
def __init__(self, users):
"""
Constructor
"""
super(DummyDictFTPAuthorizer, self).__init__()
for username, user in users.items():
self.add_user(username,
user['password'],
user['homedir'],
perm=user.get('perm', self.read_perms),
msg_login="Hi %s, you're welcome here." % user.get('name', username),
msg_quit="Bye %s, hoping you get back soon!" % user.get('name', username)
)
self.custom_users = users
def create_server(handler, users, listen_to="", port=21, data_port_range='5500-5700', name="Ronan Python FTP Server", masquerade_ip=None, max_connection=500, max_connection_per_ip=10):
"""
Runs the FTP Server
"""
try:
start, stop = data_port_range.split('-')
start = int(start)
stop = int(stop)
except ValueError:
raise Exception('Invalid value for data ports')
else:
data_port_range = range(start, stop + 1)
handler.authorizer = DummyDictFTPAuthorizer(users=users)
handler.banner = "%s. (Advice : Please use UTF-8 encoding and always use Binary mode)" % name
handler.passive_ports = data_port_range
if masquerade_ip:
handler.masquerade_address = masquerade_ip
#logging.getLogger('pyftpdlib').disabled = True
pyftpdlib.log.logger = logging.getLogger() # Replace pyftpd logger by default logger
# Instantiate FTP server class and listen to 0.0.0.0:21 or whatever is written in the config
address = (listen_to, port)
server = pyftpdlib.servers.FTPServer(address, handler)
# set a limit for connections
server.max_cons = max_connection
server.max_cons_per_ip = max_connection_per_ip
return server
def create_ftp_server(users, listen_to="", port=21, data_port_range='5500-5700',
name="FTP Server", masquerade_ip=None, max_connection=500, max_connection_per_ip=10):
"""
FTP Server implements normal FTP mode.
"""
handler = FTPHandler
return create_server(handler, users, listen_to=listen_to, port=port, data_port_range=data_port_range,
name=name, masquerade_ip=masquerade_ip, max_connection=max_connection,
max_connection_per_ip=max_connection_per_ip)
def create_secure_ftp_server(users, certificate, listen_to="", port=990, data_port_range='5700-5900',
name="FTP Server", masquerade_ip=None, max_connection=500, max_connection_per_ip=10):
"""
FTP Server implements FTPS (FTP over TLS/SSL) mode.
Note: Connect from client using "FTP over TLS/SSL explicit mode".
"""
handler = SecureFTPHandler
handler.certfile = certificate
return create_server(handler, users, listen_to=listen_to, port=port, data_port_range=data_port_range,
name=name, masquerade_ip=masquerade_ip, max_connection=max_connection,
max_connection_per_ip=max_connection_per_ip)
except ImportError:
print("Impossible to import FTP Server helpers from tbx.ftp module. Requires pyftpdlib.", file=sys.stderr)
try:
"""
The following is only for client-side FTP, using the ftputil library.
"""
import ftplib
class FTPSession(ftplib.FTP):
"""
This class is required to connect to a different port with FTPUtil lib.
"""
def __init__(self, url, user, password, port=21, passive=None, timeout=None):
if timeout:
ftplib.FTP.__init__(self, timeout=timeout)
else:
ftplib.FTP.__init__(self)
self.connect(url, port)
self.login(user, password)
if passive is not None:
self.set_pasv(passive)
"""
Usage :
import ftputil
host = ftputil.FTPHost('my.server.url.or.ip', 'root', 'hello', port=2121, session_factory=FTPSession)
names = host.listdir(host.curdir)
print(names)
"""
except ImportError:
print("Impossible to import FTP helpers from tbx.ftp module. Requires ftplib.", file=sys.stderr)
|
|
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
from glob import glob
from os import listdir, sep, walk
from os.path import basename, dirname, isdir, isfile, join, normpath, realpath
from SCons.Script import COMMAND_LINE_TARGETS, DefaultEnvironment, SConscript
from SCons.Util import case_sensitive_suffixes
from platformio.util import pioversion_to_intstr
SRC_BUILD_EXT = ["c", "cpp", "S", "spp", "SPP", "sx", "s", "asm", "ASM"]
SRC_HEADER_EXT = ["h", "hpp"]
SRC_DEFAULT_FILTER = " ".join([
"+<*>", "-<.git%s>" % sep, "-<svn%s>" % sep,
"-<example%s>" % sep, "-<examples%s>" % sep,
"-<test%s>" % sep, "-<tests%s>" % sep
])
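# src_filter syntax, as parsed by LookupSources below: a space-separated list
# of +<glob> include and -<glob> exclude patterns applied left to right, so
# e.g. "+<*> -<tests/>" builds everything in the source dir except tests/.
# The default above also excludes VCS, example and test folders.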
def BuildProgram(env):
# fix ASM handling on case-insensitive operating systems
if not case_sensitive_suffixes(".s", ".S"):
env.Replace(
AS="$CC",
ASCOM="$ASPPCOM"
)
env.ProcessFlags([
env.get("BOARD_OPTIONS", {}).get("build", {}).get("extra_flags"),
env.get("BUILD_FLAGS")
])
if env.get("FRAMEWORK"):
env.BuildFrameworks([
f.lower().strip() for f in env.get("FRAMEWORK", "").split(",")])
# build dependent libs
deplibs = env.BuildDependentLibraries("$PROJECTSRC_DIR")
# append specified LD_SCRIPT
if ("LDSCRIPT_PATH" in env and
not any(["-Wl,-T" in f for f in env['LINKFLAGS']])):
env.Append(
LINKFLAGS=['-Wl,-T"$LDSCRIPT_PATH"']
)
# enable "cyclic reference" for linker
if env.get("LIBS", deplibs) and env.GetCompilerType() == "gcc":
env.Prepend(
_LIBFLAGS="-Wl,--start-group "
)
env.Append(
_LIBFLAGS=" -Wl,--end-group"
)
# Handle SRC_BUILD_FLAGS
env.ProcessFlags([env.get("SRC_BUILD_FLAGS", None)])
env.Append(
CPPDEFINES=["PLATFORMIO={0:02d}{1:02d}{2:02d}".format(
*pioversion_to_intstr())],
LIBS=deplibs,
LIBPATH=["$BUILD_DIR"]
)
sources = env.LookupSources(
"$BUILDSRC_DIR", "$PROJECTSRC_DIR", duplicate=False,
src_filter=env.get("SRC_FILTER"))
if not sources and not COMMAND_LINE_TARGETS:
env.Exit(
"Error: Nothing to build. Please put your source code files "
"to '%s' folder" % env.subst("$PROJECTSRC_DIR"))
return env.Program(
join("$BUILD_DIR", env.subst("$PROGNAME")),
sources
)
def ProcessFlags(env, flags):
for f in flags:
if not f:
continue
parsed_flags = env.ParseFlags(str(f))
for flag in parsed_flags.pop("CPPDEFINES"):
if not isinstance(flag, list):
env.Append(CPPDEFINES=flag)
continue
if '\"' in flag[1]:
flag[1] = flag[1].replace('\"', '\\\"')
env.Append(CPPDEFINES=[flag])
env.Append(**parsed_flags)
# fix relative CPPPATH
for i, p in enumerate(env.get("CPPPATH", [])):
if isdir(p):
env['CPPPATH'][i] = realpath(p)
# Cancel any previous definition of name, either built in or
# provided with a -D option // Issue #191
undefines = [u for u in env.get("CCFLAGS", []) if u.startswith("-U")]
if undefines:
for undef in undefines:
env['CCFLAGS'].remove(undef)
env.Append(_CPPDEFFLAGS=" %s" % " ".join(undefines))
def IsFileWithExt(env, file_, ext): # pylint: disable=W0613
if basename(file_).startswith("."):
return False
for e in ext:
if file_.endswith(".%s" % e):
return True
return False
def VariantDirWrap(env, variant_dir, src_dir, duplicate=True):
DefaultEnvironment().Append(VARIANT_DIRS=[(variant_dir, src_dir)])
env.VariantDir(variant_dir, src_dir, duplicate)
def LookupSources(env, variant_dir, src_dir, duplicate=True, src_filter=None):
SRC_FILTER_PATTERNS_RE = re.compile(r"(\+|\-)<([^>]+)>")
def _append_build_item(items, item, src_dir):
if env.IsFileWithExt(item, SRC_BUILD_EXT + SRC_HEADER_EXT):
items.add(item.replace(src_dir + sep, ""))
def _match_sources(src_dir, src_filter):
matches = set()
# correct fs directory separator
src_filter = src_filter.replace("/", sep).replace("\\", sep)
for (action, pattern) in SRC_FILTER_PATTERNS_RE.findall(src_filter):
items = set()
for item in glob(join(src_dir, pattern)):
if isdir(item):
for root, _, files in walk(item, followlinks=True):
for f in files:
_append_build_item(items, join(root, f), src_dir)
else:
_append_build_item(items, item, src_dir)
if action == "+":
matches |= items
else:
matches -= items
return sorted(list(matches))
sources = []
variants = []
src_dir = env.subst(src_dir)
if src_dir.endswith(sep):
src_dir = src_dir[:-1]
for item in _match_sources(src_dir, src_filter or SRC_DEFAULT_FILTER):
_reldir = dirname(item)
_src_dir = join(src_dir, _reldir) if _reldir else src_dir
_var_dir = join(variant_dir, _reldir) if _reldir else variant_dir
if _var_dir not in variants:
variants.append(_var_dir)
env.VariantDirWrap(_var_dir, _src_dir, duplicate)
if env.IsFileWithExt(item, SRC_BUILD_EXT):
sources.append(env.File(join(_var_dir, basename(item))))
return sources
def BuildFrameworks(env, frameworks):
if not frameworks or "uploadlazy" in COMMAND_LINE_TARGETS:
return
board_frameworks = env.get("BOARD_OPTIONS", {}).get("frameworks", [])
if frameworks == ["platformio"]:
if board_frameworks:
frameworks.insert(0, board_frameworks[0])
else:
env.Exit("Error: Please specify board type")
for f in frameworks:
if f in ("arduino", "energia"):
env.ConvertInoToCpp()
if f in board_frameworks:
SConscript(env.subst(
join("$PIOBUILDER_DIR", "scripts", "frameworks", "%s.py" % f)))
else:
env.Exit("Error: This board doesn't support %s framework!" % f)
def BuildLibrary(env, variant_dir, src_dir, src_filter=None):
lib = env.Clone()
return lib.Library(
lib.subst(variant_dir),
lib.LookupSources(variant_dir, src_dir, src_filter=src_filter)
)
def BuildDependentLibraries(env, src_dir): # pylint: disable=R0914
INCLUDES_RE = re.compile(
r"^\s*#include\s+(\<|\")([^\>\"\']+)(?:\>|\")", re.M)
LIBSOURCE_DIRS = [env.subst(d) for d in env.get("LIBSOURCE_DIRS", [])]
# start internal prototypes
class IncludeFinder(object):
def __init__(self, base_dir, name, is_system=False):
self.base_dir = base_dir
self.name = name
self.is_system = is_system
self._inc_path = None
self._lib_dir = None
self._lib_name = None
def getIncPath(self):
return self._inc_path
def getLibDir(self):
return self._lib_dir
def getLibName(self):
return self._lib_name
def run(self):
if not self.is_system and self._find_in_local():
return True
return self._find_in_system()
def _find_in_local(self):
if isfile(join(self.base_dir, self.name)):
self._inc_path = join(self.base_dir, self.name)
return True
else:
return False
def _find_in_system(self):
for lsd_dir in LIBSOURCE_DIRS:
if not isdir(lsd_dir):
continue
for ld in env.get("LIB_USE", []) + sorted(listdir(lsd_dir)):
if not isdir(join(lsd_dir, ld)):
continue
inc_path = normpath(join(lsd_dir, ld, self.name))
try:
lib_dir = inc_path[:inc_path.index(
sep, len(lsd_dir) + 1)]
except ValueError:
continue
lib_name = basename(lib_dir)
# ignore user's specified libs
if lib_name in env.get("LIB_IGNORE", []):
continue
if not isfile(inc_path):
# if source code is in "src" dir
lib_dir = join(lsd_dir, lib_name, "src")
inc_path = join(lib_dir, self.name)
if isfile(inc_path):
self._lib_dir = lib_dir
self._lib_name = lib_name
self._inc_path = inc_path
return True
return False
def _get_dep_libs(src_dir):
state = {
"paths": set(),
"libs": set(),
"ordered": set()
}
state = _process_src_dir(state, env.subst(src_dir))
result = []
for item in sorted(state['ordered'], key=lambda s: s[0]):
result.append((item[1], item[2]))
return result
def _process_src_dir(state, src_dir):
for root, _, files in walk(src_dir, followlinks=True):
for f in files:
if env.IsFileWithExt(f, SRC_BUILD_EXT + SRC_HEADER_EXT):
state = _parse_includes(state, env.File(join(root, f)))
return state
def _parse_includes(state, node):
skip_includes = ("arduino.h", "energia.h")
matches = INCLUDES_RE.findall(node.get_text_contents())
for (inc_type, inc_name) in matches:
base_dir = dirname(node.get_abspath())
if inc_name.lower() in skip_includes:
continue
if join(base_dir, inc_name) in state['paths']:
continue
else:
state['paths'].add(join(base_dir, inc_name))
finder = IncludeFinder(base_dir, inc_name, inc_type == "<")
if finder.run():
_parse_includes(state, env.File(finder.getIncPath()))
_lib_dir = finder.getLibDir()
if _lib_dir and _lib_dir not in state['libs']:
state['ordered'].add((
len(state['ordered']) + 1, finder.getLibName(),
_lib_dir))
state['libs'].add(_lib_dir)
if env.subst("$LIB_DFCYCLIC").lower() == "true":
state = _process_src_dir(state, _lib_dir)
return state
# end internal prototypes
deplibs = _get_dep_libs(src_dir)
for l, ld in deplibs:
env.Append(
CPPPATH=[join("$BUILD_DIR", l)]
)
# add automatically "utility" dir from the lib (Arduino issue)
if isdir(join(ld, "utility")):
env.Append(
CPPPATH=[join("$BUILD_DIR", l, "utility")]
)
libs = []
for (libname, inc_dir) in deplibs:
lib = env.BuildLibrary(
join("$BUILD_DIR", libname), inc_dir)
env.Clean(libname, lib)
libs.append(lib)
return libs
def exists(_):
return True
def generate(env):
env.AddMethod(BuildProgram)
env.AddMethod(ProcessFlags)
env.AddMethod(IsFileWithExt)
env.AddMethod(VariantDirWrap)
env.AddMethod(LookupSources)
env.AddMethod(BuildFrameworks)
env.AddMethod(BuildLibrary)
env.AddMethod(BuildDependentLibraries)
return env
|
|
#!/usr/bin/python
#
# remap.py
import sys
from math import *
verbose = False
# vec3 implementation backed by Numpy for fast array operations
import numpy as N
class vec3(N.ndarray):
"""A simple 3D vector class, using Numpy for fast array operations."""
def __new__(cls, *args):
a = N.ndarray.__new__(vec3, (3,), float)
if len(args) == 0:
a[0] = a[1] = a[2] = 0
elif len(args) == 1:
v = args[0]
a[0] = v[0]
a[1] = v[1]
a[2] = v[2]
elif len(args) == 3:
a[0] = args[0]
a[1] = args[1]
a[2] = args[2]
else:
raise RuntimeError("vec3() takes 0, 1 or 3 arguments")
return a
def _getx(self): return self[0]
def _gety(self): return self[1]
def _getz(self): return self[2]
def _setx(self, value): self[0] = value
def _sety(self, value): self[1] = value
def _setz(self, value): self[2] = value
x = property(_getx, _setx)
y = property(_gety, _sety)
z = property(_getz, _setz)
def dot(u, v):
return u.x*v.x + u.y*v.y + u.z*v.z
def square(v):
return v.x**2 + v.y**2 + v.z**2
def length(v):
return sqrt(square(v))
def triple_scalar_product(u, v, w):
return u.x*(v.y*w.z - v.z*w.y) + u.y*(v.z*w.x - v.x*w.z) + u.z*(v.x*w.y - v.y*w.x)
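# Note: this is the determinant of the 3x3 matrix with rows u, v, w; the
# Cuboid constructor below requires it to equal 1, i.e. a volume-preserving
# integer lattice basis.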
class Plane:
def __init__(self, p, n):
self.a = n.x
self.b = n.y
self.c = n.z
self.d = -dot(p,n)
def normal(self):
ell = sqrt(self.a**2 + self.b**2 + self.c**2)
return vec3(self.a/ell, self.b/ell, self.c/ell)
def test(self, x, y, z):
"""Compare a point to a plane. Return value is positive, negative, or
zero depending on whether the point lies above, below, or on the plane."""
return self.a*x + self.b*y + self.c*z + self.d
class Cell:
def __init__(self, ix=0, iy=0, iz=0):
self.ix = ix
self.iy = iy
self.iz = iz
self.faces = []
def contains(self, x, y, z):
for f in self.faces:
if f.test(x,y,z) < 0:
return False
return True
def UnitCubeTest(P):
"""Return +1, 0, or -1 if the unit cube is above, below, or intersecting the plane."""
above = 0
below = 0
for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]:
s = P.test(a, b, c)
if s > 0:
above = 1
elif s < 0:
below = 1
return above - below
class Cuboid:
"""Cuboid remapping class."""
def __init__(self, u1=(1,0,0), u2=(0,1,0), u3=(0,0,1)):
"""Initialize by passing a 3x3 invertible integer matrix."""
u1 = vec3(u1)
u2 = vec3(u2)
u3 = vec3(u3)
if triple_scalar_product(u1, u2, u3) != 1:
print( "!! Invalid lattice vectors: u1 = %s, u2 = %s, u3 = %s" % (u1,u2,u3) )
self.e1 = vec3(1,0,0)
self.e2 = vec3(0,1,0)
self.e3 = vec3(0,0,1)
else:
s1 = square(u1)
s2 = square(u2)
d12 = dot(u1, u2)
d23 = dot(u2, u3)
d13 = dot(u1, u3)
alpha = -d12/s1
gamma = -(alpha*d13 + d23)/(alpha*d12 + s2)
beta = -(d13 + gamma*d12)/s1
self.e1 = u1
self.e2 = u2 + alpha*u1
self.e3 = u3 + beta*u1 + gamma*u2
if verbose:
print( "e1 = %s" % self.e1)
print( "e2 = %s" % self.e2)
print( "e3 = %s" % self.e3)
self.L1 = length(self.e1)
self.L2 = length(self.e2)
self.L3 = length(self.e3)
self.n1 = self.e1/self.L1
self.n2 = self.e2/self.L2
self.n3 = self.e3/self.L3
self.cells = []
v0 = vec3(0,0,0)
self.v = [v0,
v0 + self.e3,
v0 + self.e2,
v0 + self.e2 + self.e3,
v0 + self.e1,
v0 + self.e1 + self.e3,
v0 + self.e1 + self.e2,
v0 + self.e1 + self.e2 + self.e3]
# Compute bounding box of cuboid
xs = [vk.x for vk in self.v]
ys = [vk.y for vk in self.v]
zs = [vk.z for vk in self.v]
vmin = vec3(min(xs), min(ys), min(zs))
vmax = vec3(max(xs), max(ys), max(zs))
# Extend to nearest integer coordinates
ixmin = int(floor(vmin.x))
ixmax = int(ceil(vmax.x))
iymin = int(floor(vmin.y))
iymax = int(ceil(vmax.y))
izmin = int(floor(vmin.z))
izmax = int(ceil(vmax.z))
if verbose:
print( "ixmin, ixmax = %d, %d" % (ixmin,ixmax) )
print( "iymin, iymax = %d, %d" % (iymin,iymax) )
print( "izmin, izmax = %d, %d" % (izmin,izmax) )
# Determine which cells (and which faces within those cells) are non-trivial
for ix in range(ixmin, ixmax):
for iy in range(iymin, iymax):
for iz in range(izmin, izmax):
shift = vec3(-ix, -iy, -iz)
faces = [Plane(self.v[0] + shift, +self.n1),
Plane(self.v[4] + shift, -self.n1),
Plane(self.v[0] + shift, +self.n2),
Plane(self.v[2] + shift, -self.n2),
Plane(self.v[0] + shift, +self.n3),
Plane(self.v[1] + shift, -self.n3)]
c = Cell(ix, iy, iz)
skipcell = False
for f in faces:
r = UnitCubeTest(f)
if r == +1:
# Unit cube is completely above this plane; this cell is empty
continue
elif r == 0:
# Unit cube intersects this plane; keep track of it
c.faces.append(f)
elif r == -1:
skipcell = True
break
if skipcell or len(c.faces) == 0:
if verbose:
print( "Skipping cell at (%d,%d,%d)" % (ix,iy,iz))
continue
else:
self.cells.append(c)
if verbose:
print( "Adding cell at (%d,%d,%d)" % (ix,iy,iz) )
# For the identity remapping, use exactly one cell
if len(self.cells) == 0:
self.cells.append(Cell())
# Print the full list of cells
if verbose:
print( "%d non-empty cells" % len(self.cells))
for c in self.cells:
print( "Cell at (%d,%d,%d) has %d non-trivial planes" % (c.ix, c.iy, c.iz, len(c.faces)) )
def Transform(self, x, y, z):
for c in self.cells:
if c.contains(x,y,z):
x += c.ix
y += c.iy
z += c.iz
p = vec3(x,y,z)
return (dot(p, self.n1), dot(p, self.n2), dot(p, self.n3))
raise RuntimeError( "(%g, %g, %g) not contained in any cell" % (x,y,z) )
def InverseTransform(self, r1, r2, r3):
p = r1*self.n1 + r2*self.n2 + r3*self.n3
x1 = fmod(p[0], 1) + (p[0] < 0)
x2 = fmod(p[1], 1) + (p[1] < 0)
x3 = fmod(p[2], 1) + (p[2] < 0)
return vec3(x1, x2, x3)
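# A minimal usage sketch with an arbitrary valid basis: u1=(1,1,0),
# u2=(0,1,0), u3=(0,0,1) has triple scalar product 1, so the remapping is
# well defined; the point coordinates are placeholders.
def _example_remap():
    C = Cuboid(u1=(1, 1, 0), u2=(0, 1, 0), u3=(0, 0, 1))
    r1, r2, r3 = C.Transform(0.25, 0.5, 0.75)
    print(r1, r2, r3, C.InverseTransform(r1, r2, r3))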
def abort(msg=None, code=1):
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
if __name__ == '__main__':
# Parse command line arguments
params = {}
for arg in sys.argv[1:]:
pair = arg.split('=', 1)
if len(pair) == 2:
name, val = pair
if name == "m": params['m'] = int(val)
elif name == "n": params['n'] = int(val)
elif name == "u": params['u'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()]
elif name == "u1": params['u1'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()]
elif name == "u2": params['u2'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()]
elif name == "u3": params['u3'] = [int(f) for f in val.strip("[()]").replace(',', ' ').split()]
elif name == "in": params['in'] = str(val)
elif name == "out": params['out'] = str(val)
else: abort("Unrecognized parameter '%s'" % name)
else:
if arg == "-v" or arg == "--verbose":
verbose = True
elif arg == "-h" or arg == "--help":
print( "Usage: python remap.py [OPTIONS] PARAMS" )
else:
abort("Unrecognized option '%s'" % arg)
# Open input and output files
if 'in' not in params or params['in'] == "stdin":
fin = sys.stdin
else:
fin = open(params['in'], "r")
if not fin: abort("Could not open input file '%s'" % params['in'])
if 'out' not in params or params['out'] == "stdout":
fout = sys.stdout
else:
fout = open(params['out'], "w")
if not fout: abort("!! Could not open output file '%s'" % params['out'])
# Initialize remapping
if 'm' in params and 'n' in params:
m = params['m']
n = params['n']
u1 = (1,m,n)
u2 = (0,1,0)
u3 = (0,0,1)
elif 'u' in params:
u = params['u']
if len(u) != 9: abort("!! Input matrix 'u' should have 9 components, not %d" % len(u))
u1 = (u[0], u[1], u[2])
u2 = (u[3], u[4], u[5])
u3 = (u[6], u[7], u[8])
elif 'u1' in params and 'u2' in params and 'u3' in params:
u1 = params['u1']
u2 = params['u2']
u3 = params['u3']
else:
print( "?? Cuboid geometry not specified, assuming trivial remapping" )
u1 = (1,0,0)
u2 = (0,1,0)
u3 = (0,0,1)
if verbose:
print( "u1 = %s, u2 = %s, u3 = %s" % (u1,u2,u3))
C = Cuboid(u1, u2, u3)
for line in fin:
line = line.strip()
if len(line) == 0 or line.startswith('#'):
continue
coords = line.replace(',', ' ').split()
if len(coords) != 3:
print("?? Expecting 3 coordinates per line, not '%s'" % line)
continue
(xin,yin,zin) = float(coords[0]), float(coords[1]), float(coords[2])
(xout,yout,zout) = C.Transform(xin, yin, zin)
print( "%e %e %e" % (xout,yout,zout ))
fin.close()
fout.close()
|