# (extraction artifact: markdown table header "gt / value | context" from the
#  dataset this file was copied out of — not part of the original source)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module provides API class definition to define observables.
Classes
-------
* :class:`Observable`: main object to define observables.
"""
from __future__ import print_function
import collections
import warnings
import inspect
import dill
import re
from copy import deepcopy
from tabulate import tabulate
_re_codestring = 'T([a-z])([a-z]*\d*[\.,]*\d*)M([a-z\-]+)J(\d+)'
def _is_valid_codestring(codestring):
chain = re.compile(_re_codestring)
m = chain.match(codestring)
if m:
return True
else:
return False
class ObservableError(Exception):
    """Base class for errors raised while defining observables."""
    pass


class ObservableStringError(ObservableError):
    """Raised when a codestring cannot be parsed into an Observable."""
    pass


class ObservableNameError(ObservableError):
    """Raised when an observable name is invalid."""
    pass
class Observable(object):
    """Defines how to retrieve observables.

    Parameters
    ----------
    name : str
        user name for this observable (can be one of the raw observable)
    raw : str (default None)
        raw name of the observable: must be a column name of raw data, i.e.
        first element of one entry of Experiment.datatype
    differentiate : boolean (default False)
        whether to differentiate raw observable
    scale : str {'linear', 'log'}
        expected scaling form as a function of time
        (used for extrapolating values at boundaries, including in the
        local_fit procedure)
    local_fit : boolean (default False)
        whether to perform local fit procedure
    time_window : float (default 0.)
        Time window over which local fit procedure is applied
        (used only when local_fit is activated)
    join_points : int (default 3)
        number of points over which extrapolation procedure is led by
        fitting linearly [the scale of] the observable w.r.t time
    mode : str {'dynamics', 'birth', 'division', 'net_increase', 'rate',
                'average'}
        mode used to retrieve data:

        * 'dynamics': all timepoints are retrieved
        * 'birth': only birth value is retrieved
        * 'division': only division value is retrieved
        * 'net-increase-additive': difference between division value
          and birth value
        * 'net-increase-multiplicative': ratio between division value
          and birth value
        * 'rate': rate of linear fit of [scale of] observable
        * 'average': average of observable over cell cycle
    timing : str {'t', 'b', 'd', 'm', 'g'}
        set the time at which cell cycle observable is associated:

        * 't' : time-lapse timing (associated to mode 'dynamics')
        * 'b' : cell cycle birth time
        * 'd' : cell cycle division time
        * 'm' : cell cycle midpoint (half time)
        * 'g' : cell cycle generation index
    tref : float or 'root' (default None)
        when timing is set to 'g', sets the 0th generation to the cell
        that bounds this reference time
        when timing is set to 't' (time-lapse timing), allows to translate time
        values by substracting floating point value (if given as a float), or
        aligns to the colony root cell last time value as origin.
    """

    def __init__(self, name=None, from_string=None,
                 raw=None, differentiate=False, scale='linear',
                 local_fit=False, time_window=0., join_points=3,
                 mode='dynamics', timing='t', tref=None):
        # ordered attribute list driving label/__repr__/as_string_table output
        self._attr_names = ['name',
                            'raw',
                            'scale',
                            'differentiate',
                            'local_fit',
                            'time_window',
                            'join_points',
                            'mode',
                            'timing',
                            'tref']
        if from_string is not None:
            # rebuild every functional attribute by parsing the codestring
            self.load_from_string(from_string)
        else:
            self.raw = raw
            self.name = name
            # default the raw column to the user name when not given explicitly
            if raw is None and name is not None:
                self.raw = name
            self.differentiate = differentiate
            self.scale = scale
            self.local_fit = local_fit
            self.time_window = time_window
            self.join_points = join_points
            self.mode = mode
            self.timing = timing
            self.tref = tref
        # case where name is None
        if name is None:
            self.name = self.label  # use the codestring
        return

    def as_timelapse(self):
        """Convert current observable to its dynamic counterpart

        This is needed when computing cell-cycle observables.

        Returns
        -------
        Observable
            `self` when already time-lapse; otherwise a deep copy with
            mode 'dynamics', timing 't' and a '_timelapsed_'-prefixed name.
        """
        if self.mode == 'dynamics' and self.timing == 't':
            # everything's fine: already a time-lapse observable
            return self
        else:
            tobs = deepcopy(self)
            tobs.mode = 'dynamics'
            tobs.timing = 't'
            tobs.name = '_timelapsed_' + self.name
            return tobs

    @property
    def label(self):
        """Label is outputing a unique string representation

        This method creates a string label that specifies each parameter to
        re-construct the Observable. The output string is
        (kind-of) human readable. More importantly, it is suitable
        to be a filename (only alphanumeric caracters, and underscores), and in
        fact serves to name directories in analysis folder.

        Note
        ----
        :func:`__repr__` : returns another string representation, that can be
        called by the built-in :func:`eval()`, to instantiate a new object
        with identical functional parameters.
        """
        msg = ''
        # timing is in between T flags
        if self.tref is not None:
            if isinstance(self.tref, float):
                stref = '{:.2f}'.format(self.tref)
            elif isinstance(self.tref, int) or isinstance(self.tref, str):
                stref = '{}'.format(self.tref)
        else:
            # no reference time: T flag carries the timing character only
            stref = ''
        msg += 'T' + self.timing + stref
        # mode is in between M flags
        msg += 'M' + self.mode
        msg += 'J' + '{}'.format(self.join_points)
        msg += '_'
        # optional modifiers, each terminated by an underscore
        if self.differentiate:
            msg += 'dot_'
        if self.local_fit:
            msg += 'W{:.2f}_'.format(self.time_window)
        if self.scale == 'log':
            msg += 'log_'
        # raw column name is always the final component
        msg += self.raw
        return msg

    @label.setter
    def label(self, value):
        """Set Observable instance using given codestring"""
        self.load_from_string(value)

    def load_from_string(self, codestring):
        """Set Observable instance from string code created by `label` method

        Parameters
        ----------
        codestring : str
            must follow some rules for parsing

        Raises
        ------
        ObservableStringError
            when the codestring lacks the mandatory T..M..J.. prefix

        Note
        ----
        `self.name` is NOT set here; __init__ falls back to the codestring
        label when name is None.
        """
        # set options to default and update if found
        self.mode = 'dynamics'
        self.timing = 't'
        self.local_fit = False
        self.time_window = 0.
        self.join_points = 3  # default
        self.differentiate = False
        self.scale = 'linear'
        items = codestring.split('_')
        self.raw = items[-1]  # last item is always raw observable label
        if self.raw == 'none':
            msg = ("'raw' is set to 'none'.\n"
                   "Update to a valid column name of your experiment.")
            warnings.warn(msg)
        # test whether codestring is valid: must have T and M flags
        chain = re.compile(_re_codestring)
        m = chain.match(codestring)
        if m:
            timing, stref, mode, sjoin = m.groups()
            self.timing = timing
            if stref:
                if stref == 'root':
                    self.tref = 'root'
                else:  # convert to float
                    self.tref = float(stref.replace(',', '.'))  # if decimal is ,
            else:
                self.tref = None
            self.mode = mode
            self.join_points = int(sjoin)
        else:
            raise ObservableStringError('Not a valid codestring')
        # try to check whether local fit is performed and its parameters
        pfit = re.compile('W(\d*[.,]*\d*)')
        for item in items[:-1]:
            # local_fit?
            m = pfit.search(item)
            if m is not None:
                stime_window, = m.groups()
                # check that tw_str is not empty
                if stime_window:
                    self.time_window = float(stime_window.replace(',', '.'))
                    self.local_fit = True
            # log scale
            if item == 'log':
                self.scale = 'log'
            if item == 'dot':
                self.differentiate = True
        return

    def as_string_table(self):
        """Human readable output as a table.

        Returns
        -------
        str
            two-column (parameter, value) table rendered by tabulate.
        """
        tab = [['parameter', 'value']]
        for key in self._attr_names:
            val = self.__getattribute__(key)
            tab.append([key, val])
        return tabulate(tab, headers='firstrow')

    def latexify(self, show_variable=True,
                 plus_delta=False,
                 shorten_time_variable=False,
                 prime_time=False,
                 as_description=False,
                 use_name=None):
        r"""Returns a latexified string for observable

        Parameters
        ----------
        show_variable : bool
            whether to print out time/generation variable
        plus_delta : bool
            whether to add a $\Delta$ to time/generation variable; used for
            auto- and cross-correlation labeling
        shorten_time_variable : bool
            when active, will display only $t$/$g$
        prime_time : bool
            whether to set a prime on the time/generation variable
        as_description : bool (default False)
            sets up the description of the observable from rules to compute it
            (derivatives, log, and raw label)
        use_name : str (default None)
            when the observable name is too cumbersome to be printed, and the
            user wants to choose a specific name for such a printout

        Returns
        -------
        str
            a $-delimited LaTeX expression.
        """
        output = r'$'
        if self.name is not None and not as_description:
            # print the (possibly substituted) user-given name
            if use_name is None:
                output += '\\mathrm{{ {} }}'.format(self.name.replace('-', '\, ').replace('_', '\ '))
            else:
                output += '\\mathrm{{ {} }}'.format(use_name)
        else:
            # give all details using raw and operations on it
            if self.differentiate:
                output += '\\frac{\\mathrm{d}}{\\mathrm{d}t}'
                if self.scale == 'log':
                    output += '\\log\\left['  # parenthesis started
            variable_name = '{}'.format(self.raw)
            output += '\\mathrm{{ {} }}'.format(variable_name.replace('_', '\ ').replace('-', '\, '))
            if self.differentiate and self.scale == 'log':
                output += '\\right]'  # parenthesis closed
            if self.mode != 'dynamics':
                # cell-cycle modes are shown as a subscript
                output += '_{{\mathrm{{ {} }} }}'.format(self.mode)
        if show_variable:
            time_var = _latexify_time_var(self, prime_time=prime_time,
                                          shorten_time_variable=shorten_time_variable,
                                          plus_delta=plus_delta)
            output += '\\left( {} \\right)'.format(time_var)
        if self.local_fit and as_description:
            output += '\\ [window: {}]'.format(self.time_window)
        output += '$'
        return output

    @property
    def as_latex_string(self):
        """Export as LaTeX string. Old format, replaced by latexify
        """
        return self.latexify(as_description=False, plus_delta=False, prime_time=False)

    # def __str__(self):
    #     return self.label

    def __repr__(self):
        # NOTE(review): leaves a trailing ', ' before the closing parenthesis;
        # harmless for eval() round-trip, kept for backward compatibility
        name = type(self).__name__
        chain = name + '('
        for key in self._attr_names:
            val = self.__getattribute__(key)
            chain += '{}={}, '.format(key, repr(val))
        chain += ')'
        return chain
def _latexify_time_var(obs, prime_time=False,
shorten_time_variable=False,
plus_delta=False):
"""Latexify time variable from obs Observable
No $ dollar sign in this expression.
Parameters
----------
obs : Observable instance
this observable will be search for attributes timing and tref
prime_time : bool
whether to indicate a prime
shorten_time_variable : bool
whether to shorten time variable expression
plus_delta : bool
whether to add a '+ Delta'
Returns
-------
str
"""
# timing character
time_char = ''
if shorten_time_variable:
if obs.timing == 'g':
time_char = 'g'
else:
time_char = 't'
else:
if obs.timing == 't':
time_char += 't'
elif obs.timing == 'b':
time_char += 't_{\\mathrm{birth}}'
elif obs.timing == 'd':
time_char += 't_{\\mathrm{div}}'
elif obs.timing == 'm':
time_char += ('\\frac{t_{\\mathrm{birth}} + t_{\\mathrm{div}}}'
'{2}')
elif obs.timing == 'g':
time_char += 'g' # 'n_{\\mathrm{gen}}'
if prime_time:
time_char += "^'"
# substract troot
to_substract = ''
if not shorten_time_variable:
if obs.tref is None:
to_substract += ''
elif obs.tref == 'root':
if obs.timing != 'g':
to_substract += '- t^{\\mathrm{root}}_{\mathrm{div}}'
else:
to_substract += '- n^{\\mathrm{root}}_{\mathrm{gen}}'
else:
if obs.timing != 'g':
to_substract += '- {:.2f}'.format(obs.tref)
else:
to_substract += '- n_{{\mathrm{{gen}} }}({:.2f})'.format(obs.tref)
if not plus_delta:
time_var = time_char + to_substract
else:
time_var = time_char + to_substract + '+ \\Delta ' + time_char
return time_var
class FunctionalObservable(object):
    """Combination of :class:`Observable` instances

    Parameters
    ----------
    name : str
        user defined name for this observable
    f : callable
        the function to apply to observables
    observables : list of :class:`Observable` instances (default None)
        parameters of the function f to be applied; ``None`` is treated as
        an empty list

    Raises
    ------
    ValueError
        when `name` is None, `f` is not callable, or the number of
        observables does not match the number of arguments of `f`
    TypeError
        when an item of `observables` is neither an Observable nor a
        FunctionalObservable instance

    Warning
    -------
    Contrary to :class:`Observable`, instances of :class:`FunctionalObservable`
    cannot be represented as a string using :func:`repr()`, that could be
    turned into a new instance with identical parameters using :func:`eval()`.
    This is due to the applied function, difficult to serialize as a string
    AND keeping a human-readable format to read its definition.
    """

    def __init__(self, name=None, f=None, observables=None):
        if name is None:
            raise ValueError('name must be a unique name string')
        self.name = name
        if not callable(f):
            raise ValueError('f must be callable')
        self.f = f
        # serialize the callable so the definition can be stored on disk
        self.source_f = dill.dumps(f)
        # count f's positional parameters (inspect API differs between 2 and 3)
        try:  # python 3
            from inspect import signature
            sig = signature(f)
            n_args = len(sig.parameters)
        except ImportError:  # python 2
            from inspect import getargspec
            argspec = getargspec(f)
            n_args = len(argspec.args)
        # FIX: the original used a mutable default argument (observables=[]),
        # shared across all calls; normalize None to a fresh empty list
        if observables is None:
            observables = []
        # validate inputs before storing any derived attribute
        for obs in observables:
            if not (isinstance(obs, Observable) or isinstance(obs, FunctionalObservable)):
                msg = ('observables argument must be a list of Observables '
                       'instances')
                raise TypeError(msg)
        if len(observables) != n_args:
            msg = ('length of observable list must match number of arguments of f ')
            raise ValueError(msg)
        self.observables = observables
        # generator over every raw Observable reachable through the arguments
        self.raw_observables = unroll_raw_obs(observables)
        return

    @property
    def timing(self):
        """Return timing depending on observables passed as parameters"""
        timings = []
        for item in self.observables:
            timings.append(item.timing)
        # any time-lapse argument forces time-lapse timing for the combination
        if 't' in timings:
            return 't'
        else:
            return timings[0]  # default

    @property
    def tref(self):
        """Reference value, taken from the first observable.

        tref is used only when timing is 'g' (generations); None otherwise.
        """
        if self.timing == 'g':
            t = self.observables[0].tref
        else:
            t = None
        return t

    @property
    def mode(self):
        """Returns mode depending on observables passed as parameters"""
        modes = []
        for item in self.observables:
            modes.append(item.mode)
        # any dynamic argument forces dynamic mode for the combination
        if 'dynamics' in modes:
            return 'dynamics'
        else:
            return 'cell-cycle'

    @property
    def label(self):
        """get unique string identifier"""
        msg = self.name + '('
        for item in self.observables:
            msg += item.label + ', '
        msg += ')'
        return msg

    @property
    def as_latex_string(self):
        """Export as LaTeX string (delegates to :meth:`latexify`)."""
        msg = self.latexify(show_variable=True)
        return msg

    def latexify(self, show_variable=True,
                 plus_delta=False,
                 shorten_time_variable=False,
                 prime_time=False,
                 as_description=False,
                 use_name=None):
        """Latexify observable name

        Same keyword contract as :meth:`Observable.latexify`; only the
        user-given (or substituted) name is printed, optionally followed by
        the time/generation variable.
        """
        output = r'$'
        if use_name is None:
            # '\\, ' and '\\ ' were written '\,'/'\ ' originally (invalid
            # escapes); runtime strings are identical
            output += '\\mathrm{{ {} }}'.format(self.name.replace('-', '\\, ').replace('_', '\\ '))
        else:
            output += '\\mathrm{{ {} }}'.format(use_name)
        if show_variable:
            time_var = _latexify_time_var(self, plus_delta=plus_delta,
                                          shorten_time_variable=shorten_time_variable,
                                          prime_time=prime_time)
            output += '({})'.format(time_var)
        output += '$'
        return output
def unroll_raw_obs(obs):
    """Returns a generator over flattened list of Observable instances

    Parameters
    ----------
    obs : (list of) :class:`Observable` or :class:`FunctionalObservable` instances

    Yields
    ------
    :class:`Observable`
        instances found in argument list, going into nested layers in the
        case of nested list, or for :class:`FunctionalObservable` instances
    """
    # FIX: collections.Iterable was removed from the top-level namespace in
    # Python 3.10; resolve via collections.abc when present (Python 3),
    # falling back to the collections module itself (Python 2)
    iterable_cls = getattr(collections, 'abc', collections).Iterable
    if isinstance(obs, Observable):
        yield obs
    elif isinstance(obs, iterable_cls):
        for item in obs:
            for elem in unroll_raw_obs(item):
                yield elem
    elif isinstance(obs, FunctionalObservable):
        # recurse into the arguments of the functional observable
        for item in obs.observables:
            for elem in unroll_raw_obs(item):
                yield elem
def unroll_func_obs(obs):
    """Returns flattened list of FunctionalObservable instances

    It inspect recursively the observable content of the argument to yield
    all nested FunctionalObservable instances. They are ordered from lower to
    deeper layers in nested-ness. If you need to compute f(g(h(x))), where
    x is a raw Observable, the generator yields h, g, and f lastly, so that
    evaluation can be performed in direct order.

    Parameters
    ----------
    obs : :class:`FunctionalObservable` instance
        the observable to inspect

    Yields
    ------
    :class:`FunctionalObservable` instance
        The generator yields funcObs instance in appropriate order (from lower
        to higher level in nested-ness).
    """
    # FIX: collections.Iterable was removed in Python 3.10; resolve via
    # collections.abc when present (Python 3), else use collections (Python 2)
    iterable_cls = getattr(collections, 'abc', collections).Iterable
    if isinstance(obs, FunctionalObservable):
        # yield dependencies first so evaluation can proceed bottom-up
        for item in obs.observables:
            for elem in unroll_func_obs(item):
                yield elem
        yield obs
    elif isinstance(obs, iterable_cls):
        for item in obs:
            for elem in unroll_func_obs(item):
                yield elem
def set_observable_list(*args, **kwargs):
    """Make raw, and functional observable lists for running analyses

    Parameters
    ----------
    *args
        Variable length argument list of :class:`Observable` or
        :class:`FunctionalObservable` instances
    **kwargs
        Accepted keyword arguments: 'filters=[]' with a list of
        :class:`FilterSet` or :class:`FilterGeneral` instance (must have a
        .obs attribute)

    Returns
    -------
    raw_obs, func_obs
        lists of raw observables, functional observables (correctly ordered)
    """
    raw_obs = []
    func_obs = []

    def _collect(source):
        # append unseen observables while preserving encounter order
        for candidate in unroll_raw_obs(source):
            if candidate not in raw_obs:
                raw_obs.append(candidate)
        for candidate in unroll_func_obs(source):
            if candidate not in func_obs:
                func_obs.append(candidate)

    # observables referenced by filters come first
    for filt in kwargs.get('filters', []):
        _collect(filt.obs)
    # then observables given directly as positional arguments
    for observable in args:
        _collect(observable)
    return raw_obs, func_obs
if __name__ == '__main__':
    # Smoke test: build plain and functional observables, then check that
    # converting a cell-cycle observable back to time-lapse reproduces the
    # label of its dynamic counterpart.
    length = Observable('length', raw='length', scale='log')
    width = Observable('width', raw='width')

    def volume(x, y):
        # simple rod-cell volume proxy: length * width^2
        return x * y**2

    combo = FunctionalObservable('volume', volume, [length, width])
    divlength = Observable('divlength', raw='length', scale='log',
                           mode='division', timing='d')
    newcombo = FunctionalObservable('resc_length', f=lambda x, y: x/y, observables=[length, divlength])
    # expected True: labels ignore the user name and match functionally
    print(length.label == divlength.as_timelapse().label)
# (extraction artifact: markdown table cell separator — the content below is a
#  separate source, an excerpt of urllib3/connectionpool.py)
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError, ConnectionError
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
# Python 2/3 compatible range iterator
xrange = six.moves.xrange

log = logging.getLogger(__name__)

# sentinel object: distinguishes "argument omitted" from an explicit None
_Default = object()
## Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")
        # httplib doesn't like it when we include brackets in ipv6 addresses,
        # so strip them before storing the host
        self.host, self.port = host.strip('[]'), port

    def __str__(self):
        cls_name = type(self).__name__
        return '%s(host=%r, port=%r)' % (cls_name, self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values that merely signal "operation would block" on a non-blocking
# socket, as opposed to a real failure
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 **conn_kw):
        """Initialize the pool; parameters are documented on the class."""
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        # normalize a scalar (int/float) timeout into a Timeout object
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)
        if retries is None:
            retries = Retry.DEFAULT
        self.timeout = timeout
        self.retries = retries
        self.pool = self.QueueCls(maxsize)
        self.block = block
        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw
        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault('socket_options', [])
    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.

        Increments ``self.num_connections`` and builds the connection from
        the pool's host/port/timeout settings plus any extra ``conn_kw``.
        """
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))
        conn = self.ConnectionCls(host=self.host, port=self.port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        return conn
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:  # self.pool is None
            # close() swaps the queue for None, so any .get() raises here
            raise ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
            if getattr(conn, 'auto_open', 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None
        # fall back to a brand-new connection when nothing usable was pooled
        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None (pool was closed); fall through and close conn
            pass
        except Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s" %
                self.host)
        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.

        Base implementation is a no-op; subclasses may override to perform
        extra checks on the freshly created socket.
        """
        pass
    def _prepare_proxy(self, conn):
        """Hook to set up a connection for proxying; no-op for plain HTTP."""
        # Nothing to do for HTTP connections.
        pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass

        :param err: the caught exception to classify
        :param url: request URL, used in the error message
        :param timeout_value: effective read timeout, used in the error message
        """
        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.

        Returns the raw httplib response object.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        # start the connect-phase clock so read_timeout can account for it
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        conn.request(method, url, **httplib_request_kw)

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, 'sock', None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:  # Python 2.7+, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older
                httplib_response = conn.getresponse()
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            # translate timeouts into ReadTimeoutError; re-raise anything else
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # relative URLs are, by definition, on the same host
        if url.startswith('/'):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            # the URL's explicit port equals the scheme default; treat as unset
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
if conn:
conn.close()
conn = None
raise SSLError(e)
except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
if conn:
# Discard the connection for these exceptions. It will be
# be replaced during the next _get_conn() call.
conn.close()
conn = None
stacktrace = sys.exc_info()[2]
if isinstance(e, SocketError) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e,
_pool=self, _stacktrace=stacktrace)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.
    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
    ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
    :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
    into an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
                 block=False, headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 **conn_kw):
        # Pool-level arguments are handled by the base class; only the
        # TLS-specific settings are stored here. They are applied to each
        # new connection in _prepare_conn().
        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, retries, _proxy, _proxy_headers,
                                    **conn_kw)
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        # Certificate settings only apply when the ssl module is available
        # (i.e. the connection class is VerifiedHTTPSConnection).
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Establish tunnel connection early, because otherwise httplib
        would improperly set Host: header to proxy's IP:port.
        """
        # Python 2.7+ names the method set_tunnel; Python 2.6 used the
        # private _set_tunnel.
        try:
            set_tunnel = conn.set_tunnel
        except AttributeError:  # Platform-specific: Python 2.6
            set_tunnel = conn._set_tunnel
        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
            # Old set_tunnel() did not accept a headers argument.
            set_tunnel(self.host, self.port)
        else:
            set_tunnel(self.host, self.port, self.proxy_headers)
        # Connect immediately so the CONNECT tunnel is in place before the
        # first request is issued.
        conn.connect()

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            # Platform-specific: Python without ssl
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")
        # When tunneling through a proxy, the TCP connection is made to the
        # proxy host/port; the tunnel to self.host is set up later in
        # _prepare_proxy().
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port
        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)
        # Force connect early to allow us to validate the connection.
        if not getattr(conn, 'sock', None):  # AppEngine might not have `.sock`
            conn.connect()
        # Warn (once per warning filter) when certificate verification is
        # not being performed for this connection.
        if not conn.is_verified:
            warnings.warn((
                'Unverified HTTPS request is being made. '
                'Adding certificate verification is strongly advised. See: '
                'https://urllib3.readthedocs.org/en/latest/security.html'),
                InsecureRequestWarning)
def connection_from_url(url, **kw):
    r"""
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    # NOTE: the docstring is a raw string because of the ``\**kw`` escape;
    # in a plain string literal ``\*`` is an invalid escape sequence and
    # raises a warning on modern CPython.
    scheme, host, port = get_host(url)
    if scheme == 'https':
        return HTTPSConnectionPool(host, port=port, **kw)
    else:
        return HTTPConnectionPool(host, port=port, **kw)
| |
from __future__ import annotations
from . import ProgressiveTest
from progressivis.core import aio
from progressivis import Print
from progressivis.table.stirrer import Stirrer
from progressivis.linalg import (
Unary,
Binary,
ColsBinary,
Reduce,
func2class_name,
unary_module,
make_unary,
binary_module,
make_binary,
reduce_module,
make_reduce,
binary_dict_int_tst,
unary_dict_gen_tst,
binary_dict_gen_tst,
)
from progressivis.linalg._elementwise import (
Invert,
BitwiseNot,
ColsLdexp,
Ldexp,
Arccosh,
)
import progressivis.linalg as arr
from progressivis.core.bitmap import bitmap
from progressivis.stats import RandomTable, RandomDict
import numpy as np
from typing import Any, Type, TYPE_CHECKING
if TYPE_CHECKING:
from progressivis.table.module import TableModule
class TestUnary(ProgressiveTest):
    """Tests for :class:`Unary`: element-wise application of a unary ufunc."""

    def test_unary(self) -> None:
        """np.log over every column matches numpy applied to the whole array."""
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = Unary(np.log, scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.log(random.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("unary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_unary2(self) -> None:
        """np.log restricted to columns _3, _5, _7 (array columns 2, 4, 6)."""
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = Unary(np.log, columns=["_3", "_5", "_7"], scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.log(random.table.to_array()[:, [2, 4, 6]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("unary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def _t_stirred_unary(self, **kw: Any) -> None:
        """Same as test_unary2 but with a Stirrer mutating the input table."""
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        stirrer = Stirrer(update_column="_3", fixed_step_size=1000, scheduler=s, **kw)
        stirrer.input[0] = random.output.result
        module = Unary(np.log, columns=["_3", "_5", "_7"], scheduler=s)
        module.input[0] = stirrer.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.log(stirrer.table.to_array()[:, [2, 4, 6]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("unary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_unary3(self) -> None:
        """Stirred variant with row deletions."""
        self._t_stirred_unary(delete_rows=5)

    def test_unary4(self) -> None:
        """Stirred variant with row updates."""
        self._t_stirred_unary(update_rows=5)

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver used by the generated per-ufunc tests (see add_un_tst)."""
        print("Testing", mod_name)
        s = self.scheduler()
        random = RandomTable(10, rows=10_000, scheduler=s)
        module = cls(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = ufunc(random.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_un_tst(k: str, ufunc: np.ufunc) -> None:
    """Attach a generated ``test_<k>`` method to :class:`TestUnary`.

    The generated test drives :meth:`TestUnary._t_impl` with the
    progressivis module class named after *k*.
    """
    class_name = func2class_name(k)
    prefix = f"{k}_"

    def _generated(self_: TestUnary) -> None:
        TestUnary._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(TestUnary, f"test_{k}", _generated)
# Register one generated test method on TestUnary per generic unary ufunc.
for k, ufunc in unary_dict_gen_tst.items():
    add_un_tst(k, ufunc)
class TestOtherUnaries(ProgressiveTest):
    """Unary modules needing special input domains (positive floats, ints)."""

    def test_arccosh(self) -> None:
        """arccosh needs inputs >= 1, so values are scaled up to [0, 10000)."""
        module_name = "arccosh_"
        print("Testing", module_name)
        s = self.scheduler()
        random = RandomTable(
            10, random=lambda x: np.random.rand(x) * 10000.0, rows=100_000, scheduler=s
        )
        module = Arccosh(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.arccosh(random.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(module_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_invert(self) -> None:
        """Bitwise inversion requires an integer dtype table."""
        module_name = "invert_"
        print("Testing", module_name)
        s = self.scheduler()
        random = RandomTable(
            10,
            random=lambda x: np.random.randint(100_000, size=x),  # type: ignore
            dtype="int64",
            rows=100_000,
            scheduler=s,
        )
        module = Invert(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.invert(random.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(module_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_bitwise_not(self) -> None:
        """bitwise_not is np.invert under another name; same integer setup."""
        module_name = "bitwise_not_"
        print("Testing", module_name)
        s = self.scheduler()
        random = RandomTable(
            10,
            random=lambda x: np.random.randint(100_000, size=x),  # type: ignore
            dtype="int64",
            rows=100_000,
            scheduler=s,
        )
        module = BitwiseNot(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.bitwise_not(random.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(module_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
# @skip
class TestColsBinary(ProgressiveTest):
    """Tests for :class:`ColsBinary`: a binary ufunc applied between two
    column subsets of the same table."""

    def test_cols_binary(self) -> None:
        """Default output columns are the ``first`` column names."""
        s = self.scheduler()
        cols = 10
        random = RandomTable(cols, rows=100_000, scheduler=s)
        module = ColsBinary(
            np.add, first=["_3", "_5", "_7"], second=["_4", "_6", "_8"], scheduler=s
        )
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["_3", "_5", "_7"])
        arr = random.table.to_array()
        res1 = np.add(arr[:, [2, 4, 6]], arr[:, [3, 5, 7]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("cols_binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_cols_binary2(self) -> None:
        """``cols_out`` overrides the output column names."""
        s = self.scheduler()
        cols = 10
        random = RandomTable(cols, rows=100, scheduler=s)
        module = ColsBinary(
            np.add,
            first=["_3", "_5", "_7"],
            second=["_4", "_6", "_8"],
            cols_out=["x", "y", "z"],
            scheduler=s,
        )
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["x", "y", "z"])

    # NOTE(review): siblings use a leading underscore for this kind of helper
    # (_t_stirred_unary, _t_stirred_binary); this one doesn't — consider
    # renaming for consistency.
    def t_stirred_cols_binary(self, **kw: Any) -> None:
        """Same as test_cols_binary but with a Stirrer mutating the input."""
        s = self.scheduler()
        cols = 10
        random = RandomTable(cols, rows=10_000, scheduler=s)
        stirrer = Stirrer(update_column="_3", fixed_step_size=1000, scheduler=s, **kw)
        stirrer.input[0] = random.output.result
        module = ColsBinary(
            np.add, first=["_3", "_5", "_7"], second=["_4", "_6", "_8"], scheduler=s
        )
        module.input[0] = stirrer.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["_3", "_5", "_7"])
        arr = stirrer.table.to_array()
        res1 = np.add(arr[:, [2, 4, 6]], arr[:, [3, 5, 7]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("cols_binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_cols_binary3(self) -> None:
        """Stirred variant with row deletions."""
        self.t_stirred_cols_binary(delete_rows=5)

    def test_cols_binary4(self) -> None:
        """Stirred variant with row updates."""
        self.t_stirred_cols_binary(update_rows=5)

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver used by the generated per-ufunc tests."""
        print("Testing", mod_name)
        s = self.scheduler()
        random = RandomTable(10, rows=10_000, scheduler=s)
        module = cls(
            first=["_3", "_5", "_7"],
            second=["_4", "_6", "_8"],
            cols_out=["x", "y", "z"],
            scheduler=s,
        )
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["x", "y", "z"])
        arr = random.table.to_array()
        res1 = ufunc(arr[:, [2, 4, 6]], arr[:, [3, 5, 7]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_cols_bin_tst(c: Type[TestColsBinary], k: str, ufunc: np.ufunc) -> None:
    """Attach a generated ``test_<k>`` method to *c* exercising ``Cols<K>``."""
    class_name = "Cols" + func2class_name(k)
    prefix = "cols_" + k + "_"

    def _generated(self_: TestColsBinary) -> None:
        c._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(c, "test_" + k, _generated)
# Register one generated test method on TestColsBinary per generic binary ufunc.
for k, ufunc in binary_dict_gen_tst.items():
    add_cols_bin_tst(TestColsBinary, k, ufunc)
class TestOtherColsBinaries(ProgressiveTest):
    """Cols* binary modules that require integer inputs (bitwise ops, ldexp)."""

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver over small random integer tables."""
        print("Testing", mod_name)
        s = self.scheduler()
        cols = 10
        random = RandomTable(
            cols,
            rows=10_000,
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        module = cls(
            first=["_3", "_5", "_7"],
            second=["_4", "_6", "_8"],
            cols_out=["x", "y", "z"],
            scheduler=s,
        )
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["x", "y", "z"])
        arr = random.table.to_array()
        res1 = ufunc(arr[:, [2, 4, 6]], arr[:, [3, 5, 7]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_ldexp(self) -> None:
        """ldexp is excluded from the generated tests and exercised here."""
        cls, ufunc, mod_name = ColsLdexp, np.ldexp, "cols_ldexp_"
        print("Testing", mod_name)
        s = self.scheduler()
        cols = 10
        random = RandomTable(
            cols,
            rows=10_000,
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        module = cls(
            first=["_3", "_5", "_7"],
            second=["_4", "_6", "_8"],
            cols_out=["x", "y", "z"],
            scheduler=s,
        )
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        self.assertListEqual(module.table.columns, ["x", "y", "z"])
        arr = random.table.to_array()
        res1 = ufunc(arr[:, [2, 4, 6]], arr[:, [3, 5, 7]])
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_other_cols_bin_tst(
    c: Type[TestOtherColsBinaries], k: str, ufunc: np.ufunc
) -> None:
    """Attach a generated ``test_cols_<k>`` method to *c* for ``Cols<K>``."""
    class_name = "Cols" + func2class_name(k)
    prefix = "cols_" + k + "_"

    def _generated(self_: TestOtherColsBinaries) -> None:
        c._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(c, "test_cols_" + k, _generated)
# Register generated integer-ufunc tests on TestOtherColsBinaries.
# ldexp is skipped: it has a dedicated test_ldexp method above.
for k, ufunc in binary_dict_int_tst.items():
    if k == "ldexp":
        continue
    add_other_cols_bin_tst(TestOtherColsBinaries, k, ufunc)
class TestBin(ProgressiveTest):
    """Shared base for binary-module test cases; subclasses override _t_impl."""

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        # Intentionally a no-op: generated tests attached to a subclass call
        # the subclass's override.
        pass
class TestBinary(TestBin):
    """Tests for :class:`Binary` with two table inputs (``first``/``second``)."""

    def test_binary(self) -> None:
        """np.add over two whole tables matches numpy on the full arrays."""
        s = self.scheduler()
        random1 = RandomTable(3, rows=100_000, scheduler=s)
        random2 = RandomTable(3, rows=100_000, scheduler=s)
        module = Binary(np.add, scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add(random1.table.to_array(), random2.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_binary2(self) -> None:
        """Passing ``columns`` as a plain list (not a dict) must be rejected."""
        s = self.scheduler()
        cols = 10
        _ = RandomTable(cols, rows=100_000, scheduler=s)
        _ = RandomTable(cols, rows=100_000, scheduler=s)
        with self.assertRaises(AssertionError):
            _ = Binary(np.add, columns=["_3", "_5", "_7"], scheduler=s)

    def test_binary3(self) -> None:
        """Per-input column subsets via a ``columns`` dict."""
        s = self.scheduler()
        random1 = RandomTable(10, rows=100_000, scheduler=s)
        random2 = RandomTable(10, rows=100_000, scheduler=s)
        module = Binary(
            np.add,
            columns={"first": ["_3", "_5", "_7"], "second": ["_4", "_6", "_8"]},
            scheduler=s,
        )
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add(
            random1.table.to_array()[:, [2, 4, 6]],
            random2.table.to_array()[:, [3, 5, 7]],
        )
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def _t_stirred_binary(self, **kw: Any) -> None:
        """Stirred variant: only rows present in BOTH inputs are compared."""
        s = self.scheduler()
        random1 = RandomTable(10, rows=100000, scheduler=s)
        random2 = RandomTable(10, rows=100000, scheduler=s)
        stirrer1 = Stirrer(update_column="_3", fixed_step_size=1000, scheduler=s, **kw)
        stirrer1.input[0] = random1.output.result
        stirrer2 = Stirrer(update_column="_3", fixed_step_size=1000, scheduler=s, **kw)
        stirrer2.input[0] = random2.output.result
        module = Binary(
            np.add,
            columns={"first": ["_3", "_5", "_7"], "second": ["_4", "_6", "_8"]},
            scheduler=s,
        )
        module.input.first = stirrer1.output.result
        module.input.second = stirrer2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        # The stirrers may delete different rows from each input; restrict the
        # reference computation to the intersection of surviving indices.
        idx1 = stirrer1.table.index.to_array()
        idx2 = stirrer2.table.index.to_array()
        common = bitmap(idx1) & bitmap(idx2)
        bt1 = stirrer1.table.loc[common, :]
        bt2 = stirrer2.table.loc[common, :]
        assert bt1 is not None and bt2 is not None
        t1 = bt1.to_array()[:, [2, 4, 6]]
        t2 = bt2.to_array()[:, [3, 5, 7]]
        res1 = np.add(t1, t2)
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_stirred_binary1(self) -> None:
        """Stirred variant with row deletions."""
        self._t_stirred_binary(delete_rows=5)

    def test_stirred_binary2(self) -> None:
        """Stirred variant with row updates."""
        self._t_stirred_binary(update_rows=5)

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver used by the generated per-ufunc tests (add_bin_tst)."""
        print("Testing", mod_name)
        s = self.scheduler()
        random1 = RandomTable(3, rows=10_000, scheduler=s)
        random2 = RandomTable(3, rows=10_000, scheduler=s)
        module = cls(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = ufunc(random1.table.to_array(), random2.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_bin_tst(c: Type[TestBin], k: str, ufunc: np.ufunc) -> None:
    """Attach a generated ``test_<k>`` method to the TestBin subclass *c*."""
    class_name = func2class_name(k)
    prefix = f"{k}_"

    def _generated(self_: TestBinary) -> None:
        c._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(c, f"test_{k}", _generated)
# Register one generated test method on TestBinary per generic binary ufunc.
for k, ufunc in binary_dict_gen_tst.items():
    add_bin_tst(TestBinary, k, ufunc)
class TestBinaryTD(TestBin):
    """Tests for :class:`Binary` mixing a table input with a dict input
    (RandomDict), i.e. table-dict broadcasting."""

    def test_binary(self) -> None:
        """np.add between a table and a dict broadcast across rows."""
        s = self.scheduler()
        cols = 3
        random1 = RandomTable(cols, rows=100000, scheduler=s)
        random2 = RandomDict(cols, scheduler=s)
        module = Binary(np.add, scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add(random1.table.to_array(), np.array(list(random2.psdict.values())))
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_binary2(self) -> None:
        """Passing ``columns`` as a plain list (not a dict) must be rejected."""
        s = self.scheduler()
        cols = 10
        _ = RandomTable(cols, rows=100_000, scheduler=s)
        _ = RandomDict(cols, scheduler=s)
        with self.assertRaises(AssertionError):
            _ = Binary(np.add, columns=["_3", "_5", "_7"], scheduler=s)

    def test_binary3(self) -> None:
        """Per-input column subsets with a dict second input."""
        s = self.scheduler()
        cols = 10
        random1 = RandomTable(cols, rows=100_000, scheduler=s)
        random2 = RandomDict(cols, scheduler=s)
        module = Binary(
            np.add,
            columns={"first": ["_3", "_5", "_7"], "second": ["_4", "_6", "_8"]},
            scheduler=s,
        )
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add(
            random1.table.to_array()[:, [2, 4, 6]],
            np.array(list(random2.psdict.values()))[[3, 5, 7]],
        )
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver used by the generated per-ufunc tests."""
        print("Testing", mod_name)
        s = self.scheduler()
        cols = 3
        # NOTE(review): the literal 3 below presumably should be `cols` for
        # consistency — same value today, so behavior is unchanged.
        random1 = RandomTable(3, rows=10_000, scheduler=s)
        random2 = RandomDict(cols, scheduler=s)
        module = cls(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = ufunc(random1.table.to_array(), np.array(list(random2.psdict.values())))
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
# Register one generated test method on TestBinaryTD per generic binary ufunc.
for k, ufunc in binary_dict_gen_tst.items():
    add_bin_tst(TestBinaryTD, k, ufunc)
class TestOtherBinaries(ProgressiveTest):
    """Binary modules requiring integer inputs (bitwise ops, ldexp)."""

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver over two random integer tables."""
        print("Testing", mod_name)
        s = self.scheduler()
        random1 = RandomTable(
            3,
            rows=100_000,
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        random2 = RandomTable(
            3,
            rows=100_000,
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        module = cls(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = ufunc(random1.table.to_array(), random2.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_ldexp(self) -> None:
        """ldexp mixes a float first operand with an int second operand, so
        it is excluded from the generated tests and exercised here."""
        cls, ufunc, mod_name = Ldexp, np.ldexp, "ldexp_"
        print("Testing", mod_name)
        s = self.scheduler()
        random1 = RandomTable(3, rows=100_000, scheduler=s)
        random2 = RandomTable(
            3,
            rows=100_000,
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        module = cls(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = ufunc(random1.table.to_array(), random2.table.to_array())
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_other_bin_tst(c: Type[TestOtherBinaries], k: str, ufunc: np.ufunc) -> None:
    """Attach a generated ``test_<k>`` method to *c* for an integer ufunc."""
    class_name = func2class_name(k)
    prefix = f"{k}_"

    def _generated(self_: TestOtherBinaries) -> None:
        c._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(c, f"test_{k}", _generated)
# Register generated integer-ufunc tests on TestOtherBinaries.
# ldexp is skipped: it has a dedicated test_ldexp method above.
for k, ufunc in binary_dict_int_tst.items():
    if k == "ldexp":
        continue
    add_other_bin_tst(TestOtherBinaries, k, ufunc)
class TestReduce(ProgressiveTest):
    """Tests for :class:`Reduce`: folding a binary ufunc over the rows of a
    table, producing one value per column (exposed as ``module.psdict``)."""

    def test_reduce(self) -> None:
        """Reduce with np.add over all columns matches np.add.reduce."""
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = Reduce(np.add, scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add.reduce(random.table.to_array())
        res2 = np.array(list(module.psdict.values()))
        self.assertTrue(module.name.startswith("reduce_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_reduce2(self) -> None:
        """Reduce restricted to columns _3, _5, _7 (array columns 2, 4, 6)."""
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = Reduce(np.add, columns=["_3", "_5", "_7"], scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.add.reduce(random.table.to_array()[:, [2, 4, 6]])
        res2 = np.array(list(module.psdict.values()))
        self.assertTrue(module.name.startswith("reduce_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Generic driver used by the generated per-ufunc tests."""
        print("Testing", mod_name)
        s = self.scheduler()
        random = RandomTable(10, rows=10_000, scheduler=s)
        module = cls(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        # Direct attribute access; getattr(ufunc, "reduce") with a constant
        # name was an unnecessary indirection (flake8-bugbear B009).
        res1 = ufunc.reduce(random.table.to_array())
        res2 = np.array(list(module.psdict.values()))
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_reduce_tst(c: Type[TestReduce], k: str, ufunc: np.ufunc) -> None:
    """Attach a generated ``test_<k>`` method to *c* exercising ``<K>Reduce``."""
    class_name = func2class_name(k) + "Reduce"
    prefix = k + "_reduce_"

    def _generated(self_: TestReduce) -> None:
        c._t_impl(self_, arr.__dict__[class_name], ufunc, prefix)

    setattr(c, "test_" + k, _generated)
# Register one generated test method on TestReduce per generic binary ufunc.
for k, ufunc in binary_dict_gen_tst.items():
    add_reduce_tst(TestReduce, k, ufunc)
class TestCustomFunctions(ProgressiveTest):
    """Tests for make_unary/make_binary/make_reduce, which wrap arbitrary
    Python scalar functions into progressive modules."""

    def test_custom_unary(self) -> None:
        """A scalar function wrapped by make_unary behaves like a unary ufunc."""
        def custom_unary(x: float) -> float:
            return (x + np.sin(x)) / (x + np.cos(x))  # type: ignore
        CustomUnary = make_unary(custom_unary)
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = CustomUnary(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        # The module's own vectorized function is the reference here.
        res1 = np.array(module._ufunc(random.table.to_array()), dtype="float64")
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("custom_unary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_custom_binary(self) -> None:
        """A two-argument scalar function wrapped by make_binary."""
        def custom_binary(x: float, y: float) -> float:
            return (x + np.sin(y)) / (x + np.cos(y))  # type: ignore
        CustomBinary = make_binary(custom_binary)
        s = self.scheduler()
        random1 = RandomTable(3, rows=100_000, scheduler=s)
        random2 = RandomTable(3, rows=100_000, scheduler=s)
        module = CustomBinary(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.array(
            module._ufunc(random1.table.to_array(), random2.table.to_array()),
            dtype="float64",
        )
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("custom_binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_custom_reduce(self) -> None:
        """A two-argument scalar function wrapped by make_reduce."""
        def custom_binary(x: float, y: float) -> float:
            return (x + np.sin(y)) / (x + np.cos(y))  # type: ignore
        CustomBinaryReduce = make_reduce(custom_binary)
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = CustomBinaryReduce(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.array(module._ufunc(random.table.to_array()), dtype="float64")
        # NOTE(review): other reduce tests read module.psdict; this one reads
        # module.result — presumably equivalent accessors, verify.
        res2 = np.array(list(module.result.values()))
        self.assertTrue(module.name.startswith("custom_binary_reduce_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
class TestOtherReduces(ProgressiveTest):
    """Auto-populated test case: one test method per integer binary ufunc
    reduce module is attached by add_other_reduce_tst() below."""

    def _t_impl(self, cls: Type[TableModule], ufunc: np.ufunc, mod_name: str) -> None:
        """Run one generated reduce module over random int data and compare
        its per-column results with numpy's own ufunc.reduce."""
        print("Testing", mod_name)
        s = self.scheduler()
        random = RandomTable(
            3,
            rows=10_000,
            # Small non-negative ints keep integer ufuncs well-behaved.
            scheduler=s,
            random=lambda x: np.random.randint(10, size=x),  # type: ignore
            dtype="int64",
        )
        module = cls(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        # Reference result computed directly with numpy's reduce.
        res1 = getattr(ufunc, "reduce")(random.table.to_array())
        res2 = np.array(list(module.psdict.values()))
        self.assertTrue(module.name.startswith(mod_name))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def add_other_reduce_tst(c: Type[TestOtherReduces], k: str, ufunc: np.ufunc) -> None:
    """Attach a test method ``test_<k>`` to class *c* that exercises the
    generated ``<Ufunc>Reduce`` module for ufunc *k*.

    Defining ``_f`` inside this helper (instead of in the loop below) binds
    ``cls``/``mod_name``/``ufunc`` per call, avoiding the classic
    late-binding-closure pitfall.
    """
    cls = f"{func2class_name(k)}Reduce"
    mod_name = f"{k}_reduce_"

    def _f(self_: TestOtherReduces) -> None:
        c._t_impl(self_, arr.__dict__[cls], ufunc, mod_name)

    setattr(c, f"test_{k}", _f)
# Register one reduce test per integer binary ufunc.
for k, ufunc in binary_dict_int_tst.items():
    if k == "ldexp":
        # ldexp is excluded — presumably because its mixed-type signature
        # does not support reduce. TODO confirm.
        continue
    add_other_reduce_tst(TestOtherReduces, k, ufunc)
class TestDecorators(ProgressiveTest):
    """Same coverage as the make_* factory tests above, but using the
    decorator forms (@unary_module, @binary_module, @reduce_module)."""

    def test_decorator_unary(self) -> None:
        """@unary_module must turn a scalar function into an element-wise
        table module."""
        @unary_module
        def CustomUnary(x: float) -> float:
            return (x + np.sin(x)) / (x + np.cos(x))  # type: ignore
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = CustomUnary(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        # Reference: apply the module's ufunc directly to the raw array.
        res1 = np.array(module._ufunc(random.table.to_array()), dtype="float64")
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("custom_unary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_decorator_binary(self) -> None:
        """@binary_module must apply the function element-wise across the
        two input tables wired to the first/second slots."""
        @binary_module
        def CustomBinary(x: float, y: float) -> float:
            return (x + np.sin(y)) / (x + np.cos(y))  # type: ignore
        s = self.scheduler()
        random1 = RandomTable(3, rows=100_000, scheduler=s)
        random2 = RandomTable(3, rows=100_000, scheduler=s)
        module = CustomBinary(scheduler=s)
        module.input.first = random1.output.result
        module.input.second = random2.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.array(
            module._ufunc(random1.table.to_array(), random2.table.to_array()),
            dtype="float64",
        )
        res2 = module.table.to_array()
        self.assertTrue(module.name.startswith("custom_binary_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))

    def test_decorator_reduce(self) -> None:
        """@reduce_module must fold the binary function over each column."""
        @reduce_module
        def CustomBinaryReduce(x: float, y: float) -> float:
            return (x + np.sin(y)) / (x + np.cos(y))  # type: ignore
        s = self.scheduler()
        random = RandomTable(10, rows=100_000, scheduler=s)
        module = CustomBinaryReduce(scheduler=s)
        module.input[0] = random.output.result
        pr = Print(proc=self.terse, scheduler=s)
        pr.input[0] = module.output.result
        aio.run(s.start())
        res1 = np.array(module._ufunc(random.table.to_array()), dtype="float64")
        # Reduce modules publish one scalar per column.
        res2 = np.array(list(module.result.values()))
        self.assertTrue(module.name.startswith("custom_binary_reduce_"))
        self.assertTrue(np.allclose(res1, res2, equal_nan=True))
| |
import datetime
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect # , HttpResponse
from billing import CreditCard, get_gateway, get_integration
from billing.gateway import CardNotSupported
from app.forms import CreditCardForm
from app.urls import (authorize_net_obj, google_checkout_obj, world_pay_obj, pay_pal_obj,
amazon_fps_obj, fps_recur_obj, braintree_obj,
stripe_obj, ogone_obj)
from app.utils import randomword
from django.conf import settings
from django.contrib.sites.models import RequestSite
from billing.utils.paylane import PaylanePaymentCustomer, \
PaylanePaymentCustomerAddress
from app.conf import GATEWAY_INITIAL, INTEGRATION_INITIAL
def render(request, template, template_vars=None):
    """Shortcut wrapping render_to_response with a RequestContext.

    :param request: the current HttpRequest
    :param template: template path to render
    :param template_vars: optional dict of context variables

    Bug fix: the default used to be a mutable ``{}`` shared across calls;
    any view mutating it would leak state into every later request.
    """
    if template_vars is None:
        template_vars = {}
    return render_to_response(template, template_vars, RequestContext(request))
def index(request, gateway=None):
    """Landing view; delegates straight to the Authorize.Net demo.

    ``gateway`` is accepted for URLconf compatibility but unused here.
    """
    return authorize(request)
def authorize(request):
    """Demo a one-off charge through the Authorize.Net gateway.

    On POST with a valid form, validates the card and — only when it is
    supported — runs a purchase; the gateway response (or the error
    string) is rendered back on the index template.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("authorize_net")
            try:
                merchant.validate_card(credit_card)
            except CardNotSupported:
                response = "Credit Card Not Supported"
            else:
                # Bug fix: purchase used to run unconditionally, clobbering
                # the "Credit Card Not Supported" message above.
                response = merchant.purchase(amount, credit_card)
                # response = merchant.recurring(amount, credit_card)
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['authorize_net'])
    return render(request, 'app/index.html', {'form': form,
                                              'amount': amount,
                                              'response': response,
                                              'title': 'Authorize'})
def paypal(request):
    """Demo a recurring charge through the PayPal gateway.

    On POST with a valid form, validates the card and — only when it is
    supported — starts a recurring payment; the gateway response (or the
    error string) is rendered back on the index template.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("pay_pal")
            try:
                merchant.validate_card(credit_card)
            except CardNotSupported:
                response = "Credit Card Not Supported"
            else:
                # Bug fix: the recurring call used to run unconditionally,
                # clobbering the "Credit Card Not Supported" message above.
                # response = merchant.purchase(amount, credit_card, options={'request': request})
                response = merchant.recurring(amount, credit_card, options={'request': request})
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['paypal'])
    return render(request, 'app/index.html', {'form': form,
                                              'amount': amount,
                                              'response': response,
                                              'title': 'Paypal'})
def eway(request):
    """Demo a charge through the eWAY gateway with a full billing address.

    On POST with a valid form, validates the card and — only when it is
    supported — runs a purchase with a hardcoded demo billing address.
    """
    amount = 100
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("eway")
            try:
                merchant.validate_card(credit_card)
            except CardNotSupported:
                response = "Credit Card Not Supported"
            else:
                # Bug fix: the purchase used to run unconditionally,
                # clobbering the "Credit Card Not Supported" message above.
                billing_address = {'salutation': 'Mr.',
                                   'address1': 'test',
                                   'address2': ' street',
                                   'city': 'Sydney',
                                   'state': 'NSW',
                                   'company': 'Test Company',
                                   'zip': '2000',
                                   'country': 'au',
                                   'email': 'test@example.com',
                                   'fax': '0267720000',
                                   'phone': '0267720000',
                                   'mobile': '0404085992',
                                   'customer_ref': 'REF100',
                                   'job_desc': 'test',
                                   'comments': 'any',
                                   'url': 'http://www.google.com.au',
                                   }
                response = merchant.purchase(amount, credit_card, options={'request': request, 'billing_address': billing_address})
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['eway'])
    return render(request, 'app/index.html', {'form': form,
                                              'amount': amount,
                                              'response': response,
                                              'title': 'Eway'})
def braintree(request):
    """Demo a server-to-server charge through Braintree Payments.

    On POST with a valid form, validates the card and — only when it is
    supported — runs a purchase.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("braintree_payments")
            try:
                merchant.validate_card(credit_card)
            except CardNotSupported:
                response = "Credit Card Not Supported"
            else:
                # Bug fix: purchase used to run unconditionally, clobbering
                # the "Credit Card Not Supported" message above.
                response = merchant.purchase(amount, credit_card)
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['braintree_payments'])
    return render(request, 'app/index.html', {'form': form,
                                              'amount': amount,
                                              'response': response,
                                              'title': 'Braintree Payments (S2S)'})
def stripe(request):
    """Demo a charge through the Stripe gateway.

    On POST with a valid form the card is charged directly (Stripe does
    its own card validation server-side); otherwise an empty form
    pre-filled with the demo defaults is shown.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            card = CreditCard(**cleaned)
            gateway = get_gateway("stripe")
            response = gateway.purchase(amount, card)
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['stripe'])
    context = {
        'form': form,
        'amount': amount,
        'response': response,
        'title': 'Stripe Payment',
    }
    return render(request, 'app/index.html', context)
def paylane(request):
    """Demo a charge through the Paylane gateway.

    Builds the PaylanePaymentCustomer/Address objects the gateway
    requires; contact and address values are hardcoded for this demo.
    """
    amount = 1
    response= None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("paylane")
            customer = PaylanePaymentCustomer()
            customer.name = "%s %s" %(data['first_name'], data['last_name'])
            customer.email = "testuser@example.com"
            customer.ip_address = "127.0.0.1"
            options = {}
            address = PaylanePaymentCustomerAddress()
            address.street_house = 'Av. 24 de Julho, 1117'
            address.city = 'Lisbon'
            address.zip_code = '1700-000'
            address.country_code = 'PT'
            customer.address = address
            options['customer'] = customer
            options['product'] = {}
            response = merchant.purchase(amount, credit_card, options = options)
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['paylane'])
    return render(request, 'app/index.html', {'form': form,
                                              'amount':amount,
                                              'response':response,
                                              'title':'Paylane Gateway'})
def we_pay(request):
    """Start a WePay checkout and redirect the user to WePay on success.

    Consistency fix: the purchase call used the literal ``10`` while the
    template received the ``amount`` variable; both now use ``amount`` so
    the displayed and charged amounts cannot drift apart.
    """
    wp = get_gateway("we_pay")
    form = None
    amount = 10
    response = wp.purchase(amount, None, {
        "description": "Test Merchant Description",
        "type": "SERVICE",
        "redirect_uri": request.build_absolute_uri(reverse('app_we_pay_redirect'))
    })
    if response["status"] == "SUCCESS":
        return HttpResponseRedirect(response["response"]["checkout_uri"])
    return render(request, 'app/index.html', {'form': form,
                                              'amount':amount,
                                              'response':response,
                                              'title':'WePay Payment'})
def we_pay_redirect(request):
    """Landing page after a WePay checkout; shows the checkout id, if any."""
    context = {"checkout_id": request.GET.get("checkout_id", None)}
    return render(request, 'app/we_pay_success.html', context)
def we_pay_ipn(request):
    """Placeholder for WePay instant payment notifications.

    # Just a dummy view for now.
    """
    return render(request, 'app/index.html', {})
def beanstream(request):
    """Demo a charge through the Beanstream gateway.

    The billing address is mostly hardcoded demo data; only the name
    comes from the submitted form.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("beanstream")
            response = merchant.purchase(amount, credit_card,
                                         {"billing_address": {
                                             "name": "%s %s" % (data["first_name"], data["last_name"]),
                                             # below are hardcoded just for the sake of the example
                                             # you can make these optional by toggling the customer name
                                             # and address in the account dashboard.
                                             "email": "test@example.com",
                                             "phone": "555-555-555-555",
                                             "address1": "Addr1",
                                             "address2": "Addr2",
                                             "city": "Hyd",
                                             "state": "AP",
                                             "country": "IN"
                                         }
                                         })
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['beanstream'])
    return render(request, 'app/index.html',{'form': form,
                                             'amount': amount,
                                             'response': response,
                                             'title': 'Beanstream'})
def chargebee(request):
    """Demo a subscription purchase through the Chargebee gateway.

    The card is charged against the hardcoded "professional" plan.
    """
    amount = 1
    response = None
    if request.method == 'POST':
        form = CreditCardForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            credit_card = CreditCard(**data)
            merchant = get_gateway("chargebee")
            response = merchant.purchase(amount, credit_card,
                                         {"plan_id": "professional",
                                          "description": "Quick Purchase"})
    else:
        form = CreditCardForm(initial=GATEWAY_INITIAL['chargebee'])
    return render(request, 'app/index.html',{'form': form,
                                             'amount': amount,
                                             'response': response,
                                             'title': 'Chargebee'})
def offsite_authorize_net(request):
    """Render the Authorize.Net offsite (hosted) payment form.

    Fields use Authorize.Net's x_* naming convention; card data here is
    the gateway's published test card.
    """
    params = {
        'x_amount': 1,
        # Sequence/timestamp pair used by Authorize.Net to fingerprint
        # the transaction request.
        'x_fp_sequence': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
        'x_fp_timestamp': datetime.datetime.now().strftime('%s'),
        'x_recurring_bill': 'F',
        'x_card_num': '4007000000027',
        'x_exp_date': '01/20',
        'x_card_code': '100',
        'x_first_name': 'John',
        'x_last_name': 'Doe',
        'x_address': '100, Spooner Street, Springfield',
        'x_city': 'San Francisco',
        'x_state': 'California',
        'x_zip': '90210',
        'x_country': 'United States'
    }
    authorize_net_obj.add_fields(params)
    template_vars = {"obj": authorize_net_obj, 'title': authorize_net_obj.display_name}
    return render(request, 'app/offsite_authorize_net.html', template_vars)
def offsite_paypal(request):
    """Render the PayPal offsite payment form with a two-item cart.

    Sets up the return/cancel/IPN callback URLs and a timestamp-based
    invoice id before handing the fields to the PayPal integration.
    """
    invoice_id = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    return_url = request.build_absolute_uri(reverse('app_offsite_paypal_done'))
    # Cancel simply returns the user to this same view.
    cancel_return = request.build_absolute_uri(request.META['PATH_INFO'])
    notify_url = request.build_absolute_uri(reverse('paypal-ipn'))
    paypal_params = {
        'amount_1': 1,
        'item_name_1': "Item 1",
        'amount_2': 2,
        'item_name_2': "Item 2",
        'invoice': invoice_id,
        'notify_url': notify_url,
        'return_url': return_url,
        'cancel_return': cancel_return,
    }
    pay_pal_obj.add_fields(paypal_params)
    template_vars = {"obj": pay_pal_obj, 'title': 'PayPal Offsite'}
    return render(request, 'app/offsite_paypal.html', template_vars)
def offsite_google_checkout(request):
    """Render the Google Checkout form for a yearly subscription item."""
    return_url = request.build_absolute_uri(reverse('app_offsite_google_checkout_done'))
    fields = {
        'items': [{
            'amount': 1,
            'name': 'name of the item',
            'description': 'Item description',
            'id': '999AXZ',
            'currency': 'USD',
            'quantity': 1,
            "subscription": {
                "type": "merchant",  # valid choices is ["merchant", "google"]
                "period": "YEARLY",  # valid choices is ["DAILY", "WEEKLY", "SEMI_MONTHLY", "MONTHLY", "EVERY_TWO_MONTHS"," QUARTERLY", "YEARLY"]
                "payments": [{
                    "maximum-charge": 9.99,  # Item amount must be "0.00"
                    "currency": "USD"
                }]
            },
            "digital-content": {
                "display-disposition": "OPTIMISTIC",  # valid choices is ['OPTIMISTIC', 'PESSIMISTIC']
                "description": "Congratulations! Your subscription is being set up. Continue: {return_url}".format(return_url=return_url)
            },
        }],
        'return_url': return_url
    }
    google_checkout_obj.add_fields(fields)
    template_vars = {'title': 'Google Checkout', "gc_obj": google_checkout_obj}
    return render(request, 'app/google_checkout.html', template_vars)
def offsite_world_pay(request):
    """Render the WorldPay offsite payment form for a fixed test item."""
    fields = {"instId": settings.MERCHANT_SETTINGS["world_pay"]["INSTALLATION_ID_TEST"],
              "cartId": "TEST123",
              "currency": "USD",
              "amount": 1,
              "desc": "Test Item",}
    world_pay_obj.add_fields(fields)
    template_vars = {'title': 'WorldPay', "wp_obj": world_pay_obj}
    return render(request, 'app/world_pay.html', template_vars)
def offsite_amazon_fps(request):
    """Render the Amazon FPS form with both one-off and recurring flows.

    The same field dict is reused for the recurring pipeline after
    updating the pipeline-specific keys.
    """
    url_scheme = "http"
    if request.is_secure():
        url_scheme = "https"
    fields = {"transactionAmount": "100",
              "pipelineName": "SingleUse",
              "paymentReason": "Merchant Test",
              "paymentPage": request.build_absolute_uri(),
              "returnURL": "%s://%s%s" % (url_scheme,
                                          RequestSite(request).domain,
                                          reverse("fps_return_url"))
              }
    # Save the fps.fields["callerReference"] in the db along with
    # the amount to be charged or use the user's unique id as
    # the callerReference so that the amount to be charged is known
    # Or save the callerReference in the session and send the user
    # to FPS and then use the session value when the user is back.
    amazon_fps_obj.add_fields(fields)
    fields.update({"transactionAmount": "100",
                   "pipelineName": "Recurring",
                   "recurringPeriod": "1 Hour",
                   })
    fps_recur_obj.add_fields(fields)
    template_vars = {'title': 'Amazon Flexible Payment Service',
                     "fps_recur_obj": fps_recur_obj,
                     "fps_obj": amazon_fps_obj}
    return render(request, 'app/amazon_fps.html', template_vars)
def offsite_braintree(request):
    """Render the Braintree transparent-redirect payment form."""
    fields = {"transaction": {
        # Timestamp-based order id keeps each demo transaction unique.
        "order_id": datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
        "type": "sale",
        "options": {
            "submit_for_settlement": True
        },
    },
        "site": "%s://%s" % ("https" if request.is_secure() else "http",
                             RequestSite(request).domain)
    }
    braintree_obj.add_fields(fields)
    template_vars = {'title': 'Braintree Payments Transparent Redirect',
                     "bp_obj": braintree_obj}
    return render(request, "app/braintree_tr.html", template_vars)
def offsite_stripe(request):
    """Render the Stripe.js offsite page, passing through any ?status=
    query parameter left by a previous attempt."""
    stripe_obj.add_field("amount", 100)
    context = {
        "stripe_obj": stripe_obj,
        "status": request.GET.get("status"),
        'title': 'Stripe.js',
    }
    return render(request, "app/stripe.html", context)
def offsite_eway(request):
    """Request an eWAY access code and render the hosted payment form.

    The access code is stashed in the session so offsite_eway_done can
    query the transaction result afterwards.
    """
    return_url = request.build_absolute_uri(reverse(offsite_eway_done))
    eway_obj = get_integration("eway_au")
    # NOTE(review): the returned customer object is never used; the call
    # is made for its side effect of populating eway_obj.access_code.
    customer = eway_obj.request_access_code(
        return_url=return_url, customer={},
        payment={"total_amount": 100})
    request.session["eway_access_code"] = eway_obj.access_code
    template_vars = {"title": "eWAY",
                     "eway_obj": eway_obj}
    return render(request, "app/eway.html", template_vars)
def offsite_eway_done(request):
    """Return page for the eWAY hosted flow: look up the access code
    saved in the session and show the transaction result."""
    access_code = request.session["eway_access_code"]
    eway_obj = get_integration("eway_au", access_code=access_code)
    result = eway_obj.check_transaction()
    return render(request, "app/eway_done.html", {"result": result})
def bitcoin(request):
    """Show a bitcoin payment page, reusing (or creating and caching in
    the session) a receiving address."""
    amount = 0.01
    bitcoin_obj = get_gateway("bitcoin")
    address = request.session.get("bitcoin_address", None)
    if not address:
        address = bitcoin_obj.get_new_address()
        request.session["bitcoin_address"] = address
    return render(request, "app/bitcoin.html", {
        "title": "Bitcoin",
        "amount": amount,
        "address": address,
        "settings": settings
    })
def bitcoin_done(request):
    """Check whether the session's bitcoin address received the payment.

    Redirects back to the payment page if no address is in the session;
    on success the cached address is dropped so a fresh one is issued
    next time.
    """
    amount = 0.01
    bitcoin_obj = get_gateway("bitcoin")
    address = request.session.get("bitcoin_address", None)
    if not address:
        return HttpResponseRedirect(reverse("app_bitcoin"))
    result = bitcoin_obj.purchase(amount, address)
    if result['status'] == 'SUCCESS':
        del request.session["bitcoin_address"]
    return render(request, "app/bitcoin_done.html", {
        "title": "Bitcoin",
        "amount": amount,
        "address": address,
        "result": result
    })
def offsite_ogone(request):
    """Render the Ogone offsite payment form.

    All four callback URLs point at the same notify handler for this demo.
    """
    fields = {
        # Required
        # orderID needs to be unique per transaction.
        'orderID': randomword(6),
        'currency': u'INR',
        'amount': u'10000',  # 100.00
        'language': 'en_US',
        # Optional; Can be configured in Ogone Account:
        'exceptionurl': request.build_absolute_uri(reverse("ogone_notify_handler")),
        'declineurl': request.build_absolute_uri(reverse("ogone_notify_handler")),
        'cancelurl': request.build_absolute_uri(reverse("ogone_notify_handler")),
        'accepturl': request.build_absolute_uri(reverse("ogone_notify_handler")),
        # Optional fields which can be used for billing:
        # 'homeurl': u'http://127.0.0.1:8000/',
        # 'catalogurl': u'http://127.0.0.1:8000/',
        # 'ownerstate': u'',
        # 'cn': u'Venkata Ramana',
        # 'ownertown': u'Hyderabad',
        # 'ownercty': u'IN',
        # 'ownerzip': u'Postcode',
        # 'owneraddress': u'Near Madapur PS',
        # 'com': u'Order #21: Venkata Ramana',
        # 'email': u'ramana@agiliq.com'
    }
    ogone_obj.add_fields(fields)
    return render(request, "app/ogone.html", {"og_obj": ogone_obj})
| |
#!/usr/bin/env python
import os
import tempfile
import psycopg2
import json
from hashlib import md5
from copy import deepcopy
from collections import defaultdict
from wextractor.loaders.loader import Loader
class PostgresLoader(Loader):
def __init__(self, connection_params, schema=None):
super(PostgresLoader, self).__init__(connection_params, schema)
if self.schema is None:
self.schema = []
for table_schema in self.schema:
if table_schema.get('columns', None) is None:
raise Exception('Tables must contain columns')
elif not isinstance(table_schema['columns'][0], tuple):
raise Exception('Table columns must be tuples')
elif len(table_schema['columns'][0]) == 1:
raise Exception('Column Types are not specified')
def connect(self):
'''
The connect method implements the logic behind
the psycopg2 connection. Try/catch/finally
logic should be implemented outside this method
to ensure that the database connection always
closes when appropriate.
For PostgresLoader, the connection_params
must include at least a database and user.
It can also optionally include a hostname, port,
and password
'''
database = self.connection_params.get('database', None)
user = self.connection_params.get('user', None)
if not database or not user:
raise Exception('PostgresLoader must contain "database" and "user" keys')
conn = psycopg2.connect(**self.connection_params)
return conn
def generate_drop_table_query(self, table_schema):
'''
Generates a cascanding drop table query that will drop
all tables and their relations
'''
drop_query = '''DROP TABLE IF EXISTS {table} CASCADE'''.format(
table=table_schema['table_name']
)
return drop_query
def generate_create_table_query(self, table_schema):
'''
Geneates a create table query and raises exceptions
if the table schema generation is malformed
'''
if len(table_schema['columns'][0]) == 2:
coldefs = 'row_id SERIAL,'
coldefs += ','.join(
'{name} {dtype}'.format(name=name, dtype=dtype) for name, dtype in table_schema['columns']
)
create_query = '''CREATE TABLE IF NOT EXISTS {table} ({coldefs}, PRIMARY KEY({pkey}))'''.format(
table=table_schema['table_name'],
coldefs=coldefs,
pkey=table_schema['pkey']
)
return create_query
def generate_foreign_key_query(self, table, i=0):
'''
Generates alter table statements that add formal
foreign key relationships. Takes in a schema and
an optional index (defaults to 0) of the positon
of the relationship in the schema
NOTE: This must be called AFTER data is already
loaded. Otherwise, a psycopg2 error will be thrown.
'''
if table.get('from_relations', None) is None:
return
return '''ALTER TABLE {table} ADD FOREIGN KEY ({id}) REFERENCES {relationship}'''.format(
table=table['table_name'],
id=table['from_relations'][i] + '_id',
relationship=table['from_relations'][i]
)
def null_replace(self, field):
'''
Replaces empty string, None with 'NULL' for Postgres loading
'''
if type(field) in [str, unicode]:
if field == '':
return 'NULL'
elif field is None:
return 'NULL'
return field
def hash_row(self, row):
'''
Return an md5 hash of a row's contents (minus its index). This hash
will turn into the table's new primary key.
'''
return md5(json.dumps(row, sort_keys=True)).hexdigest()
def simple_dedupe(self, idx, table):
'''
Takes in a table that has been transformed by the
transform_to_schema method but not been deduplicated.
This method simply attempts to determine if the row
is an exact replica (minus fkeys). If it is, it checks to
make sure the fkeys are the same and handles the event that
the relationships are to different places and thus should
be different rows (by modifying the primary key). Returns
a deduplicated list.
'''
checker, output, pkey, fkey = {}, [], {}, {}
pkey_name = self.schema[idx]['table_name'] + '_id'
fkeys_name = [i + '_id' for i in self.schema[idx].get('from_relations', [])]
for row in table:
# store the value of the primary key
pkey[pkey_name] = row.pop(pkey_name, None)
for key in fkeys_name:
# store the values of the primary key
fkey[key] = row.pop(key, None)
row_as_tuple = tuple(row.items())
# Use try/except because it's faster than checking if
# the key is in the dictionary keys
try:
row_with_fkey = dict(row.items() + fkey.items())
checker[row_as_tuple]['pkey'].add(
(pkey_name, self.hash_row(row_with_fkey))
)
checker[row_as_tuple]['fkey'].add(tuple(fkey.items()))
except KeyError:
# make a deep copy because calling .pop() will update
# every single one of these otherwise
fkey_copy = deepcopy(fkey)
# create a tuple of tuples for proper extraction later
pkey_tuple = ((pkey_name, self.hash_row(row)),)
checker[row_as_tuple] = {
'pkey': set(pkey_tuple),
'fkey': set(tuple(fkey_copy.items()))
}
# We should now have deduplicated everything, so we just
# reshape the checker dictionary into the list of dict format
# that the remainder should expect
for checker_row, table_keys in checker.items():
if len(table_keys['fkey']) == 0 or len(fkeys_name) == 0:
final_output = dict(checker_row)
final_output.update(dict(table_keys['pkey']))
elif len(table_keys['pkey']) == 1 and len(table_keys['fkey']) == 1:
final_output = dict(checker_row)
final_output.update(dict(table_keys['pkey']))
final_output.update(dict(table_keys['fkey']))
elif len(table_keys['pkey']) == len(table_keys['fkey']):
for fkey in table_keys['fkey']:
new_pkey = self.hash_row(checker_row + fkey)
final_output = dict(checker_row)
final_output.update({pkey_name: new_pkey})
try:
final_output.update(dict((fkey,)))
except:
final_output.update(dict(fkey))
else:
# TODO: Implement something to handle this
raise Exception('pkey/fkey mismatch.')
output.append(final_output)
return output
def transform_to_schema(self, data, add_pkey):
'''
Schema for postgres must take the following form:
[
{
'table_name': '',
'pkey': '',
'index': '',
'to_relations': ['table_name', ...],
'from_relations': ['table_name', ...],
'columns': ( ('col_name', col_type), ... ),
}, ...
]
Input data will come in as a list of dictionaries, with
the keys being the column names and the values being the
values. The transformed data will return a list of
dictionaries where each dictionary is a table to write
to the final data store.
Additionally, this method holds a dictionary of like items
and their ids to allow for very simple deduplication.
'''
# start by generating the output list of lists
output = [list() for i in range(len(self.schema))]
holder = [defaultdict(list) for i in range(len(self.schema))]
# initialize a dictionary to hold potential duplicates
deduper = {}
for ix, line in enumerate(data):
for table_idx, table in enumerate(self.schema):
col_names = zip(*table['columns'])[0]
# initialize the new row to add to the final loaded data
new_row = dict()
for cell in line.iteritems():
if cell[0] in col_names:
# extend the new row with the value of the cell
new_row[cell[0]] = str(self.null_replace(cell[1]))
else:
continue
row_id = self.hash_row(new_row)
if add_pkey or table.get('pkey', None) is None:
new_row[table['table_name'] + '_id'] = row_id
else:
new_row[table['pkey']] = row_id
# once we have added all of the data fields, add the relationships
for relationship in table.get('to_relations', []):
# find the index of the matching relationship table
rel_index = next(index for (index, d) in enumerate(self.schema) if d['table_name'] == relationship)
output[rel_index][ix][self.schema[table_idx]['table_name'] + '_id'] = row_id
output[table_idx].extend([new_row])
final_output = []
for table_ix, table in enumerate(output):
final_output.append(self.simple_dedupe(table_ix, table))
return final_output
def generate_data_tempfile(self, data):
'''
Takes in a list and generates a temporary tab-separated
file. This file can then be consumed by the Postgres \COPY
function
'''
tmp_file = tempfile.TemporaryFile(dir=os.getcwd())
if len(data) == 0:
return tmp_file, None
n = 0
for row in data:
row = sorted(row.items())
n += 1
if n % 10000 == 0:
print 'Wrote {n} lines'.format(n=n)
rowstr = '\t'.join([str(n)] + [i[1] for i in row]) + '\n'
tmp_file.write(rowstr)
tmp_file.seek(0)
return tmp_file, ['row_id'] + sorted(data[0].keys())
def load(self, data, add_pkey=True):
'''
Main method for final Postgres loading.
Takes in data and a flag for adding a primary key and
transforms the input data to the proper schema, generates
relationships, does simple deduplcation on exact matches,
writes a tempfile with all of the data, boots up a
connection to Postgres, and loads everything in
'''
conn = None
try:
conn = self.connect()
cursor = conn.cursor()
if not self.schema:
raise Exception('Schemaless loading is not supported by PostgresLoader')
tables = self.transform_to_schema(data, add_pkey)
for ix, table in enumerate(self.schema):
table['columns'] = ( (table['table_name'] + '_id', 'VARCHAR(32)'), ) + table['columns']
if add_pkey or table.get('pkey', None) is None:
table['pkey'] = table['table_name'] + '_id'
if table.get('from_relations', None):
for relationship in table['from_relations']:
table['columns'] += ( ( relationship + '_id', 'VARCHAR(32)' ), )
drop_table = self.generate_drop_table_query(table)
cursor.execute(drop_table)
create_table = self.generate_create_table_query(table)
cursor.execute(create_table)
tmp_file, column_names = self.generate_data_tempfile(tables[ix])
cursor.copy_from(tmp_file, table['table_name'], null='NULL', sep='\t', columns=column_names)
for table in self.schema:
for ix, relationship in enumerate(table.get('from_relations', [])):
fk_query = self.generate_foreign_key_query(table, ix)
cursor.execute(fk_query)
conn.commit()
except psycopg2.Error, e:
if conn:
conn.rollback()
raise e
finally:
if conn:
conn.close()
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
from abc import abstractmethod
from collections import defaultdict
from glob import glob1
from twitter.common.collections import OrderedSet
from pants.util.dirutil import safe_walk
from pants.util.meta import AbstractClass
logger = logging.getLogger(__name__)
# Note: Significant effort has been made to keep the types BuildFile, BuildGraph, Address, and
# Target separated appropriately. Don't add references to those other types to this module.
class BuildFile(AbstractClass):
class BuildFileError(Exception):
    """Base class for all exceptions raised in BuildFile, so callers can
    catch one type to handle them all."""
    pass
class MissingBuildFileError(BuildFileError):
    """Raised when a BUILD file cannot be found at the path in the spec
    (or the path resolves to a directory when a file is required)."""
    pass
class InvalidRootDirError(BuildFileError):
    """Raised when the root_dir specified to a BUILD file is not valid
    (e.g. not an absolute path)."""
    pass
class BadPathError(BuildFileError):
    """Raised when scan_buildfiles is called on a nonexistent directory."""
    pass
_BUILD_FILE_PREFIX = 'BUILD'
# Matches 'BUILD' or 'BUILD.<suffix>'. Fix: use a raw string — '\.' in a
# non-raw literal is an invalid escape sequence (DeprecationWarning on
# modern Pythons, a SyntaxError in the future).
_PATTERN = re.compile(r'^{prefix}(\.[a-zA-Z0-9_-]+)?$'.format(prefix=_BUILD_FILE_PREFIX))

# Subclasses must have an _cache field.
@classmethod
def clear_cache(cls):
    """Drop all memoized BuildFile instances held by from_cache()."""
    cls._cache = {}
@classmethod
def from_cache(cls, root_dir, relpath, must_exist=True):
    """Return a memoized BuildFile for (root_dir, relpath, must_exist),
    constructing and caching one on first use."""
    key = (root_dir, relpath, must_exist)
    if key not in cls._cache:
        cls._cache[key] = cls(*key)
    return cls._cache[key]
@abstractmethod
def _glob1(self, path, glob):
    """Returns a list of paths in path that match glob.

    Abstract: concrete subclasses decide how the filesystem is accessed.
    """
def _get_all_build_files(self, path):
    """Return the sorted list of BUILD file names directly under *path*."""
    pattern = '{prefix}*'.format(prefix=self._BUILD_FILE_PREFIX)
    return sorted(
        candidate
        for candidate in self._glob1(path, pattern)
        if self._is_buildfile_name(candidate) and self._isfile(os.path.join(path, candidate))
    )
@classmethod
def _is_buildfile_name(cls, name):
    # Returns the re match object (truthy) when `name` is 'BUILD' or
    # 'BUILD.<suffix>', else None; callers treat it as a boolean.
    return cls._PATTERN.match(name)
@classmethod
def scan_buildfiles(cls, root_dir, base_path=None, spec_excludes=None):
    """Looks for all BUILD files
    :param root_dir: the root of the repo containing sources
    :param base_path: directory under root_dir to scan
    :param spec_excludes: list of paths to exclude from the scan.  These can be absolute paths
      or paths that are relative to the root_dir.
    :returns: an OrderedSet of BuildFile instances, sorted by full path.
    """
    def calc_exclude_roots(root_dir, excludes):
        """Return a map of root directories to subdirectory names suitable for a quick evaluation
        inside safe_walk()
        """
        result = defaultdict(set)
        for exclude in excludes:
            if exclude:
                if os.path.isabs(exclude):
                    exclude = os.path.realpath(exclude)
                else:
                    exclude = os.path.join(root_dir, exclude)
                # Excludes outside root_dir are silently ignored.
                if exclude.startswith(root_dir):
                    result[os.path.dirname(exclude)].add(os.path.basename(exclude))
        return result

    def find_excluded(root, dirs, exclude_roots):
        """Removes any of the directories specified in exclude_roots from dirs.
        """
        to_remove = set()
        for exclude_root in exclude_roots:
            # root ends with a /, trim it off
            if root.rstrip('/') == exclude_root:
                for subdir in exclude_roots[exclude_root]:
                    if subdir in dirs:
                        to_remove.add(subdir)
        return to_remove

    root_dir = os.path.realpath(root_dir)
    if base_path and not cls._isdir(os.path.join(root_dir, base_path)):
        raise cls.BadPathError('Can only scan directories and {0} is not a valid dir'
                               .format(base_path))
    buildfiles = []
    if not spec_excludes:
        exclude_roots = {}
    else:
        exclude_roots = calc_exclude_roots(root_dir, spec_excludes)

    # topdown=True so that pruning `dirs` in place below actually stops
    # the walk from descending into excluded/hidden directories.
    for root, dirs, files in cls._walk(root_dir, base_path or '', topdown=True):
        to_remove = find_excluded(root, dirs, exclude_roots)
        # For performance, ignore hidden dirs such as .git, .pants.d and .local_artifact_cache.
        # TODO: Instead of this heuristic, only walk known source_roots. But we can't do this
        # until we're able to express source_roots in some way other than bootstrap BUILD files...
        to_remove.update(d for d in dirs if d.startswith('.'))
        for subdir in to_remove:
            dirs.remove(subdir)
        for filename in files:
            if cls._is_buildfile_name(filename):
                buildfile_relpath = os.path.relpath(os.path.join(root, filename), root_dir)
                buildfiles.append(cls.from_cache(root_dir, buildfile_relpath))
    return OrderedSet(sorted(buildfiles, key=lambda buildfile: buildfile.full_path))
@abstractmethod
def _walk(self, root_dir, relpath, topdown=False):
    """Walk the file tree rooted at `path`.  Works like os.walk.

    Abstract: concrete subclasses decide how the filesystem is accessed.
    """
@classmethod
def _isdir(cls, path):
    """Returns True if path is a directory.  Must be overridden by subclasses."""
    raise NotImplementedError()
@classmethod
def _isfile(cls, path):
    """Returns True if path is a file.  Must be overridden by subclasses."""
    raise NotImplementedError()
@classmethod
def _exists(cls, path):
    """Returns True if path exists.  Must be overridden by subclasses."""
    raise NotImplementedError()
def __init__(self, root_dir, relpath=None, must_exist=True):
    """Creates a BuildFile object representing the BUILD file family at the specified path.

    :param string root_dir: The base directory of the project.
    :param string relpath: The path relative to root_dir where the BUILD file is found - this can
      either point directly at the BUILD file or else to a directory which contains BUILD files.
    :param bool must_exist: If True, at least one BUILD file must exist at the given location or
      else a `MissingBuildFileError` is thrown
    :raises IOError: if the root_dir path is not absolute.
    :raises MissingBuildFileError: if the path does not house a BUILD file and must_exist is `True`.
    """
    if not os.path.isabs(root_dir):
        raise self.InvalidRootDirError('BuildFile root_dir {root_dir} must be an absolute path.'
                                       .format(root_dir=root_dir))
    self.root_dir = os.path.realpath(root_dir)
    path = os.path.join(self.root_dir, relpath) if relpath else self.root_dir
    self._build_basename = self._BUILD_FILE_PREFIX
    buildfile = os.path.join(path, self._build_basename) if self._isdir(path) else path
    # There is no BUILD file without a prefix so select any viable sibling
    if not self._exists(buildfile) or self._isdir(buildfile):
        # Only the first candidate is taken; break exits after one sibling.
        for build in self._get_all_build_files(os.path.dirname(buildfile)):
            self._build_basename = build
            buildfile = os.path.join(path, self._build_basename)
            break
    if must_exist:
        if not self._exists(buildfile):
            raise self.MissingBuildFileError('BUILD file does not exist at: {path}'
                                            .format(path=buildfile))
        # If a build file must exist then we want to make sure it's not a dir.
        # In other cases we are ok with it being a dir, for example someone might have
        # repo/scripts/build/doit.sh.
        if self._isdir(buildfile):
            raise self.MissingBuildFileError('Path to buildfile ({buildfile}) is a directory, '
                                             'but it must be a file.'.format(buildfile=buildfile))
        if not self._is_buildfile_name(os.path.basename(buildfile)):
            raise self.MissingBuildFileError('{path} is not a BUILD file'
                                            .format(path=buildfile))
    # Cache the resolved identity of this BUILD file.
    self.full_path = os.path.realpath(buildfile)
    self.name = os.path.basename(self.full_path)
    self.parent_path = os.path.dirname(self.full_path)
    self.relpath = os.path.relpath(self.full_path, self.root_dir)
    self.spec_path = os.path.dirname(self.relpath)
def file_exists(self):
    """Returns True if this BuildFile corresponds to a real BUILD file on disk."""
    return self._isfile(self.full_path)
def descendants(self, spec_excludes=None):
    """Returns all BUILD files in descendant directories of this BUILD file's parent directory."""
    found = self.scan_buildfiles(self.root_dir, self.parent_path, spec_excludes=spec_excludes)
    # Exclude this BUILD file and its co-located siblings from the result.
    for member in self.family():
        found.discard(member)
    return found
def ancestors(self):
    """Returns all BUILD files in ancestor directories of this BUILD file's parent directory."""
    def find_parent(dir):
        # Returns (parent_dir, first BUILD file found in it, or None).
        parent = os.path.dirname(dir)
        for parent_buildfile in self._get_all_build_files(parent):
            buildfile = os.path.join(parent, parent_buildfile)
            # Only one file is needed here; family() below pulls in its siblings.
            return parent, self.from_cache(self.root_dir, os.path.relpath(buildfile, self.root_dir))
        return parent, None
    parent_buildfiles = OrderedSet()
    def is_root(path):
        return os.path.abspath(self.root_dir) == os.path.abspath(path)
    parentdir = os.path.dirname(self.full_path)
    visited = set()
    # Walk upwards until the project root; `visited` guards against
    # dirname() fixpoints (e.g. filesystem root) looping forever.
    while parentdir not in visited and not is_root(parentdir):
        visited.add(parentdir)
        parentdir, buildfile = find_parent(parentdir)
        if buildfile:
            parent_buildfiles.update(buildfile.family())
    return parent_buildfiles
def siblings(self):
    """Yield the BUILD files co-located with this BUILD file, excluding
    this BUILD file itself."""
    parent_relpath = os.path.dirname(self.relpath)
    for candidate in self._get_all_build_files(self.parent_path):
        if candidate == self.name:
            continue
        yield self.from_cache(self.root_dir, os.path.join(parent_relpath, candidate))
def family(self):
    """Yield this BUILD file followed by all of its co-located siblings.

    The family forms a single logical BUILD file composed of the canonical
    BUILD file (if it exists) plus sibling files with their own extensions,
    e.g. BUILD.extras.
    """
    yield self
    for member in self.siblings():
        yield member
@abstractmethod
def source(self):
    """Returns the source code for this BUILD file."""
def code(self):
    """Returns the code object for this BUILD file."""
    # dont_inherit=True keeps compiler flags of this module from leaking in.
    return compile(self.source(), self.full_path, 'exec', flags=0, dont_inherit=True)
def __eq__(self, other):
    """Equal when `other` is truthy, of the same concrete type, and
    resolves to the same full path."""
    if not other:
        return other
    if type(other) != type(self):
        return False
    return self.full_path == other.full_path
def __hash__(self):
    # Hash mirrors __eq__, which compares on full_path.
    return hash(self.full_path)
def __ne__(self, other):
    # Python 2 does not derive != from ==, so spell it out.
    return not self.__eq__(other)
def __repr__(self):
    # e.g. FilesystemBuildFile(/repo/src/BUILD)
    return '{}({})'.format(self.__class__.__name__, self.full_path)
class FilesystemBuildFile(BuildFile):
    """A BuildFile implementation backed by the local filesystem."""
    # TODO(dturner): this cache should really be in BuildFileAddressMapper, but unfortunately this
    # class needs to access it, so it can't be moved yet.
    _cache = {}

    def _glob1(self, path, glob):
        # Non-recursive glob within a single directory.
        return glob1(path, glob)

    def source(self):
        """Returns the source code for this BUILD file."""
        with open(self.full_path, 'rb') as source:
            return source.read()

    @classmethod
    def _isdir(cls, path):
        """Returns True if path is a directory"""
        return os.path.isdir(path)

    @classmethod
    def _isfile(cls, path):
        """Returns True if path is a file"""
        return os.path.isfile(path)

    @classmethod
    def _exists(cls, path):
        """Returns True if path exists"""
        return os.path.exists(path)

    @classmethod
    def _walk(self, root_dir, relpath, topdown=False):
        # NOTE(review): the `topdown` parameter is ignored here — the walk is
        # always top-down. Confirm no caller relies on bottom-up traversal.
        path = os.path.join(root_dir, relpath)
        return safe_walk(path, topdown=True)
| |
import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
try:
set
except NameError:
from sets import Set as set
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32','mingw32','all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath','njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture']
def quote_args(args):
    """Return a copy of *args* with space-containing items wrapped in
    double quotes, unless an item already begins with a quote character.

    (distutils' _nt_quote_args is not used because it does not check
    whether items are already quoted.)
    """
    quoted = []
    for arg in args:
        if ' ' in arg and arg[0] not in '"\'':
            quoted.append('"%s"' % (arg))
        else:
            quoted.append(arg)
    return quoted
def allpath(name):
    "Convert a /-separated pathname to one using the OS's path separator."
    parts = name.split('/')
    return os.path.join(*parts)
def rel_path(path, parent_path):
    """Return path relative to parent_path.
    """
    pd = os.path.abspath(parent_path)
    apath = os.path.abspath(path)
    if len(apath)<len(pd):
        # path cannot lie below parent_path; return it untouched.
        return path
    if apath==pd:
        return ''
    if pd == apath[:len(pd)]:
        # The prefix match must fall on a directory boundary.
        assert apath[len(pd)] in [os.sep],`path,apath[len(pd)]`
        path = apath[len(pd)+1:]
    return path
def get_path_from_frame(frame, parent_path=None):
    """Return path of the module given a frame object from the call stack.

    Returned path is relative to parent_path when given,
    otherwise it is absolute path.
    """
    # First, try to find if the file name is in the frame.
    try:
        caller_file = eval('__file__', frame.f_globals, frame.f_locals)
        d = os.path.dirname(os.path.abspath(caller_file))
    except NameError:
        # __file__ is not defined, so let's try __name__. We try this second
        # because setuptools spoofs __name__ to be '__main__' even though
        # sys.modules['__main__'] might be something else, like easy_install(1).
        caller_name = eval('__name__', frame.f_globals, frame.f_locals)
        __import__(caller_name)
        mod = sys.modules[caller_name]
        if hasattr(mod, '__file__'):
            d = os.path.dirname(os.path.abspath(mod.__file__))
        else:
            # we're probably running setup.py as execfile("setup.py")
            # (likely we're building an egg)
            d = os.path.abspath('.')
            # hmm, should we use sys.argv[0] like in __builtin__ case?
    if parent_path is not None:
        d = rel_path(d, parent_path)
    # An empty result means "current directory".
    return d or '.'
def njoin(*path):
    """Join two or more pathname components +
    - convert a /-separated pathname to one using the OS's path separator.
    - resolve `..` and `.` from path.

    Either passing n arguments as in njoin('a','b'), or a sequence
    of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
    """
    flattened = []
    for part in path:
        if is_sequence(part):
            # Recursively collapse e.g. njoin(['a','b'], 'c').
            flattened.append(njoin(*part))
        else:
            assert is_string(part)
            flattened.append(part)
    if not flattened:
        # njoin()
        joined = ''
    else:
        # njoin('a', 'b')
        joined = os.path.join(*flattened)
    if os.path.sep != '/':
        joined = joined.replace('/', os.path.sep)
    return minrelpath(joined)
def get_mathlibs(path=None):
    """Return the MATHLIB line from config.h

    Parses lines of the form ``#define MATHLIB <libs>`` where <libs> is a
    comma-separated list of library names.  When *path* is None the numpy
    include directory is consulted.
    """
    if path is None:
        path = get_numpy_include_dirs()[0]
    config_file = os.path.join(path,'config.h')
    fid = open(config_file)
    try:
        mathlibs = []
        s = '#define MATHLIB'
        for line in fid.readlines():
            if line.startswith(s):
                value = line[len(s):].strip()
                if value:
                    mathlibs.extend(value.split(','))
    finally:
        # Close the header even if parsing raises (the original code
        # leaked the file handle on error).
        fid.close()
    return mathlibs
def minrelpath(path):
    """Resolve `..` and '.' from path.
    """
    if not is_string(path):
        return path
    if '.' not in path:
        return path
    l = path.split(os.sep)
    # First pass: drop '.' components (index starts at 1, so a leading '.'
    # survives).
    while l:
        try:
            i = l.index('.',1)
        except ValueError:
            break
        del l[i]
    j = 1
    # Second pass: collapse 'name/..' pairs; a '..' preceded by another '..'
    # cannot be collapsed, so the search restarts further right.
    while l:
        try:
            i = l.index('..',j)
        except ValueError:
            break
        if l[i-1]=='..':
            j += 1
        else:
            del l[i],l[i-1]
            j = 1
    if not l:
        return ''
    return os.sep.join(l)
def _fix_paths(paths,local_path,include_non_existing):
    """Expand glob patterns in *paths*, preferring matches under *local_path*.

    Non-pattern entries are prefixed with *local_path* when that makes them
    exist; unresolved entries are kept only when *include_non_existing*.
    """
    assert is_sequence(paths), repr(type(paths))
    new_paths = []
    assert not is_string(paths),`paths`
    for n in paths:
        if is_string(n):
            if '*' in n or '?' in n:
                p = glob.glob(n)
                p2 = glob.glob(njoin(local_path,n))
                # Matches relative to local_path win over cwd-relative ones.
                if p2:
                    new_paths.extend(p2)
                elif p:
                    new_paths.extend(p)
                else:
                    if include_non_existing:
                        new_paths.append(n)
                    print 'could not resolve pattern in %r: %r' \
                          % (local_path,n)
            else:
                n2 = njoin(local_path,n)
                if os.path.exists(n2):
                    new_paths.append(n2)
                else:
                    if os.path.exists(n):
                        new_paths.append(n)
                    elif include_non_existing:
                        new_paths.append(n)
                    if not os.path.exists(n):
                        print 'non-existing path in %r: %r' \
                              % (local_path,n)
        elif is_sequence(n):
            # Flatten nested sequences recursively.
            new_paths.extend(_fix_paths(n,local_path,include_non_existing))
        else:
            new_paths.append(n)
    return map(minrelpath,new_paths)
def gpaths(paths, local_path='', include_non_existing=True):
    """Apply glob to paths and prepend local_path if needed.
    """
    # A single string is promoted to a one-element sequence.
    if is_string(paths):
        paths = (paths,)
    return _fix_paths(paths,local_path, include_non_existing)
_temporary_directory = None
def clean_up_temporary_directory():
    """atexit handler: remove the shared temp directory created by
    make_temp_file, if any."""
    from numpy.distutils import log
    global _temporary_directory
    if not _temporary_directory:
        return
    log.debug('removing %s', _temporary_directory)
    try:
        os.rmdir(_temporary_directory)
    except OSError:
        # Best-effort: the directory may be non-empty or already gone.
        pass
    _temporary_directory = None
def make_temp_file(suffix='', prefix='', text=True):
    """Create a temporary file inside a shared, atexit-cleaned directory.

    Returns (file_object, path); the file is open for writing.
    """
    global _temporary_directory
    if not _temporary_directory:
        # Lazily create one directory for all temp files and register cleanup.
        _temporary_directory = tempfile.mkdtemp()
        atexit.register(clean_up_temporary_directory)
    fid, name = tempfile.mkstemp(suffix=suffix,
                                 prefix=prefix,
                                 dir=_temporary_directory,
                                 text=text)
    fo = os.fdopen(fid, 'w')
    return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
    """Return 1 when stdout is a tty whose terminfo advertises colour
    support, else 0."""
    if sys.platform=='cygwin' and not os.environ.has_key('USE_COLOR'):
        # Avoid importing curses that causes illegal operation
        # with a message:
        #  PYTHON2 caused an invalid page fault in
        #  module CYGNURSES7.DLL as 015f:18bbfc28
        # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
        #          ssh to Win32 machine from debian
        #          curses.version is 2.2
        #          CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
        return 0
    if hasattr(sys.stdout,'isatty') and sys.stdout.isatty():
        try:
            import curses
            curses.setupterm()
            # Accept either the setf/setb or setaf/setab capability pairs,
            # or the scp capability, as evidence of colour support.
            if (curses.tigetnum("colors") >= 0
                and curses.tigetnum("pairs") >= 0
                and ((curses.tigetstr("setf") is not None
                      and curses.tigetstr("setb") is not None)
                     or (curses.tigetstr("setaf") is not None
                         and curses.tigetstr("setab") is not None)
                     or curses.tigetstr("scp") is not None)):
                return 1
        except Exception,msg:
            pass
    return 0
if terminal_has_colors():
    _colour_codes = dict(black=0, red=1, green=2, yellow=3,
                         blue=4, magenta=5, cyan=6, white=7)
    def colour_text(s, fg=None, bg=None, bold=False):
        """Wrap *s* in ANSI SGR escape codes for the requested foreground
        colour, background colour, and boldness."""
        seq = []
        if bold:
            seq.append('1')
        if fg:
            # SGR foreground codes are 30-37.
            fgcode = 30 + _colour_codes.get(fg.lower(), 0)
            seq.append(str(fgcode))
        if bg:
            # SGR background codes are 40-47.
            # Bug fix: the background code must be looked up from ``bg``;
            # it was previously (incorrectly) looked up from ``fg``.
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
            seq.append(str(bgcode))
        if seq:
            return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
        else:
            return s
else:
    def colour_text(s, fg=None, bg=None):
        """No-op fallback when the terminal does not support colours."""
        return s
def red_text(s):
    """Return *s* coloured red (no-op on colourless terminals)."""
    return colour_text(s, 'red')
def green_text(s):
    """Return *s* coloured green."""
    return colour_text(s, 'green')
def yellow_text(s):
    """Return *s* coloured yellow."""
    return colour_text(s, 'yellow')
def cyan_text(s):
    """Return *s* coloured cyan."""
    return colour_text(s, 'cyan')
def blue_text(s):
    """Return *s* coloured blue."""
    return colour_text(s, 'blue')
#########################
def cyg2win32(path):
    """On cygwin, translate '/cygdrive/X/...' into 'X:...' (normcased).

    On other platforms, or for paths not under /cygdrive, *path* is
    returned unchanged.
    """
    if sys.platform != 'cygwin' or not path.startswith('/cygdrive'):
        return path
    drive_letter = path[10]
    return drive_letter + ':' + os.path.normcase(path[11:])
def mingw32():
    """Return true when using mingw32 environment.
    """
    if sys.platform != 'win32':
        return False
    if os.environ.get('OSTYPE', '') == 'msys':
        return True
    if os.environ.get('MSYSTEM', '') == 'MINGW32':
        return True
    return False
def msvc_runtime_library():
    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
    marker = sys.version.find('MSC v.')
    if marker == -1:
        return None
    msc_ver = sys.version[marker + 6:marker + 10]
    runtimes = {'1300': 'msvcr70',    # MSVC 7.0
                '1310': 'msvcr71',    # MSVC 7.1
                '1400': 'msvcr80',    # MSVC 8
                }
    return runtimes.get(msc_ver, None)
def msvc_on_amd64():
    """On win32/AMD64 builds, force DISTUTILS_USE_SDK=1 unless already set."""
    if not (sys.platform=='win32' or os.name=='nt'):
        return
    if get_build_architecture() != 'AMD64':
        return
    if os.environ.has_key('DISTUTILS_USE_SDK'):
        return
    # try to avoid _MSVCCompiler__root attribute error
    print 'Forcing DISTUTILS_USE_SDK=1'
    os.environ['DISTUTILS_USE_SDK']='1'
    return
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)',re.I).match
def _get_f90_modules(source):
    """Return a list of Fortran f90 module names that
    given source file defines.

    Files without an f90/f95 extension yield an empty list.
    """
    if not f90_ext_match(source):
        return []
    modules = []
    f = open(source,'r')
    try:
        f_readlines = getattr(f,'xreadlines',f.readlines)
        for line in f_readlines():
            m = f90_module_name_match(line)
            if m:
                name = m.group('name')
                modules.append(name)
                # break # XXX can we assume that there is one module per file?
    finally:
        # Close the source file even if reading raises (the original code
        # leaked the file handle on error).
        f.close()
    return modules
def is_string(s):
    # NOTE(review): on Python 2 this excludes unicode objects — confirm
    # callers never pass unicode paths.
    return isinstance(s, str)
def all_strings(lst):
    """Return True if all items in lst are string objects. """
    for element in lst:
        if not is_string(element):
            return False
    return True
def is_sequence(seq):
    """Return True when *seq* is a non-string object supporting len().

    Strings are deliberately excluded even though they have a length.
    """
    if is_string(seq):
        return False
    try:
        len(seq)
    except Exception:
        # Previously a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; Exception keeps the intended
        # behaviour (TypeError for objects without len()).
        return False
    return True
def is_glob_pattern(s):
    """Return True when *s* is a string containing glob wildcards (* or ?).

    Bug fix: the '?' check previously used ``is`` (identity comparison
    against the string object), which is almost always False, so patterns
    containing only '?' wildcards were not recognised as globs.
    """
    return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
    """Return *seq* as a list; a scalar value is wrapped in a one-item list."""
    if not is_sequence(seq):
        return [seq]
    return list(seq)
def get_language(sources):
    # not used in numpy/scipy packages, use build_ext.detect_language instead
    """Determine language value (c,f77,f90) from sources """
    language = None
    for source in sources:
        if not isinstance(source, str):
            continue
        if f90_ext_match(source):
            # A single f90 file decides the language for the whole set.
            return 'f90'
        if fortran_ext_match(source):
            # Keep scanning: a later f90 file still wins.
            language = 'f77'
    return language
def has_f_sources(sources):
    """Return True if sources contains Fortran files """
    for candidate in sources:
        if fortran_ext_match(candidate):
            return True
    return False
def has_cxx_sources(sources):
    """Return True if sources contains C++ files """
    for candidate in sources:
        if cxx_ext_match(candidate):
            return True
    return False
def filter_sources(sources):
    """Return four lists of filenames containing
    C, C++, Fortran, and Fortran 90 module sources,
    respectively.

    A Fortran file that defines f90 modules is classified as an
    f90-module source rather than a plain Fortran source.
    """
    c_files = []
    cxx_files = []
    f_files = []
    fmodule_files = []
    for src in sources:
        if fortran_ext_match(src):
            if _get_f90_modules(src):
                fmodule_files.append(src)
            else:
                f_files.append(src)
        elif cxx_ext_match(src):
            cxx_files.append(src)
        else:
            c_files.append(src)
    return c_files, cxx_files, f_files, fmodule_files
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
    #XXX scan sources for include statements
    """Return the '*.h' headers that live alongside the given source files."""
    return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
    """Return true if directory is local directory.
    """
    if not is_string(directory):
        return False
    abs_dir = os.path.abspath(directory)
    c = os.path.commonprefix([os.getcwd(),abs_dir])
    # Remainder of the path after the common prefix with the cwd.
    new_dir = abs_dir[len(c):].split(os.sep)
    if new_dir and not new_dir[0]:
        # Drop the empty component left by a leading separator.
        new_dir = new_dir[1:]
    if new_dir and new_dir[0]=='build':
        # Anything under a top-level 'build' tree is not local source.
        return False
    new_dir = os.sep.join(new_dir)
    return os.path.isdir(new_dir)
def general_source_files(top_path):
    """Yield paths of all files under *top_path*, pruning VCS/build
    directories and editor/compiled leftovers (~, #, .pyc/.pyo, .o)."""
    pruned_directories = {'CVS':1, '.svn':1, 'build':1}
    prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
    for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
        # Prune in place so os.walk does not descend into excluded dirs.
        dirnames[:] = [d for d in dirnames if d not in pruned_directories]
        for fname in filenames:
            if prune_file_pat.search(fname):
                continue
            yield os.path.join(dirpath, fname)
def general_source_directories_files(top_path):
    """Return a directory name relative to top_path and
    files contained.
    """
    pruned_directories = ['CVS','.svn','build']
    prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
    for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
        # Prune in place so os.walk does not descend into excluded dirs.
        pruned = [ d for d in dirnames if d not in pruned_directories ]
        dirnames[:] = pruned
        for d in dirnames:
            dpath = os.path.join(dirpath, d)
            rpath = rel_path(dpath, top_path)
            files = []
            for f in os.listdir(dpath):
                fn = os.path.join(dpath,f)
                if os.path.isfile(fn) and not prune_file_pat.search(fn):
                    files.append(fn)
            yield rpath, files
    # Finally yield top_path itself (relative path '') with its own files.
    dpath = top_path
    rpath = rel_path(dpath, top_path)
    filenames = [os.path.join(dpath,f) for f in os.listdir(dpath) \
                 if not prune_file_pat.search(f)]
    files = [f for f in filenames if os.path.isfile(f)]
    yield rpath, files
def get_ext_source_files(ext):
    # Get sources and any include files in the same directory.
    """Return string sources of extension *ext* plus nearby headers and
    its local depends."""
    filenames = []
    # NOTE: relies on Python 2 filter() returning a list — `sources` is
    # iterated twice below.
    sources = filter(is_string, ext.sources)
    filenames.extend(sources)
    filenames.extend(get_dependencies(sources))
    for d in ext.depends:
        if is_local_src_dir(d):
            filenames.extend(list(general_source_files(d)))
        elif os.path.isfile(d):
            filenames.append(d)
    return filenames
def get_script_files(scripts):
    """Return only the string entries of *scripts*."""
    return filter(is_string, scripts)
def get_lib_source_files(lib):
    """Return string sources of library *lib* (a (name, build_info) pair)
    plus nearby headers and its local depends."""
    filenames = []
    sources = lib[1].get('sources',[])
    # NOTE: relies on Python 2 filter() returning a list — `sources` is
    # iterated twice below.
    sources = filter(is_string, sources)
    filenames.extend(sources)
    filenames.extend(get_dependencies(sources))
    depends = lib[1].get('depends',[])
    for d in depends:
        if is_local_src_dir(d):
            filenames.extend(list(general_source_files(d)))
        elif os.path.isfile(d):
            filenames.append(d)
    return filenames
def get_data_files(data):
    """Flatten a data_files entry (string or (prefix, sources) pair) into
    a list of existing file paths; callables are skipped."""
    if is_string(data):
        return [data]
    sources = data[1]
    filenames = []
    for s in sources:
        if callable(s):
            # Deferred sources are resolved elsewhere.
            continue
        if is_local_src_dir(s):
            filenames.extend(list(general_source_files(s)))
        elif is_string(s):
            if os.path.isfile(s):
                filenames.append(s)
            else:
                print 'Not existing data file:',s
        else:
            raise TypeError,repr(s)
    return filenames
def dot_join(*args):
    """Join the non-empty arguments with dots (package-name style)."""
    parts = [part for part in args if part]
    return '.'.join(parts)
def get_frame(level=0):
    """Return frame object from call stack with given level.
    """
    try:
        # Fast path: CPython-only accessor.
        return sys._getframe(level+1)
    except AttributeError:
        # Fallback for interpreters without _getframe: we are inside an
        # except block, so exc_info() gives us a traceback whose frame we
        # can walk upwards.
        frame = sys.exc_info()[2].tb_frame
        for _ in range(level+1):
            frame = frame.f_back
        return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules']
_dict_keys = ['package_dir']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
             package_name=None,
             parent_name=None,
             top_path=None,
             package_path=None,
             caller_level=1,
             **attrs):
    """Construct configuration instance of a package.

    package_name -- name of the package
                    Ex.: 'distutils'
    parent_name -- name of the parent package
                   Ex.: 'numpy'
    top_path -- directory of the toplevel package
                Ex.: the directory where the numpy package source sits
    package_path -- directory of package. Will be computed by magic from the
                    directory of the caller module if not specified
                    Ex.: the directory where numpy.distutils is
    caller_level -- frame level to caller namespace, internal parameter.
    """
    self.name = dot_join(parent_name, package_name)
    self.version = None
    caller_frame = get_frame(caller_level)
    self.local_path = get_path_from_frame(caller_frame, top_path)
    # local_path -- directory of a file (usually setup.py) that
    #               defines a configuration() function.
    if top_path is None:
        top_path = self.local_path
        self.local_path = ''
    if package_path is None:
        package_path = self.local_path
    elif os.path.isdir(njoin(self.local_path,package_path)):
        package_path = njoin(self.local_path,package_path)
    if not os.path.isdir(package_path or '.'):
        raise ValueError("%r is not a directory" % (package_path,))
    self.top_path = top_path
    self.package_path = package_path
    # this is the relative path in the installed package
    self.path_in_package = os.path.join(*self.name.split('.'))
    self.list_keys = self._list_keys[:]
    self.dict_keys = self._dict_keys[:]
    for n in self.list_keys:
        v = copy.copy(attrs.get(n, []))
        setattr(self, n, as_list(v))
    for n in self.dict_keys:
        v = copy.copy(attrs.get(n, {}))
        setattr(self, n, v)
    known_keys = self.list_keys + self.dict_keys
    self.extra_keys = self._extra_keys[:]
    # Unknown keyword attributes are stored and classified by type so
    # that todict() can reproduce them later.
    for n in attrs.keys():
        if n in known_keys:
            continue
        a = attrs[n]
        setattr(self,n,a)
        if isinstance(a, list):
            self.list_keys.append(n)
        elif isinstance(a, dict):
            self.dict_keys.append(n)
        else:
            self.extra_keys.append(n)
    if os.path.exists(njoin(package_path,'__init__.py')):
        self.packages.append(self.name)
        self.package_dir[self.name] = package_path
    self.options = dict(
        ignore_setup_xxx_py = False,
        assume_default_configuration = False,
        delegate_options_to_subpackages = False,
        quiet = False,
        )
    # Inherit options from a parent Configuration found on the call
    # stack, when that parent asked for delegation.
    caller_instance = None
    for i in range(1,3):
        try:
            f = get_frame(i)
        except ValueError:
            break
        try:
            caller_instance = eval('self',f.f_globals,f.f_locals)
            break
        except NameError:
            pass
    if isinstance(caller_instance, self.__class__):
        if caller_instance.options['delegate_options_to_subpackages']:
            self.set_options(**caller_instance.options)
def todict(self):
    """Return configuration dictionary suitable for passing
    to distutils.core.setup() function.
    """
    self._optimize_data_files()
    d = {}
    known_keys = self.list_keys + self.dict_keys + self.extra_keys
    for n in known_keys:
        a = getattr(self,n)
        if a:
            # Only non-empty values are forwarded to setup().
            d[n] = a
    return d
def info(self, message):
    """Print an informational message unless the 'quiet' option is set."""
    if not self.options['quiet']:
        print message
def warn(self, message):
    """Print a coloured warning to stderr."""
    print>>sys.stderr, blue_text('Warning: %s' % (message,))
def set_options(self, **options):
    """Configure Configuration instance.

    The following options are available:
     - ignore_setup_xxx_py
     - assume_default_configuration
     - delegate_options_to_subpackages
     - quiet
    """
    for key, value in options.items():
        if self.options.has_key(key):
            self.options[key] = value
        else:
            raise ValueError,'Unknown option: '+key
def get_distribution(self):
    """Return the current distutils distribution object (or None)."""
    from numpy.distutils.core import get_distribution
    return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
                             parent_name,
                             caller_level = 1):
    """Expand a '*'-pattern subpackage name into configurations.

    Only directories with an __init__.py are considered; any path with
    a 'build' component is skipped.
    """
    l = subpackage_name.split('.')
    subpackage_path = njoin([self.local_path]+l)
    dirs = filter(os.path.isdir,glob.glob(subpackage_path))
    config_list = []
    for d in dirs:
        if not os.path.isfile(njoin(d,'__init__.py')):
            continue
        if 'build' in d.split(os.sep):
            continue
        # Rebuild the dotted name from the trailing path components.
        n = '.'.join(d.split(os.sep)[-len(l):])
        c = self.get_subpackage(n,
                                parent_name = parent_name,
                                caller_level = caller_level+1)
        config_list.extend(c)
    return config_list
def _get_configuration_from_setup_py(self, setup_py,
                                     subpackage_name,
                                     subpackage_path,
                                     parent_name,
                                     caller_level = 1):
    """Import *setup_py* as a module and extract its configuration.

    Falls back to a default Configuration when the module does not
    define a configuration() function.
    """
    # In case setup_py imports local modules:
    sys.path.insert(0,os.path.dirname(setup_py))
    try:
        fo_setup_py = open(setup_py, 'U')
        setup_name = os.path.splitext(os.path.basename(setup_py))[0]
        n = dot_join(self.name,subpackage_name,setup_name)
        setup_module = imp.load_module('_'.join(n.split('.')),
                                       fo_setup_py,
                                       setup_py,
                                       ('.py', 'U', 1))
        fo_setup_py.close()
        if not hasattr(setup_module,'configuration'):
            if not self.options['assume_default_configuration']:
                self.warn('Assuming default configuration '\
                          '(%s does not define configuration())'\
                          % (setup_module))
            config = Configuration(subpackage_name, parent_name,
                                   self.top_path, subpackage_path,
                                   caller_level = caller_level + 1)
        else:
            pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
            args = (pn,)
            # Pass top_path only when configuration() accepts a second arg.
            if setup_module.configuration.func_code.co_argcount > 1:
                args = args + (self.top_path,)
            config = setup_module.configuration(*args)
        if config.name!=dot_join(parent_name,subpackage_name):
            self.warn('Subpackage %r configuration returned as %r' % \
                      (dot_join(parent_name,subpackage_name), config.name))
    finally:
        # Always undo the sys.path manipulation above.
        del sys.path[0]
    return config
def get_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   parent_name=None,
                   caller_level = 1):
    """Return list of subpackage configurations.

    '*' in subpackage_name is handled as a wildcard.
    """
    if subpackage_name is None:
        if subpackage_path is None:
            raise ValueError(
                "either subpackage_name or subpackage_path must be specified")
        subpackage_name = os.path.basename(subpackage_path)
    # handle wildcards
    l = subpackage_name.split('.')
    if subpackage_path is None and '*' in subpackage_name:
        return self._wildcard_get_subpackage(subpackage_name,
                                             parent_name,
                                             caller_level = caller_level+1)
    assert '*' not in subpackage_name,`subpackage_name, subpackage_path,parent_name`
    if subpackage_path is None:
        subpackage_path = njoin([self.local_path] + l)
    else:
        # Only the leading components of a dotted name extend the path.
        subpackage_path = njoin([subpackage_path] + l[:-1])
        subpackage_path = self.paths([subpackage_path])[0]
    setup_py = njoin(subpackage_path, 'setup.py')
    if not self.options['ignore_setup_xxx_py']:
        if not os.path.isfile(setup_py):
            # Fall back to the setup_<name>.py naming convention.
            setup_py = njoin(subpackage_path,
                             'setup_%s.py' % (subpackage_name))
    if not os.path.isfile(setup_py):
        if not self.options['assume_default_configuration']:
            self.warn('Assuming default configuration '\
                      '(%s/{setup_%s,setup}.py was not found)' \
                      % (os.path.dirname(setup_py), subpackage_name))
        config = Configuration(subpackage_name, parent_name,
                               self.top_path, subpackage_path,
                               caller_level = caller_level+1)
    else:
        config = self._get_configuration_from_setup_py(
            setup_py,
            subpackage_name,
            subpackage_path,
            parent_name,
            caller_level = caller_level + 1)
    if config:
        return [config]
    else:
        return []
def add_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   standalone = False):
    """Add subpackage to configuration.

    When *standalone* is true the subpackage is configured without a
    parent name.
    """
    if standalone:
        parent_name = None
    else:
        parent_name = self.name
    config_list = self.get_subpackage(subpackage_name,subpackage_path,
                                      parent_name = parent_name,
                                      caller_level = 2)
    if not config_list:
        self.warn('No configuration returned, assuming unavailable.')
    for config in config_list:
        d = config
        if isinstance(config, Configuration):
            d = config.todict()
        assert isinstance(d,dict),`type(d)`
        self.info('Appending %s configuration to %s' \
                  % (d.get('name'), self.name))
        self.dict_append(**d)
    dist = self.get_distribution()
    if dist is not None:
        # distutils may not pick up changes made after initialization.
        self.warn('distutils distribution has been initialized,'\
                  ' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self,data_path):
    """Recursively add files under data_path to data_files list.

    Argument can be either
    - 2-sequence (<datadir suffix>,<path to data directory>)
    - path to data directory where python datadir suffix defaults
      to package dir.

    Rules for installation paths:
      foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
      (gun, foo/bar) -> parent/gun
      foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
      (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
      (gun/*, foo/*) -> parent/gun/a, parent/gun/b
      /foo/bar -> (bar, /foo/bar) -> parent/bar
      (gun, /foo/bar) -> parent/gun
      (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
    """
    if is_sequence(data_path):
        d, data_path = data_path
    else:
        d = None
    if is_sequence(data_path):
        # A sequence of paths shares the same suffix d.
        [self.add_data_dir((d,p)) for p in data_path]
        return
    if not is_string(data_path):
        raise TypeError("not a string: %r" % (data_path,))
    if d is None:
        # Default the datadir suffix from the data path itself.
        if os.path.isabs(data_path):
            return self.add_data_dir((os.path.basename(data_path), data_path))
        return self.add_data_dir((data_path, data_path))
    paths = self.paths(data_path, include_non_existing=False)
    if is_glob_pattern(data_path):
        if is_glob_pattern(d):
            # Fill the wildcards in the suffix pattern from each matched path.
            pattern_list = allpath(d).split(os.sep)
            pattern_list.reverse()
            # /a/*//b/ -> /a/*/b
            rl = range(len(pattern_list)-1); rl.reverse()
            for i in rl:
                if not pattern_list[i]:
                    del pattern_list[i]
            #
            for path in paths:
                if not os.path.isdir(path):
                    print 'Not a directory, skipping',path
                    continue
                rpath = rel_path(path, self.local_path)
                path_list = rpath.split(os.sep)
                path_list.reverse()
                target_list = []
                i = 0
                for s in pattern_list:
                    if is_glob_pattern(s):
                        if i>=len(path_list):
                            raise ValueError,'cannot fill pattern %r with %r' \
                                  % (d, path)
                        target_list.append(path_list[i])
                    else:
                        # Non-wildcard components must match the path exactly.
                        assert s==path_list[i],`s,path_list[i],data_path,d,path,rpath`
                        target_list.append(s)
                    i += 1
                if path_list[i:]:
                    self.warn('mismatch of pattern_list=%s and path_list=%s'\
                              % (pattern_list,path_list))
                target_list.reverse()
                self.add_data_dir((os.sep.join(target_list),path))
        else:
            for path in paths:
                self.add_data_dir((d,path))
        return
    assert not is_glob_pattern(d),`d`
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files
    for path in paths:
        for d1,f in list(general_source_directories_files(path)):
            target_path = os.path.join(self.path_in_package,d,d1)
            data_files.append((target_path, f))
def _optimize_data_files(self):
    """Merge data_files entries sharing a target path, de-duplicating files."""
    data_dict = {}
    for p,files in self.data_files:
        if not data_dict.has_key(p):
            data_dict[p] = set()
        map(data_dict[p].add,files)
    self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()]
def add_data_files(self,*files):
    """Add data files to configuration data_files.

    Argument(s) can be either
    - 2-sequence (<datadir prefix>,<path to data file(s)>)
    - paths to data files where python datadir prefix defaults
      to package dir.

    Rules for installation paths:
      file.txt -> (., file.txt)-> parent/file.txt
      foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
      /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
      *.txt -> parent/a.txt, parent/b.txt
      foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
      */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
      (sun, file.txt) -> parent/sun/file.txt
      (sun, bar/file.txt) -> parent/sun/file.txt
      (sun, /foo/bar/file.txt) -> parent/sun/file.txt
      (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
      (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
      (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
    """
    if len(files)>1:
        map(self.add_data_files, files)
        return
    assert len(files)==1
    if is_sequence(files[0]):
        d,files = files[0]
    else:
        d = None
    if is_string(files):
        filepat = files
    elif is_sequence(files):
        if len(files)==1:
            filepat = files[0]
        else:
            for f in files:
                self.add_data_files((d,f))
            return
    else:
        raise TypeError,`type(files)`
    if d is None:
        # Default the datadir prefix from the file path.
        if callable(filepat):
            d = ''
        elif os.path.isabs(filepat):
            d = ''
        else:
            d = os.path.dirname(filepat)
        self.add_data_files((d,files))
        return
    paths = self.paths(filepat, include_non_existing=False)
    if is_glob_pattern(filepat):
        if is_glob_pattern(d):
            # Fill the wildcards in the prefix from each matched file path.
            pattern_list = d.split(os.sep)
            pattern_list.reverse()
            for path in paths:
                path_list = path.split(os.sep)
                path_list.reverse()
                path_list.pop() # filename
                target_list = []
                i = 0
                for s in pattern_list:
                    if is_glob_pattern(s):
                        target_list.append(path_list[i])
                        i += 1
                    else:
                        target_list.append(s)
                target_list.reverse()
                self.add_data_files((os.sep.join(target_list), path))
        else:
            self.add_data_files((d,paths))
        return
    assert not is_glob_pattern(d),`d,filepat`
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files
    data_files.append((os.path.join(self.path_in_package,d),paths))
### XXX Implement add_py_modules
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
    def add_numarray_include_dirs(self):
        # Convenience hook for extensions built against the legacy numarray
        # compatibility layer shipped as numpy.numarray.
        import numpy.numarray.util as nnu
        self.add_include_dirs(*nnu.get_numarray_include_dirs())
def add_headers(self,*files):
"""Add installable headers to configuration.
Argument(s) can be either
- 2-sequence (<includedir suffix>,<path to header file(s)>)
- path(s) to header file(s) where python includedir suffix will default
to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name,p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0],p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
"""
include_non_existing = kws.get('include_non_existing',True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self,kw):
for k in kw.keys():
v = kw[k]
if k in ['sources','depends','include_dirs','library_dirs',
'module_dirs','extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Keywords:
include_dirs, define_macros, undef_macros,
library_dirs, libraries, runtime_library_dirs,
extra_objects, extra_compile_args, extra_link_args,
export_symbols, swig_opts, depends, language,
f2py_options, module_dirs
extra_info - dict or list of dict of keywords to be
appended to keywords.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name,name)
ext_args['sources'] = sources
if ext_args.has_key('extra_info'):
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries',[])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname,tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname,lpath = libname.split('@',1)
lpath = os.path.abspath(njoin(self.local_path,lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None,lpath,
caller_level = 2)
if isinstance(c,Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries',[])]:
llname = l.split('__OF__',1)[0]
if llname == lname:
c.pop('name',None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""Add library to configuration.
Valid keywords for build_info:
depends
macros
include_dirs
extra_compiler_args
f2py_options
language
"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
self._fix_paths_dict(build_info)
self.libraries.append((name,build_info))
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def add_scripts(self,*files):
"""Add scripts to configuration.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self,key)
a.extend(dict.get(key,[]))
for key in self.dict_keys:
a = getattr(self,key)
a.update(dict.get(key,{}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key,dict[key],dict.get('name','?')))
setattr(self,key,dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self,key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError, "Don't know about key=%r" % (key)
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self,k,None)
if a:
s += '%s = %s\n' % (k,pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.',old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib,Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self,path):
"""Return path's SVN revision number.
"""
revision = None
m = None
try:
sin, sout = os.popen4('svnversion')
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None):
entries = njoin(path,'_svn','entries')
else:
entries = njoin(path,'.svn','entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"',fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
    def get_version(self, version_file=None, version_variable=None):
        """Try to get version string of a package.
        Looks, in order, at: a cached ``self.version`` attribute; candidate
        version files in the package directory; the SVN revision number.
        Returns None when no version can be determined.
        """
        version = getattr(self,'version',None)
        if version is not None:
            return version
        # Get version from version file.
        if version_file is None:
            files = ['__version__.py',
                     self.name.split('.')[-1]+'_version.py',
                     'version.py',
                     '__svn_version__.py']
        else:
            files = [version_file]
        if version_variable is None:
            version_vars = ['version',
                            '__version__',
                            self.name.split('.')[-1]+'_version']
        else:
            version_vars = [version_variable]
        for f in files:
            fn = njoin(self.local_path,f)
            if os.path.isfile(fn):
                # Load the candidate file under a dotted, uniquified module
                # name so repeated calls do not clash in sys.modules.
                info = (open(fn),fn,('.py','U',1))
                name = os.path.splitext(os.path.basename(fn))[0]
                n = dot_join(self.name,name)
                try:
                    version_module = imp.load_module('_'.join(n.split('.')),*info)
                except ImportError,msg:
                    self.warn(str(msg))
                    version_module = None
                if version_module is None:
                    continue
                # First matching version variable wins.
                for a in version_vars:
                    version = getattr(version_module,a,None)
                    if version is not None:
                        break
                if version is not None:
                    break
        if version is not None:
            # Cache for subsequent calls.
            self.version = version
            return version
        # Get version as SVN revision number
        revision = self._get_svn_revision(self.local_path)
        if revision is not None:
            version = str(revision)
            self.version = version
        return version
    def make_svn_version_py(self, delete=True):
        """Generate package __svn_version__.py file from SVN revision number,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.
        If __svn_version__.py existed before, nothing is done.
        """
        target = njoin(self.local_path,'__svn_version__.py')
        revision = self._get_svn_revision(self.local_path)
        if os.path.isfile(target) or revision is None:
            return
        else:
            def generate_svn_version_py():
                # Deferred generator: runs when the data file is materialized.
                if not os.path.isfile(target):
                    version = str(revision)
                    self.info('Creating %s (version=%r)' % (target,version))
                    f = open(target,'w')
                    f.write('version = %r\n' % (version))
                    f.close()
                import atexit
                def rm_file(f=target,p=self.info):
                    # Default-argument binding keeps target/info usable at
                    # interpreter shutdown, when globals may already be gone.
                    if delete:
                        try: os.remove(f); p('removed '+f)
                        except OSError: pass
                        try: os.remove(f+'c'); p('removed '+f+'c')
                        except OSError: pass
                atexit.register(rm_file)
                return target
            self.add_data_files(('', generate_svn_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
"""
self.py_modules.append((self.name,name,generate_config_py))
def get_info(self,*names):
"""Get resources information.
"""
from system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
    """Return (and memoize) the distutils command object *cmdname* from the
    currently initialized setup distribution.
    Raises DistutilsInternalError when setup() has not been run yet.
    Note: the mutable default ``_cache`` is deliberate - it persists across
    calls as the memoization table.
    """
    # 'in' replaces the Python-2-only dict.has_key().
    if cmdname not in _cache:
        import distutils.core
        dist = distutils.core._setup_distribution
        if dist is None:
            from distutils.errors import DistutilsInternalError
            raise DistutilsInternalError(
                  'setup distribution instance not initialized')
        _cache[cmdname] = dist.get_command_obj(cmdname)
    return _cache[cmdname]
def get_numpy_include_dirs():
    """Return the include directories for compiling against numpy headers."""
    # Configuration.numpy_include_dirs is populated by numpy/core/setup.py;
    # everywhere else it is empty and we fall back to the installed numpy.
    include_dirs = list(Configuration.numpy_include_dirs)
    if not include_dirs:
        import numpy
        include_dirs = [numpy.get_include()]
    return include_dirs
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
    """Return a configuration dictionary for usage in
    configuration() function defined in file setup_<name>.py.
    Deprecated: build a Configuration directly instead.
    """
    import warnings
    warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
                  'deprecated default_config_dict(%r,%r,%r)'
                  % (name, parent_name, local_path,
                     name, parent_name, local_path,
                     ))
    return Configuration(name, parent_name, local_path).todict()
def dict_append(d, **kws):
    """Merge keyword values into dict *d* in place.
    Existing string values are overwritten; other existing values are
    assumed to be list-like and are extended; missing keys are inserted
    as-is.
    """
    for k, v in kws.items():
        # 'in' replaces the Python-2-only dict.has_key().
        if k in d:
            if isinstance(d[k], str):
                d[k] = v
            else:
                d[k].extend(v)
        else:
            d[k] = v
def appendpath(prefix, path):
    """Join *path* onto *prefix*; for an absolute *path*, only the part
    outside the common prefix with *prefix* is appended.
    """
    if os.path.sep != '/':
        # Normalize POSIX-style inputs on platforms with other separators.
        prefix = prefix.replace('/', os.path.sep)
        path = path.replace('/', os.path.sep)
    drive = ''
    if os.path.isabs(path):
        drive = os.path.splitdrive(prefix)[0]
        absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
        pathdrive, path = os.path.splitdrive(path)
        d = os.path.commonprefix([absprefix, path])
        # commonprefix() works character-wise and may cut a directory name in
        # half; detect that by re-joining and back off to the parent dir.
        if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
           or os.path.join(path[:len(d)], path[len(d):]) != path:
            # Handle invalid paths
            d = os.path.dirname(d)
        subpath = path[len(d):]
        if os.path.isabs(subpath):
            subpath = subpath[1:]
    else:
        subpath = path
    return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
    """Generate config.py file containing system_info information
    used during building the package.
    Usage:
        config['py_modules'].append((packagename, '__config__',generate_config_py))
    """
    from numpy.distutils.system_info import system_info
    from distutils.dir_util import mkpath
    mkpath(os.path.dirname(target))
    f = open(target, 'w')
    f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
    f.write('# It contains system_info results at the time of building this package.\n')
    f.write('__all__ = ["get_info","show"]\n\n')
    # Dump every saved system_info result as a module-level assignment.
    for k, i in system_info.saved_results.items():
        f.write('%s=%r\n' % (k, i))
    # The accessor code below is written verbatim into the generated module
    # (note: it uses Python 2 print statements, matching this codebase).
    f.write(r'''
def get_info(name):
    g = globals()
    return g.get(name, g.get(name + "_info", {}))
def show():
    for name,info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}): continue
        print name + ":"
        if not info_dict:
            print "  NOT AVAILABLE"
        for k,v in info_dict.items():
            v = str(v)
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print "    %s = %s" % (k,v)
        print
    ''')
    f.close()
    return target
# NOTE(review): lexicographic compare of sys.version[:3] assumes single-digit
# minor versions - fine across the Python 2 line this file targets.
if sys.version[:3] >= '2.5':
    def get_build_architecture():
        # distutils.msvccompiler grew get_build_architecture in Python 2.5;
        # simply re-export it.
        from distutils.msvccompiler import get_build_architecture
        return get_build_architecture()
else:
    #copied from python 2.5.1 distutils/msvccompiler.py
    def get_build_architecture():
        """Return the processor architecture.
        Possible results are "Intel", "Itanium", or "AMD64".
        """
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return "Intel"
        j = sys.version.find(")", i)
        return sys.version[i+len(prefix):j]
| |
import string
# Indentation unit used when emitting generated code.
INDENT = ' '
# Reserved words of the C language; never valid identifiers.
KEYWORDS = ['auto',
            'break',
            'case',
            'char',
            'const',
            'continue',
            'default',
            'do',
            'double',
            'else',
            'enum',
            'extern',
            'float',
            'for',
            'goto',
            'if',
            'int',
            'long',
            'register',
            'return',
            'short',
            'signed',
            'sizeof',
            'static',
            'struct',
            'switch',
            'typedef',
            'union',
            'unsigned',
            'void',
            'volatile',
            'while']
# Characters allowed as the first / subsequent characters of a C identifier.
ID_FIRST = '_' + string.ascii_letters
ID_OTHER = ID_FIRST + string.digits
# Preprocessor directive names recognised after '#'.
CPP_DIRECTIVES = ['assert',
                  'define',
                  'elif',
                  'else',
                  'endif',
                  'error',
                  'ident',
                  'if',
                  'ifdef',
                  'ifndef',
                  'import',
                  'include',
                  'include_next',
                  'line',
                  'pragma',
                  'sccs',
                  'unassert',
                  'undef',
                  'warning']
def is_identifier(s):
    '''Check if the given string represents a valid C identifier.'''
    if not s or s in KEYWORDS:
        return False
    # First character has a narrower alphabet than the rest.
    if s[0] not in ID_FIRST:
        return False
    for ch in s[1:]:
        if ch not in ID_OTHER:
            return False
    return True
def to_identifier(s):
    '''Convert the given string to a valid C identifier by replacing invalid chars by an underscore.'''
    if not s:
        return '_'
    if s in KEYWORDS:
        return s + '_'
    pieces = []
    allowed = ID_FIRST
    for ch in s:
        pieces.append(ch if ch in allowed else '_')
        # After the first character, digits become legal too.
        allowed = ID_OTHER
    return ''.join(pieces)
def byte_reader(file_object):
    '''Yield single bytes read from the given file object until EOF.'''
    read = file_object.read
    while True:
        ch = read(1)
        if not ch:
            return
        yield ch
def strip_comments(iterable, throw_final=True):
    '''Yield the bytes of *iterable* with C and C++ comments stripped.
    The iterable argument must contain only byte values (0-255). The result
    bytes contain all characters except those enclosed in C or C++ comments.
    The only exception is new line characters - those are yielded always,
    even when inside a block comment, so the correct line number can still be
    determined when the result is further processed.
    Comment tokens (// or /*) inside string and character literals are left
    untouched.
    If throw_final evaluates to True, the current state is checked after all
    input bytes have been processed. If the internal FSM is not in the final
    state, a ValueError is raised. This happens only when there are unclosed
    block comments, string or character literals.
    '''
    # States of the Mealy-style FSM.
    CODE = 0
    STRING = 1
    STRING_ESCAPE = 2
    CHAR = 3
    CHAR_ESCAPE = 4
    SLASH = 5
    LINECOMMENT = 6
    BLOCKCOMMENT = 7
    BLOCKASTER = 8
    BLOCKNEWLINE = 9
    state = CODE
    # State transitions; the '' key is the default for any other input byte.
    transitions = {
        CODE          : {'"': STRING, "'": CHAR, '/': SLASH, },
        STRING        : {'"': CODE, '\\': STRING_ESCAPE,},
        STRING_ESCAPE : { '': STRING },
        CHAR          : {"'": CODE, '\\': CHAR_ESCAPE},
        CHAR_ESCAPE   : {'': CHAR},
        SLASH         : {'/': LINECOMMENT, '*': BLOCKCOMMENT, '': CODE},
        LINECOMMENT   : {'\n': CODE},
        BLOCKCOMMENT  : {'*': BLOCKASTER, '\n': BLOCKNEWLINE},
        BLOCKASTER    : { '/': CODE, '*': BLOCKASTER, '': BLOCKCOMMENT, '\n': BLOCKNEWLINE },
        BLOCKNEWLINE  : { '\n': BLOCKNEWLINE, '*': BLOCKASTER, '': BLOCKCOMMENT}
        }
    # Output generation (Mealy FSM); unlisted transitions echo the input.
    silent = lambda x : ''
    default = lambda x : x
    transition_out = {
        (CODE, SLASH) : silent,               # '/' might start a comment...
        (SLASH, CODE) : lambda x: '/' + x,    # ...it did not: re-emit it
        (SLASH, BLOCKCOMMENT) : silent,
        (SLASH, LINECOMMENT) : silent,
        (LINECOMMENT, LINECOMMENT) : silent,
        (LINECOMMENT, CODE) : default,
        (BLOCKCOMMENT, BLOCKNEWLINE) : default,   # keep comment newlines
        (BLOCKASTER, BLOCKNEWLINE) : default,
        (BLOCKNEWLINE, BLOCKNEWLINE) : default,
        (BLOCKCOMMENT, None) : silent,
        (BLOCKASTER, None) : silent,
        (BLOCKNEWLINE, None) : silent,
        }
    for byte in iterable:
        trans = transitions[state]
        # FIX: renamed 'next' -> 'next_state' (shadowed the builtin) and
        # dropped the original's dead reassignment of 'trans' to a tuple.
        next_state = trans.get(byte, trans.get('', state))
        fn = (transition_out.get((state, next_state), None) or
              transition_out.get((state, None), None) or
              transition_out.get((None, next_state), None) or
              default)
        out = fn(byte)
        if False: # Change to True for debugging
            state_desc = {0: 'CODE',
                          1: 'STRING',
                          2: 'STRING_ESCAPE',
                          3: 'CHAR',
                          4: 'CHAR_ESCAPE',
                          5: 'SLASH',
                          6: 'LINECOMMENT',
                          7: 'BLOCKCOMMENT',
                          8: 'BLOCKASTER',
                          9: 'BLOCKNEWLINE'}
            out_str = out.replace('\n', '\\n').replace('\t', '\\t')
            # FIX: function-call form works on Python 2 and 3 alike; the
            # original print statement was a syntax error under Python 3.
            print('FSM %10s -> %10s : "%s"' % (state_desc[state], state_desc[next_state], out_str))
        for c in out:
            yield c
        state = next_state
    # Check for invalid final states
    if not throw_final:
        return
    if state in (STRING, STRING_ESCAPE):
        raise ValueError('''missing terminating '"' character''')
    elif state in (CHAR, CHAR_ESCAPE):
        raise ValueError('''missing terminating ' character''')
    elif state in (BLOCKCOMMENT, BLOCKASTER, BLOCKNEWLINE):
        raise ValueError('''unterminated /* comment''')
def iter_lines(iterable, throw_final=False):
    '''Yield pairs of line number and line contents.
    Comment-stripped lines are produced in order; lines split with a trailing
    backslash are merged (joined by single spaces) and yielded together under
    the number of their last physical line, so some line numbers may be
    skipped.  The throw_final argument is forwarded to strip_comments.
    '''
    def raw_lines():
        '''Yield comment-stripped input split at newline characters.'''
        buf = []
        for ch in strip_comments(iterable, throw_final):
            if ch == '\n':
                yield ''.join(buf)
                buf = []
            else:
                buf.append(ch)
        yield ''.join(buf)
    pending = []
    lineno = 0
    for lineno, raw in enumerate(raw_lines(), 1):
        stripped = raw.rstrip()
        if stripped.endswith('\\'):
            # Continuation: strip the backslash and keep accumulating.
            pending.append(stripped[:-1])
            continue
        pending.append(stripped)
        yield lineno, ' '.join(pending)
        pending = []
    # Final (possibly empty) logical line, mirroring the original behavior.
    yield lineno, ' '.join(pending)
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.utils import timeutils
from nova import db
from nova import exception
from nova.objects import aggregate
from nova.objects import service
from nova.tests.objects import test_compute_node
from nova.tests.objects import test_objects
# Frozen 'now' (microseconds dropped) so timestamp comparisons are stable.
NOW = timeutils.utcnow().replace(microsecond=0)
# Canonical DB-layer representation of a service row used by all tests.
fake_service = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'host': 'fake-host',
    'binary': 'fake-service',
    'topic': 'fake-service-topic',
    'report_count': 1,
    'disabled': False,
    'disabled_reason': None,
    }
# Object fields that may legitimately be left unset by the queries below.
OPTIONAL = ['availability_zone', 'compute_node']
class _TestServiceObject(object):
    """Shared test cases for the Service versioned object.
    Mixed into both a local and a remote (RPC-backed) TestCase below; relies
    on the harness providing self.mox, self.context, compare_obj and the
    json/str comparator helpers.
    """
    def supported_hv_specs_comparator(self, expected, obj_val):
        # HVSpec instances are compared through their list representation.
        obj_val = [inst.to_list() for inst in obj_val]
        self.json_comparator(expected, obj_val)
    def comparators(self):
        # Field-specific comparison functions handed to compare_obj().
        return {'stats': self.json_comparator,
                'host_ip': self.str_comparator,
                'supported_hv_specs': self.supported_hv_specs_comparator}
    def subs(self):
        # Object-field -> DB-column name substitutions for compare_obj().
        return {'supported_hv_specs': 'supported_instances'}
    def _test_query(self, db_method, obj_method, *args, **kwargs):
        # Generic check that a Service query classmethod delegates to the
        # matching db API call and wraps the returned row.
        self.mox.StubOutWithMock(db, db_method)
        getattr(db, db_method)(self.context, *args, **kwargs).AndReturn(
            fake_service)
        self.mox.ReplayAll()
        obj = getattr(service.Service, obj_method)(self.context, *args,
                                                   **kwargs)
        self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
    def test_get_by_id(self):
        self._test_query('service_get', 'get_by_id', 123)
    def test_get_by_host_and_topic(self):
        self._test_query('service_get_by_host_and_topic',
                         'get_by_host_and_topic', 'fake-host', 'fake-topic')
    def test_get_by_compute_host(self):
        self._test_query('service_get_by_compute_host', 'get_by_compute_host',
                         'fake-host')
    def test_get_by_args(self):
        self._test_query('service_get_by_args', 'get_by_args', 'fake-host',
                         'fake-service')
    def test_with_compute_node(self):
        # A service row carrying its compute_node should populate the lazy
        # attribute without a second db call.
        self.mox.StubOutWithMock(db, 'service_get')
        self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
        _fake_service = dict(
            fake_service, compute_node=[test_compute_node.fake_compute_node])
        db.service_get(self.context, 123).AndReturn(_fake_service)
        self.mox.ReplayAll()
        service_obj = service.Service.get_by_id(self.context, 123)
        self.assertTrue(service_obj.obj_attr_is_set('compute_node'))
        self.compare_obj(service_obj.compute_node,
                         test_compute_node.fake_compute_node,
                         subs=self.subs(),
                         allow_missing=OPTIONAL,
                         comparators=self.comparators())
    def test_create(self):
        self.mox.StubOutWithMock(db, 'service_create')
        db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
            fake_service)
        self.mox.ReplayAll()
        service_obj = service.Service()
        service_obj.host = 'fake-host'
        service_obj.create(self.context)
        self.assertEqual(fake_service['id'], service_obj.id)
    def test_recreate_fails(self):
        # Calling create() twice on the same object must be rejected.
        self.mox.StubOutWithMock(db, 'service_create')
        db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
            fake_service)
        self.mox.ReplayAll()
        service_obj = service.Service()
        service_obj.host = 'fake-host'
        service_obj.create(self.context)
        self.assertRaises(exception.ObjectActionError, service_obj.create,
                          self.context)
    def test_save(self):
        self.mox.StubOutWithMock(db, 'service_update')
        db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn(
            fake_service)
        self.mox.ReplayAll()
        service_obj = service.Service()
        service_obj.id = 123
        service_obj.host = 'fake-host'
        service_obj.save(self.context)
    @mock.patch.object(db, 'service_create',
                       return_value=fake_service)
    def test_set_id_failure(self, db_mock):
        # 'id' is read-only once set by create().
        service_obj = service.Service()
        service_obj.create(self.context)
        self.assertRaises(exception.ReadOnlyFieldError, setattr,
                          service_obj, 'id', 124)
    def _test_destroy(self):
        self.mox.StubOutWithMock(db, 'service_destroy')
        db.service_destroy(self.context, 123)
        self.mox.ReplayAll()
        service_obj = service.Service()
        service_obj.id = 123
        service_obj.destroy(self.context)
    def test_destroy(self):
        # The test harness needs db.service_destroy to work,
        # so avoid leaving it broken here after we're done
        orig_service_destroy = db.service_destroy
        try:
            self._test_destroy()
        finally:
            db.service_destroy = orig_service_destroy
    def test_get_by_topic(self):
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn(
            [fake_service])
        self.mox.ReplayAll()
        services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
    def test_get_by_host(self):
        self.mox.StubOutWithMock(db, 'service_get_all_by_host')
        db.service_get_all_by_host(self.context, 'fake-host').AndReturn(
            [fake_service])
        self.mox.ReplayAll()
        services = service.ServiceList.get_by_host(self.context, 'fake-host')
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
    def test_get_all(self):
        self.mox.StubOutWithMock(db, 'service_get_all')
        db.service_get_all(self.context, disabled=False).AndReturn(
            [fake_service])
        self.mox.ReplayAll()
        services = service.ServiceList.get_all(self.context, disabled=False)
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
    def test_get_all_with_az(self):
        # set_zones=True should resolve availability zones via aggregates.
        self.mox.StubOutWithMock(db, 'service_get_all')
        self.mox.StubOutWithMock(aggregate.AggregateList,
                                 'get_by_metadata_key')
        db.service_get_all(self.context, disabled=None).AndReturn(
            [dict(fake_service, topic='compute')])
        agg = aggregate.Aggregate()
        agg.name = 'foo'
        agg.metadata = {'availability_zone': 'test-az'}
        agg.create(self.context)
        agg.hosts = [fake_service['host']]
        aggregate.AggregateList.get_by_metadata_key(self.context,
            'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
        self.mox.ReplayAll()
        services = service.ServiceList.get_all(self.context, set_zones=True)
        self.assertEqual(1, len(services))
        self.assertEqual('test-az', services[0].availability_zone)
    def test_compute_node(self):
        # Lazy-loading 'compute_node' should hit the db exactly once.
        self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
        db.compute_node_get_by_service_id(self.context, 123).AndReturn(
            test_compute_node.fake_compute_node)
        self.mox.ReplayAll()
        service_obj = service.Service()
        service_obj._context = self.context
        service_obj.id = 123
        self.compare_obj(service_obj.compute_node,
                         test_compute_node.fake_compute_node,
                         subs=self.subs(),
                         allow_missing=OPTIONAL,
                         comparators=self.comparators())
        # Make sure it doesn't re-fetch this
        service_obj.compute_node
    def test_load_when_orphaned(self):
        # Lazy-load without a context must fail loudly.
        service_obj = service.Service()
        service_obj.id = 123
        self.assertRaises(exception.OrphanedObjectError,
                          getattr, service_obj, 'compute_node')
class TestServiceObject(test_objects._LocalTest,
                        _TestServiceObject):
    # Runs the shared Service tests against in-process (local) objects.
    pass
class TestRemoteServiceObject(test_objects._RemoteTest,
                              _TestServiceObject):
    # Runs the same tests through the remote (RPC-indirected) object path.
    pass
| |
# Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from weakref import WeakSet
from shlex import quote
from time import monotonic
import os
import signal
import socket
import subprocess
import threading
import time
import logging
import select
import fcntl
from devlib.utils.misc import InitCheckpoint
# Grace period (seconds) between SIGTERM and SIGKILL when cancelling.
_KILL_TIMEOUT = 3

def _kill_pgid_cmd(pgid, sig, busybox):
    """Build a shell command sending signal *sig* to process group *pgid*."""
    return '{busybox} kill -{signum} -{pgid}'.format(
        busybox=busybox, signum=sig.value, pgid=pgid)
def _popen_communicate(bg, popen, input, timeout):
    """Drive popen.communicate(), cancelling *bg* on timeout and raising
    CalledProcessError on non-zero exit.  Returns (stdout, stderr).
    """
    try:
        stdout, stderr = popen.communicate(input=input, timeout=timeout)
    except subprocess.TimeoutExpired:
        # Tear the background command down before propagating the timeout.
        bg.cancel()
        raise
    ret = popen.returncode
    if not ret:
        return (stdout, stderr)
    raise subprocess.CalledProcessError(
        ret,
        popen.args,
        stdout,
        stderr,
    )
class ConnectionBase(InitCheckpoint):
    """
    Base class for all connections.
    """
    def __init__(self):
        # Weak references: finished background commands can be collected
        # without being explicitly deregistered.
        self._current_bg_cmds = WeakSet()
        self._closed = False
        self._close_lock = threading.Lock()
        # Path to the target's busybox binary; expected to be filled in
        # later (subclass/user) -- None until then.
        self.busybox = None
    def cancel_running_command(self):
        # Snapshot the WeakSet first: cancelling commands may mutate it
        # while we iterate.
        bg_cmds = set(self._current_bg_cmds)
        for bg_cmd in bg_cmds:
            bg_cmd.cancel()
    @abstractmethod
    def _close(self):
        """
        Close the connection.
        The public :meth:`close` method makes sure that :meth:`_close` will
        only be called once, and will serialize accesses to it if it happens to
        be called from multiple threads at once.
        """
    def close(self):
        # Locking the closing allows any thread to safely call close() as long
        # as the connection can be closed from a thread that is not the one it
        # started its life in.
        with self._close_lock:
            if not self._closed:
                self._close()
                self._closed = True
    # Ideally, that should not be relied upon but that will improve the chances
    # of the connection being properly cleaned up when it's not in use anymore.
    def __del__(self):
        # Since __del__ will be called if an exception is raised in __init__
        # (e.g. we cannot connect), we only run close() when we are sure
        # __init__ has completed successfully.
        if self.initialized:
            self.close()
class BackgroundCommand(ABC):
    """
    Allows managing a running background command using a subset of the
    :class:`subprocess.Popen` API.
    Instances of this class can be used as context managers, with the same
    semantic as :class:`subprocess.Popen`.
    """
    @abstractmethod
    def send_signal(self, sig):
        """
        Send a POSIX signal to the background command's process group ID
        (PGID).
        :param signal: Signal to send.
        :type signal: signal.Signals
        """
    def kill(self):
        """
        Send SIGKILL to the background command.
        """
        self.send_signal(signal.SIGKILL)
    def cancel(self, kill_timeout=_KILL_TIMEOUT):
        """
        Try to gracefully terminate the process by sending ``SIGTERM``, then
        waiting for ``kill_timeout`` to send ``SIGKILL``.
        """
        # Only attempt termination while the command is still running.
        if self.poll() is None:
            self._cancel(kill_timeout=kill_timeout)
    @abstractmethod
    def _cancel(self, kill_timeout):
        """
        Method to override in subclasses to implement :meth:`cancel`.
        """
        pass
    @abstractmethod
    def wait(self):
        """
        Block until the background command completes, and return its exit code.
        """
    def communicate(self, input=b'', timeout=None):
        """
        Block until the background command completes while reading stdout and stderr.
        Return ``tuple(stdout, stderr)``. If the return code is non-zero,
        raises a :exc:`subprocess.CalledProcessError` exception.
        """
        try:
            return self._communicate(input=input, timeout=timeout)
        finally:
            # Streams are closed on all paths so file descriptors don't leak.
            self.close()
    @abstractmethod
    def _communicate(self, input, timeout):
        # Backend-specific implementation of communicate().
        pass
    @abstractmethod
    def poll(self):
        """
        Return exit code if the command has exited, None otherwise.
        """
    @property
    @abstractmethod
    def stdin(self):
        """
        File-like object connected to the background's command stdin.
        """
    @property
    @abstractmethod
    def stdout(self):
        """
        File-like object connected to the background's command stdout.
        """
    @property
    @abstractmethod
    def stderr(self):
        """
        File-like object connected to the background's command stderr.
        """
    @property
    @abstractmethod
    def pid(self):
        """
        Process Group ID (PGID) of the background command.
        Since the command is usually wrapped in shell processes for IO
        redirections, sudo etc, the PID cannot be assumed to be the actual PID
        of the command passed by the user. It's is guaranteed to be a PGID
        instead, which means signals sent to it as such will target all
        subprocesses involved in executing that command.
        """
    @abstractmethod
    def close(self):
        """
        Close all opened streams and then wait for command completion.
        :returns: Exit code of the command.
        .. note:: If the command is writing to its stdout/stderr, it might be
            blocked on that and die when the streams are closed.
        """
    def __enter__(self):
        return self
    def __exit__(self, *args, **kwargs):
        self.close()
class PopenBackgroundCommand(BackgroundCommand):
    """
    :class:`subprocess.Popen`-based background command.
    """
    def __init__(self, popen):
        self.popen = popen
    def send_signal(self, sig):
        # Signals the process group keyed by popen.pid.  This assumes the
        # Popen was started as a group/session leader so its PID doubles as
        # the PGID -- TODO confirm at the call site that creates these.
        return os.killpg(self.popen.pid, sig)
    @property
    def stdin(self):
        return self.popen.stdin
    @property
    def stdout(self):
        return self.popen.stdout
    @property
    def stderr(self):
        return self.popen.stderr
    @property
    def pid(self):
        return self.popen.pid
    def wait(self):
        return self.popen.wait()
    def _communicate(self, input, timeout):
        # Delegate to the module-level helper shared with other backends.
        return _popen_communicate(self, self.popen, input, timeout)
    def poll(self):
        return self.popen.poll()
    def _cancel(self, kill_timeout):
        popen = self.popen
        # Graceful stop first...
        os.killpg(os.getpgid(popen.pid), signal.SIGTERM)
        try:
            popen.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            # ...then force-kill if it did not exit within kill_timeout.
            os.killpg(os.getpgid(popen.pid), signal.SIGKILL)
    def close(self):
        # Popen.__exit__ closes the pipes and waits for the process.
        self.popen.__exit__(None, None, None)
        return self.popen.returncode
    def __enter__(self):
        self.popen.__enter__()
        return self
    def __exit__(self, *args, **kwargs):
        self.popen.__exit__(*args, **kwargs)
class ParamikoBackgroundCommand(BackgroundCommand):
    """
    :mod:`paramiko`-based background command.

    Wraps a paramiko channel plus a redirection thread that copies the
    channel's output into local pipes (``stdout``/``stderr``).
    """
    def __init__(self, conn, chan, pid, as_root, cmd, stdin, stdout, stderr, redirect_thread):
        self.chan = chan
        self.as_root = as_root
        self.conn = conn
        self._pid = pid
        self._stdin = stdin
        self._stdout = stdout
        self._stderr = stderr
        self.redirect_thread = redirect_thread
        self.cmd = cmd

    def send_signal(self, sig):
        """Send *sig* to the remote command's process group, if still running."""
        # If the command has already completed, we don't want to send a signal
        # to another process that might have gotten that PID in the meantime.
        if self.poll() is not None:
            return
        # Use -PGID to target a process group rather than just the process
        # itself
        cmd = _kill_pgid_cmd(self.pid, sig, self.conn.busybox)
        self.conn.execute(cmd, as_root=self.as_root)

    @property
    def pid(self):
        return self._pid

    def wait(self):
        """Block until the remote command exits; return its exit status."""
        status = self.chan.recv_exit_status()
        # Ensure that the redirection thread is finished copying the content
        # from paramiko to the pipe.
        self.redirect_thread.join()
        return status

    def _communicate(self, input, timeout):
        """
        Feed *input* to the remote command's stdin while draining stdout and
        stderr, until the command exits or *timeout* (seconds) elapses.

        :returns: ``(stdout_bytes, stderr_bytes)`` on success.
        :raises subprocess.TimeoutExpired: if *timeout* is exceeded.
        :raises subprocess.CalledProcessError: if the command exits non-zero.
        """
        stdout = self._stdout
        stderr = self._stderr
        # NOTE(review): this local is unused below; kept for byte-compatibility.
        stdin = self._stdin
        chan = self.chan
        # For some reason, file descriptors in the read-list of select() can
        # still end up blocking in .read(), so make the non-blocking to avoid a
        # deadlock. Since _communicate() will consume all input and all output
        # until the command dies, we can do whatever we want with the pipe
        # without affecting external users.
        for s in (stdout, stderr):
            fcntl.fcntl(s.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        out = {stdout: [], stderr: []}
        ret = None
        can_send = True
        select_timeout = 1
        if timeout is not None:
            # NOTE(review): min(select_timeout, 1) is a no-op since
            # select_timeout is already 1 — presumably this was meant to be
            # min(select_timeout, timeout); confirm against upstream intent.
            select_timeout = min(select_timeout, 1)

        def create_out():
            # Snapshot the accumulated output as two bytes objects.
            return (
                b''.join(out[stdout]),
                b''.join(out[stderr])
            )

        start = monotonic()
        while ret is None:
            # Even if ret is not None anymore, we need to drain the streams
            ret = self.poll()
            if timeout is not None and ret is None and monotonic() - start >= timeout:
                self.cancel()
                _stdout, _stderr = create_out()
                raise subprocess.TimeoutExpired(self.cmd, timeout, _stdout, _stderr)
            # Stop trying to write once the channel closed or input is exhausted.
            can_send &= (not chan.closed) & bool(input)
            wlist = [chan] if can_send else []
            if can_send and chan.send_ready():
                try:
                    n = chan.send(input)
                # stdin might have been closed already
                except OSError:
                    can_send = False
                    chan.shutdown_write()
                else:
                    input = input[n:]
                    if not input:
                        # Send EOF on stdin
                        chan.shutdown_write()
            rs, ws, _ = select.select(
                [x for x in (stdout, stderr) if not x.closed],
                wlist,
                [],
                select_timeout,
            )
            for r in rs:
                chunk = r.read()
                if chunk:
                    out[r].append(chunk)
        _stdout, _stderr = create_out()
        if ret:
            raise subprocess.CalledProcessError(
                ret,
                self.cmd,
                _stdout,
                _stderr,
            )
        else:
            return (_stdout, _stderr)

    def poll(self):
        """Return the exit status if the command finished, else ``None``."""
        # Wait for the redirection thread to finish, otherwise we would
        # indicate the caller that the command is finished and that the streams
        # are safe to drain, but actually the redirection thread is not
        # finished yet, which would end up in lost data.
        if self.redirect_thread.is_alive():
            return None
        elif self.chan.exit_status_ready():
            return self.wait()
        else:
            return None

    def _cancel(self, kill_timeout):
        # SIGTERM first, then SIGKILL after the grace period if still alive.
        self.send_signal(signal.SIGTERM)
        # Check if the command terminated quickly
        time.sleep(10e-3)
        # Otherwise wait for the full timeout and kill it
        if self.poll() is None:
            time.sleep(kill_timeout)
            self.send_signal(signal.SIGKILL)
            self.wait()

    @property
    def stdin(self):
        return self._stdin

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    def close(self):
        """Close all streams, wait for completion and return the exit code."""
        for x in (self.stdin, self.stdout, self.stderr):
            if x is not None:
                x.close()
        exit_code = self.wait()
        thread = self.redirect_thread
        if thread:
            thread.join()
        return exit_code
class AdbBackgroundCommand(BackgroundCommand):
    """
    Background command running on an Android device via ``adb``.

    Delegates stream/process handling to the wrapped ``adb`` Popen while
    sending signals through the device connection (the local ``adb`` process
    PID is unrelated to the on-device PGID).
    """

    def __init__(self, conn, adb_popen, pid, as_root):
        self.conn = conn
        self.as_root = as_root
        self.adb_popen = adb_popen
        self._pid = pid

    @property
    def stdin(self):
        return self.adb_popen.stdin

    @property
    def stdout(self):
        return self.adb_popen.stdout

    @property
    def stderr(self):
        return self.adb_popen.stderr

    @property
    def pid(self):
        return self._pid

    def send_signal(self, sig):
        # Signals must be delivered on the device itself, not to the local
        # adb client process.
        self.conn.execute(
            _kill_pgid_cmd(self.pid, sig, self.conn.busybox),
            as_root=self.as_root,
        )

    def wait(self):
        return self.adb_popen.wait()

    def poll(self):
        return self.adb_popen.poll()

    def _communicate(self, input, timeout):
        return _popen_communicate(self, self.adb_popen, input, timeout)

    def _cancel(self, kill_timeout):
        # Graceful stop on-device, then hard kill both on-device and locally.
        self.send_signal(signal.SIGTERM)
        try:
            self.adb_popen.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            self.send_signal(signal.SIGKILL)
            self.adb_popen.kill()

    def close(self):
        """Close the adb Popen streams and return the command's exit code."""
        self.adb_popen.__exit__(None, None, None)
        return self.adb_popen.returncode

    def __enter__(self):
        self.adb_popen.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        self.adb_popen.__exit__(*args, **kwargs)
class TransferManagerBase(ABC):
    """
    Scaffolding for monitoring a file transfer from a background thread and
    cancelling it when it stalls or exceeds a total timeout.

    :param conn: Connection used to run commands / size checks on the target.
    :param poll_period: Seconds between activity polls.
    :param start_transfer_poll_delay: Grace period before the first poll.
    :param total_timeout: Hard cap, in seconds, on the whole transfer.
    """

    def __init__(self, conn, poll_period, start_transfer_poll_delay, total_timeout):
        self.conn = conn
        self.poll_period = poll_period
        self.total_timeout = total_timeout
        self.start_transfer_poll_delay = start_transfer_poll_delay
        self.logger = logging.getLogger('FileTransfer')
        self.managing = threading.Event()
        self.transfer_started = threading.Event()
        self.transfer_completed = threading.Event()
        self.transfer_aborted = threading.Event()
        self.monitor_thread = None
        self.sources = None
        self.dest = None
        self.direction = None

    def _pull_dest_size(self, dest):
        """
        Return the current size, in bytes, of the local pull destination.

        Returns 0 when the destination does not exist yet: the monitor thread
        may poll before the transfer has created it, and the original
        trailing ``return 0`` was unreachable after the if/else, so a missing
        file used to raise ``FileNotFoundError`` from ``os.stat``.
        """
        if os.path.isdir(dest):
            return sum(
                os.stat(os.path.join(dirpath, f)).st_size
                for dirpath, _, fnames in os.walk(dest)
                for f in fnames
            )
        elif os.path.exists(dest):
            return os.stat(dest).st_size
        return 0

    def _push_dest_size(self, dest):
        """
        Return the current size of the remote push destination, measured with
        busybox ``du -s`` on the target; 0 if the output cannot be parsed
        (e.g. the destination does not exist yet and the output is empty).
        """
        cmd = '{} du -s {}'.format(quote(self.conn.busybox), quote(dest))
        out = self.conn.execute(cmd)
        try:
            return int(out.split()[0])
        except (ValueError, IndexError):
            return 0

    @abstractmethod
    def _cancel(self):
        """Backend-specific transfer abort."""

    def cancel(self, reason=None):
        """Abort the transfer, logging *reason* if given."""
        msg = 'Cancelling file transfer {} -> {}'.format(self.sources, self.dest)
        if reason is not None:
            msg += ' due to \'{}\''.format(reason)
        self.logger.warning(msg)
        self.transfer_aborted.set()
        self._cancel()

    @abstractmethod
    def isactive(self):
        """Return True if the transfer made progress since the last poll."""

    @contextmanager
    def manage(self, sources, dest, direction):
        """
        Context manager wrapping a transfer of *sources* to *dest*: starts the
        monitor thread on entry, cancels the transfer if the body raises, and
        always joins the monitor and resets the event flags on exit.
        """
        try:
            self.sources, self.dest, self.direction = sources, dest, direction
            m_thread = threading.Thread(target=self._monitor)
            self.transfer_completed.clear()
            self.transfer_aborted.clear()
            self.transfer_started.set()
            m_thread.start()
            yield self
        except BaseException:
            self.cancel(reason='exception during transfer')
            raise
        finally:
            # Unblock and reap the monitor thread regardless of outcome.
            self.transfer_completed.set()
            self.transfer_started.set()
            m_thread.join()
            self.transfer_started.clear()
            self.transfer_completed.clear()
            self.transfer_aborted.clear()

    def _monitor(self):
        """Poll the transfer periodically; cancel on inactivity or timeout."""
        start_t = monotonic()
        # Give the transfer a head start before the first activity check.
        self.transfer_completed.wait(self.start_transfer_poll_delay)
        while not self.transfer_completed.wait(self.poll_period):
            if not self.isactive():
                self.cancel(reason='transfer inactive')
            elif monotonic() - start_t > self.total_timeout:
                self.cancel(reason='transfer timed out')
class PopenTransferManager(TransferManagerBase):
    """
    Transfer manager for transfers driven by a local background command,
    detecting progress by watching the destination grow between polls.
    """

    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transfer = None
        self.last_sample = None

    def _cancel(self):
        if self.transfer:
            self.transfer.cancel()
            self.transfer = None
            self.last_sample = None

    def isactive(self):
        """Return True if the destination grew since the previous poll."""
        if self.direction == 'push':
            measure = self._push_dest_size
        else:
            measure = self._pull_dest_size
        size_now = measure(self.dest)
        self.logger.debug('Polled file transfer, destination size {}'.format(size_now))
        # The very first sample counts as progress by definition.
        if self.last_sample is None:
            progressing = True
        else:
            progressing = size_now > self.last_sample
        self.last_sample = size_now
        return progressing

    def set_transfer_and_wait(self, popen_bg_cmd):
        """
        Register *popen_bg_cmd* as the active transfer and block until it
        finishes, raising on abort or non-zero exit.
        """
        self.transfer = popen_bg_cmd
        self.last_sample = None
        exit_code = popen_bg_cmd.wait()
        if self.transfer_aborted.is_set():
            raise TimeoutError(self.transfer.popen.args)
        elif exit_code:
            raise subprocess.CalledProcessError(exit_code, self.transfer.popen.args)
class SSHTransferManager(TransferManagerBase):
    """
    Transfer manager for paramiko SFTP/SCP transfers, detecting progress via
    the client's progress callback rather than by sampling the destination.
    """

    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transferer = None
        self.progressed = False
        self.transferred = None
        self.to_transfer = None

    def _cancel(self):
        self.transferer.close()

    def isactive(self):
        """Return True if progress_cb fired since the previous poll."""
        was_progressing, self.progressed = self.progressed, False
        percent = format((self.transferred / self.to_transfer) * 100, '.2f')
        self.logger.debug(
            'Polled transfer: {}% [{}B/{}B]'.format(
                percent, self.transferred, self.to_transfer))
        return was_progressing

    @contextmanager
    def manage(self, sources, dest, direction, transferer):
        """Wrap a transfer performed by *transferer* (SFTPClient or SCPClient)."""
        with super().manage(sources, dest, direction):
            try:
                self.progressed = False
                self.transferer = transferer
                yield self
            except socket.error as e:
                # A socket error after we aborted means the cancellation did
                # its job: surface it as a timeout instead.
                if not self.transfer_aborted.is_set():
                    raise e
                self.transfer_aborted.clear()
                method = 'SCP' if self.conn.use_scp else 'SFTP'
                raise TimeoutError('{} {}: {} -> {}'.format(method, self.direction, sources, self.dest))

    def progress_cb(self, *args):
        """Progress callback for both SCPClient and SFTPClient."""
        # Only count progress once the transfer has actually started.
        if self.transfer_started.is_set():
            self.progressed = True
        if len(args) == 3:
            # SCPClient signature: (filename, size, sent)
            self.transferred = args[2]
            self.to_transfer = args[1]
        elif len(args) == 2:
            # SFTPClient signature: (transferred, to_be_transferred)
            self.transferred = args[0]
            self.to_transfer = args[1]
| |
LICENSE = """\
Copyright (c) 2014 Ian Good <ian.good@rackspace.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" # NOQA
import os
import os.path
import sys
import socket
import subprocess
import argparse
import pkg_resources
import signal
import pygtk
pygtk.require('2.0') # NOQA
import gtk
import gobject
import pynotify
__version__ = pkg_resources.require('irssi-icon')[0].version
class State(object):
    """
    Top-level coordinator wiring together the status icon, the plugin host
    (local or SSH-forwarded remote) and the irssi message listener.
    """

    def __init__(self, args):
        self.icon = Icon(self, args)
        self.host = (RemoteHost(self.icon, args.ssh, args.ssh_key)
                     if args.ssh else LocalHost())
        self.irssi = Irssi(self, args)

    def main(self):
        """Start all components and enter the GTK main loop."""
        self.icon.start()
        self.host.start()
        self.irssi.start()
        gtk.main()

    def close(self):
        self.host.close()

    def icon_clicked(self, action=True):
        """Clear the alert icon; run the click action unless suppressed."""
        self.icon.clear_alert_icon()
        if action:
            self.irssi.click_action()

    def new_irssi_message(self, info, data, whisper=False):
        """React to a plugin event: update the icon, notify on whispers."""
        self.icon.set_alert(info, whisper)
        if whisper:
            self.icon.notify(info, data)
class Irssi(object):
    """
    Listens on a local TCP socket (port 21693) for events pushed by the
    irssi plugin, and dispatches them to the State coordinator.
    """

    def __init__(self, state, args):
        self.state = state
        self.onclick = args.onclick

    def start(self):
        self._connect_local_socket()

    def send_clear_message(self):
        """Tell an already-running daemon to clear its alert state."""
        conn = socket.create_connection(('localhost', 21693))
        try:
            conn.send('{0}:CLEAR> '.format(__version__))
        finally:
            conn.close()

    def _get_request(self, client):
        """Read and parse one plugin request; returns (command, info, data)."""
        raw = client.recv(4096)
        client.close()
        data = ''
        if '\r\n' in raw:
            raw, data = raw.split('\r\n', 1)
        command, info = raw.split('> ', 1)
        version, command = command.split(':', 1)
        assert version == __version__, 'Plugin version mismatch.'
        return command, info, data

    def _msg_client_data(self, client, cond):
        """Handle one connected plugin client; always removes the watch."""
        command, info, data = self._get_request(client)
        if command == 'NEWMSG':
            data = 'New message in {0}'.format(data)
            self.state.new_irssi_message(info, data)
        elif command == 'NEWWHISPER':
            self.state.new_irssi_message(info, data, whisper=True)
        elif command == 'CLEAR':
            self.state.icon_clicked(False)
        # The client socket is closed after one request, so drop the watch.
        return False

    def _msg_sock_connection(self, sock, cond):
        """Accept a new plugin connection and watch it for data."""
        client, _addr = sock.accept()
        gobject.io_add_watch(client, gobject.IO_IN, self._msg_client_data)
        # Keep watching the listening socket for further connections.
        return True

    def _connect_local_socket(self):
        """Bind the loopback listener the irssi plugin connects to."""
        listener = socket.socket(socket.AF_INET)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(('127.0.0.1', 21693))
        listener.listen(5)
        self._msg_sock = listener
        gobject.io_add_watch(listener, gobject.IO_IN,
                             self._msg_sock_connection)

    def click_action(self):
        """Run the user-configured --on-click shell command, if any."""
        if not self.onclick:
            return
        proc = subprocess.Popen(self.onclick, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, shell=True)
        proc.communicate()
class Icon(object):
    """
    Manages the GTK status icon: pixbufs, tooltips, alert states, the
    right-click menu and libnotify popup notifications.
    """
    def __init__(self, state, args):
        self.state = state
        # Created lazily in start(); gtk may not be initialized yet.
        self.icon = None
        self.show_notifications = not args.no_notify
        # While True, a whisper alert is showing and plain-message alerts
        # must not overwrite the more important icon (see set_alert()).
        self._whisper_alert = False
        self._load_icons()

    def start(self):
        """Initialize libnotify and create the status icon."""
        pynotify.init('irssi-icon')
        self._create_icon()

    def _load_icons(self):
        """Load the three state pixbufs bundled with the package."""
        def load(name):
            from pkg_resources import resource_filename
            resource_name = 'icons/{0}.png'.format(name)
            fn = resource_filename(__name__, resource_name)
            return gtk.gdk.pixbuf_new_from_file(fn)
        self._icon_pixbuf = load('main')
        self._important_icon_pixbuf = load('important')
        self._notify_icon_pixbuf = load('notify')

    def _create_icon(self):
        """Create the status icon and hook up click handlers."""
        self.icon = gtk.StatusIcon()
        self.icon.connect('popup-menu', self._right_click)
        self.icon.connect('activate', self._left_click)
        self.clear_alert_icon()

    def alert(self, msg):
        """Show a modal warning dialog with *msg*."""
        flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
        box = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, flags=flags,
                                type=gtk.MESSAGE_WARNING,
                                message_format=msg)
        box.run()
        box.destroy()

    def clear_alert_icon(self):
        """Reset the icon and tooltip to the idle state."""
        self._whisper_alert = False
        self.icon.set_from_pixbuf(self._icon_pixbuf)
        self.icon.set_tooltip('Irssi Icon')

    def set_alert(self, info, whisper):
        """
        Switch to the alert icon for a new message. Whisper alerts take
        priority and are not downgraded by subsequent channel messages.
        """
        if whisper:
            self._whisper_alert = True
            self.icon.set_from_pixbuf(self._important_icon_pixbuf)
            self.icon.set_tooltip('Irssi Icon\nWhisper from ' + info)
        elif not self._whisper_alert:
            self.icon.set_from_pixbuf(self._notify_icon_pixbuf)
            self.icon.set_tooltip('Irssi Icon\nNew messages in ' + info)

    def _hide_notification(self, notification):
        # Timeout callback: close the popup and cancel the timer.
        notification.close()
        return False

    def notify(self, info, data):
        """Show a libnotify popup for 10 seconds, if notifications are on."""
        if self.show_notifications:
            notification = pynotify.Notification(info, data, None)
            notification.show()
            gobject.timeout_add(10000, self._hide_notification, notification)

    def _right_click(self, icon, button, timestamp):
        """Build and pop up the About/Quit context menu."""
        menu = gtk.Menu()
        about = gtk.ImageMenuItem('About')
        quit = gtk.ImageMenuItem('Quit')
        img = gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
        img.show()
        about.set_image(img)
        img = gtk.image_new_from_stock(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
        img.show()
        quit.set_image(img)
        about.connect('activate', self._show_about_dialog)
        quit.connect('activate', gtk.main_quit)
        menu.append(about)
        menu.append(gtk.SeparatorMenuItem())
        menu.append(quit)
        menu.show_all()
        menu.popup(None, None, gtk.status_icon_position_menu,
                   button, timestamp, icon)

    def _left_click(self, icon):
        # Delegate to the coordinator so the click action can also run.
        self.state.icon_clicked()

    def _show_about_dialog(self, widget):
        """Show the standard GTK about dialog."""
        about = gtk.AboutDialog()
        about.set_destroy_with_parent(True)
        about.set_name('Irssi Icon')
        about.set_version(__version__)
        about.set_authors(['Ian Good <ian.good@rackspace.com>'])
        about.set_license(LICENSE)
        comments = 'Displays an icon to give notifications from irssi.'
        about.set_comments(comments)
        about.run()
        about.destroy()
class BaseHost(object):
    """
    Shared plugin-installation logic for local and remote (SSH) hosts.

    Subclasses implement start(); close() is a no-op by default.
    """

    def start(self):
        raise NotImplementedError()

    def close(self):
        pass

    def _load_plugin_contents(self):
        """Read the bundled irssi plugin, substituting in our version string."""
        from pkg_resources import Requirement, resource_stream
        plugin_name = 'irssi-icon-notify.pl'
        res_name = os.path.join('irssiicon', plugin_name)
        from_fp = resource_stream(Requirement.parse('irssi-icon'), res_name)
        try:
            contents = from_fp.read()
        finally:
            from_fp.close()
        return contents.replace('<<irssi-icon version>>', __version__)

    def _get_plugin_path(self, home_dir=None):
        """
        Return (scripts_dir, autorun_dir, plugin_name); paths are relative
        unless *home_dir* is given.
        """
        scripts_dir = os.path.join('.irssi', 'scripts')
        if home_dir:
            scripts_dir = os.path.join(home_dir, scripts_dir)
        autorun_dir = os.path.join(scripts_dir, 'autorun')
        return scripts_dir, autorun_dir, 'irssi-icon-notify.pl'
class LocalHost(BaseHost):
    """Installs the irssi notification plugin into the local ~/.irssi tree."""

    def start(self):
        """
        Write the plugin into ~/.irssi/scripts and (re-)create the autorun
        symlink pointing at it.

        Uses ``except OSError as e`` with ``e.errno`` checks instead of the
        Python-2-only ``except OSError, (err, msg)`` tuple-unpacking syntax,
        which keeps the code valid on Python 2.6+ and Python 3.
        """
        home_dir = os.path.expanduser('~')
        scripts_dir, autorun_dir, plugin_name = self._get_plugin_path(home_dir)
        plugin_path = os.path.join(scripts_dir, plugin_name)
        autorun_path = os.path.join(autorun_dir, plugin_name)
        plugin_contents = self._load_plugin_contents()
        try:
            os.makedirs(autorun_dir)
        except OSError as e:
            # EEXIST (17): the directory tree is already there, which is fine.
            if e.errno != 17:
                raise
        with open(plugin_path, 'w') as fp:
            fp.write(plugin_contents)
        try:
            os.unlink(autorun_path)
        except OSError as e:
            # ENOENT (2): no stale symlink to remove.
            if e.errno != 2:
                raise
        os.symlink(plugin_path, autorun_path)
class RemoteHost(BaseHost):
    """
    Installs the plugin on a remote machine over SSH and keeps a reverse
    port-forward (remote 21693 -> local 21693) alive so the remote irssi
    plugin can reach this local daemon.
    """
    def __init__(self, state, target, keyfile):
        super(RemoteHost, self).__init__()
        # NOTE(review): State constructs this as RemoteHost(self.icon, ...),
        # so 'state' here is actually the Icon instance — confirm intent.
        self.state = state
        self.icon = state.icon
        self.target = target    # [user@]host[:port] passed to ssh
        self.keyfile = keyfile  # optional identity file for 'ssh -i'
        self.ssh_pid = None     # PID of the forwarding ssh process, if any
        self.done = False       # set by close() to stop reconnect attempts

    def _restart_forwarding(self, pid, condition):
        # Child-watch callback: the ssh tunnel process exited. Unless we are
        # shutting down, schedule a reconnect attempt in 5 seconds.
        self.ssh_pid = None
        if not self.done:
            gobject.timeout_add(5000, self._start_forwarding)

    def _start_forwarding(self):
        """Spawn 'ssh -N -R 21693:localhost:21693' to hold the reverse tunnel."""
        args = ['ssh', self.target, '-o', 'PasswordAuthentication no',
                '-N', '-R', '21693:localhost:21693']
        if self.keyfile:
            # Insert '-i <keyfile>' right after the target argument.
            args[2:2] = ['-i', self.keyfile]
        # DO_NOT_REAP_CHILD keeps the PID valid so child_watch_add can fire.
        flags = gobject.SPAWN_SEARCH_PATH | gobject.SPAWN_DO_NOT_REAP_CHILD
        self.ssh_pid, stdin_fd, stdout_fd, stderr_fd = \
            gobject.spawn_async(args, flags=flags)
        gobject.child_watch_add(self.ssh_pid, self._restart_forwarding)

    def _install_plugin(self):
        """Copy the plugin to the remote ~/.irssi tree and symlink autorun."""
        plugin_contents = self._load_plugin_contents()
        scripts_dir, autorun_dir, plugin_name = self._get_plugin_path()
        plugin_path = os.path.join('~', scripts_dir, plugin_name)
        autorun_path = os.path.join('~', autorun_dir, plugin_name)
        # Stream the plugin over ssh stdin into 'cat', then link it into
        # the autorun directory in the same remote shell invocation.
        args = ['ssh', self.target, '-o', 'PasswordAuthentication no',
                'cat > {0}; ln -sf {1} {2}'.format(plugin_path, plugin_path,
                                                   autorun_path)]
        if self.keyfile:
            args[2:2] = ['-i', self.keyfile]
        flags = gobject.SPAWN_SEARCH_PATH
        pid, stdin_fd, stdout_fd, stderr_fd = \
            gobject.spawn_async(args, flags=flags, standard_input=True)
        stdin = os.fdopen(stdin_fd, 'w')
        try:
            stdin.write(plugin_contents)
        finally:
            stdin.close()

    def start(self):
        self._install_plugin()
        self._start_forwarding()

    def close(self):
        """Stop reconnecting and terminate the forwarding ssh process."""
        self.done = True
        if self.ssh_pid:
            os.kill(self.ssh_pid, signal.SIGTERM)
def _parse_args():
    """Parse and return the command-line arguments for the irssi-icon daemon."""
    desc = 'Adds a GTK status-bar icon allowing one-click control of irssi.'
    # argparse interpolates '%(prog)s' in the version string; the previous
    # optparse-style '%prog' was printed literally by --version.
    version = '%(prog)s {0}'.format(__version__)
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-v', '--version', action='version', version=version)
    parser.add_argument('-f', '--foreground', action='store_true',
                        dest='foreground', help='Do not run as a daemon.')
    parser.add_argument('--no-notify', action='store_true',
                        help='Disable libnotify notifications.')
    parser.add_argument('--on-click', dest='onclick', metavar='CMD',
                        help='Execute CMD when the icon is clicked.')
    parser.add_argument('--clear', action='store_true', dest='clear',
                        help='Signal a clear event to a running daemon.')
    parser.add_argument('--ssh', metavar='TARGET', default=None,
                        help='Forward the listening port to TARGET, which '
                        'is of the form [user@]host[:port]')
    parser.add_argument('--ssh-key', metavar='FILE', default=None,
                        help='If given, FILE is used as an SSH key. If a '
                        'key cannot be found, the password must be entered '
                        'in a dialog box.')
    return parser.parse_args()
def _daemonize():
    """Daemonize the current process."""
    # Classic double-fork: the first fork lets the parent return to the shell.
    # Fork once.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        # Could not fork; carry on in the foreground rather than aborting.
        return
    # Set some options to detach from the terminal.
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # Fork again.
    # The second fork prevents the session leader from ever re-acquiring a
    # controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Find the OS /dev/null equivalent.
    nullfile = getattr(os, 'devnull', '/dev/null')
    logfile = '/tmp/irssi-icon.log'
    # Redirect all standard I/O to /dev/null.
    # (stdout/stderr actually go to the log file; only stdin uses /dev/null.)
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(nullfile, 'r')
    so = open(logfile, 'a+')
    # NOTE(review): buffering=0 with a text mode is Python-2-only; Python 3
    # requires binary mode for unbuffered opens — fine for this py2 script.
    se = open(logfile, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def main():
    """Entry point: handle --clear, optionally daemonize, then run the GUI."""
    args = _parse_args()
    if args.clear:
        # Just poke an already-running daemon and exit immediately.
        Irssi(None, args).send_clear_message()
        sys.exit(0)
    if not args.foreground:
        _daemonize()
    app = State(args)
    try:
        app.main()
    except KeyboardInterrupt:
        pass
    app.close()
# vim:et:sts=4:sw=4:ts=4
| |
#!/usr/bin/env python
import sys
import os
import wx
import random
import math
import images
import wx.lib.mixins.listctrl as listmix
from wx.lib.embeddedimage import PyEmbeddedImage
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import ultimatelistctrl as ULC
except ImportError: # if it's not there locally, try the wxPython lib.
from wx.lib.agw import ultimatelistctrl as ULC
#---------------------------------------------------------------------------
catalog = {}
index = []
folder = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABGdBTUEAANkE3LLaAgAABI5J"
b"REFUeJztls+LHEUUxz+vqntmYgL+RCFoEPyFR6/r1Vs8ehRviwTBq4f8B+rBePCg+QM0gig5"
b"qFFERFdFQSNiiCRqyK5r1nVNsjuZ6ep673nontmJ6Uk25iLig0dR1d31/bxvdVU3/EciALv/"
b"yYPSNXjw4MHF/fv3v3pDSDOxsLDQqdMZbz5323vublVV+Xg89pSS55z9RmJpacn7/f69XXrF"
b"FQOR+1NKsry8jLsTQiCEQIzxigwxEttrYaYV6Sz4Cq3OQTeX7996mrKMqBnuDg7mBg7guF/+"
b"jAiEIDz4+Cv0ej2KopgHcW0ANWffY4e44/Y9WK6ZvibT+bYn9pl+nTO/fHmYvY88xWAwoCzL"
b"HUF02uKAjzfZ2tgAd1wVN8VdcbWmNWfqhgi4M/r1BKOHK4qimC7JtSA6AQRwzbgmMMfdGogJ"
b"gCluTWuquDuj9d8Y/r5CykrOeSoeY7x+AABM8VQ1jljjAGbQthMITMEh0PRTXaOqmBmq+g8B"
b"TBmdOwNEXHMrbFMQNwM3UJ0CoAncUdUpwKz9VVV1SoXOUTfWv3qXvLmBWG4m1wo0IZoQrSBX"
b"0zG0ImgCHFObVm+TXXSV6AQQN9Jvpzh//MOmiroRFB3PgFQNTK6QnBBLiBtqDcAsBECv1+t8"
b"G7sdsExZCsPvjlGtnCBE2qpTI5arFmA7g7UOzAAAxBhZXV09lVIaXR9AcEpPXPz8DfTCOQJO"
b"mAhaRcip6VtTfbCEW7P+ua3czDh8+PDygQMHXgT+2DmAZwpqBrt6hOEawy9ex4frBHFExwRN"
b"iDVLEDQR8pgoxu7BXVwaDkkpUZYlR48eXVlcXHx2bW3tCLBTBxyxTEGmF2kgNleovj6Cnz9L"
b"jEJoKw5WNaljAs5d9zwAP7zA5vmLpJQ4ffr0D8AHwJ+dhc5zQDxTkumX0kAMehSjdfT42/jy"
b"N0RPRIxozTIESwSUfp15aN+jDD9/hosXLmJmNTCeJw7zTkJTSqnxguYFFKAowWv850/x9VOE"
b"Ox+EPXcgIjgZ8Qyu+OpP3Kp7GW4NyTkHIAJ5xwC7b7k7Bq8pJOOlNHdImwTwAONzcGYNBrcS"
b"9tyJD26B2AcCEgW9NGY0HKJmMs/luQBBfJDWz3BTkWHwdwDaL1UBBtR/wsYGYhEJfQh9XARJ"
b"GbXmVGTOX9dcgNGFFRVqKHMjFtsaLgOgASgjKFBDyGMkjcEyQXehZqhd/RTsBGiQK7yoIUgD"
b"MGvkBEBbCG0h64AEiDkilTdfSdMbAChzozxxoAtgkmE7RUDaE5FrrP8cAIdg0PNtByZLIGzb"
b"P6k+zwAISBBCKeScOXt2ecjkx+m6HIjgPSAIUgoSZRtiFiC3WQARDENdqerMyy8d+vH9j44d"
b"ae+8LgDbXP2WrbXjF+pYIP1eiP1BDGURRKKIBMEEUQFFrDK3UbY8rDRtVRqHl/LyuX7xyccn"
b"D9V1/VHr084BPjtZLY7rd57fVdx8XyyMEJ1YjoIEIwR3EXd3TFoXXHEy1u4GCfTK/a9tPQEc"
b"u5rwJObt0b1LS0tPdl1YWFj4YAfzfrMT8f/jXxF/AROfMsX0/mQPAAAAAElFTkSuQmCC")
index.append('folder')
catalog['folder'] = folder
#----------------------------------------------------------------------
movie = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1B"
b"AACxjwv8YQUAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAA"
b"ABh0RVh0U29mdHdhcmUAUGFpbnQuTkVUIHYzLjM2qefiJQAACIZJREFUWEetV2lMXNcVxkkt"
b"V/HSJk6dxO6iFrWhqiJVrVSpUvujf6K2SX90caVaUZpfURupUqv+qJQ2ctrihTiJMeDBxoSt"
b"gAGz7zsMszIzDDDDMAuzD8zA8GZlGTAwX8+5eCxFnaGW2jecucy775773fOd7R3Le3TZF+0F"
b"zz73eevpM2fy9g/28vJwLC+P/v7jAk3RR1yZAchLp9N5u7u7+/GtZIV/7/jlV7/5VSnL6ty3"
b"tFptgdfrRcDnh81mh81updEKq/VQFhcXhVgsFiELCwtCzGYz5ufnodfrYaHfPr83sLBoe7fR"
b"KT2bbbdjM3yyLNeUQlXgcrnhdrtI6QIcjiUSB+x2+2Ox2WwEyvYYVAaYyWSCTjeNyQk5lhz2"
b"tN/vjxoXzP/ssPkuXHZuPN7wKT2Ok5zICkClUhV4PB5MyRXo6+uFy+WC0+kUsrS09FiygWLL"
b"GGYMGBkZoXEGkUgUHq9nzWhe/KDT5v1iZsPPGPAUydNZAeg02gKf14NEPI5gMMgKwIDcbrbK"
b"oWUYVDZgbCWmYWxsDEqlAhsbG1hfD6fdbk/YaLJcrXKtZ6XjU0DUarWwAJ+YuSUzEp8++HwB"
b"+H1eeFk8vkegCJyLrEOUMSC2kHXRiinFFJQqJXZ2U9hIbkCSorTGm5gxm651Wr0XjnRKjUYj"
b"nJBGjI6OYnl5GcuBAAI0BgJ++APLBIoBMRg/vF4f3GQlL4F2kXWYGrVGDe20Dnv7e0ildghE"
b"kkBE0m6nK2A0Lfy1xBU9nRNEBsD29jZisZigYYUkuEIjCQMik0KlM6OhS4GypgkU35+ArHkc"
b"HUMa6GYOI6GtvRW3b8tw5eoV3LxZjPb2dpCTpt0uV9Bstf1hxO47mxUEA2AK2Mvn5uawuroq"
b"JBQKIRQMYc5kRVG9BpfKLXi9Poafdu3gJ70P8bOuFH5Rt4q3y42QNY5iaHgIzc0tuFdRiTt3"
b"7uBeZSUaGuoxODREIJyBeaPxYk4ATAGfYmpqCuFwGOH1dRrXiWM33ipW4WsfruL58iTO3d/B"
b"+bYdvERyviOFF2pT+N4nEr5T5ERhRS9URKNGrYRiSoHx8TG0tbXhX/X1TFWaou2PWQFknHBv"
b"bw9bW1sUShFIkTDxv4I/l0/h6cIoTsliyG8I4Y36acg6RlDfN4LCxgFcvG+Bb2MPdw1b+NJ7"
b"NtxrV1MUeeH2eMmpXSDzC0pZ5FOTfzoSACcVg8GAaDRKEsGE2oSX/+HEMyVRnLsTxNUuLRxO"
b"L4WZJKwzorLiYvUSCmd2UTa3jePX1/DjawpyUD9CqyGxqWnBjJHhYWFVpUqRHUDGCTnNGo1G"
b"AWBtTcLlBh1OFYXwudII3qqdQYAcMkZzUkTCgMaJS3VefL06jNcrNHj5YwtOlsZw6m9WjKuN"
b"kAjkOtHIeUKhUAidaq02NwB2wocPHwoK+OFAIIhLsll89sYaLtwKonp0gTaPIR6LQ25045e1"
b"PuTXELAGI/QmC35XqcXJsgSOX5NQ0yVHhCzIkognsJ3i6IpDpdYcTQGbn/xBAPARgJ/fNuHk"
b"TQn5twLo0doRT8RpLob3W+fwrQdxXKzTY4Eo4Qz692Y9nilex4mPEyh/ICegCcToPqfq/oF+"
b"RClFk+7cADgKOO1yKMZp4TKF36WKeZwsJv4/9KNimADQaRKUYPTziyjvU8Dh9iGR2CRKovjt"
b"XQNREMaJjyJoGlDTcwkk6dkAJbQ5StWcX4jqowFQTQcno0QiwVkM7zXN4LlSCWdvEh13zVgN"
b"SyLNcpZjIDwmN5LQzHvwykd28pV1PH/VAc2sWdznusCUplIpAeBIC7APsPknJyfFwk1SoDAs"
b"4tsyH168m8D3KyWU9NjhXg6TaQ9BRKNx6KwreLvOgfzbIZyThfGbMgU5XwSbm5tCmIKOjo5D"
b"J8xFQSYKOGx8Pp9AvbnJBSWCwmYlzlTE8Y3aJH7dFEVhdwCNEx50KL24N+rFO80B/KB2DefL"
b"QvhyuYQ3PrHDbHGJzVkPhx8Xuf9KAVuAFzFvTMP21g7JNpZXVvFmlRrn70v4SmMK321I4tWm"
b"OF57kMAPG2MoqIviQl0SL1Zv4mx5nH6n8EGzTlDEelgnU/pEFCiVSkxMTBwCYCHutiiElpeD"
b"uNWjwo/anMhvD+KF1i18oTWFlx4k8UqbD7+qN+K1u2qca9nC6ZY0fl+lF5sy90xBa2vrk1HA"
b"PHHy4IUp2pjL6jZJ6lGVNFiWUDWux/vd0/hLpw5XeqbRrpqDg9Lu7KID77Zp8U7LNBqHdORH"
b"m0IPRxQXtSeiQJIkyoBr2NnZEcJRkfmflfH/PDK3QghY5j7PCd8hYetl1vHGXM6fiAKOArlc"
b"Ljb+fwnnlUwU5MwDmSjgE3AIZlDzKdgq3BswIJEhKUq4WrJzZe7ziXmOPZ7XZ6KJrclz7NhM"
b"xZGJiKOAsxZv0N/fj+vXr4vwaWpqQlVVlZirra0VXU5NTQ1mZ2dF08E+wwWMrVddXS3eG4qL"
b"i9HY2Aiq/+IAbIUjKchYgF5QqLNVCgBFRUXo7e1FJXU1DIDvdXZ2Cm67u7vR09ODemo0dDqd"
b"aMk5jWcAlJSUoKuri9q5FdG08vNsodwWoLacm4iDgwPs7x9gYGBAbHzjxg20tLQIxX19fUIR"
b"V0wGNjQ0JDaXyWQCAFuCn+OwKy0tFc0tn35/fx8HpFNQkKscU5kkAB7qbum1zObAwOCA2KCh"
b"oUH0+6yYW3WmgDdnCphf9gW2FHfT7D98nwGwBRg49wHc1Bqo1WML5CzHoiklAHPzc8TnrKj7"
b"oh48yufsVHwS3pDfBXjk3yw8x89yO8dOyU532EW7D98vqIUfHx+n3uAICrT8XkDJBEgjnaZv"
b"+vpfhZQd6qIPf3Mjo8nVkBj0MwV6nR6T8kkMDw6Sww0Izo8Sdspc85k51jNI+kbHRqGbniYq"
b"Zj7VD/wb8xLWxx63hcwAAAAASUVORK5CYII=")
index.append('movie')
catalog['movie'] = movie
PIPE_HEIGHT = 18
PIPE_WIDTH = 2000
class MacRenderer(object):
    """
    Custom renderer for UltimateListCtrl sub-items that draws a
    torrent-client-style row: title, status text and a 3D progress bar.
    """

    # Cached pipe bitmaps shared by ALL renderer instances; built lazily on
    # the first DrawProgressBar() call (see below).
    DONE_BITMAP = None
    REMAINING_BITMAP = None

    def __init__(self, parent):
        # Kept for demo purposes; the rows drawn below use hard-coded values.
        self.progressValue = random.randint(1, 99)

    def DrawSubItem(self, dc, rect, line, highlighted, enabled):
        """Draw a custom progress bar using double buffering to prevent flicker"""
        canvas = wx.Bitmap(rect.width, rect.height)
        mdc = wx.MemoryDC()
        mdc.SelectObject(canvas)
        if highlighted:
            mdc.SetBackground(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)))
            mdc.SetTextForeground(wx.WHITE)
        else:
            mdc.SetBackground(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)))
        mdc.Clear()
        mdc.SetFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
        if line == 0:
            text1 = "Apple Ads"
            text2 = "2.67 MB of 11.9 MB selected (22.53%) - 5 min 13 sec remaining"
            text3 = "Downloading from 1 of 1 peer - DL: 30.0 KB/s, UL: 0.0 KB/s"
            progress = 22.53
        else:
            text1 = "Apple TV Intro (HD).mov"
            text2 = "13.4 MB, uploaded 8.65 MB (Ratio: 0.64) - 1 hr 23 min remaining"
            text3 = "Seeding to 1 of 1 peer - UL: 12.0 KB/s"
            progress = 18.0
        ypos = 5
        xtext, ytext = mdc.GetTextExtent(text1)
        mdc.DrawText(text1, 0, ypos)
        ypos += ytext + 5
        mdc.SetFont(wx.Font(7, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False))
        xtext, ytext = mdc.GetTextExtent(text2)
        mdc.DrawText(text2, 0, ypos)
        ypos += ytext + 5
        self.DrawProgressBar(mdc, 0, ypos, rect.width, 20, progress)
        mdc.SetFont(wx.Font(7, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False))
        ypos += 25
        mdc.DrawText(text3, 0, ypos)
        # Blit back with a 3px margin either side.
        dc.Blit(rect.x+3, rect.y, rect.width-6, rect.height, mdc, 0, 0)

    def GetLineHeight(self):
        """Return the pixel height needed per row: two font heights + bar."""
        dc = wx.MemoryDC()
        dc.SelectObject(wx.Bitmap(1, 1))
        dc.SetFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
        dummy, ytext1 = dc.GetTextExtent("Agw")
        dc.SetFont(wx.Font(7, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False))
        dummy, ytext2 = dc.GetTextExtent("Agw")
        dc.SelectObject(wx.NullBitmap)
        return ytext1 + 2*ytext2 + 40

    def GetSubItemWidth(self):
        """Minimum width of the custom-drawn sub-item."""
        return 250

    def DrawHorizontalPipe(self, dc, x, y, w, colour):
        """Draws a horizontal 3D-looking pipe."""
        for r in range(PIPE_HEIGHT):
            red = int(colour.Red() * math.sin((math.pi/PIPE_HEIGHT)*r))
            green = int(colour.Green() * math.sin((math.pi/PIPE_HEIGHT)*r))
            blue = int(colour.Blue() * math.sin((math.pi/PIPE_HEIGHT)*r))
            dc.SetPen(wx.Pen(wx.Colour(red, green, blue)))
            dc.DrawLine(x, y+r, x+w, y+r)

    def DrawProgressBar(self, dc, x, y, w, h, percent):
        """
        Draws a progress bar in the (x,y,w,h) box that represents a progress of
        'percent'. The progress bar is only horizontal and its height is constant
        (PIPE_HEIGHT). The 'h' parameter is used to vertically center the progress
        bar in the allotted space.

        The drawing is speed-optimized: two bitmaps are created the first time
        this runs — one for the done (green) part and one for the remaining
        (white) part — and subsequent calls just blit sub-bitmaps of them.
        """
        # Build the cached pipes once, ON THE CLASS. Assigning through 'self'
        # would create per-instance attributes and rebuild the bitmaps for
        # every renderer, defeating the caching described above.
        if MacRenderer.DONE_BITMAP is None:
            MacRenderer.DONE_BITMAP = wx.Bitmap(PIPE_WIDTH, PIPE_HEIGHT)
            mdc = wx.MemoryDC()
            mdc.SelectObject(MacRenderer.DONE_BITMAP)
            self.DrawHorizontalPipe(mdc, 0, 0, PIPE_WIDTH, wx.GREEN)
            mdc.SelectObject(wx.NullBitmap)
            MacRenderer.REMAINING_BITMAP = wx.Bitmap(PIPE_WIDTH, PIPE_HEIGHT)
            mdc = wx.MemoryDC()
            mdc.SelectObject(MacRenderer.REMAINING_BITMAP)
            self.DrawHorizontalPipe(mdc, 0, 0, PIPE_WIDTH, wx.RED)
            self.DrawHorizontalPipe(mdc, 1, 0, PIPE_WIDTH-1, wx.WHITE)
            mdc.SelectObject(wx.NullBitmap)
        # Center the progress bar vertically in the box supplied. Integer
        # arithmetic throughout: wx.Rect/GetSubBitmap/DrawBitmap coordinates
        # must be ints on Python 3, and '/' would yield floats here.
        y = y + (h - PIPE_HEIGHT) // 2
        if w < 1:
            return
        middle = int((w * percent) / 100)
        if middle == 0:  # not started
            bitmap = self.REMAINING_BITMAP.GetSubBitmap((1, 0, w, PIPE_HEIGHT))
            dc.DrawBitmap(bitmap, x, y, False)
        elif middle == w:  # completed
            bitmap = self.DONE_BITMAP.GetSubBitmap((0, 0, w, PIPE_HEIGHT))
            dc.DrawBitmap(bitmap, x, y, False)
        else:  # in progress
            doneBitmap = self.DONE_BITMAP.GetSubBitmap((0, 0, middle, PIPE_HEIGHT))
            dc.DrawBitmap(doneBitmap, x, y, False)
            remainingBitmap = self.REMAINING_BITMAP.GetSubBitmap((0, 0, w - middle, PIPE_HEIGHT))
            dc.DrawBitmap(remainingBitmap, x + middle, y, False)
class TestUltimateListCtrl(ULC.UltimateListCtrl, listmix.ListCtrlAutoWidthMixin):
    """UltimateListCtrl whose last column auto-resizes to fill the control."""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, agwStyle=0):
        ULC.UltimateListCtrl.__init__(self, parent, id, pos, size, style, agwStyle)
        listmix.ListCtrlAutoWidthMixin.__init__(self)
class UltimateListCtrlPanel(wx.Panel):
    """Panel hosting the UltimateListCtrl with two custom-rendered rows."""
    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.log = log
        # 32x32 icons for the first column (folder and movie embedded images).
        self.il = wx.ImageList(32, 32)
        self.il.Add(folder.GetBitmap())
        self.il.Add(movie.GetBitmap())
        # Variable row height is required for the custom renderer's tall rows.
        self.list = TestUltimateListCtrl(self, -1,
                                         agwStyle=wx.LC_REPORT
                                         | wx.BORDER_SUNKEN
                                         #| wx.BORDER_NONE
                                         #| wx.LC_SORT_ASCENDING
                                         #| wx.LC_NO_HEADER
                                         #| wx.LC_VRULES
                                         | wx.LC_HRULES
                                         #| wx.LC_SINGLE_SEL
                                         | ULC.ULC_HAS_VARIABLE_ROW_HEIGHT)
        self.list.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
        sizer.Add(self.list, 1, wx.EXPAND)
        self.PopulateList()
        self.SetSizer(sizer)
        self.SetAutoLayout(True)
        self.Bind(wx.EVT_LIST_COL_BEGIN_DRAG, self.OnColBeginDrag, self.list)

    def PopulateList(self):
        """Create the two columns and the two custom-rendered demo rows."""
        self.list.Freeze()
        # Narrow icon-only first column.
        info = ULC.UltimateListItem()
        info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_FORMAT
        info.Align = 0
        info.Text = ""
        self.list.InsertColumnInfo(0, info)
        # Wide second column drawn entirely by MacRenderer.
        info = ULC.UltimateListItem()
        info.Align = wx.LIST_FORMAT_LEFT
        info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_FORMAT
        info.Image = []
        info.Text = "Some useful info here"
        self.list.InsertColumnInfo(1, info)
        for i in range(2):
            # Append at the end; each row gets its own renderer instance.
            index = self.list.InsertImageStringItem(sys.maxsize, "", [i])
            self.list.SetStringItem(index, 1, "")
            klass = MacRenderer(self)
            self.list.SetItemCustomRenderer(index, 1, klass)
        self.list.SetColumnWidth(0, 34)
        self.list.SetColumnWidth(1, 300)
        self.list.Thaw()
        self.list.Update()

    def OnColBeginDrag(self, event):
        # The icon column has a fixed width; veto attempts to resize it.
        if event.GetColumn() == 0:
            event.Veto()
            return
        event.Skip()
#---------------------------------------------------------------------------
class TestFrame(wx.Frame):
    """Top-level demo frame hosting an UltimateListCtrlPanel."""

    def __init__(self, parent, log):
        wx.Frame.__init__(self, parent, -1, "UltimateListCtrl in torrent style :-D", size=(800, 600))
        self.log = log
        # Create the list panel defined above (not a CustomTreeCtrl).
        self.ulc = UltimateListCtrlPanel(self, self.log)
        self.SetIcon(images.Mondrian.GetIcon())
        self.CenterOnScreen()
        self.Show()
#---------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo entry point: run the frame standalone, logging to stdout.
    import sys
    app = wx.App(0)
    frame = TestFrame(None, sys.stdout)
    frame.Show(True)
    app.MainLoop()
| |
"""DNS Authenticator using RFC 2136 Dynamic Updates."""
import logging
from typing import Any
from typing import Callable
from typing import Optional

import dns.exception
import dns.flags
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.tsig
import dns.tsigkeyring
import dns.update

from certbot import errors
from certbot.plugins import dns_common
from certbot.plugins.dns_common import CredentialsConfiguration
from certbot.util import is_ipaddress
logger = logging.getLogger(__name__)
DEFAULT_NETWORK_TIMEOUT = 45
class Authenticator(dns_common.DNSAuthenticator):
    """DNS Authenticator using RFC 2136 Dynamic Updates

    This Authenticator uses RFC 2136 Dynamic Updates to fulfill a dns-01 challenge.
    """

    # TSIG algorithm names accepted in the credentials file, mapped to the
    # dnspython constants used to sign update messages.
    ALGORITHMS = {
        'HMAC-MD5': dns.tsig.HMAC_MD5,
        'HMAC-SHA1': dns.tsig.HMAC_SHA1,
        'HMAC-SHA224': dns.tsig.HMAC_SHA224,
        'HMAC-SHA256': dns.tsig.HMAC_SHA256,
        'HMAC-SHA384': dns.tsig.HMAC_SHA384,
        'HMAC-SHA512': dns.tsig.HMAC_SHA512
    }

    # Default DNS port; may be overridden by the 'port' credentials entry.
    PORT = 53

    description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'
    ttl = 120  # TTL (seconds) applied to the TXT records we create

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.credentials: Optional[CredentialsConfiguration] = None

    @classmethod
    def add_parser_arguments(cls, add: Callable[..., None],
                             default_propagation_seconds: int = 60) -> None:
        # Bug fix: forward the caller-supplied propagation delay; previously
        # a hard-coded 60 silently ignored the parameter.
        super().add_parser_arguments(
            add, default_propagation_seconds=default_propagation_seconds)
        add('credentials', help='RFC 2136 credentials INI file.')

    def more_info(self) -> str:
        """Return a human-readable description of what this plugin does."""
        return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
               'RFC 2136 Dynamic Updates.'

    def _validate_credentials(self, credentials: CredentialsConfiguration) -> None:
        """Reject hostnames for 'server' and unknown TSIG algorithm names."""
        server = credentials.conf('server')
        if not is_ipaddress(server):
            raise errors.PluginError("The configured target DNS server ({0}) is not a valid IPv4 "
                                     "or IPv6 address. A hostname is not allowed.".format(server))
        algorithm = credentials.conf('algorithm')
        if algorithm:
            if not self.ALGORITHMS.get(algorithm.upper()):
                raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm))

    def _setup_credentials(self) -> None:
        self.credentials = self._configure_credentials(
            'credentials',
            'RFC 2136 credentials INI file',
            {
                'name': 'TSIG key name',
                'secret': 'TSIG key secret',
                'server': 'The target DNS server'
            },
            self._validate_credentials
        )

    def _perform(self, _domain: str, validation_name: str, validation: str) -> None:
        self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)

    def _cleanup(self, _domain: str, validation_name: str, validation: str) -> None:
        self._get_rfc2136_client().del_txt_record(validation_name, validation)

    def _get_rfc2136_client(self) -> "_RFC2136Client":
        """Build a client from the validated credentials.

        HMAC-MD5 is the historical default TSIG algorithm when none is
        configured.
        """
        if not self.credentials:  # pragma: no cover
            raise errors.Error("Plugin has not been prepared.")
        return _RFC2136Client(self.credentials.conf('server'),
                              int(self.credentials.conf('port') or self.PORT),
                              self.credentials.conf('name'),
                              self.credentials.conf('secret'),
                              self.ALGORITHMS.get(self.credentials.conf('algorithm'),
                                                  dns.tsig.HMAC_MD5))
class _RFC2136Client:
    """
    Encapsulates all communication with the target DNS server.
    """

    def __init__(self, server: str, port: int, key_name: str, key_secret: str,
                 key_algorithm: dns.name.Name, timeout: int = DEFAULT_NETWORK_TIMEOUT) -> None:
        self.server = server
        self.port = port
        # TSIG keyring used to sign all dynamic update messages.
        self.keyring = dns.tsigkeyring.from_text({
            key_name: key_secret
        })
        self.algorithm = key_algorithm
        self._default_timeout = timeout

    def add_txt_record(self, record_name: str, record_content: str, record_ttl: int) -> None:
        """
        Add a TXT record using the supplied information.

        :param str record_name: The record name (typically beginning with '_acme-challenge.').
        :param str record_content: The record content (typically the challenge validation).
        :param int record_ttl: The record TTL (number of seconds that the record may be cached).
        :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server
        """
        update, rel = self._make_update(record_name)
        update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)
        self._send_update(update, 'adding', 'added', record_name)

    def del_txt_record(self, record_name: str, record_content: str) -> None:
        """
        Delete a TXT record using the supplied information.

        :param str record_name: The record name (typically beginning with '_acme-challenge.').
        :param str record_content: The record content (typically the challenge validation).
        :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server
        """
        update, rel = self._make_update(record_name)
        update.delete(rel, dns.rdatatype.TXT, record_content)
        self._send_update(update, 'deleting', 'deleted', record_name)

    def _make_update(self, record_name: str):
        """Build a signed dynamic update message for record_name's zone.

        :returns: the update message and the record name relativized to the
            zone apex.
        """
        domain = self._find_domain(record_name)
        n = dns.name.from_text(record_name)
        o = dns.name.from_text(domain)
        rel = n.relativize(o)
        update = dns.update.Update(
            domain,
            keyring=self.keyring,
            keyalgorithm=self.algorithm)
        return update, rel

    def _send_update(self, update: dns.update.Update, gerund: str, past: str,
                     record_name: str) -> None:
        """Send a dynamic update over TCP and validate the server's rcode.

        :param str gerund: verb form for the failure message ('adding'/'deleting').
        :param str past: verb form for the success log ('added'/'deleted').
        :raises certbot.errors.PluginError: on transport failure or a
            non-NOERROR response code.
        """
        try:
            response = dns.query.tcp(update, self.server, self._default_timeout, self.port)
        except Exception as e:
            raise errors.PluginError('Encountered error {0} TXT record: {1}'
                                     .format(gerund, e))
        rcode = response.rcode()  # type: ignore[attr-defined]

        if rcode == dns.rcode.NOERROR:
            logger.debug('Successfully %s TXT record %s', past, record_name)
        else:
            raise errors.PluginError('Received response from server: {0}'
                                     .format(dns.rcode.to_text(rcode)))

    def _find_domain(self, record_name: str) -> str:
        """
        Find the closest domain with an SOA record for a given domain name.

        :param str record_name: The record name for which to find the closest SOA record.
        :returns: The domain, if found.
        :rtype: str
        :raises certbot.errors.PluginError: if no SOA record can be found.
        """
        domain_name_guesses = dns_common.base_domain_name_guesses(record_name)

        # Loop through until we find an authoritative SOA record
        for guess in domain_name_guesses:
            if self._query_soa(guess):
                return guess

        raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'
                                 .format(record_name, domain_name_guesses))

    def _query_soa(self, domain_name: str) -> bool:
        """
        Query a domain name for an authoritative SOA record.

        :param str domain_name: The domain name to query for an SOA record.
        :returns: True if found, False otherwise.
        :rtype: bool
        :raises certbot.errors.PluginError: if no response is received.
        """
        domain = dns.name.from_text(domain_name)

        request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)
        # Turn off Recursion Desired bit in query
        request.flags ^= dns.flags.RD

        try:
            try:
                response = dns.query.tcp(request, self.server, self._default_timeout, self.port)
            except (OSError, dns.exception.Timeout) as e:
                logger.debug('TCP query failed, fallback to UDP: %s', e)
                response = dns.query.udp(request, self.server, self._default_timeout, self.port)
            rcode = response.rcode()  # type: ignore[attr-defined]

            # Authoritative Answer bit should be set
            if (rcode == dns.rcode.NOERROR
                    and response.get_rrset(response.answer,  # type: ignore[attr-defined]
                                           domain, dns.rdataclass.IN, dns.rdatatype.SOA)
                    and response.flags & dns.flags.AA):
                logger.debug('Received authoritative SOA response for %s', domain_name)
                return True

            logger.debug('No authoritative SOA record found for %s', domain_name)
            return False
        except Exception as e:
            raise errors.PluginError('Encountered error when making query: {0}'
                                     .format(e))
| |
"""Test the various means of instantiating and invoking tools."""
import gzip
import io
import sys
import time
import types
import unittest
import operator
from http.client import IncompleteRead
import cherrypy
from cherrypy import tools
from cherrypy._cpcompat import ntou
from cherrypy.test import helper, _test_decorators
timeout = 0.2
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
class ToolTests(helper.CPWebCase):
    """Functional tests for the various ways Tools can be declared and
    attached: custom toolboxes, Tool subclasses, decorators, _cp_config,
    detached config, bare hooks, and handler wrappers."""

    @staticmethod
    def setup_server():
        """Build the test application exercising every Tool declaration style."""
        # Put check_access in a custom toolbox with its own namespace
        myauthtools = cherrypy._cptools.Toolbox('myauth')

        def check_access(default=False):
            if not getattr(cherrypy.request, 'userid', default):
                raise cherrypy.HTTPError(401)
        myauthtools.check_access = cherrypy.Tool(
            'before_request_body', check_access)

        def numerify():
            def number_it(body):
                for chunk in body:
                    for k, v in cherrypy.request.numerify_map:
                        chunk = chunk.replace(k, v)
                    yield chunk
            cherrypy.response.body = number_it(cherrypy.response.body)

        class NumTool(cherrypy.Tool):
            def _setup(self):
                def makemap():
                    m = self._merged_args().get('map', {})
                    cherrypy.request.numerify_map = list(m.items())
                cherrypy.request.hooks.attach('on_start_resource', makemap)

                def critical():
                    cherrypy.request.error_response = cherrypy.HTTPError(
                        502).set_response
                # failsafe hooks run even if sibling hooks raise.
                critical.failsafe = True
                cherrypy.request.hooks.attach('on_start_resource', critical)
                cherrypy.request.hooks.attach(self._point, self.callable)
        tools.numerify = NumTool('before_finalize', numerify)

        # It's not mandatory to inherit from cherrypy.Tool.
        class NadsatTool:
            def __init__(self):
                self.ended = {}
                self._name = 'nadsat'

            def nadsat(self):
                def nadsat_it_up(body):
                    for chunk in body:
                        chunk = chunk.replace(b'good', b'horrorshow')
                        chunk = chunk.replace(b'piece', b'lomtick')
                        yield chunk
                cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
            nadsat.priority = 0

            def cleanup(self):
                # This runs after the request has been completely written out.
                cherrypy.response.body = [b'razdrez']
                id = cherrypy.request.params.get('id')
                if id:
                    self.ended[id] = True
            cleanup.failsafe = True

            def _setup(self):
                cherrypy.request.hooks.attach('before_finalize', self.nadsat)
                cherrypy.request.hooks.attach('on_end_request', self.cleanup)
        tools.nadsat = NadsatTool()

        def pipe_body():
            # Read the raw body ourselves instead of letting CherryPy do it.
            cherrypy.request.process_request_body = False
            clen = int(cherrypy.request.headers['Content-Length'])
            cherrypy.request.body = cherrypy.request.rfile.read(clen)

        # Assert that we can use a callable object instead of a function.
        class Rotator(object):
            def __call__(self, scale):
                r = cherrypy.response
                r.collapse_body()
                r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
        cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())

        def stream_handler(next_handler, *args, **kwargs):
            actual = cherrypy.request.config.get('tools.streamer.arg')
            assert actual == 'arg value'
            cherrypy.response.output = o = io.BytesIO()
            try:
                next_handler(*args, **kwargs)
                # Ignore the response and return our accumulated output
                # instead.
                return o.getvalue()
            finally:
                o.close()
        cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
            stream_handler)

        class Root:
            @cherrypy.expose
            def index(self):
                return 'Howdy earth!'

            @cherrypy.expose
            @cherrypy.config(**{
                'tools.streamer.on': True,
                'tools.streamer.arg': 'arg value',
            })
            def tarfile(self):
                actual = cherrypy.request.config.get('tools.streamer.arg')
                assert actual == 'arg value'
                cherrypy.response.output.write(b'I am ')
                cherrypy.response.output.write(b'a tarfile')

            @cherrypy.expose
            def euro(self):
                hooks = list(cherrypy.request.hooks['before_finalize'])
                hooks.sort()
                cbnames = [x.callback.__name__ for x in hooks]
                assert cbnames == ['gzip'], cbnames
                priorities = [x.priority for x in hooks]
                assert priorities == [80], priorities
                yield ntou('Hello,')
                yield ntou('world')
                yield europoundUnicode

            # Bare hooks
            @cherrypy.expose
            @cherrypy.config(**{'hooks.before_request_body': pipe_body})
            def pipe(self):
                return cherrypy.request.body

            # Multiple decorators; include kwargs just for fun.
            # Note that rotator must run before gzip.
            @cherrypy.expose
            def decorated_euro(self, *vpath):
                yield ntou('Hello,')
                yield ntou('world')
                yield europoundUnicode
            decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
            decorated_euro = tools.rotator(scale=3)(decorated_euro)
        root = Root()

        class TestType(type):
            """Metaclass which automatically exposes all functions in each
            subclass, and adds an instance of the subclass as an attribute
            of root.
            """

            def __init__(cls, name, bases, dct):
                type.__init__(cls, name, bases, dct)
                for value in dct.values():
                    if isinstance(value, types.FunctionType):
                        cherrypy.expose(value)
                setattr(root, name.lower(), cls())
        Test = TestType('Test', (object,), {})

        # METHOD ONE:
        # Declare Tools in _cp_config
        @cherrypy.config(**{'tools.nadsat.on': True})
        class Demo(Test):

            def index(self, id=None):
                return 'A good piece of cherry pie'

            def ended(self, id):
                return repr(tools.nadsat.ended[id])

            def err(self, id=None):
                raise ValueError()

            def errinstream(self, id=None):
                yield 'nonconfidential'
                raise ValueError()
                yield 'confidential'

            # METHOD TWO: decorator using Tool()
            # We support Python 2.3, but the @-deco syntax would look like
            # this:
            # @tools.check_access()
            def restricted(self):
                return 'Welcome!'
            restricted = myauthtools.check_access()(restricted)
            userid = restricted

            def err_in_onstart(self):
                return 'success!'

            @cherrypy.config(**{'response.stream': True})
            def stream(self, id=None):
                for x in range(100000000):
                    yield str(x)

        conf = {
            # METHOD THREE:
            # Declare Tools in detached config
            '/demo': {
                'tools.numerify.on': True,
                'tools.numerify.map': {b'pie': b'3.14159'},
            },
            '/demo/restricted': {
                'request.show_tracebacks': False,
            },
            '/demo/userid': {
                'request.show_tracebacks': False,
                'myauth.check_access.default': True,
            },
            '/demo/errinstream': {
                'response.stream': True,
            },
            '/demo/err_in_onstart': {
                # Because this isn't a dict, on_start_resource will error.
                'tools.numerify.map': 'pie->3.14159'
            },
            # Combined tools
            '/euro': {
                'tools.gzip.on': True,
                'tools.encode.on': True,
            },
            # Priority specified in config
            '/decorated_euro/subpath': {
                'tools.gzip.priority': 10,
            },
            # Handler wrappers
            '/tarfile': {'tools.streamer.on': True}
        }
        app = cherrypy.tree.mount(root, config=conf)
        app.request_class.namespaces['myauth'] = myauthtools

        root.tooldecs = _test_decorators.ToolExamples()

    def testHookErrors(self):
        """on_end_request hooks must fire exactly once, after the response."""
        self.getPage('/demo/?id=1')
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertBody('A horrorshow lomtick of cherry 3.14159')
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage('/demo/ended/1')
        self.assertBody('True')

        valerr = '\n raise ValueError()\nValueError'
        self.getPage('/demo/err?id=3')
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertErrorPage(502, pattern=valerr)
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage('/demo/ended/3')
        self.assertBody('True')

        # If body is "razdrez", then on_end_request is being called too early.
        if (cherrypy.server.protocol_version == 'HTTP/1.0' or
                getattr(cherrypy.server, 'using_apache', False)):
            self.getPage('/demo/errinstream?id=5')
            # Because this error is raised after the response body has
            # started, the status should not change to an error status.
            self.assertStatus('200 OK')
            self.assertBody('nonconfidential')
        else:
            # Because this error is raised after the response body has
            # started, and because it's chunked output, an error is raised by
            # the HTTP client when it encounters incomplete output.
            self.assertRaises((ValueError, IncompleteRead), self.getPage,
                              '/demo/errinstream?id=5')
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage('/demo/ended/5')
        self.assertBody('True')

        # Test the "__call__" technique (compile-time decorator).
        self.getPage('/demo/restricted')
        self.assertErrorPage(401)

        # Test compile-time decorator with kwargs from config.
        self.getPage('/demo/userid')
        self.assertBody('Welcome!')

    def testEndRequestOnDrop(self):
        """on_end_request must still run if the client drops the connection."""
        old_timeout = None
        try:
            httpserver = cherrypy.server.httpserver
            old_timeout = httpserver.timeout
        except (AttributeError, IndexError):
            return self.skip()

        try:
            httpserver.timeout = timeout

            # Test that on_end_request is called even if the client drops.
            self.persistent = True
            try:
                conn = self.HTTP_CONN
                conn.putrequest('GET', '/demo/stream?id=9', skip_host=True)
                conn.putheader('Host', self.HOST)
                conn.endheaders()
                # Skip the rest of the request and close the conn. This will
                # cause the server's active socket to error, which *should*
                # result in the request being aborted, and request.close being
                # called all the way up the stack (including WSGI middleware),
                # eventually calling our on_end_request hook.
            finally:
                self.persistent = False
            time.sleep(timeout * 2)

            # Test that the on_end_request hook was called.
            self.getPage('/demo/ended/9')
            self.assertBody('True')
        finally:
            if old_timeout is not None:
                httpserver.timeout = old_timeout

    def testGuaranteedHooks(self):
        """'failsafe' hooks must run even when sibling hooks raise."""
        # The 'critical' on_start_resource hook is 'failsafe' (guaranteed
        # to run even if there are failures in other on_start methods).
        # This is NOT true of the other hooks.
        # Here, we have set up a failure in NumerifyTool.numerify_map,
        # but our 'critical' hook should run and set the error to 502.
        self.getPage('/demo/err_in_onstart')
        self.assertErrorPage(502)
        tmpl = "AttributeError: 'str' object has no attribute '{attr}'"
        expected_msg = tmpl.format(attr='items')
        self.assertInBody(expected_msg)

    def testCombinedTools(self):
        """gzip + encode must compose; priority overrides reorder hooks."""
        expectedResult = (ntou('Hello,world') +
                          europoundUnicode).encode('utf-8')
        zbuf = io.BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
        zfile.write(expectedResult)
        zfile.close()

        self.getPage('/euro',
                     headers=[
                         ('Accept-Encoding', 'gzip'),
                         ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')])
        self.assertInBody(zbuf.getvalue()[:3])

        zbuf = io.BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
        zfile.write(expectedResult)
        zfile.close()

        self.getPage('/decorated_euro', headers=[('Accept-Encoding', 'gzip')])
        self.assertInBody(zbuf.getvalue()[:3])

        # This returns a different value because gzip's priority was
        # lowered in conf, allowing the rotator to run after gzip.
        # Of course, we don't want breakage in production apps,
        # but it proves the priority was changed.
        self.getPage('/decorated_euro/subpath',
                     headers=[('Accept-Encoding', 'gzip')])
        self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))

    def testBareHooks(self):
        """A bare callable in config must attach as a request hook."""
        content = 'bit of a pain in me gulliver'
        self.getPage('/pipe',
                     headers=[('Content-Length', str(len(content))),
                              ('Content-Type', 'text/plain')],
                     method='POST', body=content)
        self.assertBody(content)

    def testHandlerWrapperTool(self):
        """HandlerWrapperTool must replace the handler's output stream."""
        self.getPage('/tarfile')
        self.assertBody('I am a tarfile')

    def testToolWithConfig(self):
        """Tool decorators must accept keyword config arguments."""
        if not sys.version_info >= (2, 5):
            return self.skip('skipped (Python 2.5+ only)')
        self.getPage('/tooldecs/blah')
        self.assertHeader('Content-Type', 'application/data')

    def testWarnToolOn(self):
        """Accessing Tool.on directly must raise AttributeError."""
        # get
        try:
            cherrypy.tools.numerify.on
        except AttributeError:
            pass
        else:
            raise AssertionError('Tool.on did not error as it should have.')

        # set
        try:
            cherrypy.tools.numerify.on = True
        except AttributeError:
            pass
        else:
            raise AssertionError('Tool.on did not error as it should have.')

    def testDecorator(self):
        """tools.register must create Tools with the given point/name/priority."""
        @cherrypy.tools.register('on_start_resource')
        def example():
            pass
        self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
        self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')

        @cherrypy.tools.register(  # noqa: F811
            'before_finalize', name='renamed', priority=60,
        )
        def example():
            pass
        self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
        self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
        self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
        self.assertEqual(cherrypy.tools.renamed._priority, 60)
class SessionAuthTest(unittest.TestCase):
    """Regression tests for cherrypy.lib.cptools.SessionAuth."""

    def test_login_screen_returns_bytes(self):
        """login_screen must return bytes even for unicode credentials.

        Issue 1132 revealed that login_screen would return unicode if the
        username and password were unicode.
        """
        auth = cherrypy.lib.cptools.SessionAuth()
        screen = auth.login_screen(
            None, username='nobody', password='anypass')
        self.assertIsInstance(screen, bytes)
class TestHooks:
    """Unit tests for cherrypy._cprequest.Hook ordering."""

    def test_priorities(self):
        """Hooks should sort by priority order (the default priority is 50)."""
        make_hook = cherrypy._cprequest.Hook
        unsorted_hooks = [
            make_hook(None, priority=48),
            make_hook(None),  # default priority
            make_hook(None, priority=49),
        ]
        ordered = sorted(unsorted_hooks)
        assert [hook.priority for hook in ordered] == [48, 49, 50]
| |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides function wrappers that implement page streaming and retrying."""
from __future__ import absolute_import, division, unicode_literals
from future import utils
from google import gax
from google.gax import bundling
from google.gax.utils import metrics
_MILLIS_PER_SECOND = 1000
def _bundleable(desc):
"""Creates a function that transforms an API call into a bundling call.
It transform a_func from an API call that receives the requests and returns
the response into a callable that receives the same request, and
returns a :class:`bundling.Event`.
The returned Event object can be used to obtain the eventual result of the
bundled call.
Args:
desc (gax.BundleDescriptor): describes the bundling that a_func
supports.
Returns:
Callable: takes the API call's request and keyword args and returns a
bundling.Event object.
"""
def inner(a_func, settings, request, **kwargs):
"""Schedules execution of a bundling task."""
if not settings.bundler:
return a_func(request, **kwargs)
the_id = bundling.compute_bundle_id(
request, desc.request_discriminator_fields)
return settings.bundler.schedule(a_func, the_id, desc, request, kwargs)
return inner
def _page_streamable(page_descriptor):
    """Create a wrapper that performs page streaming for an API call.

    Args:
        page_descriptor (:class:`PageDescriptor`): indicates the structure
            of page streaming to be performed.

    Returns:
        Callable: A function that returns an iterator (over pages, or over
        the flattened resources when ``settings.flatten_pages`` is set).
    """
    def inner(a_func, settings, request, **kwargs):
        """Build the page iterator configured by *settings*."""
        pages = gax.PageIterator(
            a_func, page_descriptor, settings.page_token, request, **kwargs)
        if not settings.flatten_pages:
            return pages
        return gax.ResourceIterator(pages)
    return inner
def _construct_bundling(bundle_config, bundle_descriptor):
"""Helper for ``construct_settings()``.
Args:
bundle_config (dict): A dictionary specifying a bundle parameters, the
value for 'bundling' field in a method config (See
``construct_settings()`` for information on this config.)
bundle_descriptor (BundleDescriptor): A BundleDescriptor object
describing the structure of bundling for this method. If not set,
this method will not bundle.
Returns:
Tuple[bundling.Executor, BundleDescriptor]: A tuple that configures
bundling. The bundling.Executor may be None if this method should not
bundle.
"""
if bundle_config and bundle_descriptor:
bundler = bundling.Executor(gax.BundleOptions(
element_count_threshold=bundle_config.get(
'element_count_threshold', 0),
element_count_limit=bundle_config.get('element_count_limit', 0),
request_byte_threshold=bundle_config.get(
'request_byte_threshold', 0),
request_byte_limit=bundle_config.get('request_byte_limit', 0),
delay_threshold=bundle_config.get('delay_threshold_millis', 0)))
else:
bundler = None
return bundler
def _construct_retry(method_config, retry_codes, retry_params, retry_names):
"""Helper for ``construct_settings()``.
Args:
method_config (dict): A dictionary representing a single ``methods``
entry of the standard API client config file. (See
``construct_settings()`` for information on this yaml.)
retry_codes (dict): A dictionary parsed from the ``retry_codes`` entry
of the standard API client config file. (See ``construct_settings()``
for information on this yaml.)
retry_params (dict): A dictionary parsed from the ``retry_params`` entry
of the standard API client config file. (See ``construct_settings()``
for information on this yaml.)
retry_names (dict): A dictionary mapping the string names used in the
standard API client config file to API response status codes.
Returns:
Optional[RetryOptions]: The retry options, if applicable.
"""
if method_config is None:
return None
codes = None
if retry_codes and 'retry_codes_name' in method_config:
codes_name = method_config['retry_codes_name']
if codes_name in retry_codes and retry_codes[codes_name]:
codes = [retry_names[name] for name in retry_codes[codes_name]]
else:
codes = []
backoff_settings = None
if retry_params and 'retry_params_name' in method_config:
params_name = method_config['retry_params_name']
if params_name and params_name in retry_params:
backoff_settings = gax.BackoffSettings(**retry_params[params_name])
return gax.RetryOptions(
backoff_settings=backoff_settings,
retry_codes=codes,
)
def _merge_retry_options(retry_options, overrides):
"""Helper for ``construct_settings()``.
Takes two retry options, and merges them into a single RetryOption instance.
Args:
retry_options (RetryOptions): The base RetryOptions.
overrides (RetryOptions): The RetryOptions used for overriding ``retry``.
Use the values if it is not None. If entire ``overrides`` is None,
ignore the base retry and return None.
Returns:
RetryOptions: The merged options, or None if it will be canceled.
"""
if overrides is None:
return None
if overrides.retry_codes is None and overrides.backoff_settings is None:
return retry_options
codes = retry_options.retry_codes
if overrides.retry_codes is not None:
codes = overrides.retry_codes
backoff_settings = retry_options.backoff_settings
if overrides.backoff_settings is not None:
backoff_settings = overrides.backoff_settings
return gax.RetryOptions(
backoff_settings=backoff_settings,
retry_codes=codes,
)
def _upper_camel_to_lower_under(string):
if not string:
return ''
out = ''
out += string[0].lower()
for char in string[1:]:
if char.isupper():
out += '_' + char.lower()
else:
out += char
return out
def construct_settings(
        service_name, client_config, config_override,
        retry_names, bundle_descriptors=None, page_descriptors=None,
        metrics_headers=(), kwargs=None):
    """Constructs a dictionary mapping method names to _CallSettings.

    The ``client_config`` parameter is parsed from a client configuration JSON
    file of the form:

    .. code-block:: json

       {
         "interfaces": {
           "google.fake.v1.ServiceName": {
             "retry_codes": {
               "idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
               "non_idempotent": []
             },
             "retry_params": {
               "default": {
                 "initial_retry_delay_millis": 100,
                 "retry_delay_multiplier": 1.2,
                 "max_retry_delay_millis": 1000,
                 "initial_rpc_timeout_millis": 2000,
                 "rpc_timeout_multiplier": 1.5,
                 "max_rpc_timeout_millis": 30000,
                 "total_timeout_millis": 45000
               }
             },
             "methods": {
               "CreateFoo": {
                 "retry_codes_name": "idempotent",
                 "retry_params_name": "default",
                 "timeout_millis": 30000
               },
               "Publish": {
                 "retry_codes_name": "non_idempotent",
                 "retry_params_name": "default",
                 "bundling": {
                   "element_count_threshold": 40,
                   "element_count_limit": 200,
                   "request_byte_threshold": 90000,
                   "request_byte_limit": 100000,
                   "delay_threshold_millis": 100
                 }
               }
             }
           }
         }
       }

    Args:
        service_name (str): The fully-qualified name of this service, used as
            a key into the client config file (in the example above, this
            value would be ``google.fake.v1.ServiceName``).
        client_config (dict): A dictionary parsed from the standard API client
            config file.
        bundle_descriptors (Mapping[str, BundleDescriptor]): A dictionary of
            method names to BundleDescriptor objects for methods that are
            bundling-enabled.
        page_descriptors (Mapping[str, PageDescriptor]): A dictionary of
            method names to PageDescriptor objects for methods that are page
            streaming-enabled.
        config_override (str): A dictionary in the same structure of
            client_config to override the settings. Usually client_config is
            supplied from the default config and config_override will be
            specified by users.
        retry_names (Mapping[str, object]): A dictionary mapping the strings
            referring to response status codes to the Python objects
            representing those codes.
        metrics_headers (Mapping[str, str]): Dictionary of headers to be
            passed for analytics. Sent as a dictionary; eventually becomes a
            space-separated string (e.g. 'foo/1.0.0 bar/3.14.1').
        kwargs (dict): The keyword arguments to be passed to the API calls.

    Returns:
        dict: A dictionary mapping method names to _CallSettings.

    Raises:
        KeyError: If the configuration for the service in question cannot be
            located in the provided ``client_config``.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=protected-access
    defaults = {}
    bundle_descriptors = bundle_descriptors or {}
    page_descriptors = page_descriptors or {}
    kwargs = kwargs or {}

    # Sanity check: It is possible that we got this far but some headers
    # were specified with an older library, which sends them as...
    # kwargs={'metadata': [('x-goog-api-client', 'foo/1.0 bar/3.0')]}
    #
    # Note: This is the final format we will send down to GRPC shortly.
    #
    # Remove any x-goog-api-client header that may have been present
    # in the metadata list.
    if 'metadata' in kwargs:
        kwargs['metadata'] = [value for value in kwargs['metadata']
                              if value[0].lower() != 'x-goog-api-client']

    # Fill out the metrics headers with GAX and GRPC info, and convert
    # to a string in the format that the GRPC layer expects.
    kwargs.setdefault('metadata', [])
    kwargs['metadata'].append(
        ('x-goog-api-client', metrics.stringify(metrics.fill(metrics_headers)))
    )

    try:
        service_config = client_config['interfaces'][service_name]
    except KeyError:
        raise KeyError('Client configuration not found for service: {}'
                       .format(service_name))

    overrides = config_override.get('interfaces', {}).get(service_name, {})

    # Robustness fix: default to an empty mapping so a service config with
    # no "methods" section yields an empty settings dict instead of raising
    # TypeError by iterating None.
    for method in service_config.get('methods', {}):
        method_config = service_config['methods'][method]
        overriding_method = overrides.get('methods', {}).get(method, {})
        snake_name = _upper_camel_to_lower_under(method)

        if overriding_method and overriding_method.get('timeout_millis'):
            timeout = overriding_method['timeout_millis']
        else:
            timeout = method_config['timeout_millis']
        timeout /= _MILLIS_PER_SECOND

        bundle_descriptor = bundle_descriptors.get(snake_name)
        bundling_config = method_config.get('bundling', None)
        if overriding_method and 'bundling' in overriding_method:
            bundling_config = overriding_method['bundling']
        bundler = _construct_bundling(bundling_config, bundle_descriptor)

        # Robustness fix: tolerate configs lacking "retry_codes" or
        # "retry_params" sections (_construct_retry treats a missing mapping
        # as "no retry configuration").
        retry_options = _merge_retry_options(
            _construct_retry(method_config, service_config.get('retry_codes'),
                             service_config.get('retry_params'), retry_names),
            _construct_retry(overriding_method, overrides.get('retry_codes'),
                             overrides.get('retry_params'), retry_names))

        defaults[snake_name] = gax._CallSettings(
            timeout=timeout, retry=retry_options,
            page_descriptor=page_descriptors.get(snake_name),
            bundler=bundler, bundle_descriptor=bundle_descriptor,
            kwargs=kwargs)
    return defaults
def _catch_errors(a_func, to_catch):
    """Wrap ``a_func`` so configured exceptions are re-raised as GaxError.

    Args:
        a_func (callable): A callable.
        to_catch (list[Exception]): Configures the exceptions to wrap.

    Returns:
        Callable: A function that will wrap certain exceptions with GaxError
    """
    def wrapped(*args, **kwargs):
        """Call through, translating the configured exceptions."""
        try:
            return a_func(*args, **kwargs)
        # pylint: disable=catching-non-exception
        except tuple(to_catch) as exc:
            # Preserve the original traceback while surfacing a GaxError.
            utils.raise_with_traceback(
                gax.errors.create_error('RPC failed', cause=exc))
    return wrapped
def _merge_options_metadata(options, settings):
    """Merge metadata list (add all missing tuples)"""
    if not options:
        return options
    opt_kwargs = options.kwargs
    if opt_kwargs == gax.OPTION_INHERIT or 'metadata' not in opt_kwargs:
        return options
    merged_kwargs = opt_kwargs.copy()
    # Header names already supplied by the caller (case-insensitive).
    present = set(meta[0].lower() for meta in merged_kwargs['metadata'])
    # Append every default header tuple the caller did not override.
    for meta in settings.kwargs['metadata']:
        if meta[0].lower() not in present:
            merged_kwargs['metadata'].append(meta)
    return gax.CallOptions(
        timeout=options.timeout, retry=options.retry,
        page_token=options.page_token,
        is_bundling=options.is_bundling,
        **merged_kwargs)
def create_api_call(func, settings):
    """Converts an rpc call into an API call governed by the settings.
    In typical usage, ``func`` will be a callable used to make an rpc request.
    This will most likely be a bound method from a request stub used to make
    an rpc call.
    The result is created by applying a series of function decorators defined
    in this module to ``func``. ``settings`` is used to determine which
    function decorators to apply.
    The result is another callable which for most values of ``settings`` has
    the same signature as the original. Only when ``settings`` configures
    bundling does the signature change.
    Args:
        func (Callable[Sequence[object], object]): is used to make a bare rpc
            call.
        settings (_CallSettings): provides the settings for this call
    Returns:
        Callable[Sequence[object], object]: a bound method on a request stub used
            to make an rpc call
    Raises:
        ValueError: if ``settings`` has incompatible values, e.g., if bundling
            and page_streaming are both configured
    """
    def base_caller(api_call, _, *args):
        """Simply call api_call and ignore settings."""
        return api_call(*args)
    def inner(request, options=None):
        """Invoke with the actual settings."""
        # Merge per-call metadata headers into the defaults, then merge the
        # whole options object into the default call settings.
        this_options = _merge_options_metadata(options, settings)
        this_settings = settings.merge(this_options)
        # Retry when retry codes are configured; otherwise just add a timeout.
        if this_settings.retry and this_settings.retry.retry_codes:
            api_call = gax.retry.retryable(
                func, this_settings.retry, **this_settings.kwargs)
        else:
            api_call = gax.retry.add_timeout_arg(
                func, this_settings.timeout, **this_settings.kwargs)
        api_call = _catch_errors(api_call, gax.config.API_ERRORS)
        # ``api_caller`` is selected once below and closed over here.
        return api_caller(api_call, this_settings, request)
    if settings.page_descriptor:
        if settings.bundler and settings.bundle_descriptor:
            raise ValueError('The API call has incompatible settings: '
                             'bundling and page streaming')
        api_caller = _page_streamable(settings.page_descriptor)
    elif settings.bundler and settings.bundle_descriptor:
        api_caller = _bundleable(settings.bundle_descriptor)
    else:
        api_caller = base_caller
    return inner
| |
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os
import os.path as op
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_less)
import matplotlib.pyplot as plt
import pytest
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events, Evoked, head_to_mni)
from mne.dipole import get_phantom_dipoles, _BDIP_ERROR_KEYS
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import requires_mne, run_subprocess, requires_nibabel
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.constants import FIFF
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
data_path = testing.data_path(download=False)
meg_path = op.join(data_path, 'MEG', 'sample')
# Dipole fit results (.dip text / .bdip binary) and sample-recording files
# from the mne testing dataset.
fname_dip_xfit_80 = op.join(meg_path, 'sample_audvis-ave_xfit.dip')
fname_raw = op.join(meg_path, 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(meg_path, 'sample_audvis_trunc_set1.dip')
fname_bdip = op.join(meg_path, 'sample_audvis_trunc_set1.bdip')
fname_dip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.dip')
fname_bdip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.bdip')
fname_evo = op.join(meg_path, 'sample_audvis_trunc-ave.fif')
fname_evo_full = op.join(meg_path, 'sample_audvis-ave.fif')
fname_cov = op.join(meg_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(meg_path, 'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(meg_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
# BEM solution and source space for the 'sample' subject.
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
                    'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
                    'sample-oct-2-src.fif')
# Xfit exports and CTF data used by individual tests below.
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert orig.name == new.name
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert len(dip) == n_dipoles
assert dip.pos.shape == (n_dipoles, 3)
assert dip.ori.shape == (n_dipoles, 3)
assert dip.gof.shape == (n_dipoles,)
assert dip.amplitude.shape == (n_dipoles,)
@testing.requires_testing_data
def test_io_dipoles(tmp_path):
    """Test IO for .dip files."""
    dipole = read_dipole(fname_dip)
    assert 'Dipole ' in repr(dipole)  # smoke-test __repr__
    out_fname = op.join(str(tmp_path), 'temp.dip')
    dipole.save(out_fname)
    # Round-trip through disk must preserve the fit.
    _compare_dipoles(dipole, read_dipole(out_fname))
@testing.requires_testing_data
def test_dipole_fitting_ctf():
    """Test dipole fitting with CTF data."""
    raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
    events = make_fixed_length_events(raw_ctf, 1)
    # Single-sample epochs (tmin == tmax == 0) averaged into one Evoked.
    evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
    cov = make_ad_hoc_cov(evoked.info)
    sphere = make_sphere_model((0., 0., 0.))
    # XXX Eventually we should do some better checks about accuracy, but
    # for now our CTF phantom fitting tutorials will have to do
    # (otherwise we need to add that to the testing dataset, which is
    # a bit too big)
    # Smoke test only: the fit must run without error with an explicit rank.
    fit_dipole(evoked, cov, sphere, rank=dict(meg=len(evoked.data)),
               tol=1e-3, accuracy='accurate')
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
@requires_mne
def test_dipole_fitting(tmp_path):
    """Test dipole fitting.

    Simulates evoked data from known source locations, fits dipoles with
    both the MNE-C ``mne_dipole_fit`` binary and the Python implementation,
    and checks that the Python fits are about as good or better.
    """
    amp = 100e-9  # simulated source amplitude
    tempdir = str(tmp_path)
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    # Pick a few random source vertices per hemisphere to activate.
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    # Identity activation: one source active per time sample.
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)
    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)
    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with pytest.warns(RuntimeWarning, match='projection'):
        dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd,
                                   rank='info')  # just to test rank support
    assert isinstance(residual, Evoked)
    # Test conversion of dip.pos to MNI coordinates.
    dip_mni_pos = dip.to_mni('sample', fname_trans,
                             subjects_dir=subjects_dir)
    head_to_mni_dip_pos = head_to_mni(dip.pos, 'sample', fwd['mri_head_t'],
                                      subjects_dir=subjects_dir)
    assert_allclose(dip_mni_pos, head_to_mni_dip_pos, rtol=1e-3, atol=0)
    # Test finding label for dip.pos in an aseg, also tests `to_mri`
    target_labels = ['Left-Cerebral-Cortex', 'Unknown', 'Left-Cerebral-Cortex',
                     'Right-Cerebral-Cortex', 'Left-Cerebral-Cortex',
                     'Unknown', 'Unknown', 'Unknown',
                     'Right-Cerebral-White-Matter', 'Right-Cerebral-Cortex']
    labels = dip.to_volume_labels(fname_trans, subject='fsaverage',
                                  aseg="aseg", subjects_dir=subjects_dir)
    assert labels == target_labels
    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0))
    assert (data_rms > resi_rms * 0.95).all(), \
        '%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)
    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert fwd['src'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert (dip is out)  # crop operates in place
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]
    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                            axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    # XXX possibly some OpenBLAS numerical differences make
    # things slightly worse for us
    factor = 0.7
    assert dists[0] / factor >= dists[1], 'dists: %s' % dists
    assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
    assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
        'gc-dists (ori): %s' % gc_dists
    assert amp_errs[0] / factor >= amp_errs[1],\
        'amplitude errors: %s' % amp_errs
    # This one is weird because our cov/sim/picking is weird
    assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
@testing.requires_testing_data
def test_dipole_fitting_fixed(tmp_path):
    """Test dipole fitting with a fixed position."""
    tpeak = 0.073  # time (s) of the single-sample reference fit
    sphere = make_sphere_model(head_radius=0.1)
    evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
    evoked.pick_types(meg=True)
    t_idx = np.argmin(np.abs(tpeak - evoked.times))
    evoked_crop = evoked.copy().crop(tpeak, tpeak)
    assert len(evoked_crop.times) == 1
    cov = read_cov(fname_cov)
    # Free fit at the single sample: reference pos/ori/gof/amplitude.
    dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
    assert isinstance(dip_seq, Dipole)
    assert isinstance(resid, Evoked)
    assert len(dip_seq.times) == 1
    pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
    amp = dip_seq.amplitude[0]
    # Fix position, allow orientation to change
    dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
    assert isinstance(dip_free, Dipole)
    assert isinstance(resid_free, Evoked)
    assert_allclose(dip_free.times, evoked.times)
    # Position must be identical at every time point.
    assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
                    dip_free.pos)
    assert_allclose(ori, dip_free.ori[t_idx])  # should find same ori
    assert (np.dot(dip_free.ori, ori).mean() < 0.9)  # but few the same
    assert_allclose(gof, dip_free.gof[t_idx])  # ... same gof
    assert_allclose(amp, dip_free.amplitude[t_idx])  # and same amp
    assert_allclose(resid.data, resid_free.data[:, [t_idx]])
    # Fix position and orientation
    dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
    assert (isinstance(dip_fixed, DipoleFixed))
    assert_allclose(dip_fixed.times, evoked.times)
    # Fixed pos/ori are stored in the first channel's loc vector.
    assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
    assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
    assert_allclose(dip_fixed.data[1, t_idx], gof)
    assert_allclose(resid.data, resid_fixed.data[:, [t_idx]])
    _check_roundtrip_fixed(dip_fixed, tmp_path)
    # bad resetting
    evoked.info['bads'] = [evoked.ch_names[3]]
    dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
    # Degenerate conditions
    evoked_nan = evoked.copy().crop(0, 0)
    evoked_nan.data[0, 0] = None  # None becomes NaN in the float array
    pytest.raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
    # These argument combinations must raise ValueError.
    pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
    pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
                  ori=[2, 0, 0])
    pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
    # copying
    dip_fixed_2 = dip_fixed.copy()
    dip_fixed_2.data[:] = 0.
    # The copy must be deep: zeroing it must not touch the original.
    assert not np.isclose(dip_fixed.data, 0., atol=1e-20).any()
    # plotting
    plt.close('all')
    dip_fixed.plot()
    plt.close('all')
    orig_times = np.array(dip_fixed.times)
    shift_times = dip_fixed.shift_time(1.).times
    assert_allclose(shift_times, orig_times + 1)
@testing.requires_testing_data
def test_len_index_dipoles():
    """Test len and indexing of Dipole objects."""
    dipole = read_dipole(fname_dip)
    by_int = dipole[0]
    by_slice = dipole[:1]
    # Integer and slice indexing must agree.
    _check_dipole(by_int, 1)
    _check_dipole(by_slice, 1)
    _compare_dipoles(by_int, by_slice)
    # Boolean-mask and integer-array indexing must agree too.
    mask = dipole.gof > 15
    masked = dipole[mask]
    _check_dipole(masked, 4)
    _compare_dipoles(masked, dipole[np.where(mask)[0]])
@pytest.mark.slowtest  # slow-ish on Travis OSX
@testing.requires_testing_data
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull."""
    subject = 'sample'
    raw = read_raw_fif(fname_raw, preload=True)
    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)
    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scal map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1
    simulated_scalp_map = simulated_scalp_map[:, None]
    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
    min_dist = 5.  # distance in mm
    bem = read_bem_solution(fname_bem)
    dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
                               min_dist=min_dist, tol=1e-4)
    assert isinstance(residual, Evoked)
    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
    # depth is in m while min_dist is in mm, hence the 1000 factor below.
    # Constraints are not exact, so bump the minimum slightly
    assert (min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
    # Negative min_dist must be rejected.
    with pytest.raises(ValueError, match='min_dist should be positive'):
        fit_dipole(evoked, cov, fname_bem, fname_trans, -1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
    """Compute dipole depth (distance to the inner skull surface)."""
    head_mri_t = _get_trans(fname_trans)[0]
    bem_sol = read_bem_solution(fname_bem)
    inner_skull = _bem_find_surface(bem_sol, 'inner_skull')
    # Bring the surface points into the dipole coordinate frame first.
    surf_pts = apply_trans(head_mri_t['trans'], inner_skull['rr'])
    depth = _compute_nearest(surf_pts, dip.pos, return_dists=True)[1][0]
    return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
    """Test dipole fitting to sub-mm accuracy."""
    evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
    evoked.pick_types(meg=True, eeg=False)
    evoked.pick_channels([c for c in evoked.ch_names[::4]])
    # Test both a fixed sphere radius and an automatically fit one.
    for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
        bem = make_sphere_model('auto', rad, evoked.info,
                                relative_radii=(0.999, 0.998, 0.997, 0.995))
        src = read_source_spaces(fname_src)
        fwd = make_forward_solution(evoked.info, None, src, bem)
        fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
        vertices = [src[0]['vertno'], src[1]['vertno']]
        n_vertices = sum(len(v) for v in vertices)
        amp = 10e-9
        # One source active per time sample (identity-like activation).
        data = np.eye(n_vertices + 1)[:n_vertices]
        data[-1, -1] = 1.
        data *= amp
        stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
        evoked.info.normalize_proj()
        # Noiseless simulation (nave=inf) with an ad-hoc covariance for fitting.
        sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
        cov = make_ad_hoc_cov(evoked.info)
        dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
        ds = []
        # Distance between each fitted dipole and its true source vertex.
        for vi in range(n_vertices):
            if vi < len(vertices[0]):
                hi = 0
                vertno = vi
            else:
                hi = 1
                vertno = vi - len(vertices[0])
            vertno = src[hi]['vertno'][vertno]
            rr = src[hi]['rr'][vertno]
            d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
            ds.append(d)
        # make sure that our median is sub-mm and the large majority are very
        # close (we expect some to be off by a bit e.g. because they are
        # radial)
        assert_array_less(np.percentile(ds, [50, 90]), [0.0005, perc_90])
@testing.requires_testing_data
def test_dipole_fixed(tmp_path):
    """Test reading a fixed-position dipole (from Xfit)."""
    dip = read_dipole(fname_xfit_dip)
    # print the representation of the object DipoleFixed
    assert 'DipoleFixed ' in repr(dip)
    _check_roundtrip_fixed(dip, tmp_path)
    # The text exports carry fields the reader ignores, hence the warnings.
    with pytest.warns(RuntimeWarning, match='extra fields'):
        dip_txt = read_dipole(fname_xfit_dip_txt)
    # FIF and text exports of the same fit must agree on position/amplitude.
    assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
    assert_allclose(dip_txt.amplitude[0], 12.1e-9)
    with pytest.warns(RuntimeWarning, match='extra fields'):
        dip_txt_seq = read_dipole(fname_xfit_seq_txt)
    assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip, tmp_path):
    """Check roundtrip IO for fixed dipoles.

    Saves ``dip`` to a compressed FIF file, reads it back, and verifies
    that data, times, info fields, and channel metadata survive the
    round trip.
    """
    tempdir = str(tmp_path)
    dip.save(op.join(tempdir, 'test-dip.fif.gz'))
    dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
    # Compare against the original object (the previous code compared
    # ``dip_read.data`` with itself, which always passed).
    assert_allclose(dip_read.data, dip.data)
    assert_allclose(dip_read.times, dip.times, atol=1e-8)
    assert dip_read.info['xplotter_layout'] == dip.info['xplotter_layout']
    assert dip_read.ch_names == dip.ch_names
    for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
        assert ch_1['ch_name'] == ch_2['ch_name']
        for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
                    'cal', 'coil_type', 'scanno', 'logno'):
            assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
    """Test getting phantom dipole locations."""
    # Invalid kinds must raise.
    for bad_kind in (0, 'foo'):
        pytest.raises(ValueError, get_phantom_dipoles, bad_kind)
    # Both supported phantoms provide 32 dipole positions/orientations.
    for kind in ('vectorview', 'otaniemi'):
        pos, ori = get_phantom_dipoles(kind)
        assert pos.shape == ori.shape == (32, 3)
@testing.requires_testing_data
def test_confidence(tmp_path):
    """Test confidence limits."""
    evoked = read_evokeds(fname_evo_full, 'Left Auditory', baseline=(None, 0))
    evoked.crop(0.08, 0.08).pick_types(meg=True)  # MEG-only
    cov = make_ad_hoc_cov(evoked.info)
    sphere = make_sphere_model((0., 0., 0.04), 0.08)
    dip_py = fit_dipole(evoked, cov, sphere)[0]
    fname_test = op.join(str(tmp_path), 'temp-dip.txt')
    dip_py.save(fname_test)
    dip_read = read_dipole(fname_test)
    # The Xfit export carries fields our reader ignores, hence the warning.
    with pytest.warns(RuntimeWarning, match="'noise/ft/cm', 'prob'"):
        dip_xfit = read_dipole(fname_dip_xfit_80)
    # Both the in-memory fit and its text round-trip must match Xfit.
    for dip_check in (dip_py, dip_read):
        assert_allclose(dip_check.pos, dip_xfit.pos, atol=5e-4)  # < 0.5 mm
        assert_allclose(dip_check.gof, dip_xfit.gof, atol=5e-1)  # < 0.5%
        assert_array_equal(dip_check.nfree, dip_xfit.nfree)  # exact match
        assert_allclose(dip_check.khi2, dip_xfit.khi2, rtol=2e-2)  # 2% miss
        assert set(dip_check.conf.keys()) == set(dip_xfit.conf.keys())
        for key in sorted(dip_check.conf.keys()):
            assert_allclose(dip_check.conf[key], dip_xfit.conf[key],
                            rtol=1.5e-1, err_msg=key)
# bdip created with:
# mne_dipole_fit --meas sample_audvis_trunc-ave.fif --set 1 --meg --tmin 40 --tmax 95 --bmin -200 --bmax 0 --noise sample_audvis_trunc-cov.fif --bem ../../subjects/sample/bem/sample-1280-1280-1280-bem-sol.fif --origin 0\:0\:40 --mri sample_audvis_trunc-trans.fif --bdip sample_audvis_trunc_set1.bdip # noqa: E501
# It gives equivalent results to .dip in non-dipole mode.
# xfit bdip created by taking sample_audvis_trunc-ave.fif, picking MEG
# channels, writing to disk (with MNE), then running xfit on 40-95 ms
# with a 3.3 ms step
@testing.requires_testing_data
@pytest.mark.parametrize('fname_dip_, fname_bdip_', [
    (fname_dip, fname_bdip),
    (fname_dip_xfit, fname_bdip_xfit),
])
def test_bdip(fname_dip_, fname_bdip_, tmp_path):
    """Test bdip I/O."""
    # use text as veridical
    # NOTE(review): pytest.warns(None) is deprecated in pytest >= 7; if the
    # suite upgrades pytest, this should become warnings.catch_warnings().
    with pytest.warns(None):  # ignored fields
        dip = read_dipole(fname_dip_)
    # read binary
    orig_size = os.stat(fname_bdip_).st_size
    bdip = read_dipole(fname_bdip_)
    # test round-trip by writing and reading, too
    fname = tmp_path / 'test.bdip'
    bdip.save(fname)
    bdip_read = read_dipole(fname)
    write_size = os.stat(str(fname)).st_size
    assert orig_size == write_size  # written file must match original size
    assert len(dip) == len(bdip) == len(bdip_read) == 17
    # True only for the xfit file pair (it carries confidence info).
    dip_has_conf = fname_dip_ == fname_dip_xfit
    for kind, this_bdip in (('orig', bdip), ('read', bdip_read)):
        for key, atol in (
                ('pos', 5e-5),
                ('ori', 5e-3),
                ('gof', 0.5e-1),
                ('times', 5e-5),
                ('khi2', 1e-2)):
            d = getattr(dip, key)
            b = getattr(this_bdip, key)
            # NOTE(review): as written, only 'khi2' (and only for the xfit
            # pair) is ever compared -- 'pos', 'ori', 'gof' and 'times' are
            # fetched but never asserted. Confirm whether the comparison was
            # meant to run for every key in the tuple above.
            if key == 'khi2' and dip_has_conf:
                if d is not None:
                    assert_allclose(d, b, atol=atol,
                                    err_msg='%s: %s' % (kind, key))
                else:
                    assert b is None
        if dip_has_conf:
            # conf
            conf_keys = _BDIP_ERROR_KEYS + ('vol',)
            assert (set(this_bdip.conf.keys()) ==
                    set(dip.conf.keys()) ==
                    set(conf_keys))
            for key in conf_keys:
                d = dip.conf[key]
                b = this_bdip.conf[key]
                assert_allclose(d, b, rtol=0.12,  # not so great, text I/O
                                err_msg='%s: %s' % (kind, key))
        # Not stored
        assert this_bdip.name is None
        assert this_bdip.nfree is None
        # Test whether indexing works
        this_bdip0 = this_bdip[0]
        _check_dipole(this_bdip0, 1)
| |
#!/usr/bin/python
import argparse
import sys
import os
import time
import traceback
import sys
import ctypes
import subprocess
from subprocess import Popen, PIPE
import os
from optparse import OptionParser
from biokbase.workspace.client import Workspace
import MySQLdb as mdb
# Help-text fragments; assembled into the argparse description/epilog in the
# __main__ block at the bottom of this script.
desc1 = '''
NAME
gwas_GeneList2Networks -- build Networks object from GeneList
SYNOPSIS
gwas_GeneList2Networks -u workspace_url -w workspace_id -i input_object_ID -o output_object_ID -p password
'''
desc2 = '''
DESCRIPTION
To speed up network building, this script skips the Networks API and directly builds a Networks typed object (NTO).
All the data is fetched from the KBase workspace and the constructed network output will be stored back to the workspace.
'''
desc3 = '''
EXAMPLES
build internal networks
SEE ALSO
net-build-internal-networks
AUTHORS
'''
class Node:
    """Accumulates nodes and edges for a KBaseNetworks Network typed object.

    Fixes over the previous version:

    * ``get_node_id`` and ``add_edge`` were each defined twice; the second
      definition silently shadowed the first.  They are merged here via
      optional ``eid`` (external/source id) parameters, so both old call
      shapes keep working.
    * ``clstr2genes`` / ``clst2gene`` typos (which raised AttributeError as
      soon as a CLUSTER node appeared) are fixed to ``clst2genes``.
    * State containers were shared class attributes; they are now
      per-instance so separate Node objects do not leak into each other.
    * Python 2 backtick repr is replaced by ``str()`` (same output for the
      integer ids used here, and valid on Python 3 as well).
    """

    def __init__(self, unodes=None, uedges=None):
        # Per-instance state; mutable defaults are avoided deliberately.
        self.nodes = []        # Network node dicts, in registration order
        self.edges = []        # Network edge dicts, in insertion order
        self.ugids = {}        # entity_id -> node index
        self.igids = {}        # 'kb|netnode.N' -> entity_id
        self.gid2nt = {}       # entity_id -> node type ('GENE', 'CLUSTER', ...)
        self.clst2genes = {}   # cluster entity_id -> {gene entity_id: 1}
        self._register_nodes(unodes if unodes is not None else [])
        self._register_edges(uedges if uedges is not None else [])

    def get_node_id(self, node, eid=None, nt="GENE"):
        """Return the 'kb|netnode.N' id for ``node``, registering it if new.

        ``eid``, when given, is used as the node's display name; otherwise
        the entity id itself is used.
        """
        if node not in self.ugids:
            self.ugids[node] = len(self.ugids)
            nid = 'kb|netnode.' + str(self.ugids[node])
            self.nodes.append({
                'entity_id': node,
                'name': node if eid is None else eid,
                'user_annotations': {},
                'type': nt,
                'id': nid,
                'properties': {}
            })
            self.igids[nid] = node
            self.gid2nt[node] = nt
        return 'kb|netnode.' + str(self.ugids[node])

    def add_edge(self, strength, ds_id, node1, nt1, node2, nt2, confidence,
                 eid1=None, eid2=None):
        """Append an undirected edge, registering both endpoints as nodes."""
        self.edges.append({
            'name': 'interacting gene pair',
            'properties': {},
            'strength': float(strength),
            'dataset_id': ds_id,
            'directed': 'false',
            'user_annotations': {},
            'id': 'kb|netedge.' + str(len(self.edges)),
            'node_id1': self.get_node_id(node1, eid1, nt1),
            'node_id2': self.get_node_id(node2, eid2, nt2),
            'confidence': float(confidence)
        })
        self._track_cluster(node1, nt1, node2, nt2)

    def _track_cluster(self, node1, nt1, node2, nt2):
        """Record cluster membership for CLUSTER<->GENE edges."""
        if nt1 == 'CLUSTER':
            if node1 not in self.clst2genes:
                self.clst2genes[node1] = {}
            if nt2 == 'GENE':
                self.clst2genes[node1][node2] = 1
        elif nt2 == 'CLUSTER':
            if node2 not in self.clst2genes:
                self.clst2genes[node2] = {}
            self.clst2genes[node2][node1] = 1

    def _register_nodes(self, unodes):
        """Seed state from pre-existing node dicts (e.g. a loaded network)."""
        self.nodes = unodes
        self.ugids = {}
        for node in self.nodes:
            idx = node['id'].replace("kb|netnode.", "")
            self.ugids[node['entity_id']] = idx
            self.igids[node['id']] = node['entity_id']
            self.gid2nt[node['entity_id']] = node['type']

    def _register_edges(self, uedges):
        """Seed state from pre-existing edge dicts; rebuild cluster mapping."""
        self.edges = uedges
        for edge in self.edges:
            node1 = self.igids[edge['node_id1']]
            node2 = self.igids[edge['node_id2']]
            self._track_cluster(node1, self.gid2nt[node1],
                                node2, self.gid2nt[node2])

    def get_gene_list(self, cnode):
        """Return the genes recorded for cluster ``cnode`` ([] if unknown)."""
        if cnode in self.clst2genes:
            return self.clst2genes[cnode].keys()
        return []
def gl2networks(args):
    """Build an internal-network NTO from a gene list and store it in the workspace.

    The comma-separated gene list in ``args.inobj_id`` is queried against the
    KBase CDS MySQL database for association edges, the edges are assembled
    into a ``KBaseNetworks.Network`` object via :class:`Node`, and the object
    is saved to workspace ``args.ws_id`` under ``args.outobj_id``.

    Exits the process with status 1 on any MySQL error.
    """
    wsd = Workspace(url=args.ws_url, token=os.environ.get('KB_AUTH_TOKEN'))
    gl = args.inobj_id.split(',')
    gl_str = "'" + "','".join(gl) + "'"
    # NOTE(review): the ids are interpolated into the SQL text; they come from
    # the command line here, but parameterized queries would still be safer.
    sql = "SELECT DISTINCT af1.to_link, af2.to_link, f1.source_id, f2.source_id, af1.strength, ig.from_link FROM IsGroupingOf ig, AssociationFeature af1, AssociationFeature af2, Feature f1, Feature f2 WHERE ig.to_link = af1.from_link and af1.from_link = af2.from_link and (af1.to_link IN ({}) AND af2.to_link IN ({}) ) AND af1.to_link < af2.to_link AND f1.id = af1.to_link AND f2.id = af2.to_link".format(gl_str, gl_str)
    nc = Node()
    datasets = []
    con = None  # defined up front so the finally clause is safe if connect() fails
    try:
        con = mdb.connect(args.db_host, args.db_user, args.db_pass, args.db_name)
        cur = con.cursor()
        cur.execute(sql)
        edge = cur.fetchone()
        dsid = set()
        # Every row is an undirected gene-gene association edge.
        while edge is not None:
            nc.add_edge(edge[4], edge[5], edge[0], 'GENE', edge[1], 'GENE', 0.0, edge[2], edge[3])
            dsid.add(edge[5])
            edge = cur.fetchone()
        # Fetch metadata for each dataset that contributed an edge.
        ds_str = "'" + "','".join(dsid) + "'"
        cur.execute("SELECT id, association_type, data_source, description , df.to_link, sr.from_link FROM AssociationDataset, IsDatasetFor df, IsSourceForAssociationDataset sr WHERE id = df.from_link and id = sr.to_link and id IN({})".format(ds_str))
        ds = cur.fetchone()
        while ds is not None:
            datasets.append({
                'network_type': ds[1],
                'taxons': [ds[4]],
                'source_ref': ds[5],
                'name': ds[0],
                'id': ds[0],
                'description': ds[3],
                'properties': {}
            })
            ds = cur.fetchone()
        # generate Networks object
        net_object = {
            'datasets': datasets,
            'nodes': nc.nodes,
            'edges': nc.edges,
            'user_annotations': {"genes": ",".join(gl)},
            'name': 'GeneList Internal Network',
            'id': args.outobj_id,
            'properties': {
                'graphType': 'edu.uci.ics.jung.graph.SparseMultigraph'
            }
        }
        # Store results object into workspace
        wsd.save_objects({'workspace': args.ws_id, 'objects': [{'type': 'KBaseNetworks.Network', 'data': net_object, 'name': args.outobj_id, 'meta': {'org_gene_list': args.inobj_id}}]})
    except mdb.Error as e:
        # Python 2/3 compatible exception syntax (was `except mdb.Error, e`).
        print("Error %d: %s" % (e.args[0], e.args[1]))
        sys.exit(1)
    finally:
        if con:
            con.close()
if __name__ == "__main__":
    # Parse options.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='gwas_GeneList2Networks', epilog=desc3)
    parser.add_argument('-u', '--ws_url', help='Workspace url', action='store', dest='ws_url', default='https://kbase.us/services/ws')
    parser.add_argument('-w', '--ws_id', help='Workspace id', action='store', dest='ws_id', default=None, required=True)
    parser.add_argument('-i', '--in_ids', help='input gene list(comma separated)', action='store', dest='inobj_id', default=None, required=True)
    parser.add_argument('-o', '--out_id', help='Output Network object id', action='store', dest='outobj_id', default=None, required=True)
    parser.add_argument('-d', '--db_host', help='DB Host', action='store', dest='db_host', default='db4.chicago.kbase.us', required=False)
    parser.add_argument('-s', '--db_user', help='DB User', action='store', dest='db_user', default='kbase_sapselect', required=False)
    # NOTE(review): '-p' help text says 'DB User' but it stores the DB
    # password (dest='db_pass') -- confirm and fix the help string.
    parser.add_argument('-p', '--db_password', help='DB User', action='store', dest='db_pass', default=None, required=True)
    parser.add_argument('-n', '--db_name', help='DB Name', action='store', dest='db_name', default='kbase_sapling_v4', required=False)
    # Splice the auto-generated usage line between the NAME and DESCRIPTION
    # help fragments, then suppress argparse's own usage output.
    usage = parser.format_usage()
    parser.description = desc1 + ' ' + usage + desc2
    parser.usage = argparse.SUPPRESS
    args = parser.parse_args()
    # main loop
    gl2networks(args)
    exit(0);
| |
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
extract_attributes,
ExtractorError,
strip_or_none,
float_or_none,
int_or_none,
merge_dicts,
str_or_none,
url_or_none,
)
class CanvasIE(InfoExtractor):
    # Extractor for raw VRT "mediazone" asset URLs. Also the final target of
    # the url_transparent results produced by CanvasEenIE and VrtNUIE below.
    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
        'md5': '68993eda72ef62386a15ea2cf3c93107',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'mp4',
            'title': 'Nachtwacht: De Greystook',
            'description': 'Nachtwacht: De Greystook',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.04,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
        'only_matching': True,
    }]
    # Geo restriction is reported explicitly via raise_geo_restricted() in
    # _real_extract, so the framework's automatic bypass is disabled.
    _GEO_BYPASS = False
    # AES-encrypted HLS must go through the generic m3u8 downloader; plain
    # HLS can use the native one.
    _HLS_ENTRY_PROTOCOLS_MAP = {
        'HLS': 'm3u8_native',
        'HLS_AES': 'm3u8',
    }
    _REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'
    def _real_extract(self, url):
        # Fetch asset metadata -- preferring the legacy mediazone endpoint and
        # falling back to the token-based REST API -- then build the format
        # list from the asset's targetUrls.
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site_id'), mobj.group('id')
        data = None
        if site_id != 'vrtvideo':
            # Old API endpoint, serves more formats but may fail for some videos
            data = self._download_json(
                'https://mediazone.vrt.be/api/v1/%s/assets/%s'
                % (site_id, video_id), video_id, 'Downloading asset JSON',
                'Unable to download asset JSON', fatal=False)
        # New API endpoint
        if not data:
            headers = self.geo_verification_headers()
            headers.update({'Content-Type': 'application/json'})
            # An empty POST to /tokens yields the player token required by the
            # /videos endpoint below.
            token = self._download_json(
                '%s/tokens' % self._REST_API_BASE, video_id,
                'Downloading token', data=b'', headers=headers)['vrtPlayerToken']
            # expected_status=400: error payloads (auth/geo) are still JSON
            # and are inspected below instead of raising on the HTTP status.
            data = self._download_json(
                '%s/videos/%s' % (self._REST_API_BASE, video_id),
                video_id, 'Downloading video JSON', query={
                    'vrtPlayerToken': token,
                    'client': '%s@PROD' % site_id,
                }, expected_status=400)
        if not data.get('title'):
            # A missing title means the API returned an error object; map the
            # known error codes onto the matching extractor errors.
            code = data.get('code')
            if code == 'AUTHENTICATION_REQUIRED':
                self.raise_login_required()
            elif code == 'INVALID_LOCATION':
                self.raise_geo_restricted(countries=['BE'])
            raise ExtractorError(data.get('message') or code, expected=True)
        title = data['title']
        description = data.get('description')
        formats = []
        for target in data['targetUrls']:
            format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
            if not format_url or not format_type:
                continue
            format_type = format_type.upper()
            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                    m3u8_id=format_type, fatal=False))
            elif format_type == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_type, fatal=False))
            elif format_type == 'MPEG_DASH':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id=format_type, fatal=False))
            elif format_type == 'HSS':
                formats.extend(self._extract_ism_formats(
                    format_url, video_id, ism_id='mss', fatal=False))
            else:
                # Unknown streaming type: expose the raw URL as-is.
                formats.append({
                    'format_id': format_type,
                    'url': format_url,
                })
        self._sort_formats(formats)
        subtitles = {}
        subtitle_urls = data.get('subtitleUrls')
        if isinstance(subtitle_urls, list):
            for subtitle in subtitle_urls:
                subtitle_url = subtitle.get('url')
                # Only 'CLOSED' tracks are collected and they are labelled as
                # Dutch -- presumably these are Dutch closed captions; TODO
                # confirm against the API response.
                if subtitle_url and subtitle.get('type') == 'CLOSED':
                    subtitles.setdefault('nl', []).append({'url': subtitle_url})
        return {
            'id': video_id,
            'display_id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            # duration appears to be reported in milliseconds; the scale
            # argument converts it to seconds -- TODO confirm with the API.
            'duration': float_or_none(data.get('duration'), 1000),
            'thumbnail': data.get('posterImageUrl'),
            'subtitles': subtitles,
        }
class CanvasEenIE(InfoExtractor):
    # Resolves canvas.be / een.be pages to their mediazone asset id and
    # delegates the actual extraction to CanvasIE via a url_transparent result.
    IE_DESC = 'canvas.be and een.be'
    _VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
        'md5': 'ed66976748d12350b118455979cca293',
        'info_dict': {
            'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
            'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
            'ext': 'flv',
            'title': 'De afspraak veilt voor de Warmste Week',
            'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 49.02,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # with subtitles
        'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
        'info_dict': {
            'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
            'display_id': 'pieter-0167',
            'ext': 'mp4',
            'title': 'Pieter 0167',
            'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2553.08,
            'subtitles': {
                'nl': [{
                    'ext': 'vtt',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Pagina niet gevonden',
    }, {
        'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
        'info_dict': {
            'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
            'display_id': 'emma-pakt-thilly-aan',
            'ext': 'mp4',
            'title': 'Emma pakt Thilly aan',
            'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 118.24,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # Scrape the video id from the page and hand over to CanvasIE.
        mobj = re.match(self._VALID_URL, url)
        site_id, display_id = mobj.group('site_id'), mobj.group('id')
        webpage = self._download_webpage(url, display_id)
        # Prefer the on-page header title; fall back to Open Graph metadata.
        title = strip_or_none(self._search_regex(
            r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None))
        # data-video attribute holds the mediazone asset id; the backreference
        # \1 matches whichever quote style the page used.
        video_id = self._html_search_regex(
            r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
            group='id')
        return {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
        }
class VrtNUIE(GigyaBaseIE):
    # Extractor for vrt.be/vrtnu pages; performs a Gigya-based login (when
    # credentials are configured) and then delegates extraction to CanvasIE.
    IE_DESC = 'VrtNU.be'
    _VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
    _TESTS = [{
        # Available via old API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1989/postbus-x-s1989a1/',
        'info_dict': {
            'id': 'pbs-pub-e8713dac-899e-41de-9313-81269f4c04ac$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
            'ext': 'mp4',
            'title': 'Postbus X - Aflevering 1 (Seizoen 1989)',
            'description': 'md5:b704f669eb9262da4c55b33d7c6ed4b7',
            'duration': 1457.04,
            'thumbnail': r're:^https?://.*\.jpg$',
            'series': 'Postbus X',
            'season': 'Seizoen 1989',
            'season_number': 1989,
            'episode': 'De zwarte weduwe',
            'episode_number': 1,
            'timestamp': 1595822400,
            'upload_date': '20200727',
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # Only available via new API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
        'info_dict': {
            'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
            'ext': 'mp4',
            'title': 'Aflevering 5',
            'description': 'Wie valt door de mand tijdens een missie?',
            'duration': 2967.06,
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 5,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
    }]
    _NETRC_MACHINE = 'vrtnu'
    # API key and context id passed to the Gigya (SSO provider) login call.
    _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
    _CONTEXT_ID = 'R3595707040'
    def _real_initialize(self):
        self._login()
    def _login(self):
        # Authenticate via Gigya, then hit token.vrt.be so that the session
        # cookies get set. No-op when no credentials are configured.
        username, password = self._get_login_info()
        if username is None:
            return
        auth_data = {
            'APIKey': self._APIKEY,
            'targetEnv': 'jssdk',
            'loginID': username,
            'password': password,
            'authMode': 'cookie',
        }
        auth_info = self._gigya_login(auth_data)
        # Sometimes authentication fails for no good reason, retry
        login_attempt = 1
        while login_attempt <= 3:
            try:
                # When requesting a token, no actual token is returned, but the
                # necessary cookies are set.
                self._request_webpage(
                    'https://token.vrt.be',
                    None, note='Requesting a token', errnote='Could not get a token',
                    headers={
                        'Content-Type': 'application/json',
                        'Referer': 'https://www.vrt.be/vrtnu/',
                    },
                    data=json.dumps({
                        'uid': auth_info['UID'],
                        'uidsig': auth_info['UIDSignature'],
                        'ts': auth_info['signatureTimestamp'],
                        'email': auth_info['profile']['email'],
                    }).encode('utf-8'))
            except ExtractorError as e:
                # Only retry on HTTP 401; any other failure is fatal.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    login_attempt += 1
                    self.report_warning('Authentication failed')
                    self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
                else:
                    raise e
            else:
                break
        # NOTE(review): after three consecutive 401s the loop exits without
        # raising, so extraction proceeds unauthenticated -- confirm intended.
    def _real_extract(self, url):
        # Pull the asset id from the <nui-media> element and delegate to
        # CanvasIE through the new vrtvideo endpoint.
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        attrs = extract_attributes(self._search_regex(
            r'(<nui-media[^>]+>)', webpage, 'media element'))
        video_id = attrs['videoid']
        publication_id = attrs.get('publicationid')
        if publication_id:
            # Combined id form: <publication>$<video>.
            video_id = publication_id + '$' + video_id
        # Optional digitalData blob supplies the season number.
        page = (self._parse_json(self._search_regex(
            r'digitalData\s*=\s*({.+?});', webpage, 'digial data',
            default='{}'), video_id, fatal=False) or {}).get('page') or {}
        info = self._search_json_ld(webpage, display_id, default={})
        return merge_dicts(info, {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'season_number': int_or_none(page.get('episode_season')),
        })
| |
from a10sdk.common.A10BaseClass import A10BaseClass
class ReceiveCfg(A10BaseClass):
    """RIP advertisement-reception settings.

    Helper object; CRUD operations are performed through the parent class.

    :param receive: Advertisement reception flag (number, default 0).
    :param version: RIP version(s) to accept: '1', '2' or '1-2'.
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "receive-cfg"
        self.DeviceProxy = ""
        self.receive = ""
        self.version = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class SplitHorizonCfg(A10BaseClass):
    """RIP split-horizon settings.

    Helper object; CRUD operations are performed through the parent class.

    :param state: One of 'poisoned' (split horizon with poisoned reverse,
        the default), 'disable', or 'enable' (without poisoned reverse).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "split-horizon-cfg"
        self.DeviceProxy = ""
        self.state = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class KeyChain(A10BaseClass):
    """RIP authentication key-chain reference.

    Helper object; CRUD operations are performed through the parent class.

    :param key_chain: Name of the authentication key-chain (string).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "key-chain"
        self.DeviceProxy = ""
        self.key_chain = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class Mode(A10BaseClass):
    """RIP authentication mode.

    Helper object; CRUD operations are performed through the parent class.

    :param mode: 'md5' (keyed message digest) or 'text' (clear-text
        authentication, the default).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "mode"
        self.DeviceProxy = ""
        self.mode = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class Str(A10BaseClass):
    """RIP authentication string.

    Helper object; CRUD operations are performed through the parent class.

    :param string: The RIP authentication string (1-16 characters).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "str"
        self.DeviceProxy = ""
        self.string = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class Authentication(A10BaseClass):
    """RIP authentication container (key-chain, mode and string).

    Helper object; CRUD operations are performed through the parent class.

    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "authentication"
        self.DeviceProxy = ""
        self.key_chain = {}
        self.mode = {}
        # "A10WW_" prefix avoids clashing with the builtin name `str`.
        self.A10WW_str = {}
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class SendCfg(A10BaseClass):
    """RIP advertisement-transmission settings.

    Helper object; CRUD operations are performed through the parent class.

    :param version: RIP version(s) to send: '1', '2', '1-compatible'
        (RIPv1-compatible) or '1-2'.
    :param send: Advertisement transmission flag (number, default 0).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "send-cfg"
        self.DeviceProxy = ""
        self.version = ""
        self.send = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
class Rip(A10BaseClass):
    """RIP configuration for a virtual-ethernet interface.

    This is the `"PARENT"` class for this module; it supports CRUD
    operations and inherits from `common/A10BaseClass`.

    :param uuid: Object uuid (string, 1-64 chars; not modifiable).
    :param receive_packet: Enable receiving packets through the specified
        interface (flag, default 1).
    :param send_packet: Enable sending packets through the specified
        interface (flag, default 1).
    :param DeviceProxy: Device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/interface/ve/{ifnum}/ip/rip`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "rip"
        self.a10_url = "/axapi/v3/interface/ve/{ifnum}/ip/rip"
        self.DeviceProxy = ""
        # Nested config sub-objects default to empty dicts.
        self.receive_cfg = {}
        self.uuid = ""
        self.receive_packet = ""
        self.split_horizon_cfg = {}
        self.authentication = {}
        self.send_cfg = {}
        self.send_packet = ""
        # Apply caller-supplied attribute overrides.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
| |
#!/usr/bin/env python
"""Collection filters.
Example usage:
Filter('time', low, high)
.bounds(ring)
.eq('time', value)
.lt('time', value)
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# Our custom instance/static decorator is not recognized by lint.
# pylint: disable=no-self-argument, no-method-argument, g-doc-args
import functools
import apifunction
import computedobject
import deprecation
import ee_exception
class _FilterAutoCreator(object):
"""A decorator to make Filter methods both static and instance.
If the decorated method is called as an instance method, its result is passed
through _append().
"""
def __init__(self, func):
self.func = func
def __get__(self, filter_instance, cls=None):
if filter_instance is None:
return self.func
deprecated_decorator = deprecation.Deprecated(
'Use the static version of this method.')
deprecated_func = deprecated_decorator(self.func)
@functools.wraps(deprecated_func)
def PassThroughAppend(*args, **kwargs):
return filter_instance._append( # pylint: disable=protected-access
deprecated_func(*args, **kwargs))
return PassThroughAppend
# A map from the deprecated old-style comparison operator names to API
# function names, implicitly prefixed with "Filter.". Negative operators
# (those starting with "not_") are not included: metadata_() strips the
# "not_" prefix and negates the resulting filter instead.
_FUNCTION_NAMES = {
    'equals': 'equals',
    'less_than': 'lessThan',
    'greater_than': 'greaterThan',
    'contains': 'stringContains',
    'starts_with': 'stringStartsWith',
    'ends_with': 'stringEndsWith',
}
class Filter(computedobject.ComputedObject):
  """An object to represent collection filters."""

  _initialized = False

  def __init__(self, opt_filter=None):
    """Construct a filter.

    This constructor accepts the following args:
      1) Another filter.
      2) An array of filters (which are implicitly ANDed together).
      3) A ComputedObject returning a filter. Users shouldn't be making these;
         they're produced by the generator functions below.

    Args:
      opt_filter: Optional filter to add.
    """
    self.initialize()
    if isinstance(opt_filter, (list, tuple)):
      if not opt_filter:
        raise ee_exception.EEException('Empty list specified for ee.Filter().')
      elif len(opt_filter) == 1:
        # A single-element list unwraps and falls through to the
        # ComputedObject/None handling below.
        opt_filter = opt_filter[0]
      else:
        # Multiple filters are combined with a server-side AND.
        self._filter = tuple(opt_filter)
        super(Filter, self).__init__(
            apifunction.ApiFunction.lookup('Filter.and'),
            {'filters': self._filter})
        return
    if isinstance(opt_filter, computedobject.ComputedObject):
      super(Filter, self).__init__(
          opt_filter.func, opt_filter.args, opt_filter.varName)
      self._filter = (opt_filter,)
    elif opt_filter is None:
      # A silly call with no arguments left for backward-compatibility.
      # Encoding such a filter is expected to fail, but it can be composed
      # by calling the various methods that end up in _append().
      super(Filter, self).__init__(None, None)
      self._filter = ()
    else:
      raise ee_exception.EEException(
          'Invalid argument specified for ee.Filter(): %s' % opt_filter)

  @classmethod
  def initialize(cls):
    """Imports API functions to this class."""
    if not cls._initialized:
      apifunction.ApiFunction.importApi(cls, 'Filter', 'Filter')
      cls._initialized = True

  @classmethod
  def reset(cls):
    """Removes imported API functions from this class."""
    apifunction.ApiFunction.clearApi(cls)
    cls._initialized = False

  def predicateCount(self):
    """Return the number of predicates that have been added to this filter.

    Returns:
      The number of predicates that have been added to this filter.
      This does not count nested predicates.
    """
    return len(self._filter)

  def _append(self, new_filter):
    """Append a predicate to this filter.

    These are implicitly ANDed.

    Args:
      new_filter: The filter to append to this one. Possible types are:
          1) another fully constructed Filter,
          2) a JSON representation of a filter,
          3) an array of 1 or 2.

    Returns:
      A new filter that is the combination of both.
    """
    if new_filter is not None:
      prev = list(self._filter)
      if isinstance(new_filter, Filter):
        prev.extend(new_filter._filter)  # pylint: disable=protected-access
      elif isinstance(new_filter, list):
        prev.extend(new_filter)
      else:
        prev.append(new_filter)
    return Filter(prev)

  def Not(self):
    """Returns the opposite of this filter.

    Returns:
      The negated filter, which will match iff this filter doesn't.
    """
    return apifunction.ApiFunction.call_('Filter.not', self)

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.eq(), ee.Filter.gte(), etc.')
  def metadata_(name, operator, value):
    """Filter on metadata. This is deprecated.

    Args:
      name: The property name to filter on.
      operator: The type of comparison. One of:
          "equals", "less_than", "greater_than", "contains", "starts_with",
          "ends_with", or any of these prefixed with "not_".
      value: The value to compare against.

    Returns:
      The new filter.
    """
    operator = operator.lower()

    # Check for negated filters.
    negated = False
    if operator.startswith('not_'):
      negated = True
      operator = operator[4:]

    # Convert the operator to a function.
    if operator not in _FUNCTION_NAMES:
      raise ee_exception.EEException(
          'Unknown filtering operator: %s' % operator)
    func_name = 'Filter.' + _FUNCTION_NAMES[operator]
    new_filter = apifunction.ApiFunction.call_(func_name, name, value)

    return new_filter.Not() if negated else new_filter

  @_FilterAutoCreator
  def eq(name, value):
    """Filter to metadata equal to the given value."""
    return apifunction.ApiFunction.call_('Filter.equals', name, value)

  @_FilterAutoCreator
  def neq(name, value):
    """Filter to metadata not equal to the given value."""
    return Filter.eq(name, value).Not()

  @_FilterAutoCreator
  def lt(name, value):
    """Filter to metadata less than the given value."""
    return apifunction.ApiFunction.call_('Filter.lessThan', name, value)

  @_FilterAutoCreator
  def gte(name, value):
    """Filter on metadata greater than or equal to the given value."""
    # gte is the negation of lt.
    return Filter.lt(name, value).Not()

  @_FilterAutoCreator
  def gt(name, value):
    """Filter on metadata greater than the given value."""
    return apifunction.ApiFunction.call_('Filter.greaterThan', name, value)

  @_FilterAutoCreator
  def lte(name, value):
    """Filter on metadata less than or equal to the given value."""
    # lte is the negation of gt.
    return Filter.gt(name, value).Not()

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringContains().')
  def contains(name, value):
    """Filter on metadata containing the given string."""
    return apifunction.ApiFunction.call_('Filter.stringContains', name, value)

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringContains(...).Not().')
  def not_contains(name, value):
    """Filter on metadata not containing the given string."""
    return Filter.contains(name, value).Not()

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringStartsWith().')
  def starts_with(name, value):
    """Filter on metadata beginning with the given string."""
    return apifunction.ApiFunction.call_('Filter.stringStartsWith', name, value)

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringStartsWith().Not().')
  def not_starts_with(name, value):
    """Filter on metadata not beginning with the given string."""
    return Filter.starts_with(name, value).Not()

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringEndsWith().')
  def ends_with(name, value):
    """Filter on metadata ending with the given string."""
    return apifunction.ApiFunction.call_('Filter.stringEndsWith', name, value)

  @_FilterAutoCreator
  @deprecation.Deprecated('Use ee.Filter.stringEndsWith().Not().')
  def not_ends_with(name, value):
    """Filter on metadata not ending with the given string."""
    return Filter.ends_with(name, value).Not()

  @_FilterAutoCreator
  def And(*args):
    """Combine two or more filters using boolean AND."""
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
      args = args[0]
    return apifunction.ApiFunction.call_('Filter.and', args)

  # NOTE(review): unlike And(), Or() is a plain staticmethod, so calling it
  # on a Filter instance does not append to that filter -- confirm this
  # asymmetry is intended before changing it.
  @staticmethod
  def Or(*args):
    """Combine two or more filters using boolean OR."""
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
      args = args[0]
    return apifunction.ApiFunction.call_('Filter.or', args)

  @_FilterAutoCreator
  def date(start, opt_end=None):
    """Filter images by date.

    The start and end may be a Date, numbers (interpreted as milliseconds since
    1970-01-01T00:00:00Z), or strings (such as '1996-01-01T08:00').

    Args:
      start: The inclusive start date.
      opt_end: The optional exclusive end date, If not specified, a
          1-millisecond range starting at 'start' is created.

    Returns:
      The modified filter.
    """
    date_range = apifunction.ApiFunction.call_('DateRange', start, opt_end)
    return apifunction.ApiFunction.apply_('Filter.dateRangeContains', {
        'leftValue': date_range,
        'rightField': 'system:time_start'
    })

  @_FilterAutoCreator
  def inList(opt_leftField=None,
             opt_rightValue=None,
             opt_rightField=None,
             opt_leftValue=None):
    """Filter on metadata contained in a list.

    Args:
      opt_leftField: A selector for the left operand.
          Should not be specified if leftValue is specified.
      opt_rightValue: The value of the right operand.
          Should not be specified if rightField is specified.
      opt_rightField: A selector for the right operand.
          Should not be specified if rightValue is specified.
      opt_leftValue: The value of the left operand.
          Should not be specified if leftField is specified.

    Returns:
      The constructed filter.
    """
    # Implement this in terms of listContains, with the arguments switched.
    # In listContains the list is on the left side, while in inList it's on
    # the right.
    return apifunction.ApiFunction.apply_('Filter.listContains', {
        'leftField': opt_rightField,
        'rightValue': opt_leftValue,
        'rightField': opt_leftField,
        'leftValue': opt_rightValue
    })

  @_FilterAutoCreator
  def geometry(geometry, opt_errorMargin=None):
    """Filter on bounds.

    Items in the collection with a footprint that fails to intersect
    the bounds will be excluded when the collection is evaluated.

    Args:
      geometry: The geometry to filter to either as a GeoJSON geometry,
          or a FeatureCollection, from which a geometry will be extracted.
      opt_errorMargin: An optional error margin. If a number, interpreted as
          sphere surface meters.

    Returns:
      The modified filter.
    """
    # Invoke geometry promotion then manually promote to a Feature.
    args = {
        'leftField': '.all',
        'rightValue': apifunction.ApiFunction.call_('Feature', geometry)
    }
    if opt_errorMargin is not None:
      args['maxError'] = opt_errorMargin
    return apifunction.ApiFunction.apply_('Filter.intersects', args)

  @staticmethod
  def name():
    return 'Filter'
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer as keras_base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
# updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@test_util.run_in_graph_and_eager_modes
def testKerasStyleAddWeight(self):
keras_layer = keras_base_layer.Layer(name='keras_layer')
with ops.name_scope('foo', skip_on_eager=False):
keras_variable = keras_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(keras_variable.name, 'foo/my_var:0')
with ops.name_scope('baz', skip_on_eager=False):
old_style_layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = old_style_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
with base_layers.keras_style_scope():
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
with ops.name_scope('bar', skip_on_eager=False):
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'bar/my_var:0')
@test_util.run_in_graph_and_eager_modes
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
_ = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
added_variable = [False]
# Test that sync `ON_READ` variables are defaulted to be non-trainable.
variable_3 = layer.add_variable(
'sync_on_read_var', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3])
@def_function.function
def function_adds_weight():
if not added_variable[0]:
layer.add_variable(
'reg_var_from_function', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
added_variable[0] = True
function_adds_weight()
self.assertEqual(len(layer.losses), 2)
def testInvalidTrainableSynchronizationCombination(self):
layer = base_layers.Layer(name='my_layer')
with self.assertRaisesRegexp(
ValueError, 'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.'):
_ = layer.add_variable(
'v', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
@test_util.run_deprecated_v1
def testReusePartitionedVaraiblesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
_ = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
@test_util.run_in_graph_and_eager_modes
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@test_util.run_in_graph_and_eager_modes
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
  @test_util.run_in_graph_and_eager_modes
  def testScopeNaming(self):
    """Layer scope names are uniquified in creation order.

    Un-named layers get the snake_cased class name with an auto-incremented
    suffix; explicit names get numeric suffixes when reused.  The expected
    suffixes below depend on the exact creation order of the layers.
    """

    class PrivateLayer(base_layers.Layer):

      def call(self, inputs):
        return inputs

    inputs = random_ops.random_uniform((5,))
    default_layer = PrivateLayer()
    _ = default_layer.apply(inputs)
    self.assertEqual(default_layer._scope.name, 'private_layer')
    default_layer1 = PrivateLayer()
    default_layer1.apply(inputs)
    self.assertEqual(default_layer1._scope.name, 'private_layer_1')
    my_layer = PrivateLayer(name='my_layer')
    my_layer.apply(inputs)
    self.assertEqual(my_layer._scope.name, 'my_layer')
    my_layer1 = PrivateLayer(name='my_layer')
    my_layer1.apply(inputs)
    self.assertEqual(my_layer1._scope.name, 'my_layer_1')
    my_layer2 = PrivateLayer(name='my_layer')
    my_layer2.apply(inputs)
    self.assertEqual(my_layer2._scope.name, 'my_layer_2')
    # Name scope shouldn't affect names.
    with ops.name_scope('some_name_scope'):
      default_layer2 = PrivateLayer()
      default_layer2.apply(inputs)
      self.assertEqual(default_layer2._scope.name, 'private_layer_2')
      my_layer3 = PrivateLayer(name='my_layer')
      my_layer3.apply(inputs)
      self.assertEqual(my_layer3._scope.name, 'my_layer_3')
      other_layer = PrivateLayer(name='other_layer')
      other_layer.apply(inputs)
      self.assertEqual(other_layer._scope.name, 'other_layer')
    # Variable scope gets added to scope names.
    with variable_scope.variable_scope('var_scope'):
      default_layer_scoped = PrivateLayer()
      default_layer_scoped.apply(inputs)
      self.assertEqual(default_layer_scoped._scope.name,
                       'var_scope/private_layer')
      my_layer_scoped = PrivateLayer(name='my_layer')
      my_layer_scoped.apply(inputs)
      self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
      my_layer_scoped1 = PrivateLayer(name='my_layer')
      my_layer_scoped1.apply(inputs)
      self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if not context.executing_eagerly():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.executing_eagerly():
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
else:
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
@test_util.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = math_ops.reduce_sum
layer = base_layers.Layer(activity_regularizer=regularizer)
x = array_ops.placeholder('int32')
layer.apply(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
  def testNameScopeIsConsistentWithVariableScope(self):
    """Op name scopes must match the variable scopes of the same layer.

    Regression test: whether default-named or explicitly named (with or
    without a numeric suffix already present), each layer's op name prefix
    must equal the scope of its variable.  The expected suffixes depend on
    the exact creation order of the layers.
    """
    # Github issue 13429.

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.my_var = self.add_variable('my_var', (), dtypes.float32)
        self.built = True

      def call(self, inputs):
        return math_ops.multiply(inputs, self.my_var, name='my_op')

    def _gen_layer(x, name=None):
      # Helper: build a layer over x and return it with its output op.
      layer = MyLayer(name=name)
      out = layer.apply(x)
      return layer, out

    # unnamed layer
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x)
      layer1, op1 = _gen_layer(op)
      layer2, op2 = _gen_layer(op1)
      self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
      self.assertEqual(op.name, 'my_layer/my_op:0')
      self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
      self.assertEqual(op1.name, 'my_layer_1/my_op:0')
      self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
      self.assertEqual(op2.name, 'my_layer_2/my_op:0')
    # name starts from zero
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x, name='name')
      layer1, op1 = _gen_layer(op, name='name_1')
      layer2, op2 = _gen_layer(op1, name='name_2')
      self.assertEqual(layer.my_var.name, 'name/my_var:0')
      self.assertEqual(op.name, 'name/my_op:0')
      self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
      self.assertEqual(op1.name, 'name_1/my_op:0')
      self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
      self.assertEqual(op2.name, 'name_2/my_op:0')
    # name starts from one
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x, name='name_1')
      layer1, op1 = _gen_layer(op, name='name_2')
      layer2, op2 = _gen_layer(op1, name='name_3')
      self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
      self.assertEqual(op.name, 'name_1/my_op:0')
      self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
      self.assertEqual(op1.name, 'name_2/my_op:0')
      self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
      self.assertEqual(op2.name, 'name_3/my_op:0')
  def testVariablesAreLiftedFromFunctionBuildingGraphs(self):
    """Variables built inside a function-building graph live in the outer graph."""

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.my_var = self.add_variable('my_var', (), dtypes.float32)
        self.built = True

      def call(self, inputs):
        return inputs

    outer_graph = ops.get_default_graph()
    # Simulate a function-building graph by setting the private flag.
    function_building_graph = ops.Graph()
    function_building_graph._building_function = True
    with outer_graph.as_default():
      with function_building_graph.as_default():
        layer = MyLayer()
        # Create a variable by invoking build through __call__ and assert that
        # it is both tracked and lifted into the outer graph.
        inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
        layer.apply(inputs)
        self.assertEqual(len(layer.variables), 1)
        self.assertEqual(len(layer.trainable_variables), 1)
        self.assertEqual(layer.variables[0].graph, outer_graph)
  @test_util.run_deprecated_v1
  def testGetUpdateFor(self):
    """get_updates_for() filters updates by the inputs they depend on.

    Updates added in build() (no ``inputs``) are unconditional; updates
    added in call() with ``inputs=True`` are tied to that call's inputs.
    The expected counts depend on the exact sequence of apply() calls.
    """

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        # Unconditional update: not associated with any input.
        self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
        self.built = True

      def call(self, inputs):
        # Conditional update: associated with this call's inputs.
        self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
                        inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)
    # Call same layer on new input, creating one more conditional update
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.updates), 3)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    # Check that we are successfully filtering out irrelevant updates
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)
  @test_util.run_deprecated_v1
  def testGetLossesFor(self):
    """get_losses_for() filters losses by the inputs they depend on.

    Losses added in build() are unconditional; losses added in call() with
    ``inputs=True`` are tied to that call's inputs.  The expected counts
    depend on the exact sequence of apply() calls.
    """

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        # Unconditional loss: not associated with any input.
        self.add_loss(self.a)
        self.built = True

      def call(self, inputs):
        # Conditional loss: associated with this call's inputs.
        self.add_loss(inputs, inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)
    # Call same layer on new input, creating one more conditional loss
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.losses), 3)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    # Check that we are successfully filtering out irrelevant losses
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)
class IdentityLayer(base_layers.Layer):
  """A layer that returns the identity of its input."""

  def call(self, inputs):
    return inputs
@test_util.run_all_in_graph_and_eager_modes
class DTypeTest(test.TestCase):
  """Tests for layer dtype inference and the ``dtype`` constructor argument.

  Several cases depend on the order of calls: the dtype is inferred from the
  first input and must not change on subsequent calls.
  """

  def _const(self, dtype):
    # Small helper: a scalar constant of the requested dtype.
    return array_ops.constant(1, dtype=dtype)

  def test_dtype_inferred_from_input(self):
    # Test with Tensor input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')
    # Test with Numpy input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(np.array(1., dtype='float64'))
    self.assertEqual(layer.dtype, 'float64')
    # Test with integer input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('int32'))
    self.assertEqual(layer.dtype, 'int32')
    # Test layer dtype doesn't change when passed a new dtype
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')
    layer(self._const('float16'))
    self.assertEqual(layer.dtype, 'float64')
    # Test layer dtype inferred from first input
    layer = IdentityLayer()
    layer([self._const('float32'), self._const('float64')])
    self.assertEqual(layer.dtype, 'float32')

  def test_passing_dtype_to_constructor(self):
    # An explicitly passed dtype takes precedence over the input dtype.
    layer = IdentityLayer(dtype='float64')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')
    layer = IdentityLayer(dtype='int32')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'int32')
    # DType objects are accepted as well as strings.
    layer = IdentityLayer(dtype=dtypes.float64)
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

  def test_inputs_not_casted(self):
    # The layer dtype does not cast inputs; the output keeps its own dtype.
    layer = IdentityLayer(dtype='float32')
    self.assertEqual(layer(self._const('float64')).dtype, 'float64')
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as PathEffects
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.colors import LogNorm, Normalize, PowerNorm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.lines import Line2D
import matplotlib.mlab as mlab
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import numpy as np
from scipy.stats import norm
from scipy.stats import chi2
from scipy import interpolate
from gammapy.maps import Map, WcsNDMap, HpxNDMap, MapCoord
import fermipy
import fermipy.config
import fermipy.utils as utils
import fermipy.wcs_utils as wcs_utils
import fermipy.hpx_utils as hpx_utils
import fermipy.defaults as defaults
import fermipy.catalog as catalog
from fermipy.utils import merge_dict
from fermipy.logger import Logger
from fermipy.logger import log_level
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
    """Extract the ``[minval, maxval]`` sub-range of a colormap.

    Parameters
    ----------
    cmap : `~matplotlib.colors.Colormap`
        Input colormap.
    minval : float or None
        Lower bound of the retained range (``None`` maps to 0.0).
    maxval : float or None
        Upper bound of the retained range (``None`` maps to 1.0).
    n : int
        Number of color samples in the output colormap.

    Returns
    -------
    `~matplotlib.colors.LinearSegmentedColormap`
        Colormap restricted to the requested range.
    """
    if minval is None:
        minval = 0.0
    if maxval is None:
        # Fixed: the fallback was 0.0, which made np.linspace(minval, 0.0, n)
        # sample a reversed (or empty) range; an unspecified upper bound
        # should keep the colormap's full upper end.
        maxval = 1.0
    name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
    return LinearSegmentedColormap.from_list(
        name, cmap(np.linspace(minval, maxval, n)))
def get_xerr(sed):
    """Return asymmetric energy-bin half-widths for *sed* as a (2, N) array.

    Row 0 holds the distances from bin center to lower edge, row 1 the
    distances from bin center to upper edge.
    """
    lower = sed['e_ctr'] - sed['e_min']
    upper = sed['e_max'] - sed['e_ctr']
    return np.vstack((lower, upper))
def make_counts_spectrum_plot(o, roi, energies, imfile, **kwargs):
    """Plot data vs. model counts spectra and fractional residuals to *imfile*.

    Parameters
    ----------
    o : dict
        Output dictionary providing 'counts', 'model_counts' (and their
        '_wt'-suffixed weighted variants).
    roi : ROIModel-like
        Container whose ``sources`` provide per-source model counts and npred.
    energies : array
        Bin edges in log10(E/MeV).
    imfile : str
        Output image filename.
    weighted : bool, optional (keyword)
        Use the '_wt' weighted columns instead of the unweighted ones.
    """
    figsize = kwargs.get('figsize', (8.0, 6.0))
    weighted = kwargs.get('weighted', False)
    fig = plt.figure(figsize=figsize)
    # Top panel: counts spectrum; bottom panel: fractional residuals.
    gs = gridspec.GridSpec(2, 1, height_ratios=[1.4, 1])
    ax0 = fig.add_subplot(gs[0, 0])
    ax1 = fig.add_subplot(gs[1, 0], sharex=ax0)
    # axes = axes_grid.Grid(fig,111,
    #                      nrows_ncols=(2,1),
    #                      axes_pad=0.05,
    #                      add_all=True)
    # ax = axes[0]
    # Bin centers and half-widths in log10 energy.
    x = 0.5 * (energies[1:] + energies[:-1])
    xerr = 0.5 * (energies[1:] - energies[:-1])
    count_str = 'counts'
    model_counts_str = 'model_counts'
    npred_str = 'npred'
    if weighted:
        count_str += '_wt'
        model_counts_str += '_wt'
        npred_str += '_wt'
    y = o[count_str]
    ym = o[model_counts_str]
    ax0.errorbar(x, y, yerr=np.sqrt(y), xerr=xerr, color='k',
                 linestyle='None', marker='s',
                 label='Data')
    ax0.errorbar(x, ym, color='k', linestyle='-', marker='None',
                 label='Total')
    # Label the six brightest sources individually ...
    for s in sorted(roi.sources,
                    key=lambda t: t[npred_str], reverse=True)[:6]:
        ax0.errorbar(x, s[model_counts_str], linestyle='-', marker='None',
                     label=s['name'])
    # ... and draw the remainder in gray without legend entries.
    for s in sorted(roi.sources,
                    key=lambda t: t[npred_str], reverse=True)[6:]:
        ax0.errorbar(x, s[model_counts_str], color='gray',
                     linestyle='-', marker='None',
                     label='__nolabel__')
    ax0.set_yscale('log')
    ax0.set_ylim(0.1, None)
    ax0.set_xlim(energies[0], energies[-1])
    ax0.legend(frameon=False, loc='best', prop={'size': 8}, ncol=2)
    # Fractional residuals (data - model) / model with Poisson errors.
    ax1.errorbar(x, (y - ym) / ym, xerr=xerr, yerr=np.sqrt(y) / ym,
                 color='k', linestyle='None', marker='s',
                 label='Data')
    ax1.set_xlabel('Energy [log$_{10}$(E/MeV)]')
    ax1.set_ylabel('Fractional Residual')
    ax0.set_ylabel('Counts')
    ax1.set_ylim(-0.4, 0.4)
    ax1.axhline(0.0, color='k')
    plt.savefig(imfile)
    plt.close(fig)
def load_ds9_cmap():
    """Register and return the DS9 'b' colormap as matplotlib 'ds9_b'."""
    # http://tdc-www.harvard.edu/software/saoimage/saoimage.color.html
    # Piecewise-linear RGB anchor points of the DS9 "b" color table.
    ds9_b = {
        'red': [[0.0, 0.0, 0.0],
                [0.25, 0.0, 0.0],
                [0.50, 1.0, 1.0],
                [0.75, 1.0, 1.0],
                [1.0, 1.0, 1.0]],
        'green': [[0.0, 0.0, 0.0],
                  [0.25, 0.0, 0.0],
                  [0.50, 0.0, 0.0],
                  [0.75, 1.0, 1.0],
                  [1.0, 1.0, 1.0]],
        'blue': [[0.0, 0.0, 0.0],
                 [0.25, 1.0, 1.0],
                 [0.50, 0.0, 0.0],
                 [0.75, 0.0, 0.0],
                 [1.0, 1.0, 1.0]]
    }
    plt.register_cmap(name='ds9_b', data=ds9_b)
    # Cache the colormap on plt.cm for convenient attribute access.
    plt.cm.ds9_b = plt.cm.get_cmap('ds9_b')
    return plt.cm.ds9_b
def load_bluered_cmap():
    """Register and return a blue-to-red diverging colormap as 'bluered'."""
    # Piecewise-linear RGB anchors: blue at 0, black mid-range, red at 1.
    bluered = {'red': ((0.0, 0.0, 0.0),
                       (0.5, 0.0, 0.0),
                       (1.0, 1.0, 1.0)),
               'green': ((0.0, 0.0, 0.0),
                         (1.0, 0.0, 0.0)),
               'blue': ((0.0, 0.0, 1.0),
                        (0.5, 0.0, 0.0),
                        (1.0, 0.0, 0.0))
               }
    plt.register_cmap(name='bluered', data=bluered)
    # Cache the colormap on plt.cm for convenient attribute access.
    plt.cm.bluered = plt.cm.get_cmap('bluered')
    return plt.cm.bluered
def annotate_name(data, xy=(0.05, 0.93), **kwargs):
    """Write ``data['name']`` in the axes corner; no-op when absent.

    Parameters
    ----------
    data : dict
        Dictionary that may carry a 'name' entry.
    xy : tuple
        Annotation anchor in axes-fraction coordinates.
    ax : matplotlib Axes, optional (keyword)
        Target axes; defaults to the current axes.
    """
    if 'name' not in data:
        return
    axis = kwargs.pop('ax', plt.gca())
    axis.annotate(data['name'],
                  xy=xy,
                  xycoords='axes fraction', fontsize=12,
                  xytext=(-5, 5), textcoords='offset points',
                  ha='left', va='center')
def annotate(**kwargs):
    """Annotate the axes with a source name/association and an energy range.

    Keyword Parameters
    ------------------
    ax : matplotlib Axes, optional
        Target axes; defaults to the current axes.
    src : dict, optional
        Source dictionary with 'name' and 'assoc' entries.
    loge_bounds : tuple, optional
        (log10 E_min, log10 E_max) in MeV; rendered in GeV.
    """
    axis = kwargs.pop('ax', plt.gca())
    loge_bounds = kwargs.pop('loge_bounds', None)
    src = kwargs.pop('src', None)
    lines = []
    if src:
        assoc = src['assoc']
        if 'ASSOC1' in assoc and assoc['ASSOC1']:
            lines.append('%s (%s)' % (src['name'], assoc['ASSOC1']))
        else:
            lines.append(src['name'])
    if loge_bounds:
        lines.append('E = %.3f - %.3f GeV' % (10 ** loge_bounds[0] / 1E3,
                                              10 ** loge_bounds[1] / 1E3))
    if not lines:
        return
    axis.annotate('\n'.join(lines),
                  xy=(0.05, 0.93),
                  xycoords='axes fraction', fontsize=12,
                  xytext=(-5, 5), textcoords='offset points',
                  ha='left', va='top')
def plot_markers(lon, lat, **kwargs):
    """Plot sky-coordinate markers on the current (WCS) axes.

    Parameters
    ----------
    lon, lat : array-like
        Marker coordinates in the frame named by the 'transform' keyword
        (default 'icrs').
    """
    transform = kwargs.get('transform', 'icrs')
    path_effects = kwargs.get('path_effects', None)
    # Coordinates are interpreted in the given sky frame via the axes'
    # coordinate transform.
    p = plt.gca().plot(lon, lat,
                       marker=kwargs.get('marker', '+'),
                       color=kwargs.get('color', 'w'),
                       label=kwargs.get('label', '__nolabel__'),
                       linestyle='None',
                       transform=plt.gca().get_transform(transform))
    if path_effects:
        plt.setp(p, path_effects=path_effects)
def plot_error_ellipse(fit, xy, cdelt, **kwargs):
    """Draw a localization error ellipse at pixel position *xy*.

    Parameters
    ----------
    fit : dict
        Localization fit with 'pos_err', 'pos_err_semimajor',
        'pos_err_semiminor', 'pos_angle' and the column named by 'colname'.
    xy : tuple
        Ellipse center in pixel coordinates.
    cdelt : sequence
        Pixel scale per axis, used to convert the radii to pixels.
    """
    ax = kwargs.pop('ax', plt.gca())
    colname = kwargs.pop('colname', 'r68')
    # NOTE(review): 'color' is popped but never used below, so the ellipse
    # edge color falls back to the matplotlib default -- confirm whether it
    # should be passed as edgecolor.
    color = kwargs.pop('color', 'k')
    sigma = fit['pos_err']
    sigmax = fit['pos_err_semimajor']
    sigmay = fit['pos_err_semiminor']
    theta = fit['pos_angle']
    radius = fit[colname]
    # Scale the semimajor/semiminor axes to the requested containment radius.
    e0 = Ellipse(xy=(float(xy[0]), float(xy[1])),
                 width=2.0 * sigmax / cdelt[0] * radius / sigma,
                 height=2.0 * sigmay / cdelt[1] * radius / sigma,
                 angle=-theta,
                 facecolor='None', **kwargs)
    ax.add_artist(e0)
class ImagePlotter(object):
    """Render a 2-D WCS map with matplotlib, with optional contours."""

    def __init__(self, img, mapping=None):
        """Initialize from a gammapy map.

        Parameters
        ----------
        img : `~gammapy.maps.WcsNDMap`
            Map to plot.  HEALPix maps are not supported and must be
            converted to WCS first.
        mapping : unused
            Retained for backward compatibility of the signature.
        """
        if isinstance(img, WcsNDMap):
            self._projtype = 'WCS'
            img = copy.deepcopy(img)
            self._geom = img.geom
        elif isinstance(img, HpxNDMap):
            self._projtype = 'HPX'
            raise ValueError('HEALPix maps are not supported; '
                             'convert to WCS first.')
        else:
            # Fixed: this previously formatted ``type(proj)`` where ``proj``
            # is undefined, raising NameError instead of the intended
            # ValueError.
            raise ValueError("Can't plot map of unknown type %s" % type(img))
        self._img = img

    @property
    def projtype(self):
        # Projection type string ('WCS').
        return self._projtype

    @property
    def geom(self):
        # WCS geometry of the wrapped map.
        return self._geom

    def plot(self, subplot=111, cmap='magma', **kwargs):
        """Draw the map image (and contours, if 'levels' is given).

        Returns
        -------
        im : `~matplotlib.image.AxesImage`
        ax : WCS axes the image was drawn on.
        """
        kwargs_contour = {'levels': None, 'colors': ['k'],
                          'linewidths': 1.0}
        kwargs_imshow = {'interpolation': 'nearest',
                         'origin': 'lower', 'norm': None,
                         'vmin': None, 'vmax': None}
        zscale = kwargs.get('zscale', 'lin')
        gamma = kwargs.get('gamma', 0.5)
        transform = kwargs.get('transform', None)
        # Select the color normalization; anything unrecognized falls back
        # to a linear stretch.
        if zscale == 'pow':
            kwargs_imshow['norm'] = PowerNorm(gamma=gamma)
        elif zscale == 'sqrt':
            kwargs_imshow['norm'] = PowerNorm(gamma=0.5)
        elif zscale == 'log':
            kwargs_imshow['norm'] = LogNorm()
        else:
            kwargs_imshow['norm'] = Normalize()
        fig = plt.gcf()
        ax = fig.add_subplot(subplot, projection=self._geom.wcs)
        load_ds9_cmap()
        try:
            colormap = plt.get_cmap(cmap)
        except Exception:
            # Narrowed from a bare except: fall back to the ds9 colormap
            # when the requested name is unknown.
            colormap = plt.get_cmap('ds9_b')
        colormap.set_under(colormap(0))
        data = copy.copy(self._img.data)
        if transform == 'sqrt':
            data = np.sqrt(data)
        kwargs_imshow = merge_dict(kwargs_imshow, kwargs)
        kwargs_contour = merge_dict(kwargs_contour, kwargs)
        im = ax.imshow(data, **kwargs_imshow)
        im.set_cmap(colormap)
        if kwargs_contour['levels']:
            cs = ax.contour(data, **kwargs_contour)
            cs.levels = ['%.0f' % val for val in cs.levels]
            plt.clabel(cs, inline=1, fontsize=8)
        # Axis labels follow the map's celestial or galactic frame.
        coordsys = self._geom.coordsys
        if coordsys == 'CEL':
            ax.set_xlabel('RA')
            ax.set_ylabel('DEC')
        elif coordsys == 'GAL':
            ax.set_xlabel('GLON')
            ax.set_ylabel('GLAT')
        # Explicit labels override the frame defaults.
        xlabel = kwargs.get('xlabel', None)
        ylabel = kwargs.get('ylabel', None)
        if xlabel is not None:
            ax.set_xlabel(xlabel)
        if ylabel is not None:
            ax.set_ylabel(ylabel)
        ax.coords.grid(color='white', linestyle=':',
                       linewidth=0.5)
        return im, ax
def make_cube_slice(map_in, loge_bounds):
    """Extract a slice from a map cube object.

    Parameters
    ----------
    map_in : `~gammapy.maps.WcsNDMap`
        Input map cube with an energy axis as its first non-spatial axis.
    loge_bounds : tuple
        (log10 E_min, log10 E_max) selecting the energy bins to keep.

    Returns
    -------
    `~gammapy.maps.WcsNDMap`
        Copy of the input restricted to the selected energy bins.
    """
    # FIXME: This functionality should be moved into a slice method of
    # gammapy.maps
    axis = map_in.geom.axes[0]
    # Map the log-energy bounds onto bin-edge indices of the energy axis.
    i0 = utils.val_to_edge(axis.edges, 10**loge_bounds[0])[0]
    i1 = utils.val_to_edge(axis.edges, 10**loge_bounds[1])[0]
    new_axis = map_in.geom.axes[0].slice(slice(i0, i1))
    geom = map_in.geom.to_image()
    geom = geom.to_cube([new_axis])
    map_out = WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy())
    return map_out
class ROIPlotter(fermipy.config.Configurable):
    """Plot an ROI counts/model map with catalog and model-source overlays."""

    defaults = {
        'loge_bounds': (None, '', list),
        'catalogs': (None, '', list),
        'graticule_radii': (None, '', list),
        'label_ts_threshold': (0.0, '', float),
        'cmap': ('ds9_b', '', str),
    }

    def __init__(self, data_map, hpx2wcs=None, **kwargs):
        # Consume 'roi' before Configurable sees the remaining kwargs.
        self._roi = kwargs.pop('roi', None)
        super(ROIPlotter, self).__init__(None, **kwargs)
        # Catalog entries may be given as names or as Catalog objects.
        self._catalogs = []
        for c in self.config['catalogs']:
            if utils.isstr(c):
                self._catalogs += [catalog.Catalog.create(c)]
            else:
                self._catalogs += [c]
        self._loge_bounds = self.config['loge_bounds']
        if isinstance(data_map, WcsNDMap):
            self._projtype = 'WCS'
            self._data_map = copy.deepcopy(data_map)
        elif isinstance(data_map, HpxNDMap):
            self._projtype = 'HPX'
            self._data_map = data_map.to_wcs(normalize=False, hpx2wcs=hpx2wcs)
        else:
            raise Exception(
                "Can't make ROIPlotter of unknown projection type %s" % type(data_map))
        if self._loge_bounds:
            self._data_map = make_cube_slice(self._data_map, self._loge_bounds)
        # 2-D image (summed over energy) used for all pixel-space drawing.
        self._implot = ImagePlotter(self._data_map.sum_over_axes())

    @property
    def data(self):
        # Raw data array of the (possibly energy-sliced) map.
        return self._data_map.data

    @property
    def geom(self):
        # WCS geometry of the map.
        return self._data_map.geom

    @property
    def map(self):
        # The wrapped gammapy map.
        return self._data_map

    @property
    def projtype(self):
        # 'WCS' or 'HPX' depending on the input map type.
        return self._projtype

    @property
    def proj(self):
        # NOTE(review): ``self._proj`` is never assigned in this class, so
        # accessing this property raises AttributeError -- confirm whether
        # it should return ``self._data_map.geom.wcs`` instead.
        return self._proj

    @classmethod
    def create_from_fits(cls, fitsfile, roi, **kwargs):
        """Build a ROIPlotter from a FITS map file and an ROI model.

        Fixed: ``roi`` was previously passed positionally, where it was
        silently consumed by the ``hpx2wcs`` parameter and ``self._roi``
        stayed None; it is now passed by keyword so __init__ picks it up.
        """
        map_in = Map.read(fitsfile)
        return cls(map_in, roi=roi, **kwargs)

    def plot_projection(self, iaxis, **kwargs):
        """Plot a 1-D projection of the map onto axis *iaxis* (0=LON, 1=LAT)."""
        data_map = kwargs.pop('data', self._data_map)
        noerror = kwargs.pop('noerror', False)
        xmin = kwargs.pop('xmin', -1)
        xmax = kwargs.pop('xmax', 1)
        axes = wcs_utils.wcs_to_axes(self.geom.wcs,
                                     self._data_map.data.shape[-2:])
        x = utils.edge_to_center(axes[iaxis])
        xerr = 0.5 * utils.edge_to_width(axes[iaxis])
        y = self.get_data_projection(data_map, axes, iaxis,
                                     loge_bounds=self._loge_bounds,
                                     xmin=xmin, xmax=xmax)
        if noerror:
            plt.errorbar(x, y, **kwargs)
        else:
            # Poisson errors on the summed counts.
            plt.errorbar(x, y, yerr=y ** 0.5, xerr=xerr, **kwargs)

    @staticmethod
    def get_data_projection(data_map, axes, iaxis, xmin=-1, xmax=1, loge_bounds=None):
        """Sum the map cube over all axes except *iaxis*.

        The transverse spatial axis is restricted to [xmin, xmax] and the
        energy axis to *loge_bounds* before summing.

        NOTE(review): for both branches the edge indices are computed from
        ``axes[iaxis]`` while the slice is applied to the *other* spatial
        axis; this is only exact when both spatial axes share the same
        binning -- confirm intent.
        """
        s0 = slice(None, None)
        s1 = slice(None, None)
        s2 = slice(None, None)
        if iaxis == 0:
            if xmin is None:
                xmin = axes[1][0]
            if xmax is None:
                xmax = axes[1][-1]
            i0 = utils.val_to_edge(axes[iaxis], xmin)[0]
            i1 = utils.val_to_edge(axes[iaxis], xmax)[0]
            s1 = slice(i0, i1)
            saxes = [1, 2]
        else:
            if xmin is None:
                xmin = axes[0][0]
            if xmax is None:
                xmax = axes[0][-1]
            i0 = utils.val_to_edge(axes[iaxis], xmin)[0]
            i1 = utils.val_to_edge(axes[iaxis], xmax)[0]
            s0 = slice(i0, i1)
            saxes = [0, 2]
        if loge_bounds is not None:
            # Restrict the energy axis to the requested log10(E) range.
            j0 = utils.val_to_edge(
                data_map.geom.axes[0].edges, 10**loge_bounds[0])[0]
            j1 = utils.val_to_edge(
                data_map.geom.axes[0].edges, 10**loge_bounds[1])[0]
            s2 = slice(j0, j1)
        c = np.apply_over_axes(np.sum, data_map.data.T[s0, s1, s2], axes=saxes)
        c = np.squeeze(c)
        return c

    @staticmethod
    def setup_projection_axis(iaxis, loge_bounds=None):
        """Apply standard legend and axis labels for a projection plot."""
        plt.gca().legend(frameon=False, prop={'size': 10})
        plt.gca().set_ylabel('Counts')
        if iaxis == 0:
            plt.gca().set_xlabel('LON Offset [deg]')
        else:
            plt.gca().set_xlabel('LAT Offset [deg]')

    def plot_sources(self, skydir, labels,
                     plot_kwargs, text_kwargs, **kwargs):
        """Draw source markers (and optionally labels) at *skydir*."""
        ax = plt.gca()
        nolabels = kwargs.get('nolabels', False)
        label_mask = kwargs.get('label_mask',
                                np.ones(len(labels), dtype=bool))
        if nolabels:
            label_mask.fill(False)
        pixcrd = wcs_utils.skydir_to_pix(skydir, self._implot.geom.wcs)
        # Outline text/markers in black for visibility on bright maps.
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        for i, (x, y, label, show_label) in enumerate(zip(pixcrd[0], pixcrd[1],
                                                          labels, label_mask)):
            if show_label:
                t = ax.annotate(label, xy=(x, y),
                                xytext=(5.0, 5.0), textcoords='offset points',
                                **text_kwargs)
                plt.setp(t, path_effects=[path_effect])
            t = ax.plot(x, y, **plot_kwargs)
            plt.setp(t, path_effects=[path_effect])

    def plot_roi(self, roi, **kwargs):
        """Overlay the ROI point sources, labeling those above a TS threshold."""
        src_color = 'w'
        label_ts_threshold = kwargs.get('label_ts_threshold', 0.0)
        plot_kwargs = dict(linestyle='None', marker='+',
                           markerfacecolor='None', mew=0.66, ms=8,
                           markeredgecolor=src_color, clip_on=True)
        text_kwargs = dict(color=src_color, size=8, clip_on=True,
                           fontweight='normal')
        ts = np.array([s['ts'] for s in roi.point_sources])
        # None disables all labels; <= 0 labels everything.
        if label_ts_threshold is None:
            m = np.zeros(len(ts), dtype=bool)
        elif label_ts_threshold <= 0:
            m = np.ones(len(ts), dtype=bool)
        else:
            m = ts > label_ts_threshold
        skydir = roi._src_skydir
        labels = [s.name for s in roi.point_sources]
        self.plot_sources(skydir, labels, plot_kwargs, text_kwargs,
                          label_mask=m, **kwargs)

    def plot_catalog(self, catalog):
        """Overlay unlabeled markers for catalog sources within the map."""
        color = 'lime'
        plot_kwargs = dict(linestyle='None', marker='x',
                           markerfacecolor='None',
                           markeredgecolor=color, clip_on=True)
        text_kwargs = dict(color=color, size=8, clip_on=True,
                           fontweight='normal')
        skydir = catalog.skydir
        if 'NickName' in catalog.table.columns:
            labels = catalog.table['NickName']
        else:
            labels = catalog.table['Source_Name']
        # Keep only sources within the map's angular extent.
        separation = skydir.separation(self.map.skydir).deg
        m = separation < max(self.map.width)
        self.plot_sources(skydir[m], labels[m], plot_kwargs, text_kwargs,
                          nolabels=True)

    def plot(self, **kwargs):
        """Render the map image with overlays, colorbar and graticules."""
        zoom = kwargs.get('zoom', None)
        graticule_radii = kwargs.get('graticule_radii',
                                     self.config['graticule_radii'])
        label_ts_threshold = kwargs.get('label_ts_threshold',
                                        self.config['label_ts_threshold'])
        im_kwargs = dict(cmap=self.config['cmap'],
                         interpolation='nearest', transform=None,
                         vmin=None, vmax=None, levels=None,
                         zscale='lin', subplot=111, colors=['k'])
        cb_kwargs = dict(orientation='vertical', shrink=1.0, pad=0.1,
                         fraction=0.1, cb_label=None)
        im_kwargs = merge_dict(im_kwargs, kwargs)
        cb_kwargs = merge_dict(cb_kwargs, kwargs)
        im, ax = self._implot.plot(**im_kwargs)
        self._ax = ax
        for c in self._catalogs:
            self.plot_catalog(c)
        if self._roi is not None:
            self.plot_roi(self._roi,
                          label_ts_threshold=label_ts_threshold)
        self._extent = im.get_extent()
        ax.set_xlim(self._extent[0], self._extent[1])
        ax.set_ylim(self._extent[2], self._extent[3])
        self.zoom(zoom)
        cb_label = cb_kwargs.pop('cb_label', None)
        cb = plt.colorbar(im, **cb_kwargs)
        if cb_label:
            cb.set_label(cb_label)
        for r in graticule_radii:
            self.draw_circle(r)

    def draw_circle(self, radius, **kwargs):
        """Draw a dashed circle of *radius* degrees, centered on *skydir*
        (or the map center when no 'skydir' keyword is given)."""
        skydir = kwargs.get('skydir', None)
        path_effects = kwargs.get('path_effects', None)
        if skydir is None:
            pix = self.map.geom.center_pix[:2]
        else:
            pix = skydir.to_pixel(self.map.geom.wcs)[:2]
        kw = dict(facecolor='none', edgecolor='w', linestyle='--',
                  linewidth=0.5, label='__nolabel__')
        kw = merge_dict(kw, kwargs)
        # Convert the angular radius to pixels using the coarsest axis scale.
        pix_radius = radius / max(np.abs(self.map.geom.wcs.wcs.cdelt))
        c = Circle(pix, pix_radius, **kw)
        if path_effects is not None:
            plt.setp(c, path_effects=path_effects)
        self._ax.add_patch(c)

    def zoom(self, zoom):
        """Zoom the axes in on the image center by the factor *zoom*."""
        if zoom is None:
            return
        extent = self._extent
        xw = extent[1] - extent[0]
        x0 = 0.5 * (extent[0] + extent[1])
        # Fixed: the y width/center were computed from the x extent
        # (extent[0]/extent[1]); they must use the y extent
        # (extent[2]/extent[3]) or non-square maps zoom incorrectly.
        yw = extent[3] - extent[2]
        y0 = 0.5 * (extent[2] + extent[3])
        xlim = [x0 - 0.5 * xw / zoom, x0 + 0.5 * xw / zoom]
        ylim = [y0 - 0.5 * yw / zoom, y0 + 0.5 * yw / zoom]
        self._ax.set_xlim(xlim[0], xlim[1])
        self._ax.set_ylim(ylim[0], ylim[1])
class SEDPlotter(object):
    def __init__(self, sed):
        """Store a deep copy of the SED dictionary to plot."""
        self._sed = copy.deepcopy(sed)

    @property
    def sed(self):
        # The SED dictionary (deep-copied at construction).
        return self._sed
@staticmethod
def get_ylims(sed):
fmin = np.log10(np.nanmin(sed['e2dnde_ul95'])) - 0.5
fmax = np.log10(np.nanmax(sed['e2dnde_ul95'])) + 0.5
fdelta = fmax - fmin
if fdelta < 2.0:
fmin -= 0.5 * (2.0 - fdelta)
fmax += 0.5 * (2.0 - fdelta)
return fmin, fmax
    @staticmethod
    def plot_lnlscan(sed, **kwargs):
        """Plot the per-bin delta-log-likelihood scan as a 2-D color mesh.

        Each energy bin's likelihood profile over flux is interpolated onto
        a common log-flux grid and shown with pcolormesh, clipped at
        *llhcut* below the per-bin maximum.
        """
        ax = kwargs.pop('ax', plt.gca())
        llhcut = kwargs.pop('llhcut', -2.70)
        cmap = kwargs.pop('cmap', 'BuGn')
        cmap_trunc_lo = kwargs.pop('cmap_trunc_lo', None)
        cmap_trunc_hi = kwargs.pop('cmap_trunc_hi', None)
        ylim = kwargs.pop('ylim', None)
        if ylim is None:
            fmin, fmax = SEDPlotter.get_ylims(sed)
        else:
            fmin, fmax = np.log10(ylim)
        # Common log10-flux grid shared by all energy bins.
        fluxM = np.arange(fmin, fmax, 0.01)
        fbins = len(fluxM)
        llhMatrix = np.zeros((len(sed['e_ctr']), fbins))
        # loop over energy bins
        for i in range(len(sed['e_ctr'])):
            m = sed['norm_scan'][i] > 0
            e2dnde_scan = sed['norm_scan'][i][m] * sed['ref_e2dnde'][i]
            flux = np.log10(e2dnde_scan)
            logl = sed['dloglike_scan'][i][m]
            # Normalize so the per-bin maximum is at zero.
            logl -= np.max(logl)
            try:
                fn = interpolate.interp1d(flux, logl, fill_value='extrapolate')
                logli = fn(fluxM)
            except:
                # Fall back to linear interpolation without extrapolation.
                logli = np.interp(fluxM, flux, logl)
            llhMatrix[i, :] = logli
        cmap = copy.deepcopy(plt.cm.get_cmap(cmap))
        # cmap.set_under('w')
        if cmap_trunc_lo is not None or cmap_trunc_hi is not None:
            cmap = truncate_colormap(cmap, cmap_trunc_lo, cmap_trunc_hi, 1024)
        # Cell edges in energy (x) and flux (y) for pcolormesh.
        xedge = 10**np.insert(sed['loge_max'], 0, sed['loge_min'][0])
        yedge = np.logspace(fmin, fmax, fbins)
        xedge, yedge = np.meshgrid(xedge, yedge)
        im = ax.pcolormesh(xedge, yedge, llhMatrix.T,
                           vmin=llhcut, vmax=0, cmap=cmap,
                           linewidth=0)
        cb = plt.colorbar(im)
        cb.set_label('Delta LogLikelihood')
        plt.gca().set_ylim(10 ** fmin, 10 ** fmax)
        plt.gca().set_yscale('log')
        plt.gca().set_xscale('log')
        plt.gca().set_xlim(sed['e_min'][0], sed['e_max'][-1])
    @staticmethod
    def plot_flux_points(sed, **kwargs):
        """Plot SED flux points, using upper limits where TS is low.

        Bins with ts below *ul_ts_threshold* are drawn as 95% upper-limit
        arrows; all others as points with asymmetric errors.
        """
        ax = kwargs.pop('ax', plt.gca())
        ul_ts_threshold = kwargs.pop('ul_ts_threshold', 4)
        kw = {}
        kw['marker'] = kwargs.get('marker', 'o')
        kw['linestyle'] = kwargs.get('linestyle', 'None')
        kw['color'] = kwargs.get('color', 'k')
        fmin, fmax = SEDPlotter.get_ylims(sed)
        # m selects the bins rendered as upper limits.
        m = sed['ts'] < ul_ts_threshold
        x = sed['e_ctr']
        y = sed['e2dnde']
        yerr = sed['e2dnde_err']
        yerr_lo = sed['e2dnde_err_lo']
        yerr_hi = sed['e2dnde_err_hi']
        yul = sed['e2dnde_ul95']
        delo = sed['e_ctr'] - sed['e_min']
        dehi = sed['e_max'] - sed['e_ctr']
        xerr0 = np.vstack((delo[m], dehi[m]))
        xerr1 = np.vstack((delo[~m], dehi[~m]))
        plt.errorbar(x[~m], y[~m], xerr=xerr1,
                     yerr=(yerr_lo[~m], yerr_hi[~m]), **kw)
        # Upper limits: arrow length is 20% of the limit value.
        plt.errorbar(x[m], yul[m], xerr=xerr0,
                     yerr=yul[m] * 0.2, uplims=True, **kw)
        ax.set_yscale('log')
        ax.set_xscale('log')
        ax.set_xlim(sed['e_min'][0], sed['e_max'][-1])
        ax.set_ylim(10 ** fmin, 10 ** fmax)
@staticmethod
def plot_resid(src, model_flux, **kwargs):
ax = kwargs.pop('ax', plt.gca())
sed = src['sed']
m = sed['ts'] < 4
x = sed['e_ctr']
y = sed['e2dnde']
yerr = sed['e2dnde_err']
yul = sed['e2dnde_ul95']
delo = sed['e_ctr'] - sed['e_min']
dehi = sed['e_max'] - sed['e_ctr']
xerr = np.vstack((delo, dehi))
ym = np.interp(sed['e_ctr'], model_flux['log_energies'],
10 ** (2 * model_flux['log_energies']) *
model_flux['dnde'])
ax.errorbar(x, (y - ym) / ym, xerr=xerr, yerr=yerr / ym, **kwargs)
@staticmethod
def plot_model(model_flux, **kwargs):
ax = kwargs.pop('ax', plt.gca())
color = kwargs.pop('color', 'k')
noband = kwargs.pop('noband', False)
e2 = 10 ** (2 * model_flux['log_energies'])
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde'] * e2, color=color)
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde_lo'] * e2, color=color,
linestyle='--')
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde_hi'] * e2, color=color,
linestyle='--')
if not noband:
ax.fill_between(10 ** model_flux['log_energies'],
model_flux['dnde_lo'] * e2,
model_flux['dnde_hi'] * e2,
alpha=0.5, color=color, zorder=-1)
@staticmethod
def plot_sed(sed, showlnl=False, **kwargs):
"""Render a plot of a spectral energy distribution.
Parameters
----------
showlnl : bool
Overlay a map of the delta-loglikelihood values vs. flux
in each energy bin.
cmap : str
Colormap that will be used for the delta-loglikelihood
map.
llhcut : float
Minimum delta-loglikelihood value.
ul_ts_threshold : float
TS threshold that determines whether the MLE or UL
is plotted in each energy bin.
"""
ax = kwargs.pop('ax', plt.gca())
cmap = kwargs.get('cmap', 'BuGn')
annotate_name(sed, ax=ax)
SEDPlotter.plot_flux_points(sed, **kwargs)
if np.any(sed['ts'] > 9.):
if 'model_flux' in sed:
SEDPlotter.plot_model(sed['model_flux'],
noband=showlnl, **kwargs)
if showlnl:
SEDPlotter.plot_lnlscan(sed, **kwargs)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('Energy [MeV]')
ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
def plot(self, showlnl=False, **kwargs):
return SEDPlotter.plot_sed(self.sed, showlnl, **kwargs)
class ExtensionPlotter(object):
    """Plot data/background/model projections for an extension scan.

    Builds the file names of the counts cube and of the model cubes
    produced for each tested extension width, then overlays their
    spatial projections.
    """

    def __init__(self, src, roi, suffix, workdir, loge_bounds=None):
        self._src = copy.deepcopy(src)
        name = src['name'].lower().replace(' ', '_')
        # model cube with the point-source (no extension) hypothesis
        self._file0 = os.path.join(workdir,
                                   'mcube_%s_noext%s.fits' % (name, suffix))
        # background-only model cube
        self._file1 = os.path.join(workdir,
                                   'mcube_%s_ext_bkg%s.fits' % (name, suffix))
        # counts cube
        self._file2 = os.path.join(workdir, 'ccube%s.fits' % suffix)
        self._files = []
        self._width = src['extension']['width']
        for i, _ in enumerate(src['extension']['width']):
            self._files += [os.path.join(workdir, 'mcube_%s_ext%02i%s.fits' % (
                name, i, suffix))]
        self._roi = roi
        self._loge_bounds = loge_bounds

    def plot(self, iaxis):
        """Plot data, background, and a subsampled set of
        extension-scan model projections along axis *iaxis*."""
        p0 = ROIPlotter.create_from_fits(self._file2, roi=self._roi,
                                         loge_bounds=self._loge_bounds)
        p1 = ROIPlotter.create_from_fits(self._file1, roi=self._roi,
                                         loge_bounds=self._loge_bounds)
        p0.plot_projection(iaxis, color='k', label='Data', marker='s',
                           linestyle='None')
        p1.plot_projection(iaxis, color='b', noerror=True, label='Background')
        n = len(self._width)
        step = max(1, int(n / 5.))
        # Materialize the zip before slicing: in Python 3 a zip
        # object does not support subscripting/slicing.
        fw = list(zip(self._files, self._width))[::step]
        for i, (f, w) in enumerate(fw):
            cf = float(i) / float(len(fw) - 1.0)
            cf = 0.2 + cf * 0.8
            p = ROIPlotter.create_from_fits(f, roi=self._roi,
                                            loge_bounds=self._loge_bounds)
            p._data += p1.data
            p.plot_projection(iaxis, color=matplotlib.cm.Reds(cf),
                              noerror=True, label='%.4f$^\circ$' % w)
class AnalysisPlotter(fermipy.config.Configurable):
    """Driver that renders the diagnostic plots of a fermipy analysis:
    ROI counts/model maps, residual maps, TS maps, SEDs, localization
    and extension results.

    Configuration comes from the ``plotting`` section of the fermipy
    configuration plus the ``fileio`` and ``logging`` sections.
    """

    # Default configuration: plotting options merged with the
    # file-I/O and logging defaults.
    defaults = dict(defaults.plotting.items(),
                    fileio=defaults.fileio,
                    logging=defaults.logging)

    def __init__(self, config, **kwargs):
        """Initialize matplotlib state and pre-load source catalogs."""
        fermipy.config.Configurable.__init__(self, config, **kwargs)
        matplotlib.rcParams['font.size'] = 12
        matplotlib.interactive(self.config['interactive'])
        # catalogs used for overlaying source markers on sky maps
        self._catalogs = []
        for c in self.config['catalogs']:
            self._catalogs += [catalog.Catalog.create(c)]

    def run(self, gta, mcube_map, **kwargs):
        """Make all plots."""
        prefix = kwargs.get('prefix', 'test')
        # NOTE: shadows the ``format`` builtin; kept as-is for
        # consistency with the existing keyword API.
        format = kwargs.get('format', self.config['format'])
        # one ROI plot for the full energy range (None) plus one per
        # configured energy slice
        loge_bounds = [None] + self.config['loge_bounds']
        for x in loge_bounds:
            self.make_roi_plots(gta, mcube_map, loge_bounds=x,
                                **kwargs)
        imfile = utils.format_filename(self.config['fileio']['workdir'],
                                       'counts_spectrum', prefix=[prefix],
                                       extension=format)
        make_counts_spectrum_plot(gta._roi_data, gta.roi,
                                  gta.log_energies,
                                  imfile, **kwargs)

    def make_residmap_plots(self, maps, roi=None, **kwargs):
        """Make plots from the output of
        `~fermipy.gtanalysis.GTAnalysis.residmap`.

        Parameters
        ----------
        maps : dict
            Output dictionary of
            `~fermipy.gtanalysis.GTAnalysis.residmap`.

        roi : `~fermipy.roi_model.ROIModel`
            ROI Model object.  Generate markers at the positions of
            the sources in this ROI.

        zoom : float
            Crop the image by this factor.  If None then no crop is
            applied.
        """
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
        use_weights = kwargs.pop('use_weights', False)
        # FIXME, how to set this:
        no_contour = False
        zoom = kwargs.get('zoom', None)
        kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
        kwargs.setdefault('label_ts_threshold',
                          self.config['label_ts_threshold'])
        cmap = kwargs.setdefault('cmap', self.config['cmap'])
        cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid'])
        kwargs.setdefault('catalogs', self.config['catalogs'])
        if no_contour:
            sigma_levels = None
        else:
            sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17))
        load_bluered_cmap()
        prefix = maps['name']
        mask = maps['mask']
        if use_weights:
            # keep the unweighted significance values for the
            # histogram, then apply the mask to all maps in place
            sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype(
                bool)]
            maps['sigma'].data *= maps['mask'].data
            maps['data'].data *= maps['mask'].data
            maps['model'].data *= maps['mask'].data
            maps['excess'].data *= maps['mask'].data
        else:
            sigma_hist_data = maps['sigma'].data
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['sigma'], roi=roi, **kwargs)
        p.plot(vmin=-5, vmax=5, levels=sigma_levels,
               cb_label='Significance [$\sigma$]', interpolation='bicubic',
               cmap=cmap_resid, zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_sigma',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # make and draw histogram
        fig, ax = plt.subplots(figsize=figsize)
        nBins = np.linspace(-6, 6, 121)
        data = np.nan_to_num(sigma_hist_data)
        # find best fit parameters
        mu, sigma = norm.fit(data.flatten())
        # make and draw the histogram; clip at +/- 6 sigma
        data[data > 6.0] = 6.0
        data[data < -6.0] = -6.0
        n, bins, patches = ax.hist(data.flatten(), nBins, density=True,
                                   histtype='stepfilled',
                                   facecolor='green', alpha=0.75)
        # make and draw best fit line
        y = norm.pdf(bins, mu, sigma)
        ax.plot(bins, y, 'r--', linewidth=2)
        # reference unit normal distribution
        y = norm.pdf(bins, 0.0, 1.0)
        ax.plot(bins, y, 'k', linewidth=1)
        # labels and such
        ax.set_xlabel(r'Significance ($\sigma$)')
        ax.set_ylabel('Probability')
        paramtext = 'Gaussian fit:\n'
        paramtext += '$\\mu=%.2f$\n' % mu
        paramtext += '$\\sigma=%.2f$' % sigma
        ax.text(0.05, 0.95, paramtext, verticalalignment='top',
                horizontalalignment='left', transform=ax.transAxes)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_sigma_hist',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # common color scale for the data and model count maps
        vmax = max(np.max(maps['data'].data), np.max(maps['model'].data))
        vmin = min(np.min(maps['data'].data), np.min(maps['model'].data))
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['data'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_data',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['model'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_model',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['excess'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap_resid)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_excess',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)

    def make_tsmap_plots(self, maps, roi=None, **kwargs):
        """Make plots from the output of
        `~fermipy.gtanalysis.GTAnalysis.tsmap` or
        `~fermipy.gtanalysis.GTAnalysis.tscube`.  This method
        generates a 2D sky map for the best-fit test source in
        sqrt(TS) and Npred.

        Parameters
        ----------
        maps : dict
            Output dictionary of
            `~fermipy.gtanalysis.GTAnalysis.tsmap` or
            `~fermipy.gtanalysis.GTAnalysis.tscube`.

        roi : `~fermipy.roi_model.ROIModel`
            ROI Model object.  Generate markers at the positions of
            the sources in this ROI.

        zoom : float
            Crop the image by this factor.  If None then no crop is
            applied.
        """
        kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
        kwargs.setdefault('label_ts_threshold',
                          self.config['label_ts_threshold'])
        kwargs.setdefault('cmap', self.config['cmap'])
        kwargs.setdefault('catalogs', self.config['catalogs'])
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
        suffix = kwargs.pop('suffix', 'tsmap')
        zoom = kwargs.pop('zoom', None)
        # nothing to plot without a TS map
        if 'ts' not in maps:
            return
        sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))
        prefix = maps['name']
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['sqrt_ts'], roi=roi, **kwargs)
        p.plot(vmin=0, vmax=5, levels=sigma_levels,
               cb_label='Sqrt(TS) [$\sigma$]', interpolation='bicubic',
               zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          '%s_sqrt_ts' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['npred'], roi=roi, **kwargs)
        p.plot(vmin=0, cb_label='NPred [Counts]', interpolation='bicubic',
               zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          '%s_npred' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # make and draw histogram of TS values, clipped to [0, 25]
        fig, ax = plt.subplots(figsize=figsize)
        bins = np.linspace(0, 25, 101)
        data = np.nan_to_num(maps['ts'].data.T)
        data[data > 25.0] = 25.0
        data[data < 0.0] = 0.0
        n, bins, patches = ax.hist(data.flatten(), bins, density=True,
                                   histtype='stepfilled',
                                   facecolor='green', alpha=0.75)
        # ax.plot(bins,(1-chi2.cdf(x,dof))/2.,**kwargs)
        # expected null distribution: chi^2 with 1 dof over 2
        ax.plot(bins, 0.5 * chi2.pdf(bins, 1.0), color='k',
                label=r"$\chi^2_{1} / 2$")
        ax.set_yscale('log')
        ax.set_ylim(1E-4)
        ax.legend(loc='upper right', frameon=False)
        # labels and such
        ax.set_xlabel('TS')
        ax.set_ylabel('Probability')
        plt.savefig(utils.format_filename(workdir,
                                          '%s_ts_hist' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)

    def make_roi_plots(self, gta, mcube_tot, **kwargs):
        """Make various diagnostic plots for the 1D and 2D
        counts/model distributions.

        Parameters
        ----------
        prefix : str
            Prefix that will be appended to all filenames.
        """
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        loge_bounds = kwargs.get('loge_bounds', None)
        weighted = kwargs.get('weighted', False)
        roi_kwargs = {}
        roi_kwargs.setdefault('loge_bounds', loge_bounds)
        roi_kwargs.setdefault(
            'graticule_radii', self.config['graticule_radii'])
        roi_kwargs.setdefault('label_ts_threshold',
                              self.config['label_ts_threshold'])
        roi_kwargs.setdefault('cmap', self.config['cmap'])
        roi_kwargs.setdefault('catalogs', self._catalogs)
        if loge_bounds is None:
            loge_bounds = (gta.log_energies[0], gta.log_energies[-1])
        # energy-range suffix embedded in the output file names
        esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
        mcube_diffuse = gta.model_counts_map('diffuse')
        counts_map = gta.counts_map()
        if weighted:
            # apply the likelihood weight map; copy first so the
            # caller's maps are not modified
            wmap = gta.weight_map()
            counts_map = copy.deepcopy(counts_map)
            mcube_tot = copy.deepcopy(mcube_tot)
            counts_map.data *= wmap.data
            mcube_tot.data *= wmap.data
            mcube_diffuse.data *= wmap.data
        # colors = ['k', 'b', 'g', 'r']
        data_style = {'marker': 's', 'linestyle': 'None'}
        fig = plt.figure(figsize=figsize)
        if gta.projtype == "WCS":
            xmin = -1
            xmax = 1
        elif gta.projtype == "HPX":
            # re-project HEALPix maps onto a WCS grid for plotting
            hpx2wcs = counts_map.make_wcs_mapping(proj='CAR', oversample=2)
            counts_map = counts_map.to_wcs(hpx2wcs=hpx2wcs)
            mcube_tot = mcube_tot.to_wcs(hpx2wcs=hpx2wcs)
            mcube_diffuse = mcube_diffuse.to_wcs(hpx2wcs=hpx2wcs)
            xmin = None
            xmax = None
        fig = plt.figure(figsize=figsize)
        rp = ROIPlotter(mcube_tot, roi=gta.roi, **roi_kwargs)
        rp.plot(cb_label='Counts', zscale='pow', gamma=1. / 3.)
        plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                 '%s_model_map%s.%s' % (
                                     prefix, esuffix, fmt)))
        plt.close(fig)
        rp = ROIPlotter(counts_map, roi=gta.roi, **roi_kwargs)
        rp.plot(cb_label='Counts', zscale='sqrt')
        plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                 '%s_counts_map%s.%s' % (
                                     prefix, esuffix, fmt)))
        plt.close(fig)
        # 1D projections along longitude and latitude
        for iaxis, xlabel, psuffix in zip([0, 1],
                                          ['LON Offset [deg]', 'LAT Offset [deg]'],
                                          ['xproj', 'yproj']):
            fig = plt.figure(figsize=figsize)
            rp.plot_projection(iaxis, label='Data', color='k',
                               xmin=xmin, xmax=xmax, **data_style)
            rp.plot_projection(iaxis, data=mcube_tot, label='Model', xmin=xmin, xmax=xmax,
                               noerror=True)
            rp.plot_projection(iaxis, data=mcube_diffuse, label='Diffuse', xmin=xmin, xmax=xmax,
                               noerror=True)
            plt.gca().set_ylabel('Counts')
            plt.gca().set_xlabel(xlabel)
            plt.gca().legend(frameon=False)
            annotate(loge_bounds=loge_bounds)
            plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                     '%s_counts_map_%s%s.%s' % (prefix, psuffix,
                                                                esuffix, fmt)))
            plt.close(fig)

    def make_sed_plots(self, sed, **kwargs):
        """Plot the SED dictionary *sed* twice: once plain and once
        with the delta-loglikelihood scan overlay."""
        prefix = kwargs.get('prefix', '')
        name = sed['name'].lower().replace(' ', '_')
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        p = SEDPlotter(sed)
        fig = plt.figure(figsize=figsize)
        p.plot()
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'sed', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p.plot(showlnl=True)
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'sedlnl', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def make_localization_plots(self, loc, roi=None, **kwargs):
        """Plot the localization TS maps (wide scan and peak region)
        with the fitted position and uncertainty ellipses overlaid."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        skydir = kwargs.get('skydir', None)
        cmap = kwargs.get('cmap', self.config['cmap'])
        name = loc.get('name', '')
        name = name.lower().replace(' ', '_')
        tsmap = loc['tsmap']
        fit_init = loc['fit_init']
        # renormalize so the map maximum is at zero (delta-lnL)
        tsmap_renorm = copy.deepcopy(tsmap)
        tsmap_renorm.data -= np.max(tsmap_renorm.data)
        # NOTE: this overwrites the ``skydir`` kwarg read above with
        # the footprint of the peak TS map.
        skydir = loc['tsmap_peak'].geom.get_coord(flat=True)
        coordsys = loc['tsmap_peak'].geom.coordsys
        skydir = MapCoord.create(skydir, coordsys=coordsys).skycoord
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        p = ROIPlotter(tsmap_renorm, roi=roi)
        fig = plt.figure(figsize=figsize)
        vmin = max(-100.0, np.min(tsmap_renorm.data))
        p.plot(levels=[-200, -100, -50, -20, -9.21, -5.99, -2.3, -1.0],
               cmap=cmap, vmin=vmin, colors=['k'],
               interpolation='bicubic', cb_label='2$\\times\Delta\ln$L')
        cdelt0 = np.abs(tsmap.geom.wcs.wcs.cdelt[0])
        cdelt1 = np.abs(tsmap.geom.wcs.wcs.cdelt[1])
        cdelt = [cdelt0, cdelt1]
        peak_skydir = SkyCoord(fit_init['ra'], fit_init['dec'],
                               frame='icrs', unit='deg')
        scan_skydir = SkyCoord(loc['ra'], loc['dec'],
                               frame='icrs', unit='deg')
        peak_pix = peak_skydir.to_pixel(tsmap_renorm.geom.wcs)
        scan_pix = scan_skydir.to_pixel(tsmap_renorm.geom.wcs)
        if 'ra_preloc' in loc:
            preloc_skydir = SkyCoord(loc['ra_preloc'], loc['dec_preloc'],
                                     frame='icrs', unit='deg')
            plot_markers(preloc_skydir.ra.deg, preloc_skydir.dec.deg,
                         marker='+', color='w', path_effects=[path_effect],
                         label='Old Position')
        plot_markers(peak_skydir.ra.deg, peak_skydir.dec.deg,
                     marker='x', color='lime', path_effects=[path_effect])
        plot_markers(scan_skydir.ra.deg, scan_skydir.dec.deg,
                     marker='x', color='w', path_effects=[path_effect],
                     label='New Position')
        if skydir is not None:
            # rectangle outlining the footprint of the peak TS map
            pix = skydir.to_pixel(tsmap_renorm.geom.wcs)
            xmin = np.min(pix[0])
            ymin = np.min(pix[1])
            xwidth = np.max(pix[0]) - xmin
            ywidth = np.max(pix[1]) - ymin
            r = Rectangle((xmin, ymin), xwidth, ywidth,
                          edgecolor='w', facecolor='none', linestyle='--')
            plt.gca().add_patch(r)
        plot_error_ellipse(fit_init, peak_pix, cdelt, edgecolor='lime',
                           color='lime', colname='pos_r68')
        plot_error_ellipse(fit_init, peak_pix, cdelt, edgecolor='lime',
                           color='lime', colname='pos_r99', linestyle=':')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r68', label='68% Uncertainty')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r99', label='99% Uncertainty',
                           linestyle='--')
        handles, labels = plt.gca().get_legend_handles_labels()
        h0 = Line2D([], [], color='w', marker='None',
                    label='68% Uncertainty', linewidth=1.0)
        h1 = Line2D([], [], color='w', marker='None',
                    label='99% Uncertainty', linewidth=1.0,
                    linestyle='--')
        plt.legend(handles=handles + [h0, h1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'localize', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)
        # second figure: zoomed TS map around the localization peak
        tsmap = loc['tsmap_peak']
        tsmap_renorm = copy.deepcopy(tsmap)
        tsmap_renorm.data -= np.max(tsmap_renorm.data)
        p = ROIPlotter(tsmap_renorm, roi=roi)
        fig = plt.figure(figsize=figsize)
        vmin = max(-50.0, np.min(tsmap_renorm.data))
        p.plot(levels=[-200, -100, -50, -20, -9.21, -5.99, -2.3, -1.0],
               cmap=cmap, vmin=vmin, colors=['k'],
               interpolation='bicubic', cb_label='2$\\times\Delta\ln$L')
        cdelt0 = np.abs(tsmap.geom.wcs.wcs.cdelt[0])
        cdelt1 = np.abs(tsmap.geom.wcs.wcs.cdelt[1])
        cdelt = [cdelt0, cdelt1]
        scan_pix = scan_skydir.to_pixel(tsmap_renorm.geom.wcs)
        if 'ra_preloc' in loc:
            preloc_skydir = SkyCoord(loc['ra_preloc'], loc['dec_preloc'],
                                     frame='icrs', unit='deg')
            plot_markers(preloc_skydir.ra.deg, preloc_skydir.dec.deg,
                         marker='+', color='w', path_effects=[path_effect],
                         label='Old Position')
        plot_markers(scan_skydir.ra.deg, scan_skydir.dec.deg,
                     marker='x', color='w', path_effects=[path_effect],
                     label='New Position')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r68', label='68% Uncertainty')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r99', label='99% Uncertainty',
                           linestyle='--')
        handles, labels = plt.gca().get_legend_handles_labels()
        h0 = Line2D([], [], color='w', marker='None',
                    label='68% Uncertainty', linewidth=1.0)
        h1 = Line2D([], [], color='w', marker='None',
                    label='99% Uncertainty', linewidth=1.0,
                    linestyle='--')
        plt.legend(handles=handles + [h0, h1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'localize_peak', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def make_extension_plots(self, ext, roi=None, **kwargs):
        """Dispatch the extension plots that apply to *ext*."""
        if ext.get('tsmap') is not None:
            self._plot_extension_tsmap(ext, roi=roi, **kwargs)
        if ext.get('ebin_ts_ext') is not None:
            self._plot_extension_ebin(ext, roi=roi, **kwargs)

    def _plot_extension_ebin(self, ext, roi=None, **kwargs):
        """Plot the fitted extension vs. energy: measured values where
        the per-bin TS exceeds 4, upper limits elsewhere."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        name = ext.get('name', '')
        name = name.lower().replace(' ', '_')
        # bins with a significant extension measurement
        m = ext['ebin_ts_ext'] > 4.0
        fig = plt.figure(figsize=figsize)
        ectr = ext['ebin_e_ctr']
        delo = ext['ebin_e_ctr'] - ext['ebin_e_min']
        dehi = ext['ebin_e_max'] - ext['ebin_e_ctr']
        xerr0 = np.vstack((delo[m], dehi[m]))
        xerr1 = np.vstack((delo[~m], dehi[~m]))
        ax = plt.gca()
        ax.errorbar(ectr[m], ext['ebin_ext'][m], xerr=xerr0,
                    yerr=(ext['ebin_ext_err_lo'][m],
                          ext['ebin_ext_err_hi'][m]),
                    color='k', linestyle='None', marker='o')
        ax.errorbar(ectr[~m], ext['ebin_ext_ul95'][~m], xerr=xerr1,
                    yerr=0.2 * ext['ebin_ext_ul95'][~m], uplims=True,
                    color='k', linestyle='None', marker='o')
        ax.set_xlabel('Energy [log$_{10}$(E/MeV)]')
        ax.set_ylabel('Extension [deg]')
        ax.set_xscale('log')
        ax.set_yscale('log')
        annotate_name(ext)
        ymin = min(10**-1.5, 0.8 * ext['ext_ul95'])
        ymax = max(10**-0.5, 1.2 * ext['ext_ul95'])
        if np.any(np.isfinite(ext['ebin_ext_ul95'])):
            ymin = min(ymin, 0.8 * np.nanmin(ext['ebin_ext_ul95']))
            ymax = max(ymax, 1.2 * np.nanmax(ext['ebin_ext_ul95']))
        if ext['ts_ext'] > 4.0:
            # overall significant extension: band with error range
            plt.axhline(ext['ext'], color='k')
            ext_lo = ext['ext'] - ext['ext_err_lo']
            ext_hi = ext['ext'] + ext['ext_err_hi']
            ax.fill_between([ext['ebin_e_min'][0], ext['ebin_e_max'][-1]],
                            [ext_lo, ext_lo], [ext_hi, ext_hi],
                            alpha=0.5, color='k', zorder=-1)
            ymin = min(ymin, 0.8 * (ext['ext'] - ext['ext_err_lo']))
            ymax = max(ymax, 1.2 * (ext['ext'] + ext['ext_err_hi']))
        else:
            plt.axhline(ext['ext_ul95'], color='k', linestyle='--')
        ax.set_ylim(ymin, ymax)
        ax.set_xlim(ext['ebin_e_min'][0], ext['ebin_e_max'][-1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'extension_ebin', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def _plot_extension_tsmap(self, ext, roi=None, **kwargs):
        """Plot the extension TS map with the best-fit radius (or its
        95% upper limit) drawn as circles."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        cmap = kwargs.get('cmap', self.config['cmap'])
        name = ext.get('name', '')
        name = name.lower().replace(' ', '_')
        p = ROIPlotter(ext['tsmap'], roi=roi)
        fig = plt.figure(figsize=figsize)
        sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))
        p.plot(cmap=cmap, interpolation='bicubic', levels=sigma_levels,
               transform='sqrt')
        c = SkyCoord(ext['ra'], ext['dec'], unit='deg')
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        if ext['ts_ext'] > 9.0:
            p.draw_circle(ext['ext'], skydir=c, edgecolor='lime', linestyle='-',
                          linewidth=1.0, label='R$_{68}$', path_effects=[path_effect])
            p.draw_circle(ext['ext'] + ext['ext_err'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, label='R$_{68}$ $\pm 1 \sigma$', path_effects=[path_effect])
            p.draw_circle(ext['ext'] - ext['ext_err'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, path_effects=[path_effect])
        else:
            p.draw_circle(ext['ext_ul95'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, label='R$_{68}$ 95% UL',
                          path_effects=[path_effect])
        leg = plt.gca().legend(frameon=False, loc='upper left')
        for text in leg.get_texts():
            text.set_color('lime')
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'extension', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs):
        """Utility function for generating diagnostic plots for the
        extension analysis.

        NOTE(review): this method reads ``self.energies``,
        ``self.roi``, ``self.components`` and
        ``self.config['fileio']`` — attributes that are not defined
        on AnalysisPlotter itself and look inherited from a
        GTAnalysis-style object; confirm the intended receiver before
        relying on this method.
        """
        # format = kwargs.get('format', self.config['plotting']['format'])
        if loge_bounds is None:
            loge_bounds = (self.energies[0], self.energies[-1])
        name = src['name'].lower().replace(' ', '_')
        esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
        p = ExtensionPlotter(src, self.roi, '',
                             self.config['fileio']['workdir'],
                             loge_bounds=loge_bounds)
        fig = plt.figure()
        p.plot(0)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(0)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_xproj%s.png' % (
                                     prefix, name, esuffix)))
        plt.close(fig)
        fig = plt.figure()
        p.plot(1)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(1)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_yproj%s.png' % (
                                     prefix, name, esuffix)))
        plt.close(fig)
        # per-component projections
        for i, c in enumerate(self.components):
            suffix = '_%02i' % i
            p = ExtensionPlotter(src, self.roi, suffix,
                                 self.config['fileio']['workdir'],
                                 loge_bounds=loge_bounds)
            fig = plt.figure()
            p.plot(0)
            ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds)
            annotate(src=src, loge_bounds=loge_bounds)
            plt.gca().set_xlim(-2, 2)
            plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                     '%s_%s_extension_xproj%s%s.png' % (
                                         prefix, name, esuffix, suffix)))
            plt.close(fig)
            fig = plt.figure()
            p.plot(1)
            plt.gca().set_xlim(-2, 2)
            ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds)
            annotate(src=src, loge_bounds=loge_bounds)
            plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                     '%s_%s_extension_yproj%s%s.png' % (
                                         prefix, name, esuffix, suffix)))
            plt.close(fig)
| |
"""Functional tests for slice op."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class SliceTest(tf.test.TestCase):
    """Functional tests for the slice op and Tensor.__getitem__."""

    def _testEmpty(self, use_gpu):
        inp = np.random.rand(4, 4).astype("f")
        for k in xrange(4):
            with self.test_session(use_gpu=use_gpu):
                a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
                slice_t = a[2, k:k]
                slice_val = slice_t.eval()
            self.assertAllEqual(slice_val, inp[2, k:k])

    def testEmptyAll(self):
        self._testEmpty(use_gpu=False)
        self._testEmpty(use_gpu=True)

    def _testInt32(self, use_gpu):
        inp = np.random.rand(4, 4).astype("i")
        for k in xrange(4):
            with self.test_session(use_gpu=use_gpu):
                a = tf.constant(inp, shape=[4, 4], dtype=tf.int32)
                slice_t = a[2, k:k]
                slice_val = slice_t.eval()
            self.assertAllEqual(slice_val, inp[2, k:k])

    def testInt32(self):
        # BUG FIX: this previously called self._testEmpty, so the
        # int32 path was never exercised.
        self._testInt32(use_gpu=False)
        self._testInt32(use_gpu=True)

    def _testSelectAll(self, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            inp = np.random.rand(4, 4, 4, 4).astype("f")
            a = tf.constant(inp, shape=[4, 4, 4, 4],
                            dtype=tf.float32)
            slice_explicit_t = tf.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
            slice_implicit_t = a[:, :, :, :]
            self.assertAllEqual(inp, slice_explicit_t.eval())
            self.assertAllEqual(inp, slice_implicit_t.eval())
            self.assertEqual(inp.shape, slice_explicit_t.get_shape())
            self.assertEqual(inp.shape, slice_implicit_t.get_shape())

    def testSelectAll(self):
        for _ in range(10):
            self._testSelectAll(use_gpu=False)
            self._testSelectAll(use_gpu=True)

    def _testSingleDimension(self, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            inp = np.random.rand(10).astype("f")
            a = tf.constant(inp, shape=[10], dtype=tf.float32)
            hi = np.random.random_integers(0, 9)
            scalar_t = a[hi]
            scalar_val = scalar_t.eval()
            self.assertAllEqual(scalar_val, inp[hi])
            lo = np.random.random_integers(0, hi)
            slice_t = a[lo:hi]
            slice_val = slice_t.eval()
            self.assertAllEqual(slice_val, inp[lo:hi])

    def testSingleDimension(self):
        for _ in range(10):
            self._testSingleDimension(use_gpu=False)
            self._testSingleDimension(use_gpu=True)

    def _testSliceMatrixDim0(self, x, begin, size, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            tf_ans = tf.slice(x, [begin, 0], [size, x.shape[1]]).eval()
        np_ans = x[begin:begin+size, :]
        self.assertAllEqual(tf_ans, np_ans)

    def testSliceMatrixDim0(self):
        for use_gpu in [False, True]:
            x = np.random.rand(8, 4).astype("f")
            self._testSliceMatrixDim0(x, 1, 2, use_gpu)
            self._testSliceMatrixDim0(x, 3, 3, use_gpu)
            y = np.random.rand(8, 7).astype("f")  # 7 * sizeof(float) is not aligned
            self._testSliceMatrixDim0(y, 1, 2, use_gpu)
            self._testSliceMatrixDim0(y, 3, 3, use_gpu)

    def _testIndexAndSlice(self, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            inp = np.random.rand(4, 4).astype("f")
            a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
            x, y = np.random.random_integers(0, 3, size=2).tolist()
            slice_t = a[x, 0:y]
            slice_val = slice_t.eval()
            self.assertAllEqual(slice_val, inp[x, 0:y])

    def testSingleElementAll(self):
        for _ in range(10):
            self._testIndexAndSlice(use_gpu=False)
            self._testIndexAndSlice(use_gpu=True)

    def _testSimple(self, use_gpu):
        with self.test_session(use_gpu=use_gpu) as sess:
            inp = np.random.rand(4, 4).astype("f")
            a = tf.constant([float(x) for x in inp.ravel(order="C")],
                            shape=[4, 4], dtype=tf.float32)
            slice_t = tf.slice(a, [0, 0], [2, 2])
            slice2_t = a[:2, :2]
            slice_val, slice2_val = sess.run([slice_t, slice2_t])
        self.assertAllEqual(slice_val, inp[:2, :2])
        self.assertAllEqual(slice2_val, inp[:2, :2])
        self.assertEqual(slice_val.shape, slice_t.get_shape())
        self.assertEqual(slice2_val.shape, slice2_t.get_shape())

    def testSimpleAll(self):
        self._testSimple(use_gpu=False)
        self._testSimple(use_gpu=True)

    def _testComplex(self, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            inp = np.random.rand(4, 10, 10, 4).astype("f")
            a = tf.constant(inp, dtype=tf.float32)
            # Random, but guaranteed y <= z so the slice is valid.
            x = np.random.random_integers(0, 9)
            z = np.random.random_integers(0, 9)
            y = np.random.random_integers(0, z)
            slice_t = a[:, x, y:z, :]
            self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])

    def testComplex(self):
        for _ in range(10):
            self._testComplex(use_gpu=False)
            self._testComplex(use_gpu=True)

    def _RunAndVerifyResult(self, use_gpu):
        # Random dims of rank 5
        input_shape = np.random.randint(0, 20, size=5)
        inp = np.random.rand(*input_shape).astype("f")
        with self.test_session(use_gpu=use_gpu) as sess:
            a = tf.constant([float(x) for x in inp.ravel(order="C")],
                            shape=input_shape, dtype=tf.float32)
            indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
            sizes = [np.random.randint(0, input_shape[i] - indices[i] + 1)
                     for i in range(5)]
            slice_t = tf.slice(a, indices, sizes)
            slice2_t = a[indices[0]:indices[0]+sizes[0],
                         indices[1]:indices[1]+sizes[1],
                         indices[2]:indices[2]+sizes[2],
                         indices[3]:indices[3]+sizes[3],
                         indices[4]:indices[4]+sizes[4]]
            slice_val, slice2_val = sess.run([slice_t, slice2_t])
        expected_val = inp[indices[0]:indices[0]+sizes[0],
                           indices[1]:indices[1]+sizes[1],
                           indices[2]:indices[2]+sizes[2],
                           indices[3]:indices[3]+sizes[3],
                           indices[4]:indices[4]+sizes[4]]
        self.assertAllEqual(slice_val, expected_val)
        self.assertAllEqual(slice2_val, expected_val)
        self.assertEqual(expected_val.shape, slice_t.get_shape())
        self.assertEqual(expected_val.shape, slice2_t.get_shape())

    def testRandom(self):
        for _ in range(10):
            self._RunAndVerifyResult(use_gpu=False)
            self._RunAndVerifyResult(use_gpu=True)

    def _testGradientSlice(self, input_shape, slice_begin, slice_size, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            num_inputs = np.prod(input_shape)
            num_grads = np.prod(slice_size)
            inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
            a = tf.constant([float(x) for x in inp.ravel(order="C")],
                            shape=input_shape, dtype=tf.float32)
            slice_t = tf.slice(a, slice_begin, slice_size)
            grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
            grad_tensor = tf.constant(grads)
            grad = tf.gradients(slice_t, [a], grad_tensor)[0]
            result = grad.eval()
        # Create a zero tensor of the input shape and place
        # the grads into the right location to compare against TensorFlow.
        np_ans = np.zeros(input_shape)
        slices = []
        for i in xrange(len(input_shape)):
            slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
        np_ans[slices] = grads
        self.assertAllClose(np_ans, result)

    def _testGradientVariableSize(self, use_gpu):
        with self.test_session(use_gpu=use_gpu):
            inp = tf.constant([1.0, 2.0, 3.0], name="in")
            out = tf.slice(inp, [1], [-1])
            grad_actual = tf.gradients(out, inp)[0].eval()
        self.assertAllClose([0., 1., 1.], grad_actual)

    def _testGradientsSimple(self, use_gpu):
        # Slice the middle square out of a 4x4 input
        self._testGradientSlice([4, 4], [1, 1], [2, 2], use_gpu)
        # Slice the upper left square out of a 4x4 input
        self._testGradientSlice([4, 4], [0, 0], [2, 2], use_gpu)
        # Slice a non-square input starting from (2,1)
        self._testGradientSlice([4, 4], [2, 1], [1, 2], use_gpu)
        # Slice a 3D tensor
        self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1], use_gpu)
        # Use -1 as a slice dimension.
        self._testGradientVariableSize(use_gpu)

    def testGradientsAll(self):
        self._testGradientsSimple(use_gpu=False)
        self._testGradientsSimple(use_gpu=True)

    def testNotIterable(self):
        # NOTE(mrry): If we register __getitem__ as an overloaded
        # operator, Python will valiantly attempt to iterate over the
        # Tensor from 0 to infinity.  This test ensures that this
        # unintended behavior is prevented.
        c = tf.constant(5.0)
        with self.assertRaisesWithPredicateMatch(
                TypeError,
                lambda e: "'Tensor' object is not iterable" in e.message):
            for _ in c:
                pass
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| |
from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
    """Decorator resolving a material name to a bpy material object.

    The wrapped accessor can be called either with a
    ``bpy.types.Material`` instance or with a material name, which is
    looked up in ``bpy.data.materials``.
    """
    def inner(name, *args, **kwargs):
        if isinstance(name, types.Material):
            resolved = name
        else:
            resolved = data.materials[name]
        return func(resolved, *args, **kwargs)
    return inner
@_material
def ambient_color(material):
    """Return the ambient colour as an (r, g, b) tuple."""
    logger.debug('material.ambient_color(%s)', material)
    base = diffuse_color(material)
    return tuple(material.ambient * channel for channel in base)
@_material
def blending(material):
    """Return the THREE blending type, defaulting to NORMAL_BLENDING."""
    logger.debug('material.blending(%s)', material)
    _missing = object()
    blend = getattr(material, 'THREE_blending_type', _missing)
    if blend is _missing:
        logger.debug('No THREE_blending_type attribute found')
        blend = constants.NORMAL_BLENDING
    return blend
@_material
def bump_map(material):
    """Return the first bump-map texture (normal-mapped slots excluded)."""
    logger.debug('material.bump_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal and not slot.texture.use_normal_map:
            return slot.texture
    return None
@_material
def bump_scale(material):
    """Return the bump scale (shared with the normal-map scale factor)."""
    # Consistency fix: every other accessor in this module logs its call.
    logger.debug('material.bump_scale(%s)', material)
    return normal_scale(material)
@_material
def depth_test(material):
    """Return the THREE depth-test flag, defaulting to True."""
    logger.debug('material.depth_test(%s)', material)
    _missing = object()
    test = getattr(material, 'THREE_depth_test', _missing)
    if test is _missing:
        logger.debug('No THREE_depth_test attribute found')
        test = True
    return test
@_material
def depth_write(material):
    """Return the THREE depth-write flag, defaulting to True."""
    logger.debug('material.depth_write(%s)', material)
    _missing = object()
    write = getattr(material, 'THREE_depth_write', _missing)
    if write is _missing:
        logger.debug('No THREE_depth_write attribute found')
        write = True
    return write
@_material
def diffuse_color(material):
    """Return the intensity-scaled diffuse colour as an (r, g, b) tuple."""
    logger.debug('material.diffuse_color(%s)', material)
    intensity = material.diffuse_intensity
    return tuple(intensity * material.diffuse_color[i] for i in range(3))
@_material
def diffuse_map(material):
    """Return the first diffuse-colour texture that is not multiplied."""
    logger.debug('material.diffuse_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_color_diffuse and slot.blend_type != MULTIPLY:
            return slot.texture
    return None
@_material
def emissive_color(material):
    """Return the emissive colour (emit-scaled diffuse) as a tuple."""
    logger.debug('material.emissive_color(%s)', material)
    base = diffuse_color(material)
    return tuple(material.emit * channel for channel in base)
@_material
def light_map(material):
    """Return the first diffuse texture blended with MULTIPLY."""
    logger.debug('material.light_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_color_diffuse and slot.blend_type == MULTIPLY:
            return slot.texture
    return None
@_material
def normal_scale(material):
    """Return the normal factor of the first normal-mapped texture slot."""
    logger.debug('material.normal_scale(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal:
            return slot.normal_factor
    return None
@_material
def normal_map(material):
    """Return the first texture used as a true normal map."""
    logger.debug('material.normal_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_normal and slot.texture.use_normal_map:
            return slot.texture
    return None
@_material
def opacity(material):
    """Return the material's alpha rounded to two decimal places."""
    logger.debug('material.opacity(%s)', material)
    # PEP 8 clean-up: no trailing semicolon, no spaces inside the parens.
    return round(material.alpha, 2)
@_material
def shading(material):
    """Return PHONG when the material has specularity, LAMBERT otherwise."""
    logger.debug('material.shading(%s)', material)
    if material.specular_intensity > 0.0:
        return constants.PHONG
    return constants.LAMBERT
@_material
def specular_coef(material):
    """Return the specular hardness (shininess exponent)."""
    logger.debug('material.specular_coef(%s)', material)
    hardness = material.specular_hardness
    return hardness
@_material
def specular_color(material):
    """Return the intensity-scaled specular colour as an (r, g, b) tuple."""
    logger.debug('material.specular_color(%s)', material)
    intensity = material.specular_intensity
    return tuple(intensity * material.specular_color[i] for i in range(3))
@_material
def specular_map(material):
    """Return the first texture mapped to specularity, if any."""
    logger.debug('material.specular_map(%s)', material)
    for slot in _valid_textures(material):
        if slot.use_map_specular:
            return slot.texture
    return None
@_material
def transparent(material):
    """Return whether transparency is enabled on the material."""
    logger.debug('material.transparent(%s)', material)
    enabled = material.use_transparency
    return enabled
@_material
def type(material):
    """Classify the material as BASIC, PHONG or LAMBERT.

    NOTE: intentionally shadows the ``type`` builtin — the name is part
    of this module's public API.
    """
    logger.debug('material.type(%s)', material)
    if material.diffuse_shader != 'LAMBERT':
        return constants.BASIC
    if material.specular_intensity > 0:
        return constants.PHONG
    return constants.LAMBERT
@_material
def use_vertex_colors(material):
    """Return whether vertex-colour painting is enabled."""
    logger.debug('material.use_vertex_colors(%s)', material)
    enabled = material.use_vertex_color_paint
    return enabled
def used_materials():
    """Yield the name of each material that has at least one user."""
    logger.debug('material.used_materials()')
    for mat in data.materials:
        if mat.users > 0:
            yield mat.name
@_material
def visible(material):
    """Return the THREE visibility flag, defaulting to True."""
    logger.debug('material.visible(%s)', material)
    _missing = object()
    vis = getattr(material, 'THREE_visible', _missing)
    if vis is _missing:
        logger.debug('No THREE_visible attribute found')
        vis = True
    return vis
@_material
def wireframe(material):
    """Return True when the material renders as wireframe."""
    logger.debug('material.wireframe(%s)', material)
    is_wire = material.type == WIRE
    return is_wire
def _valid_textures(material):
    """Yield the material's texture slots that hold an image texture."""
    for slot in material.texture_slots:
        if not slot:
            continue
        if slot.texture.type != IMAGE:
            continue
        logger.debug('Valid texture found %s', slot)
        yield slot
| |
from configparser import RawConfigParser
from binascii import hexlify
from itertools import islice
import errno
import os
import shutil
import struct
import sys
from zlib import crc32
from .hashindex import NSIndex
from .helpers import Error, IntegrityError, read_msgpack, write_msgpack, unhexlify
from .locking import UpgradableLock
from .lrucache import LRUCache
# Hard upper bound for a single stored object (header + data).
MAX_OBJECT_SIZE = 20 * 1024 * 1024
# Every segment file starts with this magic marker.
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)
# Segment entry tags: object write, tombstone, transaction commit marker.
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
class Repository:
    """Filesystem based transactional key value store
    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024  # bytes per segment file
    DEFAULT_SEGMENTS_PER_DIR = 10000  # fan-out of data/ subdirectories
    class DoesNotExist(Error):
        """Repository {} does not exist."""
    class AlreadyExists(Error):
        """Repository {} already exists."""
    class InvalidRepository(Error):
        """{} is not a valid repository."""
    class CheckNeeded(Error):
        """Inconsistency detected. Please run "borg check {}"."""
    class ObjectNotFound(Error):
        """Object with key {} not found in repository {}."""
    def __init__(self, path, create=False, exclusive=False):
        """Open (and optionally create) the repository at `path`."""
        # Attributes are initialised before create()/open() so that
        # __del__ -> close() is safe even if opening fails part-way.
        self.path = os.path.abspath(path)
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        if create:
            self.create(self.path)
        self.open(self.path, exclusive)
    def __del__(self):
        # Best-effort cleanup: releases the lock and closes segment files.
        self.close()
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.path)
    def create(self, path):
        """Create a new empty repository at `path`
        """
        # Refuse to create over an existing file or a non-empty directory.
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is a Borg repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        # Random 256-bit repository id, stored hex-encoded.
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        self.save_config(path, config)
    def save_config(self, path, config):
        """Write the INI-style config file into the repository directory."""
        config_path = os.path.join(path, 'config')
        with open(config_path, 'w') as fd:
            config.write(fd)
    def save_key(self, keydata):
        """Persist key material into the repository config."""
        assert self.config
        keydata = keydata.decode('utf-8')  # remote repo: msgpack issue #99, getting bytes
        self.config.set('repository', 'key', keydata)
        self.save_config(self.path, self.config)
    def load_key(self):
        """Return the key material previously stored by save_key()."""
        keydata = self.config.get('repository', 'key')
        return keydata.encode('utf-8')  # remote repo: msgpack issue #99, returning bytes
    def destroy(self):
        """Destroy the repository at `self.path`
        """
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)
    def get_index_transaction_id(self):
        """Return the newest committed txn id according to index.N files."""
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None
    def get_transaction_id(self):
        """Return the current transaction id, replaying segments first if
        the on-disk index lags behind the committed segments."""
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            # An index exists but no committed segment does: corruption.
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                # Index is newer than any committed segment: replay all.
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
        return self.get_index_transaction_id()
    def open(self, path, exclusive):
        """Acquire the repo lock, read the config and set up segment I/O."""
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.lock = UpgradableLock(os.path.join(path, 'lock'), exclusive).acquire()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
    def close(self):
        # Idempotent: safe to call repeatedly (also invoked from __del__).
        if self.lock:
            if self.io:
                self.io.close()
                self.io = None
            self.lock.release()
            self.lock = None
    def commit(self):
        """Commit transaction
        """
        # Order matters: seal with a COMMIT tag first, then compact,
        # then persist index/hints, then drop in-memory txn state.
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()
    def open_index(self, transaction_id):
        """Load the index for `transaction_id` (empty index when None)."""
        if transaction_id is None:
            return NSIndex()
        return NSIndex.read((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'))
    def prepare_txn(self, transaction_id, do_cleanup=True):
        """Upgrade to an exclusive lock and load index/hints state."""
        self._active_txn = True
        try:
            self.lock.upgrade()
        except UpgradableLock.ExclusiveLockFailed:
            # if upgrading the lock to exclusive fails, we do not have an
            # active transaction. this is important for "serve" mode, where
            # the repository instance lives on - even if exceptions happened.
            self._active_txn = False
            raise
        if not self.index:
            self.index = self.open_index(transaction_id)
        if transaction_id is None:
            self.segments = {}
            self.compact = set()
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id))
            if hints[b'version'] != 1:
                # NOTE(review): hints keys are bytes, so hints['version'] in
                # this message would raise KeyError if ever reached — verify.
                raise ValueError('Unknown hints file version: %d' % hints['version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])
    def write_index(self):
        """Atomically persist the index and hints for the current txn."""
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        transaction_id = self.io.get_segments_transaction_id()
        write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id), hints)
        self.index.write(os.path.join(self.path, 'index.tmp'))
        # rename() is atomic: readers see either the old or the new index.
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None
    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
                        # Entry is still live: move it into a fresh segment.
                        new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        # Re-emit the tombstone unless the saved index
                        # already reflects the deletion.
                        if index_transaction_id is None or segment > index_transaction_id:
                            self.io.write_delete(key)
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in sorted(self.compact):
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()
    def replay_segments(self, index_transaction_id, segments_transaction_id):
        """Rebuild index/hints by replaying segments in the range
        (index_transaction_id, segments_transaction_id]."""
        self.prepare_txn(index_transaction_id, do_cleanup=False)
        for segment, filename in self.io.segment_iterator():
            if index_transaction_id is not None and segment <= index_transaction_id:
                continue
            if segment > segments_transaction_id:
                break
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        # A later put supersedes any earlier entry.
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    # Unknown tag: the repository needs a full check.
                    raise self.CheckNeeded(self.path)
            if self.segments[segment] == 0:
                self.compact.add(segment)
        self.write_index()
        self.rollback()
    def check(self, repair=False):
        """Check repository consistency
        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        error_found = False
        def report_error(msg):
            nonlocal error_found
            error_found = True
            print(msg, file=sys.stderr)
        assert not self._active_txn
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.open_index(transaction_id)
        except Exception:
            # Index missing/corrupt: fall back to the segment files.
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        if repair:
            self.io.cleanup(transaction_id)
        segments_transaction_id = self.io.get_segments_transaction_id()
        # Build a fresh in-memory index by replaying all checked segments.
        self.prepare_txn(None)
        for segment, filename in self.io.segment_iterator():
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except IntegrityError as err:
                report_error('Error reading segment {}: {}'.format(segment, err))
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self.segments[segment] = 0
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_error('Unexpected tag {} in segment {}'.format(tag, segment))
        # We might need to add a commit tag if no committed segment is found
        if repair and segments_transaction_id is None:
            report_error('Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
        if current_index and not repair:
            if len(current_index) != len(self.index):
                report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
            elif current_index:
                # NOTE(review): this condition is always true here (the outer
                # 'if' already requires current_index) — kept as written.
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
        if repair:
            self.compact_segments()
            self.write_index()
        self.rollback()
        return not error_found or repair
    def rollback(self):
        """Discard the in-memory transaction state (no disk writes)."""
        self.index = None
        self._active_txn = False
    def __len__(self):
        # Lazily load the latest committed index outside a transaction.
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return len(self.index)
    def __contains__(self, id):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return id in self.index
    def list(self, limit=None, marker=None):
        """Return up to `limit` object ids, starting after `marker`."""
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
    def get(self, id_):
        """Return the data stored under `id_` (raises ObjectNotFound)."""
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.ObjectNotFound(id_, self.path)
    def get_many(self, ids, is_preloaded=False):
        # `is_preloaded` only matters for remote repositories.
        for id_ in ids:
            yield self.get(id_)
    def put(self, id, data, wait=True):
        """Store `data` under key `id` (`wait` exists for interface
        compatibility with remote repositories)."""
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            # Key exists: account the old entry for compaction and write a
            # tombstone before the new put.
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset
    def delete(self, id, wait=True):
        """Delete the object stored under `id` (raises ObjectNotFound)."""
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.ObjectNotFound(id, self.path)
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        self.segments.setdefault(segment, 0)
    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """
class LoggedIO:
    """Append-only segment-file I/O layer for Repository.

    On-disk entry layout: crc32(4) | size(4) | tag(1) [| key(32) [| data]].
    The crc covers everything after the crc field itself.
    """
    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    # size + tag only; the crc is computed over this (plus key/data)
    # and prepended separately.
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4
    # Pre-built COMMIT entry used to seal a transaction.
    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
    def __init__(self, path, limit, segments_per_dir, capacity=90):
        # `limit` is the max segment size; `capacity` bounds open read fds.
        self.path = path
        self.fds = LRUCache(capacity,
                            dispose=lambda fd: fd.close())
        self.segment = 0
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None
    def close(self):
        """Close the write segment and all cached read fds."""
        self.close_segment()
        self.fds.clear()
        self.fds = None  # Just to make sure we're disabled
    def segment_iterator(self, reverse=False):
        """Yield (segment number, filename) in numeric order."""
        data_path = os.path.join(self.path, 'data')
        dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)
        for dir in dirs:
            filenames = os.listdir(os.path.join(data_path, dir))
            sorted_filenames = sorted((filename for filename in filenames
                                       if filename.isdigit()), key=int, reverse=reverse)
            for filename in sorted_filenames:
                yield int(filename), os.path.join(data_path, dir, filename)
    def get_latest_segment(self):
        """Return the highest existing segment number, or None."""
        for segment, filename in self.segment_iterator(reverse=True):
            return segment
        return None
    def get_segments_transaction_id(self):
        """Verify that the transaction id is consistent with the index transaction id
        """
        # The newest segment that ends with a COMMIT entry defines the
        # last committed transaction.
        for segment, filename in self.segment_iterator(reverse=True):
            if self.is_committed_segment(filename):
                return segment
        return None
    def cleanup(self, transaction_id):
        """Delete segment files left by aborted transactions
        """
        self.segment = transaction_id + 1
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > transaction_id:
                os.unlink(filename)
            else:
                break
    def is_committed_segment(self, filename):
        """Check if segment ends with a COMMIT_TAG tag
        """
        with open(filename, 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except OSError as e:
                # return False if segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise e
            return fd.read(self.header_fmt.size) == self.COMMIT
    def segment_filename(self, segment):
        # Segments are fanned out over numbered subdirectories of data/.
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
    def get_write_fd(self, no_new=False):
        """Return the current write fd, rolling over to a new segment once
        `limit` is exceeded (unless `no_new` is set)."""
        if not no_new and self.offset and self.offset > self.limit:
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(MAGIC)
            self.offset = MAGIC_LEN
        return self._write_fd
    def get_fd(self, segment):
        # Read fds are kept in an LRU cache; reopen on a miss.
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd
    def delete_segment(self, segment):
        """Remove a segment file, ignoring a missing file."""
        if segment in self.fds:
            del self.fds[segment]
        try:
            os.unlink(self.segment_filename(segment))
        except OSError:
            pass
    def segment_exists(self, segment):
        return os.path.exists(self.segment_filename(segment))
    def iter_objects(self, segment, include_data=False):
        """Yield (tag, key, offset[, data]) for each entry in `segment`,
        verifying the magic marker and every entry checksum."""
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(MAGIC_LEN) != MAGIC:
            raise IntegrityError('Invalid segment magic')
        offset = MAGIC_LEN
        header = fd.read(self.header_fmt.size)
        while header:
            try:
                crc, size, tag = self.header_fmt.unpack(header)
            except struct.error as err:
                raise IntegrityError('Invalid segment entry header [offset {}]: {}'.format(offset, err))
            if size > MAX_OBJECT_SIZE:
                raise IntegrityError('Invalid segment entry size [offset {}]'.format(offset))
            length = size - self.header_fmt.size
            rest = fd.read(length)
            if len(rest) != length:
                raise IntegrityError('Segment entry data short read [offset {}]: expected: {}, got {} bytes'.format(
                    offset, length, len(rest)))
            # The crc covers size+tag (header minus the crc field) + payload.
            if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment entry checksum mismatch [offset {}]'.format(offset))
            if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
                raise IntegrityError('Invalid segment entry tag [offset {}]'.format(offset))
            key = None
            if tag in (TAG_PUT, TAG_DELETE):
                key = rest[:32]
            if include_data:
                yield tag, key, offset, rest[32:]
            else:
                yield tag, key, offset
            offset += size
            header = fd.read(self.header_fmt.size)
    def recover_segment(self, segment, filename):
        """Salvage readable entries from a corrupt segment by scanning
        byte-by-byte for entries whose checksum verifies."""
        if segment in self.fds:
            del self.fds[segment]
        # FIXME: save a copy of the original file
        with open(filename, 'rb') as fd:
            data = memoryview(fd.read())
        os.rename(filename, filename + '.beforerecover')
        print('attempting to recover ' + filename, file=sys.stderr)
        with open(filename, 'wb') as fd:
            fd.write(MAGIC)
            while len(data) >= self.header_fmt.size:
                crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
                if size < self.header_fmt.size or size > len(data):
                    # Implausible size: shift forward one byte and retry.
                    data = data[1:]
                    continue
                if crc32(data[4:size]) & 0xffffffff != crc:
                    data = data[1:]
                    continue
                fd.write(data[:size])
                data = data[size:]
    def read(self, segment, offset, id):
        """Read and verify the object stored for `id` at (segment, offset)."""
        if segment == self.segment and self._write_fd:
            # Make buffered writes visible to the read fd first.
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        crc, size, tag, key = self.put_header_fmt.unpack(header)
        if size > MAX_OBJECT_SIZE:
            raise IntegrityError('Invalid segment object size')
        data = fd.read(size - self.put_header_fmt.size)
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment checksum mismatch')
        if tag != TAG_PUT or id != key:
            raise IntegrityError('Invalid segment entry header')
        return data
    def write_put(self, id, data):
        """Append a PUT entry; return (segment, offset) of the entry."""
        size = len(data) + self.put_header_fmt.size
        fd = self.get_write_fd()
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset
    def write_delete(self, id):
        """Append a DELETE tombstone; return the segment it went into."""
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment
    def write_commit(self):
        """Append a COMMIT entry and close the current segment."""
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.close_segment()
    def close_segment(self):
        """Flush, fsync and close the write segment, advancing the counter."""
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            self._write_fd.flush()
            os.fsync(self._write_fd.fileno())
            if hasattr(os, 'posix_fadvise'):  # python >= 3.3, only on UNIX
                # tell the OS that it does not need to cache what we just wrote,
                # avoids spoiling the cache for the OS and other processes.
                os.posix_fadvise(self._write_fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
            self._write_fd.close()
            self._write_fd = None
| |
# hw1.py
# Name: Connor Durkin
# netID : cwd28
# Date: 29 September 2015
# Class: CPSC 458
# Instructor: Prof. Stephen Slade
import random
import numpy
import json
# initialize some useful global variables
global in_play  # NOTE(review): 'global' at module scope is a no-op; kept as-is
in_play = False  # True while a hand is being played out
global outcome  # NOTE(review): also a module-scope no-op
outcome = " start game"  # status message shown to the player
score = 0  # running win/loss tally (+1 per win, -1 per loss)
# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
# Ace counts as 1 here; Hand.get_value promotes a soft ace to 11.
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
class Card:
    def __init__(self, suit, rank):
        """Create a card; an invalid suit/rank leaves both fields None."""
        if (suit in SUITS) and (rank in RANKS):
            self.suit = suit
            self.rank = rank
        else:
            self.suit = None
            self.rank = None
            print "Invalid card: ", suit, rank
    def __str__(self):
        # e.g. 'CA' for the ace of clubs
        return self.suit + self.rank
    def get_suit(self):
        return self.suit
    def get_rank(self):
        return self.rank
# define hand class
class Hand:
    """A blackjack hand: an ordered collection of Card objects."""
    def __init__(self):
        self.cards = []
    def __str__(self):
        """Return a string representation of the hand."""
        return "Hand contains " + "".join(str(card) + " " for card in self.cards)
    def add_card(self, card):
        """Append a card object to the hand."""
        self.cards.append(card)
    def get_value(self):
        """Compute the hand's blackjack value.

        Aces count as 1; if the hand holds an ace, 10 is added when that
        does not bust the hand (a "soft" ace).
        """
        total = 0
        has_ace = False
        for card in self.cards:
            rank = card.get_rank()
            total += VALUES[rank]
            if rank == 'A':
                has_ace = True
        if has_ace and total < 12:
            total += 10
        return total
# define deck class
class Deck:
    """A standard 52-card deck."""
    def __init__(self):
        self.deck = [Card(suit, rank) for suit in SUITS for rank in RANKS]
    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)
    def deal_card(self):
        """Remove and return the top card of the deck."""
        return self.deck.pop()
    def __str__(self):
        """Return a string representing the deck."""
        return "The deck: " + "".join(str(card) + " " for card in self.deck)
#define event handlers for buttons
def deal():
    """Start a new hand: fresh shuffled deck, two cards each.

    Dealing while a hand is still in play forfeits that hand (the house
    wins and the score drops by one).
    """
    global outcome, in_play, theDeck, playerhand, househand, score
    if in_play:
        # Fix: message previously read "House winds by default!".
        outcome = "House wins by default!"
        score -= 1
    else:
        outcome = "Hit or stand?"
    in_play = True
    theDeck = Deck()
    theDeck.shuffle()
    playerhand = Hand()
    househand = Hand()
    playerhand.add_card(theDeck.deal_card())
    playerhand.add_card(theDeck.deal_card())
    househand.add_card(theDeck.deal_card())
    househand.add_card(theDeck.deal_card())
def hit():
    """Deal the player one more card; a bust ends the hand.

    On bust, sets the outcome message, clears in_play and decrements the
    score. Does nothing when no hand is in play.
    """
    global in_play, score, outcome
    if not in_play:
        return
    playerhand.add_card(theDeck.deal_card())
    if playerhand.get_value() > 21:
        outcome = "You are busted! House wins!"
        in_play = False
        score -= 1
def stand():
    """Play out the house hand and settle the bet.

    House hits to 17; ties go to the house; a busted player cannot win
    even when the house also busts.
    """
    global score, in_play, outcome
    if playerhand.get_value() > 21:
        outcome = "You are busted."
        return None
    if not in_play:
        outcome = "Game is over."
        return None
    val = househand.get_value()
    # House policy: draw until reaching 17 or more.
    while(val < 17):
        househand.add_card(theDeck.deal_card())
        val = househand.get_value()
        #print "House:", househand, "Value:", val
    if (val > 21):
        # print "House is busted!"
        if playerhand.get_value() > 21:
            outcome = "House is busted, but House wins tie game!"
            score -= 1
        else:
            outcome = "House is busted! Player wins!"
            score += 1
    else:
        if (val == playerhand.get_value()):
            outcome = "House wins ties!"
            score -= 1
        elif (val > playerhand.get_value()):
            outcome = "House wins!"
            score -= 1
        else:
            outcome = "Player wins!"
            score += 1
    in_play = False
    #print outcome, "Score:", score
    # if hand is in play, repeatedly hit dealer until his hand has value 17 or more
    # assign a message to outcome, update in_play and score
# sim
# performs Monte Carlo simulation to generate transcript
def sim(trials):
    """Monte-Carlo an always-hit player to build the hit/stand table.

    Keys are '<player hand value><dealer face value>'. Each visited key
    collects 1 (a hit that survived) / 0 (a hit that busted); the table
    is reduced to mean survival rates and dumped to the file 'transcript'.
    """
    transcript = {}
    # Pre-seed every key with 0.0 so hitme() never KeyErrors on a state
    # that was never visited during the simulation.
    for dealer_face_score in range(1, 11):
        for player_hand_value in range(1, 22):
            transcript['{0}{1}'.format(player_hand_value, dealer_face_score)] = 0.0
    for _ in range(trials):
        s = score
        deal()
        matrix_key = '{0}{1}'.format(playerhand.get_value(),
                                     VALUES[househand.cards[0].get_rank()])
        while True:
            hit()
            survived = 1 if (score - s) >= 0 else 0
            # Replace the 0.0 seed with a list on first visit (the original
            # did this implicitly via a bare 'except' around float + list).
            if not isinstance(transcript.get(matrix_key), list):
                transcript[matrix_key] = []
            transcript[matrix_key].append(survived)
            if not survived:
                break  # player busted: hand over
            matrix_key = '{0}{1}'.format(playerhand.get_value(),
                                         VALUES[househand.cards[0].get_rank()])
    # Reduce each visited key to its mean survival rate.
    transcript.update({key: numpy.mean(transcript[key]) for key in transcript})
    # Fix: close the file handle deterministically.
    with open("transcript", 'w') as fh:
        json.dump(transcript, fh)
# hitme
# performs lookup function to transcript
def hitme(player_hand, dealerfacecard):
    """Return True when the transcript says hitting survives more than
    half the time for this (player value, dealer face value) state."""
    with open("transcript", "r") as fh:
        table = json.load(fh)
    matrix_key = '{0}{1}'.format(player_hand, dealerfacecard)
    return table[matrix_key] > .5
# play
# plays blackjack many times using the hitme function to determine whether or
# not to hit and returns win ratio
# NOTE(review): `wins` is module-level, so repeated play() calls accumulate
# results across runs — verify that this is intended.
wins = []
def play(trials):
    """Play `trials` hands driven by hitme(); print and return the
    overall win ratio."""
    global in_play, score
    score = 0
    in_play = False
    for i in range(trials):
        deal()
        s = score
        while in_play:
            player_hand = playerhand.get_value()
            dealerfacecard = VALUES[househand.cards[0].get_rank()]
            if hitme(player_hand,dealerfacecard):
                hit()
            else:
                stand()
        # A hand was won iff the score increased during it.
        if (score-s) > 0:
            wins.append(1)
        else:
            wins.append(0)
    print numpy.mean(wins)
    return numpy.mean(wins)
| |
"""JSON API implementation for aiohttp."""
import inspect
from collections import MutableMapping, Sequence
__author__ = """Vladimir Bolshakov"""
__email__ = 'vovanbo@gmail.com'
__version__ = '0.37.0'
def setup_app_registry(app, registry_class, config):
    """Set up JSON API application registry.

    Builds a registry mapping both resource types and resource classes
    to their (schema, controller) pairs, validating each entry first.
    """
    from .common import ALLOWED_MEMBER_NAME_REGEX, logger, JSONAPI
    from .registry import Registry
    from .abc.schema import SchemaABC
    from .abc.contoller import ControllerABC
    def _require_class(obj, what):
        # Mirrors the original inline checks; message text is unchanged.
        if not inspect.isclass(obj):
            raise TypeError('Class (not instance) of %s is required.' % what)
    def _require_subclass(cls, base):
        if not issubclass(cls, base):
            raise TypeError(f'Subclass of {base.__name__} is required. '
                            f'Got: {cls}')
    if registry_class is None:
        registry_class = Registry
    else:
        _require_subclass(registry_class, Registry)
    app_registry = registry_class()
    for schema_cls, controller_cls in config.items():
        resource_type = schema_cls.opts.resource_type
        resource_cls = schema_cls.opts.resource_cls
        _require_class(controller_cls, 'controller')
        _require_subclass(controller_cls, ControllerABC)
        _require_class(schema_cls, 'schema')
        _require_subclass(schema_cls, SchemaABC)
        _require_class(schema_cls.opts.resource_cls, 'resource')
        if not ALLOWED_MEMBER_NAME_REGEX.fullmatch(resource_type):
            raise ValueError(f"Resource type '{resource_type}' is not allowed.")
        entry = (schema_cls, controller_cls)
        app_registry[resource_type] = entry
        app_registry[resource_cls] = entry
        logger.debug(
            'Registered %r '
            '(schema: %r, resource class: %r, type %r)',
            controller_cls.__name__, schema_cls.__name__,
            resource_cls.__name__, resource_type
        )
    return app_registry
def setup_custom_handlers(custom_handlers):
    """Set up default and custom handlers for JSON API application.

    Starts from the coroutine handlers exported by the default handlers
    module and overrides entries with `custom_handlers` (a mapping of
    name -> coroutine, or a sequence of coroutines keyed by __name__).
    """
    from . import handlers as default_handlers
    from .common import logger
    # Fix: the ABCs moved to collections.abc; importing them from
    # 'collections' fails on Python 3.10+. Import locally so this function
    # does not depend on the deprecated module-level import (the top-of-file
    # 'from collections import MutableMapping, Sequence' should be updated
    # to 'collections.abc' as well).
    from collections.abc import MutableMapping, Sequence
    handlers = {
        name: handler
        for name, handler in inspect.getmembers(default_handlers,
                                                inspect.iscoroutinefunction)
        if name in default_handlers.__all__
    }
    if custom_handlers is None:
        return handlers
    if isinstance(custom_handlers, MutableMapping):
        custom_handlers_iter = custom_handlers.items()
    elif isinstance(custom_handlers, Sequence):
        custom_handlers_iter = ((c.__name__, c) for c in custom_handlers)
    else:
        raise TypeError('Wrong type of "custom_handlers" parameter. '
                        'Mapping or Sequence is expected.')
    for name, custom_handler in custom_handlers_iter:
        handler_name = custom_handler.__name__
        if name not in handlers:
            # Not one of the known default handler names.
            logger.warning('Custom handler %s is ignored.', name)
            continue
        if not inspect.iscoroutinefunction(custom_handler):
            logger.error('"%s" is not a co-routine function (ignored).',
                         handler_name)
            continue
        handlers[name] = custom_handler
        logger.debug('Default handler "%s" is replaced '
                     'with co-routine "%s" (%s)',
                     name, handler_name, inspect.getmodule(custom_handler))
    return handlers
def setup_resources(app, base_path, handlers, routes_namespace):
    """Set up JSON API application resources."""
    from .common import ALLOWED_MEMBER_NAME_RULE
    type_part = '{type:' + ALLOWED_MEMBER_NAME_RULE + '}'
    relation_part = '{relation:' + ALLOWED_MEMBER_NAME_RULE + '}'
    # Table of (url path, route name suffix, {HTTP method: handler key}).
    route_table = (
        (f'{base_path}/{type_part}',
         'collection',
         {'GET': 'get_collection', 'POST': 'post_resource'}),
        (f'{base_path}/{type_part}/{{id}}',
         'resource',
         {'GET': 'get_resource', 'PATCH': 'patch_resource',
          'DELETE': 'delete_resource'}),
        (f'{base_path}/{type_part}/{{id}}/relationships/{relation_part}',
         'relationships',
         {'GET': 'get_relationship', 'POST': 'post_relationship',
          'PATCH': 'patch_relationship', 'DELETE': 'delete_relationship'}),
        (f'{base_path}/{type_part}/{{id}}/{relation_part}',
         'related',
         {'GET': 'get_related'}),
    )
    for path, name_suffix, methods in route_table:
        resource = app.router.add_resource(
            path, name=f'{routes_namespace}.{name_suffix}')
        for http_method, handler_key in methods.items():
            resource.add_route(http_method, handlers[handler_key])
def setup_jsonapi(app, config, *, base_path='/api', version='1.0',
                  meta=None, context_cls=None, registry_class=None,
                  custom_handlers=None, log_errors=True,
                  routes_namespace=None):
    """
    Set up JSON API in aiohttp application.
    This function will setup resources, handlers and middleware.
    :param ~aiohttp.web.Application app:
        Application instance
    :param config:
        Mapping of schema classes to controller classes to register
        in JSON API
    :param str base_path:
        Prefix of JSON API routes paths
    :param str version:
        JSON API version (used in ``jsonapi`` key of document)
    :param dict meta:
        Meta information will be added to response (``meta`` key of document)
    :param context_cls:
        Override of JSONAPIContext class
        (must be subclass of :class:`~aiohttp_json_api.context.JSONAPIContext`)
    :param registry_class:
        Override of Registry class
        (must be subclass of :class:`~aiohttp_json_api.registry.Registry`)
    :param custom_handlers:
        Sequence or mapping with overrides of default JSON API handlers.
        If your custom handlers named in conform with convention
        of this application, then pass it as sequence::
            custom_handlers=(get_collection, patch_resource)
        If you have custom name of these handlers, then pass it as mapping::
            custom_handlers={
                'get_collection': some_handler_for_get_collection,
                'patch_resource': another_handler_to_patch_resource
            }
    :param bool log_errors:
        Log errors handled by
        :func:`~aiohttp_json_api.middleware.jsonapi_middleware`
    :param str routes_namespace:
        Namespace of JSON API application routes
    :return:
        aiohttp Application instance with configured JSON API
    :rtype: ~aiohttp.web.Application
    """
    from .common import JSONAPI, logger
    from .context import JSONAPIContext
    from .middleware import jsonapi_middleware
    # Refuse to initialize twice on the same application.
    if JSONAPI in app:
        logger.warning('JSON API application is initialized already. '
                       'Please check your aiohttp.web.Application instance '
                       'does not have a "%s" dictionary key.', JSONAPI)
        logger.error('Initialization of JSON API application is FAILED.')
        return app
    # Fall back to the default namespace unless a non-empty string is given.
    routes_namespace = routes_namespace \
        if routes_namespace and isinstance(routes_namespace, str) \
        else JSONAPI
    if context_cls is not None:
        if not issubclass(context_cls, JSONAPIContext):
            raise TypeError(f'Subclass of JSONAPIContext is required. '
                            f'Got: {context_cls}')
    else:
        context_cls = JSONAPIContext
    app[JSONAPI] = {
        'registry': setup_app_registry(app, registry_class, config),
        'context_cls': context_cls,
        'meta': meta,
        'jsonapi': {
            'version': version,
        },
        'log_errors': log_errors,
        'routes_namespace': routes_namespace
    }
    handlers = setup_custom_handlers(custom_handlers)
    setup_resources(app, base_path, handlers, routes_namespace)
    logger.debug('Registered JSON API resources list:')
    for resource in filter(lambda r: r.name.startswith(routes_namespace),
                           app.router.resources()):
        logger.debug('%s -> %s',
                     [r.method for r in resource], resource.get_info())
    app.middlewares.append(jsonapi_middleware)
    return app
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import re
from ... import gloo
class Compiler(object):
    """
    Compiler is used to convert Function and Variable instances into
    ready-to-use GLSL code. This class handles name mangling to ensure that
    there are no name collisions amongst global objects. The final name of
    each object may be retrieved using ``Compiler.__getitem__(obj)``.
    Accepts multiple root Functions as keyword arguments. ``compile()`` then
    returns a dict of GLSL strings with the same keys.
    Example::
        # initialize with two main functions
        compiler = Compiler(vert=v_func, frag=f_func)
        # compile and extract shaders
        code = compiler.compile()
        v_code = code['vert']
        f_code = code['frag']
        # look up name of some object
        name = compiler[obj]
    """
    def __init__(self, namespace=None, **shaders):
        # cache of compilation results for each function and variable
        if namespace is None:
            namespace = {}
        self._object_names = namespace # {object: name}
        self.shaders = shaders
    def __getitem__(self, item):
        """
        Return the name of the specified object, if it has been assigned one.
        """
        return self._object_names[item]
    def compile(self, pretty=True):
        """ Compile all code and return a dict {name: code} where the keys
        are determined by the keyword arguments passed to __init__().
        Parameters
        ----------
        pretty : bool
            If True, use a slower method to mangle object names. This produces
            GLSL that is more readable.
            If False, then the output is mostly unreadable GLSL, but is about
            10x faster to compile.
        """
        # Authoritative mapping of {obj: name}
        self._object_names = {}
        #
        # 1. collect list of dependencies for each shader
        #
        # maps {shader_name: [deps]}
        self._shader_deps = {}
        for shader_name, shader in self.shaders.items():
            this_shader_deps = []
            self._shader_deps[shader_name] = this_shader_deps
            dep_set = set()
            for dep in shader.dependencies(sort=True):
                # visit each object no more than once per shader
                if dep.name is None or dep in dep_set:
                    continue
                this_shader_deps.append(dep)
                dep_set.add(dep)
        #
        # 2. Assign names to all objects.
        #
        if pretty:
            self._rename_objects_pretty()
        else:
            self._rename_objects_fast()
        #
        # 3. Now we have a complete namespace; concatenate all definitions
        # together in topological order.
        #
        compiled = {}
        obj_names = self._object_names
        for shader_name, shader in self.shaders.items():
            code = []
            for dep in self._shader_deps[shader_name]:
                dep_code = dep.definition(obj_names)
                if dep_code is not None:
                    # strip out version pragma if present;
                    regex = r'#version (\d+)'
                    m = re.search(regex, dep_code)
                    if m is not None:
                        # check requested version
                        if m.group(1) != '120':
                            raise RuntimeError("Currently only GLSL #version "
                                               "120 is supported.")
                        dep_code = re.sub(regex, '', dep_code)
                    code.append(dep_code)
            compiled[shader_name] = '\n'.join(code)
        self.code = compiled
        return compiled
    def _rename_objects_fast(self):
        """ Rename all objects quickly to guaranteed-unique names using the
        id() of each object.
        This produces mostly unreadable GLSL, but is about 10x faster to
        compile.
        """
        for shader_name, deps in self._shader_deps.items():
            for dep in deps:
                name = dep.name
                if name != 'main':
                    # Append the object's id() in hex; truncate the base name
                    # so the mangled result stays within 32 characters.
                    ext = '_%x' % id(dep)
                    name = name[:32-len(ext)] + ext
                self._object_names[dep] = name
    def _rename_objects_pretty(self):
        """ Rename all objects like "name_1" to avoid conflicts. Objects are
        only renamed if necessary.
        This method produces more readable GLSL, but is rather slow.
        """
        #
        # 1. For each object, add its static names to the global namespace
        # and make a list of the shaders used by the object.
        #
        # {name: obj} mapping for finding unique names
        # initialize with reserved keywords.
        self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
        # functions are local per-shader
        self._shader_ns = dict([(shader, {}) for shader in self.shaders])
        # for each object, keep a list of shaders the object appears in
        obj_shaders = {}
        for shader_name, deps in self._shader_deps.items():
            for dep in deps:
                # Add static names to namespace
                for name in dep.static_names():
                    self._global_ns[name] = None
                obj_shaders.setdefault(dep, []).append(shader_name)
        #
        # 2. Assign new object names
        #
        # per-base-name counter used to generate "_1", "_2", ... suffixes
        name_index = {}
        for obj, shaders in obj_shaders.items():
            name = obj.name
            if self._name_available(obj, name, shaders):
                # hooray, we get to keep this name
                self._assign_name(obj, name, shaders)
            else:
                # boo, find a new name
                while True:
                    index = name_index.get(name, 0) + 1
                    name_index[name] = index
                    ext = '_%d' % index
                    new_name = name[:32-len(ext)] + ext
                    if self._name_available(obj, new_name, shaders):
                        self._assign_name(obj, new_name, shaders)
                        break
    def _is_global(self, obj):
        """ Return True if *obj* should be declared in the global namespace.
        Some objects need to be declared only in per-shader namespaces:
        functions, static variables, and const variables may all be given
        different definitions in each shader.
        """
        # todo: right now we assume all Variables are global, and all
        # Functions are local. Is this actually correct? Are there any
        # global functions? Are there any local variables?
        from .variable import Variable
        return isinstance(obj, Variable)
    def _name_available(self, obj, name, shaders):
        """ Return True if *name* is available for *obj* in *shaders*.
        """
        if name in self._global_ns:
            return False
        # Global objects must be unique across *all* shaders; local objects
        # only need to be unique within the shaders they appear in.
        shaders = self.shaders if self._is_global(obj) else shaders
        for shader in shaders:
            if name in self._shader_ns[shader]:
                return False
        return True
    def _assign_name(self, obj, name, shaders):
        """ Assign *name* to *obj* in *shaders*.
        """
        if self._is_global(obj):
            assert name not in self._global_ns
            self._global_ns[name] = obj
        else:
            for shader in shaders:
                ns = self._shader_ns[shader]
                assert name not in ns
                ns[name] = obj
        self._object_names[obj] = name
| |
"""Authentication views"""
from urllib.parse import quote
import requests
from django.conf import settings
from django.core import mail as django_mail
from django.contrib.auth import get_user_model, update_session_auth_hash
from django.shortcuts import render, redirect
from social_core.backends.email import EmailAuth
from social_django.models import UserSocialAuth
from social_django.utils import load_backend
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes, action
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.settings import api_settings
from anymail.message import AnymailMessage
from djoser.views import UserViewSet
from djoser.utils import ActionViewMixin
from djoser.email import PasswordResetEmail as DjoserPasswordResetEmail
from authentication.serializers import (
LoginEmailSerializer,
LoginPasswordSerializer,
RegisterEmailSerializer,
RegisterConfirmSerializer,
RegisterDetailsSerializer,
)
from authentication.utils import load_drf_strategy
from mail.api import render_email_templates, send_messages
# Resolve the project's configured user model once at import time.
User = get_user_model()
class SocialAuthAPIView(APIView):
    """API view for social auth endpoints.

    Subclasses supply a serializer via ``get_serializer_cls``; the serializer
    performs the actual social-auth work in ``save()``.
    """

    # Authentication is handled by the social-auth pipeline itself, so DRF's
    # authentication/permission layers are disabled on these endpoints.
    authentication_classes = []
    permission_classes = []
    def get_serializer_cls(self): # pragma: no cover
        """Return the serializer cls"""
        raise NotImplementedError("get_serializer_cls must be implemented")
    def post(self, request):
        """Processes a request"""
        # Hijacked (impersonated) sessions may not authenticate as someone else.
        if request.session.get("is_hijacked_user", False):
            return Response(status=status.HTTP_403_FORBIDDEN)
        serializer_cls = self.get_serializer_cls()
        # Build the social-auth strategy/backend pair for the email backend.
        strategy = load_drf_strategy(request)
        backend = load_backend(strategy, EmailAuth.name, None)
        serializer = serializer_cls(
            data=request.data,
            context={"request": request, "strategy": strategy, "backend": backend},
        )
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LoginEmailView(SocialAuthAPIView):
    """Email login view (first step: submit the email address)."""
    def get_serializer_cls(self):
        """Return the serializer cls"""
        return LoginEmailSerializer
class LoginPasswordView(SocialAuthAPIView):
    """Password login view (second step: submit the password)."""
    def get_serializer_cls(self):
        """Return the serializer cls"""
        return LoginPasswordSerializer
class RegisterEmailView(SocialAuthAPIView):
    """Email register view"""
    def get_serializer_cls(self):
        """Return the serializer cls"""
        return RegisterEmailSerializer
    def post(self, request):
        """ Verify recaptcha response before proceeding """
        if request.session.get("is_hijacked_user", False):
            return Response(status=status.HTTP_403_FORBIDDEN)
        if settings.RECAPTCHA_SITE_KEY:
            # Send the secret in the POST body rather than the query string so
            # it cannot leak into proxy/server logs; Google's siteverify
            # endpoint accepts both forms. Use .get() so a missing "recaptcha"
            # field yields a 400 from the verification result instead of a
            # KeyError/500.
            r = requests.post(
                "https://www.google.com/recaptcha/api/siteverify",
                data={
                    "secret": settings.RECAPTCHA_SECRET_KEY,
                    "response": request.data.get("recaptcha", ""),
                },
            )
            response = r.json()
            if not response["success"]:
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
        return super().post(request)
class RegisterConfirmView(SocialAuthAPIView):
    """Email registration confirmation view"""
    def get_serializer_cls(self):
        """Return the serializer cls"""
        return RegisterConfirmSerializer
class RegisterDetailsView(SocialAuthAPIView):
    """Email registration details view"""
    def get_serializer_cls(self):
        """Return the serializer cls"""
        return RegisterDetailsSerializer
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def get_social_auth_types(request):
"""
View that returns a serialized list of the logged-in user's UserSocialAuth types
"""
social_auths = (
UserSocialAuth.objects.filter(user=request.user).values("provider").distinct()
)
return Response(data=social_auths, status=status.HTTP_200_OK)
def login_complete(request, **kwargs): # pylint: disable=unused-argument
    """View that completes the login"""
    # redirect to home
    response = redirect("/")
    # Keep the existing JWT cookie for hijacked (impersonated) sessions.
    if request.session.get("is_hijacked_user", False):
        return response
    if api_settings.JWT_AUTH_COOKIE in request.COOKIES:
        # to clear a cookie, it's most reliable to set it to expire immediately
        response.set_cookie(
            api_settings.JWT_AUTH_COOKIE,
            domain=settings.OPEN_DISCUSSIONS_COOKIE_DOMAIN,
            httponly=True,
            max_age=0,
        )
    return response
def confirmation_sent(request, **kwargs): # pylint: disable=unused-argument
    """The confirmation of an email being sent"""
    return render(request, "confirmation_sent.html")
class CustomPasswordResetEmail(DjoserPasswordResetEmail):
    """Custom class to modify base functionality in Djoser's PasswordResetEmail class"""
    def send(self, to, *args, **kwargs):
        """
        Overrides djoser.email.PasswordResetEmail#send to use our mail API.

        Renders the "password_reset" templates and sends the message through
        the configured notification email backend instead of Djoser's default
        sending machinery.
        """
        context = self.get_context_data()
        context.update(self.context)
        with django_mail.get_connection(
            settings.NOTIFICATION_EMAIL_BACKEND
        ) as connection:
            subject, text_body, html_body = render_email_templates(
                "password_reset", context
            )
            msg = AnymailMessage(
                subject=subject,
                body=text_body,
                to=to,
                from_email=settings.MAILGUN_FROM_EMAIL,
                connection=connection,
            )
            # Attach the HTML rendering alongside the plain-text body.
            msg.attach_alternative(html_body, "text/html")
            send_messages([msg])
    def get_context_data(self):
        """Adds base_url to the template context"""
        context = super().get_context_data()
        context["base_url"] = settings.SITE_BASE_URL
        return context
class CustomDjoserAPIView(UserViewSet, ActionViewMixin):
    """
    Overrides post methods of a Djoser view and adds one extra piece of logic:
    In version 0.30.0, the fetch function in redux-hammock does not handle responses
    with empty response data. Djoser returns 204's with empty response data, so we are
    coercing that to a 200 with an empty dict as the response data. This can be removed
    when redux-hammock is changed to support 204's.
    """
    def post(
        self, request, **kwargs
    ):  # pylint: disable=missing-docstring,arguments-differ
        response = super().post(request)
        # Coerce 204 -> 200 with an empty dict (see class docstring).
        if response.status_code == status.HTTP_204_NO_CONTENT:
            return Response({}, status=status.HTTP_200_OK)
        return response
    @action(["post"], detail=False)
    def reset_password(self, request, *args, **kwargs):
        response = super().reset_password(request, *args, **kwargs)
        # See class docstring for explanation
        if response.status_code == status.HTTP_204_NO_CONTENT:
            return Response({}, status=status.HTTP_200_OK)
        return response
    @action(["post"], detail=False)
    def reset_password_confirm(self, request, *args, **kwargs):
        response = super().reset_password_confirm(request, *args, **kwargs)
        # See class docstring for explanation
        if response.status_code == status.HTTP_204_NO_CONTENT:
            return Response({}, status=status.HTTP_200_OK)
        return response
    @action(["post"], detail=False)
    def set_password(self, request, *args, **kwargs):
        """
        Overrides CustomDjoserAPIView.post to update the session after a successful
        password change. Without this explicit refresh, the user's session would be
        invalid and they would be logged out.
        """
        response = super().set_password(request, *args, **kwargs)
        # NOTE(review): on success this discards any response body and always
        # returns an empty dict with 200 — presumably intentional to match the
        # redux-hammock workaround above; confirm before changing.
        if response.status_code in (status.HTTP_200_OK, status.HTTP_204_NO_CONTENT):
            update_session_auth_hash(self.request, self.request.user)
            return Response({}, status=status.HTTP_200_OK)
        return response
| |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime, timedelta, time
from decimal import Decimal
from uuid import uuid1, uuid4, UUID
import six
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class BaseColumnIOTest(BaseCassEngTestCase):
    """
    Tests that values are come out of cassandra in the format we expect
    To test a column type, subclass this test, define the column, and the primary key
    and data values you want to test
    """
    # The generated test model is assigned here
    _generated_model = None
    # the column we want to test
    column = None
    # the values we want to test against, you can
    # use a single value, or multiple comma separated values
    pkey_val = None
    data_val = None
    @classmethod
    def setUpClass(cls):
        super(BaseColumnIOTest, cls).setUpClass()
        # if the test column hasn't been defined, bail out
        if not cls.column:
            return
        # create a table with the given column
        class IOTestModel(Model):
            # random suffix avoids table-name collisions between test runs
            table_name = cls.column.db_type + "_io_test_model_{0}".format(uuid4().hex[:8])
            pkey = cls.column(primary_key=True)
            data = cls.column()
        cls._generated_model = IOTestModel
        sync_table(cls._generated_model)
        # tupleify the tested values
        if not isinstance(cls.pkey_val, tuple):
            cls.pkey_val = cls.pkey_val,
        if not isinstance(cls.data_val, tuple):
            cls.data_val = cls.data_val,
    @classmethod
    def tearDownClass(cls):
        super(BaseColumnIOTest, cls).tearDownClass()
        # Nothing to drop when no column was defined (base class itself).
        if not cls.column:
            return
        drop_table(cls._generated_model)
    def comparator_converter(self, val):
        """ If you want to convert the original value used to compare the model vales """
        return val
    def test_column_io(self):
        """ Tests the given models class creates and retrieves values as expected """
        if not self.column:
            return
        # pkey_val and data_val are parallel tuples of test cases.
        for pkey, data in zip(self.pkey_val, self.data_val):
            # create
            m1 = self._generated_model.create(pkey=pkey, data=data)
            # get
            m2 = self._generated_model.get(pkey=pkey)
            assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.column
            assert m1.data == m2.data == self.comparator_converter(data), self.column
            # delete
            self._generated_model.filter(pkey=pkey).delete()
class TestBlobIO(BaseColumnIOTest):
    # Round-trip bytes values through a Blob column.
    column = columns.Blob
    pkey_val = six.b('blake'), uuid4().bytes
    data_val = six.b('eggleston'), uuid4().bytes
class TestBlobIO2(BaseColumnIOTest):
    # Same as TestBlobIO, but verifies bytearray inputs are accepted too.
    column = columns.Blob
    pkey_val = bytearray(six.b('blake')), uuid4().bytes
    data_val = bytearray(six.b('eggleston')), uuid4().bytes
class TestTextIO(BaseColumnIOTest):
    # Round-trip plain text through a Text column.
    column = columns.Text
    pkey_val = 'bacon'
    data_val = 'monkey'
class TestNonBinaryTextIO(BaseColumnIOTest):
    # Text that *looks* like a hex blob ('0x...') must stay a string.
    column = columns.Text
    pkey_val = 'bacon'
    data_val = '0xmonkey'
class TestInteger(BaseColumnIOTest):
    # Round-trip small ints through an Integer column.
    column = columns.Integer
    pkey_val = 5
    data_val = 6
class TestBigInt(BaseColumnIOTest):
    # Exercise the maximum signed 64-bit value (2**63 - 1).
    column = columns.BigInt
    pkey_val = 6
    data_val = pow(2, 63) - 1
class TestDateTime(BaseColumnIOTest):
    column = columns.DateTime
    # Truncate to whole seconds, since sub-second precision is not compared.
    now = datetime(*datetime.now().timetuple()[:6])
    pkey_val = now
    data_val = now + timedelta(days=1)
class TestUUID(BaseColumnIOTest):
    # Accepts both string and UUID inputs for a UUID column.
    column = columns.UUID
    pkey_val = str(uuid4()), uuid4()
    data_val = str(uuid4()), uuid4()
    def comparator_converter(self, val):
        # Normalize string inputs to UUID before comparison.
        return val if isinstance(val, UUID) else UUID(val)
class TestTimeUUID(BaseColumnIOTest):
    # Accepts both string and UUID inputs for a TimeUUID (v1) column.
    column = columns.TimeUUID
    pkey_val = str(uuid1()), uuid1()
    data_val = str(uuid1()), uuid1()
    def comparator_converter(self, val):
        # Normalize string inputs to UUID before comparison.
        return val if isinstance(val, UUID) else UUID(val)
# until Floats are implicitly single:
class FloatSingle(columns.Float):
    # Float column forced to single precision for the IO test below.
    def __init__(self, **kwargs):
        super(FloatSingle, self).__init__(double_precision=False, **kwargs)
class TestFloatIO(BaseColumnIOTest):
    # Values chosen to be exactly representable in single precision.
    column = FloatSingle
    pkey_val = 4.75
    data_val = -1.5
class TestDoubleIO(BaseColumnIOTest):
    # Round-trip double-precision floats.
    column = columns.Double
    pkey_val = 3.14
    data_val = -1982.11
class TestDecimalIO(BaseColumnIOTest):
    # Decimal columns accept Decimal, int/float, and string inputs.
    column = columns.Decimal
    pkey_val = Decimal('1.35'), 5, '2.4'
    data_val = Decimal('0.005'), 3.5, '8'
    def comparator_converter(self, val):
        # repr() of a float preserves its shortest exact form for Decimal.
        return Decimal(repr(val) if isinstance(val, float) else val)
class ProtocolV4Test(BaseColumnIOTest):
    # Base for column types that only exist in native protocol v4+;
    # table setup/teardown is skipped entirely on older protocols.
    @classmethod
    def setUpClass(cls):
        if PROTOCOL_VERSION >= 4:
            super(ProtocolV4Test, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        if PROTOCOL_VERSION >= 4:
            super(ProtocolV4Test, cls).tearDownClass()
class TestDate(ProtocolV4Test):
    def setUp(self):
        # Skip per-test as well; setUpClass already avoided creating a table.
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
        super(TestDate, self).setUp()
    column = columns.Date
    now = Date(datetime.now().date())
    pkey_val = now
    data_val = Date(now.days_from_epoch + 1)
class TestTime(ProtocolV4Test):
    def setUp(self):
        # Skip per-test as well; setUpClass already avoided creating a table.
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
        super(TestTime, self).setUp()
    column = columns.Time
    pkey_val = Time(time(2, 12, 7, 48))
    data_val = Time(time(16, 47, 25, 7))
class TestSmallInt(ProtocolV4Test):
    def setUp(self):
        # Skip per-test as well; setUpClass already avoided creating a table.
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
        super(TestSmallInt, self).setUp()
    column = columns.SmallInt
    pkey_val = 16768
    data_val = 32523
class TestTinyInt(ProtocolV4Test):
    def setUp(self):
        # Skip per-test as well; setUpClass already avoided creating a table.
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
        super(TestTinyInt, self).setUp()
    column = columns.TinyInt
    pkey_val = 1
    data_val = 123
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
from unittest import mock
from urllib.parse import ParseResult, urlparse
import pytest
import yaml
from _pytest._code import ExceptionInfo
from botocore.exceptions import ClientError
from freezegun import freeze_time
from moto.core import ACCOUNT_ID
from moto.core.exceptions import AWSError
from moto.eks.exceptions import (
InvalidParameterException,
InvalidRequestException,
ResourceInUseException,
ResourceNotFoundException,
)
from moto.eks.models import (
CLUSTER_EXISTS_MSG,
CLUSTER_IN_USE_MSG,
CLUSTER_NOT_FOUND_MSG,
CLUSTER_NOT_READY_MSG,
FARGATE_PROFILE_EXISTS_MSG,
FARGATE_PROFILE_NEEDS_SELECTOR_MSG,
FARGATE_PROFILE_NOT_FOUND_MSG,
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
FARGATE_PROFILE_TOO_MANY_LABELS,
LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG,
LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG,
NODEGROUP_EXISTS_MSG,
NODEGROUP_NOT_FOUND_MSG,
)
from airflow.providers.amazon.aws.hooks.eks import EksHook
from ..utils.eks_test_constants import (
DEFAULT_CONN_ID,
DEFAULT_NAMESPACE,
DISK_SIZE,
FROZEN_TIME,
INSTANCE_TYPES,
LAUNCH_TEMPLATE,
MAX_FARGATE_LABELS,
NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE,
NODEGROUP_OWNERSHIP_TAG_KEY,
NON_EXISTING_CLUSTER_NAME,
NON_EXISTING_FARGATE_PROFILE_NAME,
NON_EXISTING_NODEGROUP_NAME,
PACKAGE_NOT_PRESENT_MSG,
PARTITION,
POD_EXECUTION_ROLE_ARN,
REGION,
REMOTE_ACCESS,
BatchCountSize,
ClusterAttributes,
ClusterInputs,
ErrorAttributes,
FargateProfileAttributes,
FargateProfileInputs,
NodegroupAttributes,
NodegroupInputs,
PossibleTestResults,
RegExTemplates,
ResponseAttributes,
)
from ..utils.eks_test_utils import (
attributes_to_test,
generate_clusters,
generate_dict,
generate_fargate_profiles,
generate_nodegroups,
iso_date,
region_matches_partition,
)
try:
from moto import mock_eks
except ImportError:
mock_eks = None
@pytest.fixture(scope="function")
def cluster_builder():
"""A fixture to generate a batch of EKS Clusters on the mocked backend for testing."""
class ClusterTestDataFactory:
"""A Factory class for building the Cluster objects."""
def __init__(self, count: int, minimal: bool) -> None:
# Generate 'count' number of Cluster objects.
self.cluster_names: List[str] = generate_clusters(
eks_hook=eks_hook, num_clusters=count, minimal=minimal
)
self.existing_cluster_name: str = self.cluster_names[0]
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_cluster() for the first Cluster.
self.cluster_describe_output: Dict = eks_hook.describe_cluster(name=self.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
# Generate a list of the Cluster attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=ClusterInputs, cluster_name=self.existing_cluster_name
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, ClusterTestDataFactory]:
return eks_hook, ClusterTestDataFactory(count=count, minimal=minimal)
mock_eks().start()
eks_hook = EksHook(
aws_conn_id=DEFAULT_CONN_ID,
region_name=REGION,
)
yield _execute
mock_eks().stop()
@pytest.fixture(scope="function")
def fargate_profile_builder(cluster_builder):
"""A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing."""
class FargateProfileTestDataFactory:
"""A Factory class for building the Fargate profile objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name = cluster.existing_cluster_name
# Generate 'count' number of FargateProfile objects.
self.fargate_profile_names = generate_fargate_profiles(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_profiles=count,
minimal=minimal,
)
# Get the name of the first generated profile.
self.existing_fargate_profile_name: str = self.fargate_profile_names[0]
self.nonexistent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_fargate_profiles() for the first profile.
self.fargate_describe_output: Dict = eks_hook.describe_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.existing_fargate_profile_name
)[ResponseAttributes.FARGATE_PROFILE]
# Generate a list of the Fargate Profile attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=FargateProfileInputs,
cluster_name=self.cluster_name,
fargate_profile_name=self.existing_fargate_profile_name,
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, FargateProfileTestDataFactory]:
return eks_hook, FargateProfileTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.fixture(scope="function")
def nodegroup_builder(cluster_builder):
"""A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing."""
class NodegroupTestDataFactory:
"""A Factory class for building the Nodegroup objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name: str = cluster.existing_cluster_name
# Generate 'count' number of Nodegroup objects.
self.nodegroup_names: List[str] = generate_nodegroups(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_nodegroups=count,
minimal=minimal,
)
# Get the name of the first generated Nodegroup.
self.existing_nodegroup_name: str = self.nodegroup_names[0]
self.nonexistent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_nodegroup() for the first Nodegroup.
self.nodegroup_describe_output: Dict = eks_hook.describe_nodegroup(
clusterName=self.cluster_name, nodegroupName=self.existing_nodegroup_name
)[ResponseAttributes.NODEGROUP]
# Generate a list of the Nodegroup attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=NodegroupInputs,
cluster_name=self.cluster_name,
nodegroup_name=self.existing_nodegroup_name,
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, NodegroupTestDataFactory]:
return eks_hook, NodegroupTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.mark.skipif(mock_eks is None, reason=PACKAGE_NOT_PRESENT_MSG)
class TestEksHooks:
def test_hook(self, cluster_builder) -> None:
eks_hook, _ = cluster_builder()
assert eks_hook.get_conn() is not None
assert eks_hook.aws_conn_id == DEFAULT_CONN_ID
assert eks_hook.region_name == REGION
###
# This specific test does not use the fixture since
# it is intended to verify that there are no clusters
# in the list at initialization, which means the mock
# decorator must be used manually in this one case.
###
@mock_eks
def test_list_clusters_returns_empty_by_default(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
result: List = eks_hook.list_clusters()
assert isinstance(result, list)
assert len(result) == 0
def test_list_clusters_returns_sorted_cluster_names(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_clusters_returns_all_results(
self, cluster_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result)
def test_create_cluster_throws_exception_when_cluster_exists(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_EXISTS_MSG.format(
clusterName=generated_test_data.existing_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_cluster(
name=generated_test_data.existing_cluster_name, **dict(ClusterInputs.REQUIRED) # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new cluster was created.
len_after_test: int = len(eks_hook.list_clusters())
assert len_after_test == initial_batch_size
def test_create_cluster_generates_valid_cluster_arn(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_names,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=generated_test_data.cluster_describe_output[ClusterAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_cluster_generates_valid_cluster_created_timestamp(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_time: datetime = generated_test_data.cluster_describe_output[ClusterAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_cluster_generates_valid_cluster_endpoint(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_endpoint: str = generated_test_data.cluster_describe_output[ClusterAttributes.ENDPOINT]
assert_is_valid_uri(result_endpoint)
def test_create_cluster_generates_valid_oidc_identity(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_issuer: str = generated_test_data.cluster_describe_output[ClusterAttributes.IDENTITY][
ClusterAttributes.OIDC
][ClusterAttributes.ISSUER]
assert_is_valid_uri(result_issuer)
def test_create_cluster_saves_provided_parameters(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.cluster_describe_output[key] == expected_value
def test_describe_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_returns_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_cluster_removes_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)
result_cluster_list: List = eks_hook.list_clusters()
assert len(result_cluster_list) == (initial_batch_size - 1)
assert generated_test_data.existing_cluster_name not in result_cluster_list
def test_delete_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify nothing was deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == initial_batch_size
def test_list_nodegroups_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_nodegroups_returns_sorted_nodegroup_names(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_nodegroups_returns_all_results(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_nodegroup_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=non_existent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=non_existent_cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_nodegroup_throws_exception_when_nodegroup_already_exists(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = NODEGROUP_EXISTS_MSG.format(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_throws_exception_when_cluster_not_active(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_generates_valid_nodegroup_arn(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.nodegroup_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=generated_test_data.nodegroup_describe_output[NodegroupAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_created_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_modified_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.MODIFIED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_nodegroup_generates_valid_autoscaling_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_asg_name: str = result_resources[NodegroupAttributes.AUTOSCALING_GROUPS][0][
NodegroupAttributes.NAME
]
assert RegExTemplates.NODEGROUP_ASG_NAME_PATTERN.match(result_asg_name)
def test_create_nodegroup_generates_valid_security_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_security_group: str = result_resources[NodegroupAttributes.REMOTE_ACCESS_SG]
assert RegExTemplates.NODEGROUP_SECURITY_GROUP_NAME_PATTERN.match(result_security_group)
def test_create_nodegroup_saves_provided_parameters(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.nodegroup_describe_output[key] == expected_value
def test_create_nodegroup_without_tags_uses_default(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
tag_list: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.TAGS]
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(
cluster_name=generated_test_data.cluster_name
)
assert tag_list.get(ownership_tag_key) == NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE
def test_create_nodegroup_with_ownership_tag_uses_provided_value(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(cluster_name=cluster_name)
provided_tag_value: str = "shared"
created_nodegroup: Dict = eks_hook.create_nodegroup(
clusterName=cluster_name,
nodegroupName="nodegroup",
tags={ownership_tag_key: provided_tag_value},
**dict(deepcopy(NodegroupInputs.REQUIRED)),
)[ResponseAttributes.NODEGROUP]
tags = created_nodegroup.get(NodegroupAttributes.TAGS)
assert tags is not None
assert tags.get(ownership_tag_key) == provided_tag_value
def test_describe_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_nodegroup_throws_exception_when_nodegroup_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_throws_exception_when_nodegroups_exist(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_IN_USE_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no clusters were deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == BatchCountSize.SINGLE
def test_delete_nodegroup_removes_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
result_nodegroup_list: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert len(result_nodegroup_list) == (initial_batch_size - 1)
assert generated_test_data.existing_nodegroup_name not in result_nodegroup_list
def test_delete_nodegroup_returns_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)[ResponseAttributes.NODEGROUP]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_nodegroup_throws_exception_when_nodegroup_not_found(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test: int = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
# If launch_template is specified, you can not specify instanceTypes, diskSize, or remoteAccess.
test_cases = [
# Happy Paths
(LAUNCH_TEMPLATE, None, None, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, None, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, None, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, None, None, PossibleTestResults.SUCCESS),
# Unhappy Paths
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
]
@pytest.mark.parametrize(
"launch_template, instance_types, disk_size, remote_access, expected_result",
test_cases,
)
def test_create_nodegroup_handles_launch_template_combinations(
self,
cluster_builder,
launch_template,
instance_types,
disk_size,
remote_access,
expected_result,
):
eks_hook, generated_test_data = cluster_builder()
nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidParameterException
expected_message: str = ""
test_inputs = dict(
deepcopy(
# Required Constants
NodegroupInputs.REQUIRED
# Required Variables
+ [
(
ClusterAttributes.CLUSTER_NAME,
generated_test_data.existing_cluster_name,
),
(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name),
]
# Test Case Values
+ [_ for _ in [launch_template, instance_types, disk_size, remote_access] if _]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: Dict = eks_hook.create_nodegroup(**test_inputs)[ResponseAttributes.NODEGROUP]
expected_output = deepcopy(test_inputs)
# The Create Nodegroup hook magically adds the required
# cluster/owned tag, so add that to the expected outputs.
expected_output['tags'] = {
f'kubernetes.io/cluster/{generated_test_data.existing_cluster_name}': 'owned'
}
for key, expected_value in expected_output.items():
assert result[key] == expected_value
else:
if launch_template and disk_size:
expected_message = LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG
elif launch_template and remote_access:
expected_message = LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG
# Docs say this combination throws an exception but testing shows that
# instanceTypes overrides the launchTemplate instance values instead.
# Leaving here for easier correction if/when that gets fixed.
elif launch_template and instance_types:
pass
if expected_message:
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
def test_list_fargate_profiles_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_fargate_profiles_returns_sorted_profile_names(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_fargate_profiles_returns_all_results(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_fargate_profile_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(clusterName=non_existent_cluster_name)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=non_existent_cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_fargate_profile_throws_exception_when_fargate_profile_already_exists(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = FARGATE_PROFILE_EXISTS_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_throws_exception_when_cluster_not_active(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_generates_valid_profile_arn(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.fargate_profile_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.FARGATE_PROFILE_ARN,
arn_under_test=generated_test_data.fargate_describe_output[FargateProfileAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_fargate_profile_generates_valid_created_timestamp(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
result_time: datetime = generated_test_data.fargate_describe_output[
FargateProfileAttributes.CREATED_AT
]
assert iso_date(result_time) == FROZEN_TIME
def test_create_fargate_profile_saves_provided_parameters(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.fargate_describe_output[key] == expected_value
def test_describe_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_fargate_profile_throws_exception_when_profile_not_found(
self, fargate_profile_builder
) -> None:
client, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
client.describe_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_removes_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(initial_batch_size)
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
result_fargate_profile_list: List = eks_hook.list_fargate_profiles(
clusterName=generated_test_data.cluster_name
)
assert len(result_fargate_profile_list) == (initial_batch_size - 1)
assert generated_test_data.existing_fargate_profile_name not in result_fargate_profile_list
def test_delete_fargate_profile_returns_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_throws_exception_when_fargate_profile_not_found(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
# The following Selector test cases have all been verified against the AWS API using cURL.
selector_formatting_test_cases = [
# Format is ([Selector(s), expected_message, expected_result])
# Happy Paths
# Selector with a Namespace and no Labels
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and an empty collection of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 0),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and one valid Label
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 1),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and the maximum number of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Two valid Selectors
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{FargateProfileAttributes.NAMESPACE: f'{DEFAULT_NAMESPACE}_2'},
],
None,
PossibleTestResults.SUCCESS,
),
# Unhappy Cases
# No Selectors provided
([], FARGATE_PROFILE_NEEDS_SELECTOR_MSG, PossibleTestResults.FAILURE),
# Empty Selector / Selector without a Namespace or Labels
([{}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Selector with labels but no Namespace
(
[{FargateProfileAttributes.LABELS: generate_dict("label", 1)}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Selector with Namespace but too many Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
}
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
# Valid Selector followed by Empty Selector
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}, {}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Valid Selector
(
[{}, {FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Empty Selector
([{}, {}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Valid Selector followed by Selector with Namespace but too many Labels
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
},
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
]
    @pytest.mark.parametrize(
        "selectors, expected_message, expected_result",
        selector_formatting_test_cases,
    )
    @mock_eks
    def test_create_fargate_selectors(self, cluster_builder, selectors, expected_message, expected_result):
        """Verify selector validation when creating a Fargate profile.

        For valid selectors the created profile must echo back every submitted
        attribute; for invalid selectors an InvalidParameterException carrying
        the expected message must be raised.
        """
        client, generated_test_data = cluster_builder()
        cluster_name: str = generated_test_data.existing_cluster_name
        fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
        expected_exception: Type[AWSError] = InvalidParameterException
        # Assemble the API inputs; deepcopy keeps the shared parametrized
        # test-case data from being mutated between cases.
        test_inputs = dict(
            deepcopy(
                # Required Constants
                [POD_EXECUTION_ROLE_ARN]
                # Required Variables
                + [
                    (ClusterAttributes.CLUSTER_NAME, cluster_name),
                    (FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name),
                ]
                # Test Case Values
                + [(FargateProfileAttributes.SELECTORS, selectors)]
            )
        )
        if expected_result == PossibleTestResults.SUCCESS:
            result: List = client.create_fargate_profile(**test_inputs)[ResponseAttributes.FARGATE_PROFILE]
            # Every submitted attribute should be reflected in the response.
            for key, expected_value in test_inputs.items():
                assert result[key] == expected_value
        else:
            with pytest.raises(ClientError) as raised_exception:
                client.create_fargate_profile(**test_inputs)
            assert_client_error_exception_thrown(
                expected_exception=expected_exception,
                expected_msg=expected_message,
                raised_exception=raised_exception,
            )
class TestEksHook:
    """Tests for EksHook: kubeconfig generation and STS token fetching."""
    @mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
    @pytest.mark.parametrize(
        "aws_conn_id, region_name, expected_args",
        [
            [
                'test-id',
                'test-region',
                [
                    '-m',
                    'airflow.providers.amazon.aws.utils.eks_get_token',
                    '--region-name',
                    'test-region',
                    '--aws-conn-id',
                    'test-id',
                    '--cluster-name',
                    'test-cluster',
                ],
            ],
            [
                None,
                'test-region',
                [
                    '-m',
                    'airflow.providers.amazon.aws.utils.eks_get_token',
                    '--region-name',
                    'test-region',
                    '--cluster-name',
                    'test-cluster',
                ],
            ],
            [
                None,
                None,
                ['-m', 'airflow.providers.amazon.aws.utils.eks_get_token', '--cluster-name', 'test-cluster'],
            ],
        ],
    )
    def test_generate_config_file(self, mock_conn, aws_conn_id, region_name, expected_args):
        """The generated kubeconfig must embed cluster details from
        describe_cluster and an exec credential plugin whose args reflect the
        connection id / region actually configured on the hook."""
        mock_conn.describe_cluster.return_value = {
            'cluster': {'certificateAuthority': {'data': 'test-cert'}, 'endpoint': 'test-endpoint'}
        }
        hook = EksHook(aws_conn_id=aws_conn_id, region_name=region_name)
        with hook.generate_config_file(
            eks_cluster_name='test-cluster', pod_namespace='k8s-namespace'
        ) as config_file:
            config = yaml.safe_load(Path(config_file).read_text())
            # The whole document is compared at once so any drift in the
            # generated kubeconfig structure fails loudly.
            assert config == {
                'apiVersion': 'v1',
                'kind': 'Config',
                'clusters': [
                    {
                        'cluster': {'server': 'test-endpoint', 'certificate-authority-data': 'test-cert'},
                        'name': 'test-cluster',
                    }
                ],
                'contexts': [
                    {
                        'context': {'cluster': 'test-cluster', 'namespace': 'k8s-namespace', 'user': 'aws'},
                        'name': 'aws',
                    }
                ],
                'current-context': 'aws',
                'preferences': {},
                'users': [
                    {
                        'name': 'aws',
                        'user': {
                            'exec': {
                                'apiVersion': 'client.authentication.k8s.io/v1alpha1',
                                'args': expected_args,
                                'command': sys.executable,
                                'env': [{'name': 'AIRFLOW__LOGGING__LOGGING_LEVEL', 'value': 'FATAL'}],
                                'interactiveMode': 'Never',
                            }
                        },
                    }
                ],
            }
    @mock.patch('airflow.providers.amazon.aws.hooks.eks.RequestSigner')
    @mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
    @mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.get_session')
    def test_fetch_access_token_for_cluster(self, mock_get_session, mock_conn, mock_signer):
        """The token must be the presigned STS GetCallerIdentity URL,
        base64-encoded and prefixed with 'k8s-aws-v1.'."""
        mock_signer.return_value.generate_presigned_url.return_value = 'http://example.com'
        mock_get_session.return_value.region_name = 'us-east-1'
        hook = EksHook()
        token = hook.fetch_access_token_for_cluster(eks_cluster_name='test-cluster')
        # The signer must be built from the session's credentials/events.
        mock_signer.assert_called_once_with(
            service_id=mock_conn.meta.service_model.service_id,
            region_name='us-east-1',
            signing_name='sts',
            signature_version='v4',
            credentials=mock_get_session.return_value.get_credentials.return_value,
            event_emitter=mock_get_session.return_value.events,
        )
        # The presigned request carries the cluster name header and expires in 60s.
        mock_signer.return_value.generate_presigned_url.assert_called_once_with(
            request_dict={
                'method': 'GET',
                'url': 'https://sts.us-east-1.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15',
                'body': {},
                'headers': {'x-k8s-aws-id': 'test-cluster'},
                'context': {},
            },
            region_name='us-east-1',
            expires_in=60,
            operation_name='',
        )
        # 'aHR0cDovL2V4YW1wbGUuY29t' is base64('http://example.com').
        assert token == 'k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t'
# Helper methods for repeated assert combinations.
def assert_all_arn_values_are_valid(expected_arn_values, pattern, arn_under_test) -> None:
    """
    Applies regex `pattern` to `arn_under_test` and asserts
    that each group matches the provided expected value.

    A list entry of None in the 'expected_arn_values' will
    assert that the value exists but not match a specific value.
    """
    findall: List = pattern.findall(arn_under_test)[0]
    # Work on a copy: the original implementation popped entries off the
    # caller's list, silently emptying it and breaking any reuse of the
    # test data across calls.
    remaining: List = list(expected_arn_values)
    # Pair the regex groups with the expected values from the right, so a
    # shorter expected list still lines up with the trailing groups.
    # (Note: findall() returns groups in left-to-right order; the reversal
    # here only establishes right-aligned pairing.)
    for value in reversed(findall):
        expected_value = remaining.pop()
        if expected_value:
            assert value in expected_value
        else:
            assert value
    # Groups 0 and 1 are the partition and region; they must be consistent.
    assert region_matches_partition(findall[1], findall[0])
def assert_client_error_exception_thrown(
    expected_exception: Type[AWSError], expected_msg: str, raised_exception: ExceptionInfo
) -> None:
    """
    Verify that the captured ClientError carries both the error code of
    `expected_exception` and exactly the expected message text.
    """
    error: dict = raised_exception.value.response[ErrorAttributes.ERROR]
    assert error[ErrorAttributes.CODE] == expected_exception.TYPE
    assert error[ErrorAttributes.MESSAGE] == expected_msg
def assert_result_matches_expected_list(
    result: List, expected_result: List, expected_len: Optional[int] = None
) -> None:
    """
    Assert that `result` equals `expected_result` and has the expected length.

    When `expected_len` is falsy (None or 0), the length of `expected_result`
    is used as the expected length.
    """
    assert result == expected_result
    # BUG FIX: the previous form `len(result) == expected_len or len(expected_result)`
    # parsed as `(len(result) == expected_len) or len(expected_result)`, so the
    # length check always passed when `expected_result` was non-empty.
    assert len(result) == (expected_len or len(expected_result))
def assert_is_valid_uri(value: str) -> None:
    """
    Assert that `value` parses as a URI with a scheme, a network location,
    and a path, and that it references the test region.
    """
    parsed: ParseResult = urlparse(value)
    for component in (parsed.scheme, parsed.netloc, parsed.path):
        assert component
    assert REGION in value
| |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.spawn
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
def _requires_input_stats(toco_flags: _toco_flags_pb2.TocoFlags) -> bool:
  """Checks if the `input_stats` flag is required for conversion.

  Args:
    toco_flags: A protocol buffer describing the conversion process.

  Returns:
    True, if the `inference_type` or the `inference_input_type` is a quantized
    type and it is not post training quantization, else False.
  """
  # NOTE: the annotation previously read `_toco_flags_pb2.TocoFlags()`, which
  # instantiated a throwaway protobuf message at import time; the class itself
  # is the correct annotation.
  quantized_inference_types = (
      _types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8)
  return ((toco_flags.inference_type in quantized_inference_types or
           toco_flags.inference_input_type in quantized_inference_types) and
          not toco_flags.post_training_quantize)
def convert_tensor_tf_type_to_tflite_type(
    tf_type: dtypes.DType, usage: str = "") -> _types_pb2.IODataType:
  """Map a TensorFlow tensor dtype onto the equivalent TFLite type.

  Args:
    tf_type: TensorFlow type.
    usage: Text describing the reason for invoking this function.

  Returns:
    tflite_type: TFLite type. Refer to lite/toco/types.proto.

  Raises:
    ValueError: If `tf_type` is unsupported.
  """
  tf_to_tflite_types = {
      dtypes.float16: _types_pb2.FLOAT16,
      dtypes.float32: _types_pb2.FLOAT,
      dtypes.float64: _types_pb2.FLOAT64,
      dtypes.int8: _types_pb2.INT8,
      dtypes.int16: _types_pb2.INT16,
      dtypes.int32: _types_pb2.INT32,
      dtypes.int64: _types_pb2.INT64,
      dtypes.uint8: _types_pb2.UINT8,
      dtypes.uint32: _types_pb2.UINT32,
      dtypes.uint64: _types_pb2.UINT64,
      dtypes.string: _types_pb2.STRING,
      dtypes.bool: _types_pb2.BOOL,
      dtypes.complex64: _types_pb2.COMPLEX64,
      dtypes.complex128: _types_pb2.COMPLEX128,
  }
  if tf_type not in tf_to_tflite_types:
    raise ValueError("Unsupported TensorFlow type `{0}` provided for the {1}"
                     .format(tf_type, usage))
  return tf_to_tflite_types[tf_type]
# Only a few restricted tensor types are allowed for explicitly setting
# inference/input/output types.
def convert_inference_tf_type_to_tflite_type(
    tf_type: dtypes.DType, usage: str = "") -> _types_pb2.IODataType:
  """Map a TensorFlow inference dtype onto the equivalent TFLite type.

  Only the restricted set of types allowed for inference input/output is
  accepted here, unlike `convert_tensor_tf_type_to_tflite_type`.

  Args:
    tf_type: TensorFlow type.
    usage: Text describing the reason for invoking this function.

  Returns:
    tflite_type: TFLite type. Refer to lite/toco/types.proto.

  Raises:
    ValueError: If `tf_type` is unsupported.
  """
  tf_to_tflite_inference_types = {
      dtypes.float32: _types_pb2.FLOAT,
      dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
      dtypes.int8: _types_pb2.QUANTIZED_INT8,
      dtypes.int16: _types_pb2.QUANTIZED_INT16,
  }
  if tf_type not in tf_to_tflite_inference_types:
    raise ValueError("Unsupported TensorFlow type `{0}` provided for the {1}"
                     .format(tf_type, usage))
  return tf_to_tflite_inference_types[tf_type]
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Direct in-process API: no external helper binary is needed.
  _toco_from_proto_bin = ""
else:
  # Resolve the binary relative to this file (bazel runfiles layout).
  _toco_from_proto_bin = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos")
# If the bazel-relative path does not exist, assume a pip install and rely on
# the `toco_from_protos` console script being available on PATH.
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
  _toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return six.ensure_text(output)
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """
  # Convert model using TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"
  # Convert model using TensorFlow ops. Not all TensorFlow ops are available.
  # WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"
  # Convert model using only TensorFlow Lite quantized int8 operations.
  # Specifying this will throw an error for operations that do not yet have
  # quantized implementations.
  TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
  # Convert model using only TensorFlow Lite operations with quantized int8
  # weights, int16 activations and int64 bias.
  # Specifying this will throw an error for operations that do not yet have
  # quantized implementations.
  # This quantization mode may be used in models for super-resolution,
  # audio signal processing or image de-noising. It improves accuracy
  # significantly, but only slightly increases the model size.
  # WARNING: These ops are currently experimental and have not yet been
  # finalized.
  # They are only compatible with CPU execution, and have not been optimized for
  # production.
  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = \
      "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
  def __str__(self):
    # The string form is the flag/proto value, e.g. "SELECT_TF_OPS".
    return str(self.value)
  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return [str(option) for option in list(OpsSet)]
class ConverterError(Exception):
  """Raised when an error occurs during model conversion."""
def mlir_quantize(input_data_str,
                  disable_per_channel=False,
                  fully_quantize=False,
                  inference_type=_types_pb2.QUANTIZED_INT8,
                  enable_numeric_verify=False):
  """Applies calibration-based quantization to a serialized model.

  Args:
    input_data_str: Serialized model (e.g. a TFLITE flatbuffer) carrying
      calibration results.
    disable_per_channel: If True, use per-tensor rather than per-channel
      quantization.
    fully_quantize: If True, quantize the model inputs/outputs in addition to
      the model body.
    inference_type: Data type for the activations. The default value is int8.
    enable_numeric_verify: Experimental. Subject to change. If True, add
      NumericVerify ops into the debug mode quantized model.

  Returns:
    Quantized model in serialized form (e.g. a TFLITE model) with
    floating-point inputs and outputs.
  """
  # Arguments are passed positionally: the wrapped pybind entry point may not
  # accept keyword arguments.
  return wrap_toco.wrapped_experimental_mlir_quantize(
      input_data_str, disable_per_channel, fully_quantize, inference_type,
      enable_numeric_verify)
def mlir_sparsify(input_data_str):
  """Encodes sparse tensors in `input_data_str` with the proper sparse format.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model).

  Returns:
    Sparsified model in serialized form (e.g. a TFLITE model).
  """
  return wrap_toco.wrapped_experimental_mlir_sparsify(input_data_str)
def register_custom_opdefs(custom_opdefs_list):
  """Registers the given custom OpDefs with the TensorFlow global op registry.

  Args:
    custom_opdefs_list: String representing the custom ops OpDefs that are
      included in the GraphDef.

  Returns:
    True if the registration is successfully completed.
  """
  return wrap_toco.wrapped_register_custom_opdefs(custom_opdefs_list)
def toco_convert_protos(model_flags_str,
                        toco_flags_str,
                        input_data_str,
                        debug_info_str=None,
                        enable_mlir_converter=False):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.compat.v1.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information. (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of the default
      TOCO conversion. (default False)

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).

  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # Historically, TOCO conversion failures would trigger a crash, so we would
  # attempt to run the converter out-of-process. The MLIR conversion pipeline
  # surfaces errors instead, and can be safely run in-process.
  if enable_mlir_converter or not _toco_from_proto_bin:
    try:
      model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
                                                 toco_flags_str, input_data_str,
                                                 debug_info_str,
                                                 enable_mlir_converter)
      return model_str
    except Exception as e:
      # Surface the native error as a ConverterError for callers.
      raise ConverterError(str(e))
  if distutils.spawn.find_executable(_toco_from_proto_bin) is None:
    raise ConverterError("""Could not find toco_from_protos binary, make sure
your virtualenv bin directory or pip local bin directory is in your path.
In particular, if you have installed TensorFlow with --user, make sure you
add the install directory to your path.
For example:
Linux: export PATH=$PATH:~/.local/bin/
Mac: export PATH=$PATH:~/Library/Python/<version#>/bin
Alternative, use virtualenv.""")
  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  toco_filename, model_filename, input_filename, output_filename = (None, None,
                                                                    None, None)
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
        _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
        _tempfile.NamedTemporaryFile(delete=False) as fp_input, \
        _tempfile.NamedTemporaryFile(delete=False) as fp_debug:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      debug_filename = fp_debug.name
      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(six.ensure_binary(input_data_str))
      debug_info_str = debug_info_str if debug_info_str else ""
      # if debug_info_str contains a "string value", then the call to
      # fp_debug.write(debug_info_str) will fail with the following error
      #
      # TypeError: a bytes-like object is required, not 'str'
      #
      # Some of the subtests within the "convert_test" unit-test fail
      # with the error shown above. So watch out for that scenario and
      # convert debug_info_str to bytes where needed
      if not isinstance(debug_info_str, bytes):
        fp_debug.write(debug_info_str.encode("utf-8"))
      else:
        fp_debug.write(debug_info_str)
    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name
    # Run
    cmd = [
        _toco_from_proto_bin,
        model_filename,
        toco_filename,
        input_filename,
        output_filename,
        "--debug_proto_file={}".format(debug_filename),
    ]
    if enable_mlir_converter:
      cmd.append("--enable_mlir_converter")
    cmdline = " ".join(cmd)
    # close_fds is not supported together with shell pipes on Windows.
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files.
    for filename in [
        toco_filename, input_filename, model_filename, output_filename
    ]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        # TypeError covers filenames that were never assigned (still None).
        pass
def build_toco_flags(inference_type=dtypes.float32,
                     inference_input_type=None,
                     input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                     output_format=lite_constants.TFLITE,
                     default_ranges_stats=None,
                     drop_control_dependency=True,
                     reorder_across_fake_quant=False,
                     allow_custom_ops=False,
                     post_training_quantize=False,
                     quantize_to_float16=False,
                     dump_graphviz_dir=None,
                     dump_graphviz_video=False,
                     target_ops=None,
                     conversion_summary_dir=None,
                     select_user_tf_ops=None,
                     enable_tflite_resource_variables=False,
                     **_):
  """Builds a `TocoFlags` protocol buffer from the given parameters.

  Unrecognized keyword arguments are accepted and ignored (via `**_`) so that
  callers may forward a superset of conversion options.

  Returns:
    A populated `toco_flags_pb2.TocoFlags` proto.
  """
  flags = _toco_flags_pb2.TocoFlags()
  flags.input_format = input_format
  flags.output_format = output_format
  flags.inference_type = convert_inference_tf_type_to_tflite_type(
      inference_type, usage="inference_type flag")
  # The input inference type falls back to the overall inference type.
  if inference_input_type:
    flags.inference_input_type = convert_inference_tf_type_to_tflite_type(
        inference_input_type, usage="inference_input_type flag")
  else:
    flags.inference_input_type = flags.inference_type
  flags.drop_control_dependency = drop_control_dependency
  flags.reorder_across_fake_quant = reorder_across_fake_quant
  flags.allow_custom_ops = allow_custom_ops
  if select_user_tf_ops:
    flags.select_user_tf_ops.extend(select_user_tf_ops)
  flags.post_training_quantize = post_training_quantize
  flags.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    flags.default_ranges_min = default_ranges_stats[0]
    flags.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    flags.dump_graphviz_dir = dump_graphviz_dir
  flags.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    flags.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    if OpsSet.SELECT_TF_OPS in set(target_ops):
      flags.enable_select_tf_ops = True
    # When SELECT_TF_OPS is the only target, force the converter to use it.
    if set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      flags.force_select_tf_ops = True
  flags.enable_tflite_resource_variables = enable_tflite_resource_variables
  return flags
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=dtypes.float32,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None,
                              saved_model_dir=None,
                              saved_model_version=0,
                              saved_model_tags=None,
                              saved_model_exported_names=None,
                              select_user_tf_ops=None):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read.
      (default TENSORFLOW_GRAPHDEF, must be in {TENSORFLOW_GRAPHDEF})
    input_shapes: Input array shape. (default None, must be None or a list of
      the same length as `input_tensors`.)
    output_format: Output file format. (default TFLITE, must be in
      {TFLITE, GRAPHVIZ_DOT})
    quantized_input_stats: Map of input tensor names to a tuple of floats
      representing the mean and standard deviation of the training data.
      (e.g., {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8
      or tf.uint8. (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.
    saved_model_dir: Filepath of the saved model to be converted. This value
      will be non-empty only when the saved model import path will be used.
      Otherwises, the graph def-based conversion will be processed.
    saved_model_version: SavedModel file format version of The saved model file
      to be converted. This value will be set only when the SavedModel import
      path will be used.
    saved_model_tags: Set of string saved model tags, formatted in the
      comma-separated value. This value will be set only when the SavedModel
      import path will be used.
    saved_model_exported_names: Names to be exported (default: export all) when
      the saved model import path is on. This value will be set only when the
      SavedModel import path will be used.
    select_user_tf_ops: List of user's defined TensorFlow ops need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  # NOTE: arguments are forwarded positionally, so this call must stay in sync
  # with the parameter order of `build_toco_flags`.
  toco = build_toco_flags(inference_type, inference_input_type, input_format,
                          output_format, default_ranges_stats,
                          drop_control_dependency, reorder_across_fake_quant,
                          allow_custom_ops,
                          post_training_quantize, quantize_to_float16,
                          dump_graphviz_dir, dump_graphviz_video, target_ops,
                          conversion_summary_dir, select_user_tf_ops)
  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    # SavedModel import uses the raw tensor name; GraphDef import strips the
    # output-slot suffix via the util helper.
    if saved_model_dir:
      input_array.name = input_tensor.name
    else:
      input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = convert_tensor_tf_type_to_tflite_type(
        input_tensor.dtype, usage="input type of the TensorFlow model")
    if _requires_input_stats(toco) and quantized_input_stats:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]
    if shape.rank is not None:
      # Create shapes with -1 for unknown dimensions.
      dims = []
      for dim in shape:
        if (dim is None or
            (isinstance(dim, tensor_shape.Dimension) and dim.value is None)):
          dims.append(-1)
        else:
          dims.append(int(dim))
      input_array.shape.dims.extend(dims)
      input_array.shape.unknown_rank = False
    else:
      input_array.shape.unknown_rank = True
  for output_tensor in output_tensors:
    if saved_model_dir:
      model.output_arrays.append(output_tensor.name)
    else:
      model.output_arrays.append(util.get_tensor_name(output_tensor))
  model.allow_nonexistent_arrays = allow_nonexistent_arrays
  if saved_model_dir:
    model.saved_model_dir = saved_model_dir
    model.saved_model_version = saved_model_version
  if saved_model_tags:
    model.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    model.saved_model_exported_names.extend(saved_model_exported_names)
  return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, _ = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)
  # Input arrays are provided by name/shape here instead of as tensors.
  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    if _requires_input_stats(toco_flags):
      if (("quantized_input_stats" not in kwargs) or
          (not kwargs["quantized_input_stats"])):
        raise ValueError(
            "The `quantized_input_stats` flag must be defined when either "
            "`inference_type` flag or `inference_input_type` flag is set to "
            "tf.int8 or tf.uint8.")
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][idx]
    input_array.name = name
    input_array.shape.dims.extend(list(map(int, shape)))
  for name in output_arrays:
    model_flags.output_arrays.append(name)
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      enable_mlir_converter=enable_mlir_converter)
  return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
                      enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, debug_info = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  # Serialize debug info only when present; None skips debug emission.
  debug_info_str = debug_info.SerializeToString() if debug_info else None
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      debug_info_str=debug_info_str,
      enable_mlir_converter=enable_mlir_converter)
  return data
def convert_saved_model(saved_model_dir=None,
                        saved_model_version=0,
                        saved_model_tags=None,
                        saved_model_exported_names=None,
                        **kwargs):
  """Converts a SavedModel to TFLite using the MLIR converter.

  Args:
    saved_model_dir: Filepath of the SavedModel to convert.
    saved_model_version: SavedModel file format version.
    saved_model_tags: Optional iterable of SavedModel tags.
    saved_model_exported_names: Optional iterable of names to export.
    **kwargs: Conversion options forwarded to `build_toco_flags`.

  Returns:
    The converted model in serialized form.
  """
  conversion_flags = build_toco_flags(**kwargs)
  model = _model_flags_pb2.ModelFlags()
  if saved_model_dir:
    model.saved_model_dir = saved_model_dir
  model.saved_model_version = saved_model_version
  if saved_model_tags:
    model.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    model.saved_model_exported_names.extend(saved_model_exported_names)
  return toco_convert_protos(
      model.SerializeToString(),
      conversion_flags.SerializeToString(),
      None,  # input_data_str is unused for SavedModel conversion.
      None,  # debug_info_str is unused.
      enable_mlir_converter=True)
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Deprecated entry point for TOCO conversion.

  Thin wrapper around `toco_convert_impl`; prefer `tf.lite.TFLiteConverter`
  in new code. Conversion can be customized by providing arguments that are
  forwarded to `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`).
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`.
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data, e.g. a TFLite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  use_mlir = kwargs.get("enable_mlir_converter", False)
  return toco_convert_impl(input_data, input_tensors, output_tensors, use_mlir,
                           *args, **kwargs)
| |
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2014-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,invalid-name,no-member
"""
Tests for various argument-handling code.
"""
import sys
import io
import collections
from . import khmer_tst_utils as utils
import pytest
import khmer.kfile
from khmer import khmer_args
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# For map(long, [list of ints]) cross-version hackery
if sys.version_info.major > 2:
long = int # pylint: disable=redefined-builtin
def test_check_space():
    """check_space() must exit when free space is zero and force is off."""
    infile = utils.get_test_data('fakelump.fa')
    orig_stderr, sys.stderr = sys.stderr, io.StringIO()
    try:
        khmer.kfile.check_space(
            [infile], force=False, _testhook_free_space=0)
        assert 0, "this should fail"
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = orig_stderr
@pytest.mark.parametrize('graph_type,buckets_per_byte', [
    ('countgraph', 1),
    ('smallcountgraph', 2),
    ('nodegraph', 8),
])
def test_check_tablespace(graph_type, buckets_per_byte):
    """check_space_for_graph(): error without force, warn with force,
    silent with enough space.

    FIX: the original restored sys.stderr only on the success path; a
    failing assertion left stderr redirected for the rest of the session.
    """
    oldstderr = sys.stderr
    sys.stderr = StringIO()
    try:
        outfile = utils.get_test_data('truncated.fq')
        parser = khmer_args.build_counting_args()
        args = parser.parse_args(['-M', '16G'])
        buckets_per_table = khmer_args.calculate_graphsize(args, graph_type)
        total_buckets = buckets_per_table * args.n_tables
        space_needed = total_buckets / buckets_per_byte

        # First, try with insufficient space
        with pytest.raises(SystemExit) as se:
            khmer.kfile.check_space_for_graph(outfile, space_needed,
                                              force=False,
                                              _testhook_free_space=10e9)
        assert 'ERROR: Not enough free space' in str(se)

        # Now, try with insufficient space, but in force mode
        khmer.kfile.check_space_for_graph(outfile, space_needed, force=True,
                                          _testhook_free_space=10e9)
        assert 'WARNING: Not enough free space' in sys.stderr.getvalue()

        # Finally, try with sufficient space
        sys.stderr = StringIO()
        khmer.kfile.check_space_for_graph(outfile, space_needed, force=False,
                                          _testhook_free_space=20e9)
        assert sys.stderr.getvalue() == ''
    finally:
        sys.stderr = oldstderr
@pytest.mark.parametrize('graph_type,exp_buckets', [
    ('qfcounttable', '2.4 million buckets'),
    ('countgraph', '3.0 million buckets'),
    ('smallcountgraph', '6.0 million buckets'),
    ('nodegraph', '24.0 million buckets'),
])
def test_check_tablespace_nodegraph(graph_type, exp_buckets):
    """Total bucket count for a 3G memory budget matches per graph type."""
    parser = khmer_args.build_counting_args()
    args = parser.parse_args(['-M', '3G'])
    per_table = khmer_args.calculate_graphsize(args, graph_type)
    total = per_table * args.n_tables
    # NOTE(review): the divisor is 1e9 although the label says "million";
    # both sides of the comparison use the same convention so the test is
    # self-consistent -- confirm intent before changing either.
    sizestr = '{:.1f} million buckets'.format(float(total) / 1e9)
    assert sizestr == exp_buckets
def test_normal_help(capsys):
    """Expert options -x and -N stay hidden in the plain --help output."""
    parser = khmer_args.build_graph_args()
    with pytest.raises(SystemExit):
        parser.parse_args(['-h'])
    captured = capsys.readouterr()
    assert "--max-tablesize" not in captured[0]
    assert '--n_tables' not in captured[0]
def test_expert_help(capsys):
    """-x and -N appear in the help output when --help-expert is given.

    FIX: restore sys.argv in a finally block -- the original left the
    appended '--help-expert' in argv if any assertion failed.
    """
    old_argv = sys.argv[:]
    sys.argv.append('--help-expert')
    try:
        parser = khmer_args.build_graph_args()
        with pytest.raises(SystemExit):
            parser.parse_args(['-h', '--help-expert'])
        out, err = capsys.readouterr()
        assert "--max-tablesize" in out
        assert '--n_tables' in out
    finally:
        sys.argv = old_argv
def test_check_space_force():
    """check_space() must not exit when force=True, even with no space."""
    infile = utils.get_test_data('fakelump.fa')
    orig_stderr, sys.stderr = sys.stderr, io.StringIO()
    try:
        khmer.kfile.check_space(
            [infile], force=True, _testhook_free_space=0)
        assert True, "this should pass"
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = orig_stderr
def test_check_tablespace_force():
    """check_space_for_graph() must not exit with force even at 0 space."""
    orig_stderr, sys.stderr = sys.stderr, io.StringIO()
    outfile = utils.get_test_data('truncated')
    parser = khmer_args.build_counting_args()
    args = parser.parse_args(['-M', '1e9'])
    try:
        tablesize = khmer_args.calculate_graphsize(args, 'countgraph')
        khmer.kfile.check_space_for_graph(outfile, tablesize,
                                          True, _testhook_free_space=0)
        assert True, "this should pass"
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = orig_stderr
def test_invalid_file_warn():
    """Two missing input files must produce exactly two warning lines."""
    orig_stderr, sys.stderr = sys.stderr, io.StringIO()
    try:
        khmer.kfile.check_valid_file_exists(["nonexistent", "nonexistent2"])
        assert sys.stderr.getvalue().count("\n") == 2, \
            "Should produce two warning lines"
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = orig_stderr
def test_check_valid_stdin_nowarn():
    """stdin ('-') as the input file must produce no warnings."""
    orig_stderr, sys.stderr = sys.stderr, io.StringIO()
    try:
        khmer.kfile.check_valid_file_exists(["-"])
        warnings_text = sys.stderr.getvalue()
        assert warnings_text.count("\n") == 0, warnings_text
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = orig_stderr
# Stand-in for an argparse.Namespace carrying just the fields that the
# khmer_args graph-creation helpers read.
FakeArgparseObject = collections.namedtuple(
    'FakeArgs',
    ['ksize', 'n_tables', 'max_tablesize', 'max_memory_usage',
     'unique_kmers', 'small_count', 'force'])
def test_create_countgraph_1():
    """Default countgraph table sizes are as expected and fit the budget."""
    max_mem = 1e7
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              max_mem, 0, False, 0)
    countgraph = khmer_args.create_countgraph(args)
    expected_hashsz = utils.longify([2499997, 2499989, 2499983, 2499967])
    assert countgraph.hashsizes() == expected_hashsz, countgraph.hashsizes()
    assert sum(countgraph.hashsizes()) < max_mem, sum(countgraph.hashsizes())
def test_create_countgraph_2():
    """An explicit ksize passed to create_countgraph overrides args.ksize."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    countgraph = khmer_args.create_countgraph(args, ksize=15)
    assert countgraph.ksize() == 15
def test_create_countgraph_3():
    """A k-mer size above 32 must abort with an informative message."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_countgraph(args, ksize=35)
        assert 0, "should not reach this"
    except SystemExit:
        err = capture.getvalue()
        assert 'khmer only supports k-mer sizes <= 32.' in err, err
    finally:
        sys.stderr = old_stderr
def test_create_countgraph_4():
    """More than 20 tables without force must abort."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              21,  # some number larger than 20
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_countgraph(args, ksize=None)
        assert 0, "should not reach this"
    except SystemExit:
        err = capture.getvalue()
        assert 'khmer only supports number of tables <= 20.' in err, err
    finally:
        sys.stderr = old_stderr
def test_create_countgraph_5():
    """More than 20 tables WITH force must only warn, not exit."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              21,  # some number larger than 20
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 1)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_countgraph(args, ksize=None)
        warning = ("Warning: Maximum recommended number of tables is 20, "
                   "discarded by force nonetheless!")
        assert warning in capture.getvalue()
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = old_stderr
def test_create_countgraph_4_multiplier():
    """The multiplier argument scales the allowed memory budget."""
    max_mem = 1e7
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              max_mem, 0, False, 0)
    countgraph = khmer_args.create_countgraph(args, multiplier=2.0)
    assert sum(countgraph.hashsizes()) < max_mem * 2.0, \
        sum(countgraph.hashsizes())
def test_create_nodegraph_1():
    """Default nodegraph table sizes are as expected and fit the budget."""
    max_mem = 1e7
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              max_mem, 0, False, 0)
    nodegraph = khmer_args.create_nodegraph(args)
    expected_hashsz = utils.longify([19999999, 19999981, 19999963, 19999927])
    assert nodegraph.hashsizes() == expected_hashsz, nodegraph.hashsizes()
    # nodegraph packs 8 buckets per byte, hence the divide-by-eight
    assert sum(nodegraph.hashsizes()) / \
        8.0 < max_mem, sum(nodegraph.hashsizes())
def test_create_nodegraph_2():
    """An explicit ksize passed to create_nodegraph overrides args.ksize."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    nodegraph = khmer_args.create_nodegraph(args, ksize=15)
    assert nodegraph.ksize() == 15
def test_create_nodegraph_3():
    """A k-mer size above 32 must abort with an informative message.

    FIX: the original redirected sys.stderr and never restored it (unlike
    test_create_countgraph_3), silencing every later test in the session.
    """
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_nodegraph(args, ksize=35)
        assert 0, "should not reach this"
    except SystemExit:
        err = capture.getvalue()
        assert 'khmer only supports k-mer sizes <= 32.' in err, err
    finally:
        sys.stderr = old_stderr
def test_create_nodegraph_4():
    """More than 20 tables without force must abort.

    FIX: save and restore sys.stderr (the original leaked the redirect),
    matching the countgraph counterpart of this test.
    """
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              21,  # some number larger than 20
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_nodegraph(args, ksize=None)
        assert 0, "should not reach this"
    except SystemExit:
        err = capture.getvalue()
        assert 'khmer only supports number of tables <= 20.' in err, err
    finally:
        sys.stderr = old_stderr
def test_create_nodegraph_5():
    """More than 20 tables WITH force must only warn, not exit.

    FIX: save and restore sys.stderr (the original leaked the redirect),
    matching the countgraph counterpart of this test.
    """
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              21,  # some number larger than 20
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 1)
    old_stderr = sys.stderr
    sys.stderr = capture = StringIO()
    try:
        khmer_args.create_nodegraph(args, ksize=None)
        message = "Warning: Maximum recommended number of tables is 20, " + \
                  "discarded by force nonetheless!"
        assert message in capture.getvalue()
    except SystemExit as e:
        print(str(e))
    finally:
        sys.stderr = old_stderr
def test_create_nodegraph_4_multiplier():
    """The multiplier argument scales the allowed nodegraph budget."""
    max_mem = 1e7
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              max_mem, 0, False, 0)
    nodegraph = khmer_args.create_nodegraph(args, multiplier=2.0)
    # 8 buckets per byte, so compare bytes (bits / 8) against the budget
    assert sum(nodegraph.hashsizes()) / 8.0 < max_mem * 2.0, \
        sum(nodegraph.hashsizes())
def test_report_on_config_bad_graphtype():
    """report_on_config() must raise ValueError for an unknown graph type."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    try:
        khmer_args.report_on_config(args, 'foograph')
        assert 0, "the previous statement should raise an exception"
    except ValueError as err:
        assert "unknown graph type: foograph" in str(err), str(err)
def test_fail_calculate_foograph_size():
    """calculate_graphsize() must raise ValueError for an unknown type."""
    args = FakeArgparseObject(khmer_args.DEFAULT_K,
                              khmer_args.DEFAULT_N_TABLES,
                              khmer_args.DEFAULT_MAX_TABLESIZE,
                              1e7, 0, False, 0)
    try:
        khmer_args.calculate_graphsize(args, 'foograph')
        assert 0, "previous statement should fail"
    except ValueError as err:
        assert "unknown graph type: foograph" in str(err), str(err)
def test_memory_setting():
    """memory_setting() parses plain numbers, scientific notation and the
    K/M/G/T suffixes (case-insensitive), and rejects anything else.

    Refactored: table-driven loops replace four copy-pasted try/except
    blocks and ten individual asserts; the checked values and the expected
    error text are unchanged.
    """
    valid = [
        ('1', 1.0),
        ('42', 42.0),
        ('10000', 1e4),
        ('2.3e5', 230000.0),
        ('1e9', 1e9),
        ('1K', 1e3),
        ('3.14m', 3.14e6),
        ('8G', 8e9),
        ('8g', 8e9),
        ('16T', 16e12),
    ]
    for setting, expected in valid:
        assert khmer_args.memory_setting(setting) == expected, setting

    for bogus in ('16Tb', '16E', '16Ki', 'b0gu$G'):
        try:
            _ = khmer_args.memory_setting(bogus)
            assert False, 'previous command should have failed'
        except ValueError as err:
            assert 'cannot parse memory setting' in str(err)
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgetbase.py
__version__=''' $Id: widgetbase.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__='''Base class for user-defined graphical widgets'''
import string
from reportlab.graphics import shapes
from reportlab import rl_config
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
class PropHolder:
    '''Base for property holders: validated attributes plus recursive
    get/set/dump of public properties.'''

    # Subclasses install an AttrMap describing the legal attributes;
    # None disables verification.
    _attrMap = None

    def verify(self):
        """If the _attrMap attribute is not None, this
        checks all expected attributes are present; no
        unwanted attributes are present; and (if a
        checking function is found) checks each
        attribute has a valid value.  Either succeeds
        or raises an informative exception.
        """
        if self._attrMap is not None:
            for key in self.__dict__.keys():
                if key[0] != '_':
                    msg = "Unexpected attribute %s found in %s" % (key, self)
                    assert key in self._attrMap, msg
            for (attr, metavalue) in self._attrMap.items():
                msg = "Missing attribute %s from %s" % (attr, self)
                assert hasattr(self, attr), msg
                value = getattr(self, attr)
                args = (value, attr, self.__class__.__name__)
                assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % args

    if rl_config.shapeChecking:
        """This adds the ability to check every attribute assignment
        as it is made. It slows down shapes but is a big help when
        developing. It does not get defined if rl_config.shapeChecking = 0.
        """
        def __setattr__(self, name, value):
            """By default we verify.  This could be off
            in some parallel base classes."""
            validateSetattr(self, name, value)

    def getProperties(self, recur=1):
        """Returns a list of all properties which can be edited and
        which are not marked as private. This may include 'child
        widgets' or 'primitive shapes'.  You are free to override
        this and provide alternative implementations; the default
        one simply returns everything without a leading underscore.
        """
        from reportlab.lib.validators import isValidChild
        # TODO when we need it, but not before -
        # expose sequence contents?
        props = {}
        for name in self.__dict__.keys():
            if name[0:1] != '_':
                component = getattr(self, name)
                if recur and isValidChild(component):
                    # child object, get its properties too
                    childProps = component.getProperties(recur=recur)
                    for (childKey, childValue) in childProps.items():
                        # key might be something indexed like '[2].fillColor'
                        # or simple like 'fillColor'; in the former case we
                        # don't need a '.' between me and my child.
                        if childKey[0] == '[':
                            props['%s%s' % (name, childKey)] = childValue
                        else:
                            props['%s.%s' % (name, childKey)] = childValue
                else:
                    props[name] = component
        return props

    def setProperties(self, propDict):
        """Permits bulk setting of properties.  These may include
        child objects e.g. "chart.legend.width = 200".

        All assignments will be validated by the object as if they
        were set individually in python code.

        All properties of a top-level object are guaranteed to be
        set before any of the children, which may be helpful to
        widget designers.
        """
        childPropDicts = {}
        for (name, value) in propDict.items():
            parts = name.split('.', 1)
            if len(parts) == 1:
                # simple attribute, set it now
                setattr(self, name, value)
            else:
                (childName, remains) = parts
                try:
                    childPropDicts[childName][remains] = value
                except KeyError:
                    childPropDicts[childName] = {remains: value}

        # now assign to children
        for (childName, childPropDict) in childPropDicts.items():
            child = getattr(self, childName)
            child.setProperties(childPropDict)

    def dumpProperties(self, prefix=""):
        """Convenience. Lists them on standard output.  You
        may provide a prefix - mostly helps to generate code
        samples for documentation.
        """
        # FIX: dict.items() returns a view on Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        propList = sorted(self.getProperties().items())
        if prefix:
            prefix = prefix + '.'
        for (name, value) in propList:
            print('%s%s = %s' % (prefix, name, value))
class Widget(PropHolder, shapes.UserNode):
    """Base for all user-defined widgets.  Keep as simple as possible. Does
    not inherit from Shape so that we can rewrite shapes without breaking
    widgets and vice versa."""

    def _setKeywords(self, **kw):
        # fill in only those attributes the instance does not already have
        current = self.__dict__
        for key, value in kw.items():
            if key not in current:
                setattr(self, key, value)

    def draw(self):
        # subclasses must render themselves into a shape/group
        raise shapes.NotImplementedError("draw() must be implemented for each Widget!")

    def demo(self):
        # subclasses must provide a sample rendering for the doc tools
        raise shapes.NotImplementedError("demo() must be implemented for each Widget!")

    def provideNode(self):
        return self.draw()

    def getBounds(self):
        "Return outer boundary as x1,y1,x2,y2. Can be overridden for efficiency"
        return self.draw().getBounds()
class ScaleWidget(Widget):
    '''Contents drawn with a uniform scale and an (x, y) offset'''
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber,desc="x offset"),
        y = AttrMapValue(isNumber,desc="y offset"),
        scale = AttrMapValue(isNumber,desc="scale"),
        contents = AttrMapValue(None,desc="Contained drawable elements"),
        )

    def __init__(self, x=0, y=0, scale=1.0, contents=None):
        self.x = x
        self.y = y
        # normalize contents to a fresh list; a single drawable is wrapped
        if not contents:
            contents = []
        elif not isinstance(contents, (tuple, list)):
            contents = (contents,)
        self.contents = list(contents)
        self.scale = scale

    def draw(self):
        # uniform scale plus translation, applied to all contents at once
        return shapes.Group(transform=(self.scale, 0, 0,
                                       self.scale, self.x, self.y),
                            *self.contents)
# Cache of dynamically built wrapper classes, keyed by the wrapped class;
# populated lazily by TypedPropertyCollection.__getitem__ and shared by
# all TypedPropertyCollection instances.
_ItemWrapper={}
class CloneMixin:
    """Mixin providing a shallow clone() with optional attribute overrides."""
    def clone(self, **kwds):
        duplicate = self.__class__()
        # copy the instance dict wholesale, then layer on any overrides
        duplicate.__dict__.clear()
        duplicate.__dict__.update(self.__dict__)
        if kwds:
            duplicate.__dict__.update(kwds)
        return duplicate
class TypedPropertyCollection(PropHolder):
    """A container with properties for objects of the same kind.

    This makes it easy to create lists of objects. You initialize
    it with a class of what it is to contain, and that is all you
    can add to it.  You can assign properties to the collection
    as a whole, or to a numeric index within it; if so it creates
    a new child object to hold that data.

    So:
        wedges = TypedPropertyCollection(WedgeProperties)
        wedges.strokeWidth = 2                # applies to all
        wedges.strokeColor = colors.red       # applies to all
        wedges[3].strokeColor = colors.blue   # only to one

    The last line should be taken as a prescription of how to
    create wedge no. 3 if one is needed; no error is raised if
    there are only two data points.

    We try and make sensible use of tuple indeces.
        line[(3,x)] is backed by line[(3,)], line[3] & line
    """

    def __init__(self, exampleClass):
        #give it same validation rules as what it holds
        # direct __dict__ writes bypass PropHolder's validating __setattr__
        self.__dict__['_value'] = exampleClass()
        self.__dict__['_children'] = {}

    def wKlassFactory(self,Klass):
        # Builds the wrapper class used for children: attribute lookups
        # that miss on the child fall back along the index chain and
        # finally to the parent collection.
        class WKlass(Klass,CloneMixin):
            def __getattr__(self,name):
                try:
                    return self.__class__.__bases__[0].__getattr__(self,name)
                except:
                    i = self._index
                    if i:
                        c = self._parent._children
                        # try the backing entry for this (partial) index
                        if i in c and name in c[i].__dict__:
                            return getattr(c[i],name)
                        elif len(i)==1:
                            # single-element tuple: also try the bare index
                            i = i[0]
                            if i in c and name in c[i].__dict__:
                                return getattr(c[i],name)
                    # last resort: the collection-wide default value
                    return getattr(self._parent,name)
        return WKlass

    def __getitem__(self, index):
        """Return the child for *index*, creating (and caching) it lazily."""
        try:
            return self._children[index]
        except KeyError:
            Klass = self._value.__class__
            # reuse the wrapper class for this Klass if already built
            if Klass in _ItemWrapper:
                WKlass = _ItemWrapper[Klass]
            else:
                _ItemWrapper[Klass] = WKlass = self.wKlassFactory(Klass)

            child = WKlass()
            child._parent = self
            if type(index) in (type(()),type([])):
                index = tuple(index)
                if len(index)>1:
                    # tuple index: the child is backed by its prefix
                    child._index = tuple(index[:-1])
                else:
                    child._index = None
            else:
                child._index = None
            # strip attributes copied from the example instance so lookups
            # fall through WKlass.__getattr__ to the parent defaults
            for i in filter(lambda x,K=child.__dict__.keys(): x in K,child._attrMap.keys()):
                del child.__dict__[i]

            self._children[index] = child
            return child

    def __contains__(self,key):
        # list/tuple keys are normalized to tuples, as in __getitem__
        if type(key) in (type(()),type([])): key = tuple(key)
        return key in self._children

    def __setitem__(self, key, value):
        # NOTE(review): the value is type-checked but never stored; lookups
        # always go through __getitem__'s lazily created children -- confirm
        # whether storing was ever intended.
        msg = "This collection can only hold objects of type %s" % self._value.__class__.__name__
        assert isinstance(value, self._value.__class__), msg

    def __len__(self):
        return len(self._children.keys())

    def getProperties(self,recur=1):
        # return any children which are defined and whatever
        # differs from the parent
        props = {}

        for (key, value) in self._value.getProperties(recur=recur).items():
            props['%s' % key] = value

        for idx in self._children.keys():
            childProps = self._children[idx].getProperties(recur=recur)
            for (key, value) in childProps.items():
                if not hasattr(self,key) or getattr(self, key)!=value:
                    newKey = '[%s].%s' % (idx, key)
                    props[newKey] = value
        return props

    def setVector(self,**kw):
        # distribute a sequence of values across children 0..n-1
        for name, value in kw.items():
            for i in range(len(value)):
                setattr(self[i],name,value[i])

    def __getattr__(self,name):
        # collection-wide attribute reads delegate to the example instance
        return getattr(self._value,name)

    def __setattr__(self,name,value):
        # collection-wide attribute writes go to the example instance,
        # which carries the validation rules
        return setattr(self._value,name,value)
## No longer needed!
class StyleProperties(PropHolder):
    """A container class for attributes used in charts and legends.

    Attributes contained can be those for any graphical element
    (shape?) in the ReportLab graphics package. The idea for this
    container class is to be useful in combination with legends
    and/or the individual appearance of data series in charts.

    A legend could be as simple as a wrapper around a list of style
    properties, where the 'desc' attribute contains a descriptive
    string and the rest could be used by the legend e.g. to draw
    something like a color swatch. The graphical presentation of
    the legend would be its own business, though.

    A chart could be inspecting a legend or, more directly, a list
    of style properties to pick individual attributes that it knows
    about in order to render a particular row of the data. A bar
    chart e.g. could simply use 'strokeColor' and 'fillColor' for
    drawing the bars while a line chart could also use additional
    ones like strokeWidth.
    """

    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber,desc='width of the stroke line'),
        strokeLineCap = AttrMapValue(isNumber,desc='Line cap 0=butt, 1=round & 2=square',advancedUsage=1),
        strokeLineJoin = AttrMapValue(isNumber,desc='Line join 0=miter, 1=round & 2=bevel',advancedUsage=1),
        strokeMiterLimit = AttrMapValue(None,desc='miter limit control miter line joins',advancedUsage=1),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing patterns e.g. (1,3)'),
        strokeOpacity = AttrMapValue(isNumber,desc='level of transparency (alpha) accepts values between 0..1',advancedUsage=1),
        strokeColor = AttrMapValue(isColorOrNone,desc='the color of the stroke'),
        fillColor = AttrMapValue(isColorOrNone,desc='the filling color'),
        desc = AttrMapValue(isString),
        )

    def __init__(self, **kwargs):
        "Initialize with attributes if any."
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __setattr__(self, name, value):
        "Verify attribute name and value, before setting it."
        validateSetattr(self, name, value)
class TwoCircles(Widget):
    """Minimal demo widget: two red circles side by side."""
    def __init__(self):
        self.leftCircle = shapes.Circle(100, 100, 20, fillColor=colors.red)
        self.rightCircle = shapes.Circle(300, 100, 20, fillColor=colors.red)

    def draw(self):
        return shapes.Group(self.leftCircle, self.rightCircle)
class Face(Widget):
    """This draws a face with two eyes.

    It exposes a couple of properties
    to configure itself and hides all other details.
    """

    _attrMap = AttrMap(
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        size = AttrMapValue(isNumber),
        skinColor = AttrMapValue(isColorOrNone),
        eyeColor = AttrMapValue(isColorOrNone),
        mood = AttrMapValue(OneOf('happy','sad','ok')),
        )

    def __init__(self):
        self.x = 10
        self.y = 10
        self.size = 80
        self.skinColor = None
        self.eyeColor = colors.blue
        self.mood = 'happy'

    def demo(self):
        pass

    def _addEye(self, g, cx, cy):
        # one eye: white ball with a colored pupil, both centered at (cx, cy)
        s = self.size
        g.add(shapes.Circle(cx, cy, s * 0.1, fillColor=colors.white))
        g.add(shapes.Circle(cx, cy, s * 0.05, fillColor=self.eyeColor))

    def draw(self):
        s = self.size  # abbreviate as we will use this a lot
        g = shapes.Group()
        g.transform = [1, 0, 0, 1, self.x, self.y]

        # background disc in the skin color
        g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5,
                            fillColor=self.skinColor))

        # left and right eyes
        self._addEye(g, s * 0.35, s * 0.65)
        self._addEye(g, s * 0.65, s * 0.65)

        # nose
        g.add(shapes.Polygon(
            points=[s * 0.5, s * 0.6, s * 0.4, s * 0.3, s * 0.6, s * 0.3],
            fillColor=None))

        # mouth: corners curve up for 'happy', down for 'sad', flat for 'ok'
        offset = {'happy': -0.05, 'sad': 0.05}.get(self.mood, 0)
        g.add(shapes.Polygon(
            points = [
                s * 0.3, s * 0.2,             #left of mouth
                s * 0.7, s * 0.2,             #right of mouth
                s * 0.6, s * (0.2 + offset),  # the bit going up or down
                s * 0.4, s * (0.2 + offset),  # the bit going up or down
                ],
            fillColor = colors.pink,
            strokeColor = colors.red,
            strokeWidth = s * 0.03,
            ))

        return g
class TwoFaces(Widget):
    """Demo widget composing two Face widgets: one happy, one sad."""
    def __init__(self):
        self.faceOne = Face()
        self.faceOne.mood = "happy"
        self.faceTwo = Face()
        self.faceTwo.x = 100
        self.faceTwo.mood = "sad"

    def draw(self):
        """Just return a group"""
        return shapes.Group(self.faceOne, self.faceTwo)

    def demo(self):
        """The default case already looks good enough,
        no implementation needed here"""
        pass
class Sizer(Widget):
    "Container to show size of all enclosed objects"
    _attrMap = AttrMap(BASE=shapes.SolidShape,
        contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
        )

    def __init__(self, *elements):
        self.contents = []
        self.fillColor = colors.cyan
        self.strokeColor = colors.magenta
        for elem in elements:
            self.add(elem)

    def _addNamedNode(self, name, node):
        'if name is not None add an attribute pointing to node and add to the attrMap'
        if name:
            if name not in self._attrMap.keys():
                self._attrMap[name] = AttrMapValue(isValidChild)
            setattr(self, name, node)

    def add(self, node, name=None):
        """Appends non-None child node to the 'contents' attribute. In addition,
        if a name is provided, it is subsequently accessible by name
        """
        # propagates properties down
        if node is None:
            return
        assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
        self.contents.append(node)
        self._addNamedNode(name, node)

    def getBounds(self):
        # union of the bounds of every contained element
        if not self.contents:
            return (0, 0, 0, 0)
        return shapes.getRectsBounds([elem.getBounds()
                                      for elem in self.contents])

    def draw(self):
        g = shapes.Group()
        x1, y1, x2, y2 = self.getBounds()
        # colored backdrop rectangle visualizing the combined bounds
        g.add(shapes.Rect(
            x=x1,
            y=y1,
            width=x2 - x1,
            height=y2 - y1,
            fillColor=self.fillColor,
            strokeColor=self.strokeColor,
            ))
        for elem in self.contents:
            g.add(elem)
        return g
def test():
    """Ad-hoc demo: renders a few sample widgets to PDF files."""
    from reportlab.graphics.charts.piecharts import WedgeProperties
    # FIX: the bare "import renderPDF" relied on Python 2 implicit relative
    # imports; the absolute form works on both Python 2 and 3.
    from reportlab.graphics import renderPDF

    wedges = TypedPropertyCollection(WedgeProperties)
    wedges.fillColor = colors.red
    wedges.setVector(fillColor=(colors.blue, colors.green, colors.white))
    print(len(_ItemWrapper))

    d = shapes.Drawing(400, 200)
    tc = TwoCircles()
    d.add(tc)
    renderPDF.drawToFile(d, 'sample_widget.pdf', 'A Sample Widget')
    print('saved sample_widget.pdf')

    d = shapes.Drawing(400, 200)
    f = Face()
    f.skinColor = colors.yellow
    f.mood = "sad"
    d.add(f, name='theFace')
    print('drawing 1 properties:')
    d.dumpProperties()
    renderPDF.drawToFile(d, 'face.pdf', 'A Sample Widget')
    print('saved face.pdf')

    d2 = d.expandUserNodes()
    renderPDF.drawToFile(d2, 'face_copy.pdf', 'An expanded drawing')
    print('saved face_copy.pdf')
    print('drawing 2 properties:')
    d2.dumpProperties()
# Run the demo renderer when this module is executed directly.
if __name__=='__main__':
    test()
| |
#! /usr/bin/env python
"""Generate C code from an ASDL description."""
# TO DO
# handle fields that have a type but no name
import os, sys, traceback
import asdl
TABSIZE = 8    # spaces per indentation level in the emitted C code
MAX_COL = 80   # soft limit on emitted line width (see reflow_lines)
def get_c_type(name):
    """Return a string for the C name of the type.

    This function special cases the default types provided by asdl:
    identifier, string, int, bool.
    """
    # XXX ack! need to figure out where Id is useful and where string
    if isinstance(name, asdl.Id):
        name = name.value
    # builtins map straight through; everything else gets the _ty suffix
    if name in asdl.builtin_types:
        return name
    return "%s_ty" % name
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
# XXX this should be fixed for real
if i == -1 and 'GeneratorExp' in cur:
i = size + 3
assert i != -1, "Impossible line %d to reflow: %s" % (size, `s`)
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
else:
j = cur.find('(', 0, i)
if j >= 0:
j += 1 # account for the paren (no space after it)
size -= j
padding = " " * j
cur = cur[i+1:]
else:
lines.append(padding + cur)
return lines
def is_simple(sum):
    """Return True if a sum is a simple.

    A sum is simple when none of its constructors carries fields, e.g.
        unaryop = Invert | Not | UAdd | USub
    """
    return not any(t.fields for t in sum.types)
class EmitVisitor(asdl.VisitorBase):
    """Visitor base class that writes indented lines to an output file."""

    def __init__(self, file):
        self.file = file
        super(EmitVisitor, self).__init__()

    def emit(self, s, depth, reflow=1):
        # Optionally reflow s so that no emitted line exceeds MAX_COL.
        lines = reflow_lines(s, depth) if reflow else [s]
        indent = " " * TABSIZE * depth
        for line in lines:
            self.file.write(indent + line + "\n")
class TypeDefVisitor(EmitVisitor):
    """Emits a C typedef for every type defined in the ASDL module."""
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)
    def visitSum(self, sum, name, depth):
        # Field-less sums become plain enums; the rest become struct pointers.
        if is_simple(sum):
            self.simple_sum(sum, name, depth)
        else:
            self.sum_with_constructors(sum, name, depth)
    def simple_sum(self, sum, name, depth):
        # Enumerators start at 1 so that 0 never aliases a valid kind.
        enum = []
        for i in range(len(sum.types)):
            type = sum.types[i]
            enum.append("%s=%d" % (type.name, i + 1))
        enums = ", ".join(enum)
        ctype = get_c_type(name)
        s = "typedef enum _%s { %s } %s;" % (name, enums, ctype)
        self.emit(s, depth)
        self.emit("", depth)
    def sum_with_constructors(self, sum, name, depth):
        # Forward-declare the struct; its body is emitted by StructVisitor.
        ctype = get_c_type(name)
        s = "typedef struct _%(name)s *%(ctype)s;" % locals()
        self.emit(s, depth)
        self.emit("", depth)
    def visitProduct(self, product, name, depth):
        ctype = get_c_type(name)
        s = "typedef struct _%(name)s *%(ctype)s;" % locals()
        self.emit(s, depth)
        self.emit("", depth)
class StructVisitor(EmitVisitor):
    """Visitor to generate typedefs for AST."""
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)
    def visitSum(self, sum, name, depth):
        # Simple sums already got an enum typedef; only compound sums
        # need a struct body.
        if not is_simple(sum):
            self.sum_with_constructors(sum, name, depth)
    def sum_with_constructors(self, sum, name, depth):
        # HACK: this local emit() interpolates the *caller's* locals
        # (e.g. %(name)s) into the format string via sys._getframe.
        def emit(s, depth=depth):
            self.emit(s % sys._getframe(1).f_locals, depth)
        enum = []
        for i in range(len(sum.types)):
            type = sum.types[i]
            enum.append("%s_kind=%d" % (type.name, i + 1))
        emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")
        emit("struct _%(name)s {")
        emit("enum _%(name)s_kind kind;", depth + 1)
        # One union member per constructor.
        emit("union {", depth + 1)
        for t in sum.types:
            self.visit(t, depth + 2)
        emit("} v;", depth + 1)
        for field in sum.attributes:
            # rudimentary attribute handling
            type = str(field.type)
            assert type in asdl.builtin_types, type
            emit("%s %s;" % (type, field.name), depth + 1);
        emit("};")
        emit("")
    def visitConstructor(self, cons, depth):
        if cons.fields:
            self.emit("struct {", depth)
            for f in cons.fields:
                self.visit(f, depth + 1)
            self.emit("} %s;" % cons.name, depth)
            self.emit("", depth)
        else:
            # XXX not sure what I want here, nothing is probably fine
            pass
    def visitField(self, field, depth):
        # XXX need to lookup field.type, because it might be something
        # like a builtin...
        ctype = get_c_type(field.type)
        name = field.name
        if field.seq:
            # cmpop sequences are stored as int sequences, not pointers.
            if field.type.value in ('cmpop',):
                self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
            else:
                self.emit("asdl_seq *%(name)s;" % locals(), depth)
        else:
            self.emit("%(ctype)s %(name)s;" % locals(), depth)
    def visitProduct(self, product, name, depth):
        self.emit("struct _%(name)s {" % locals(), depth)
        for f in product.fields:
            self.visit(f, depth + 1)
        self.emit("};", depth)
        self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
    """Generate function prototypes for the .h file"""
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type):
        self.visit(type.value, type.name)
    def visitSum(self, sum, name):
        # Simple sums are plain enums; no constructor functions needed.
        if is_simple(sum):
            pass # XXX
        else:
            for t in sum.types:
                self.visit(t, name, sum.attributes)
    def get_args(self, fields):
        """Return list of C argument info, one for each field.

        Argument info is 3-tuple of a C type, variable name, and flag
        that is true if type can be NULL.
        """
        args = []
        unnamed = {}
        for f in fields:
            if f.name is None:
                # Unnamed fields get synthesized names: the type name
                # first, then name1, name2, ... for repeats.
                name = f.type
                c = unnamed[name] = unnamed.get(name, 0) + 1
                if c > 1:
                    name = "name%d" % (c - 1)
            else:
                name = f.name
            # XXX should extend get_c_type() to handle this
            if f.seq:
                if f.type.value in ('cmpop',):
                    ctype = "asdl_int_seq *"
                else:
                    ctype = "asdl_seq *"
            else:
                ctype = get_c_type(f.type)
            args.append((ctype, name, f.opt or f.seq))
        return args
    def visitConstructor(self, cons, type, attrs):
        args = self.get_args(cons.fields)
        attrs = self.get_args(attrs)
        ctype = get_c_type(type)
        self.emit_function(cons.name, ctype, args, attrs)
    def emit_function(self, name, ctype, args, attrs, union=1):
        # Emit a macro alias (Name -> _Py_Name) followed by the real
        # prototype; every constructor also takes the arena allocator.
        args = args + attrs
        if args:
            argstr = ", ".join(["%s %s" % (atype, aname)
                                for atype, aname, opt in args])
            argstr += ", PyArena *arena"
        else:
            argstr = "PyArena *arena"
        margs = "a0"
        for i in range(1, len(args)+1):
            margs += ", a%d" % i
        self.emit("#define %s(%s) _Py_%s(%s)" % (name, margs, name, margs), 0,
                  reflow = 0)
        self.emit("%s _Py_%s(%s);" % (ctype, name, argstr), 0)
    def visitProduct(self, prod, name):
        self.emit_function(name, get_c_type(name),
                           self.get_args(prod.fields), [], union=0)
class FunctionVisitor(PrototypeVisitor):
    """Visitor to generate constructor functions for AST."""
    def emit_function(self, name, ctype, args, attrs, union=1):
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        argstr = ", ".join(["%s %s" % (atype, aname)
                            for atype, aname, opt in args + attrs])
        if argstr:
            argstr += ", PyArena *arena"
        else:
            argstr = "PyArena *arena"
        self.emit("%s" % ctype, 0)
        emit("%s(%s)" % (name, argstr))
        emit("{")
        emit("%s p;" % ctype, 1)
        # Emit NULL checks for every required pointer argument.
        for argtype, argname, opt in args:
            # XXX hack alert: false is allowed for a bool
            if not opt and not (argtype == "bool" or argtype == "int"):
                emit("if (!%s) {" % argname, 1)
                emit("PyErr_SetString(PyExc_ValueError,", 2)
                msg = "field %s is required for %s" % (argname, name)
                emit(' "%s");' % msg,
                     2, reflow=0)
                emit('return NULL;', 2)
                emit('}', 1)
        # The node is arena-allocated; no explicit free is ever emitted.
        emit("p = (%s)PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
        emit("if (!p) {", 1)
        emit("PyErr_NoMemory();", 2)
        emit("return NULL;", 2)
        emit("}", 1)
        if union:
            self.emit_body_union(name, args, attrs)
        else:
            self.emit_body_struct(name, args, attrs)
        emit("return p;", 1)
        emit("}")
        emit("")
    def emit_body_union(self, name, args, attrs):
        # Sum constructors: set the kind tag, then fill the union member.
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        emit("p->kind = %s_kind;" % name, 1)
        for argtype, argname, opt in args:
            emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
        for argtype, argname, opt in attrs:
            emit("p->%s = %s;" % (argname, argname), 1)
    def emit_body_struct(self, name, args, attrs):
        # Product constructors: fields live directly in the struct.
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        for argtype, argname, opt in args:
            emit("p->%s = %s;" % (argname, argname), 1)
        # Products carry no attributes.
        assert not attrs
class PickleVisitor(EmitVisitor):
    """Traversal skeleton: walks the module but emits nothing by itself.

    Subclasses override only the hooks they care about; every leaf hook
    here is a deliberate no-op.
    """
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type):
        self.visit(type.value, type.name)
    def visitSum(self, sum, name):
        pass
    def visitProduct(self, sum, name):
        pass
    def visitConstructor(self, cons, name):
        pass
    def visitField(self, sum):
        pass
class MarshalPrototypeVisitor(PickleVisitor):
    """Emits static prototypes for the marshal_write_* helper functions."""
    def prototype(self, sum, name):
        ctype = get_c_type(name)
        self.emit("static int marshal_write_%s(PyObject **, int *, %s);"
                  % (name, ctype), 0)
    # Products and sums get prototypes of identical shape.
    visitProduct = visitSum = prototype
class PyTypesDeclareVisitor(PickleVisitor):
    """Emits forward declarations: PyTypeObjects, field-name arrays and
    ast2obj_* prototypes for every ASDL type."""
    def visitProduct(self, prod, name):
        self.emit("static PyTypeObject *%s_type;" % name, 0)
        self.emit("static PyObject* ast2obj_%s(void*);" % name, 0)
        if prod.fields:
            self.emit("static char *%s_fields[]={" % name,0)
            for f in prod.fields:
                self.emit('"%s",' % f.name, 1)
            self.emit("};", 0)
    def visitSum(self, sum, name):
        self.emit("static PyTypeObject *%s_type;" % name, 0)
        if sum.attributes:
            self.emit("static char *%s_attributes[] = {" % name, 0)
            for a in sum.attributes:
                self.emit('"%s",' % a.name, 1)
            self.emit("};", 0)
        ptype = "void*"
        if is_simple(sum):
            # Simple sums convert from the enum value directly, and each
            # constructor is represented by a shared singleton object.
            ptype = get_c_type(name)
            tnames = []
            for t in sum.types:
                tnames.append(str(t.name)+"_singleton")
            tnames = ", *".join(tnames)
            self.emit("static PyObject *%s;" % tnames, 0)
        self.emit("static PyObject* ast2obj_%s(%s);" % (name, ptype), 0)
        for t in sum.types:
            self.visitConstructor(t, name)
    def visitConstructor(self, cons, name):
        self.emit("static PyTypeObject *%s_type;" % cons.name, 0)
        if cons.fields:
            self.emit("static char *%s_fields[]={" % cons.name, 0)
            for t in cons.fields:
                self.emit('"%s",' % t.name, 1)
            self.emit("};",0)
class PyTypesVisitor(PickleVisitor):
    """Emits the static C helper functions plus init_types(), which builds
    the Python type objects mirroring the ASDL hierarchy."""
    def visitModule(self, mod):
        # Verbatim C prelude: type/attribute factories and the generic
        # ast2obj_* converters for builtin ASDL types.
        self.emit("""
static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields)
{
    PyObject *fnames, *result;
    int i;
    if (num_fields) {
        fnames = PyTuple_New(num_fields);
        if (!fnames) return NULL;
    } else {
        fnames = Py_None;
        Py_INCREF(Py_None);
    }
    for(i=0; i < num_fields; i++) {
        PyObject *field = PyString_FromString(fields[i]);
        if (!field) {
            Py_DECREF(fnames);
            return NULL;
        }
        PyTuple_SET_ITEM(fnames, i, field);
    }
    result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}",
                    type, base, "_fields", fnames, "__module__", "_ast");
    Py_DECREF(fnames);
    return (PyTypeObject*)result;
}

static int add_attributes(PyTypeObject* type, char**attrs, int num_fields)
{
    int i, result;
    PyObject *s, *l = PyList_New(num_fields);
    if (!l) return 0;
    for(i = 0; i < num_fields; i++) {
        s = PyString_FromString(attrs[i]);
        if (!s) {
            Py_DECREF(l);
            return 0;
        }
        PyList_SET_ITEM(l, i, s);
    }
    result = PyObject_SetAttrString((PyObject*)type, "_attributes", l) >= 0;
    Py_DECREF(l);
    return result;
}

static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*))
{
    int i, n = asdl_seq_LEN(seq);
    PyObject *result = PyList_New(n);
    PyObject *value;
    if (!result)
        return NULL;
    for (i = 0; i < n; i++) {
        value = func(asdl_seq_GET(seq, i));
        if (!value) {
            Py_DECREF(result);
            return NULL;
        }
        PyList_SET_ITEM(result, i, value);
    }
    return result;
}

static PyObject* ast2obj_object(void *o)
{
    if (!o)
        o = Py_None;
    Py_INCREF((PyObject*)o);
    return (PyObject*)o;
}
#define ast2obj_identifier ast2obj_object
#define ast2obj_string ast2obj_object
static PyObject* ast2obj_bool(bool b)
{
    return PyBool_FromLong(b);
}

static PyObject* ast2obj_int(bool b)
{
    return PyInt_FromLong(b);
}
""", 0, reflow=False)
        # init_types() is idempotent: a static flag guards re-entry.
        self.emit("static int init_types(void)",0)
        self.emit("{", 0)
        self.emit("static int initialized;", 1)
        self.emit("if (initialized) return 1;", 1)
        self.emit('AST_type = make_type("AST", &PyBaseObject_Type, NULL, 0);', 1)
        for dfn in mod.dfns:
            self.visit(dfn)
        self.emit("initialized = 1;", 1)
        self.emit("return 1;", 1);
        self.emit("}", 0)
    def visitProduct(self, prod, name):
        if prod.fields:
            fields = name.value+"_fields"
        else:
            fields = "NULL"
        self.emit('%s_type = make_type("%s", AST_type, %s, %d);' %
                  (name, name, fields, len(prod.fields)), 1)
        self.emit("if (!%s_type) return 0;" % name, 1)
    def visitSum(self, sum, name):
        self.emit('%s_type = make_type("%s", AST_type, NULL, 0);' % (name, name), 1)
        self.emit("if (!%s_type) return 0;" % name, 1)
        if sum.attributes:
            self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
                      (name, name, len(sum.attributes)), 1)
        else:
            self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
        simple = is_simple(sum)
        for t in sum.types:
            self.visitConstructor(t, name, simple)
    def visitConstructor(self, cons, name, simple):
        if cons.fields:
            fields = cons.name.value+"_fields"
        else:
            fields = "NULL"
        self.emit('%s_type = make_type("%s", %s_type, %s, %d);' %
                  (cons.name, cons.name, name, fields, len(cons.fields)), 1)
        self.emit("if (!%s_type) return 0;" % cons.name, 1)
        if simple:
            # Simple-sum constructors are interned as singleton instances.
            self.emit("%s_singleton = PyType_GenericNew(%s_type, NULL, NULL);" %
                      (cons.name, cons.name), 1)
            self.emit("if (!%s_singleton) return 0;" % cons.name, 1)
class ASTModuleVisitor(PickleVisitor):
    """Emits init_ast(), the C initialization function for the _ast module."""
    def visitModule(self, mod):
        self.emit("PyMODINIT_FUNC", 0)
        self.emit("init_ast(void)", 0)
        self.emit("{", 0)
        self.emit("PyObject *m, *d;", 1)
        self.emit("if (!init_types()) return;", 1)
        self.emit('m = Py_InitModule3("_ast", NULL, NULL);', 1)
        self.emit("if (!m) return;", 1)
        self.emit("d = PyModule_GetDict(m);", 1)
        self.emit('if (PyDict_SetItemString(d, "AST", (PyObject*)AST_type) < 0) return;', 1)
        self.emit('if (PyModule_AddIntConstant(m, "PyCF_ONLY_AST", PyCF_ONLY_AST) < 0)', 1)
        self.emit("return;", 2)
        # Value of version: "$Revision: 53490 $"
        # The slice strips the RCS keyword wrapper; the indices assume the
        # quoted '$Revision: N $' form shown above.
        self.emit('if (PyModule_AddStringConstant(m, "__version__", "%s") < 0)' % mod.version.value[12:-3], 1)
        self.emit("return;", 2)
        for dfn in mod.dfns:
            self.visit(dfn)
        self.emit("}", 0)
    def visitProduct(self, prod, name):
        self.addObj(name)
    def visitSum(self, sum, name):
        self.addObj(name)
        for t in sum.types:
            self.visitConstructor(t, name)
    def visitConstructor(self, cons, name):
        self.addObj(cons.name)
    def addObj(self, name):
        # Publish the generated type object in the module dictionary.
        self.emit('if (PyDict_SetItemString(d, "%s", (PyObject*)%s_type) < 0) return;' % (name, name), 1)
# Sequence types that get specialized (non-generic) C representations.
_SPECIALIZED_SEQUENCES = ('stmt', 'expr')

def find_sequence(fields, doing_specialization):
    """Return True if any field uses a sequence.

    When doing_specialization is true, sequences of specialized types
    (_SPECIALIZED_SEQUENCES) are not counted.
    """
    return any(f.seq and
               (not doing_specialization or
                str(f.type) not in _SPECIALIZED_SEQUENCES)
               for f in fields)
def has_sequence(types, doing_specialization):
    """Return True if any constructor in types carries a sequence field."""
    return any(find_sequence(t.fields, doing_specialization) for t in types)
class StaticVisitor(PickleVisitor):
    # Subclasses override CODE with the literal text to emit.
    CODE = '''Very simple, always emit this static code. Overide CODE'''
    def visit(self, object):
        # Ignore the visited object entirely; dump CODE verbatim (no reflow).
        self.emit(self.CODE, 0, reflow=False)
class ObjVisitor(PickleVisitor):
    """Emits the ast2obj_* functions converting C AST nodes to Python objects."""
    def func_begin(self, name):
        # Common function prologue; a NULL node converts to Py_None.
        ctype = get_c_type(name)
        self.emit("PyObject*", 0)
        self.emit("ast2obj_%s(void* _o)" % (name), 0)
        self.emit("{", 0)
        self.emit("%s o = (%s)_o;" % (ctype, ctype), 1)
        self.emit("PyObject *result = NULL, *value = NULL;", 1)
        self.emit('if (!o) {', 1)
        self.emit("Py_INCREF(Py_None);", 2)
        self.emit('return Py_None;', 2)
        self.emit("}", 1)
        self.emit('', 0)
    def func_end(self):
        # Common epilogue with the shared `failed` cleanup label.
        self.emit("return result;", 1)
        self.emit("failed:", 0)
        self.emit("Py_XDECREF(value);", 1)
        self.emit("Py_XDECREF(result);", 1)
        self.emit("return NULL;", 1)
        self.emit("}", 0)
        self.emit("", 0)
    def visitSum(self, sum, name):
        if is_simple(sum):
            self.simpleSum(sum, name)
            return
        self.func_begin(name)
        self.emit("switch (o->kind) {", 1)
        for i in range(len(sum.types)):
            t = sum.types[i]
            self.visitConstructor(t, i + 1, name)
        self.emit("}", 1)
        # Sum attributes (e.g. lineno) are set after the kind switch.
        for a in sum.attributes:
            self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
            self.emit("if (!value) goto failed;", 1)
            self.emit('if (PyObject_SetAttrString(result, "%s", value) < 0)' % a.name, 1)
            self.emit('goto failed;', 2)
            self.emit('Py_DECREF(value);', 1)
        self.func_end()
    def simpleSum(self, sum, name):
        # Simple sums map each enum value to a shared singleton instance.
        self.emit("PyObject* ast2obj_%s(%s_ty o)" % (name, name), 0)
        self.emit("{", 0)
        self.emit("switch(o) {", 1)
        for t in sum.types:
            self.emit("case %s:" % t.name, 2)
            self.emit("Py_INCREF(%s_singleton);" % t.name, 3)
            self.emit("return %s_singleton;" % t.name, 3)
        self.emit("}", 1)
        self.emit("return NULL; /* cannot happen */", 1)
        self.emit("}", 0)
    def visitProduct(self, prod, name):
        self.func_begin(name)
        self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % name, 1);
        self.emit("if (!result) return NULL;", 1)
        for field in prod.fields:
            self.visitField(field, name, 1, True)
        self.func_end()
    def visitConstructor(self, cons, enum, name):
        self.emit("case %s_kind:" % cons.name, 1)
        self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % cons.name, 2);
        self.emit("if (!result) goto failed;", 2)
        for f in cons.fields:
            self.visitField(f, cons.name, 2, False)
        self.emit("break;", 2)
    def visitField(self, field, name, depth, product):
        def emit(s, d):
            self.emit(s, depth + d)
        # Products store fields directly; sum constructors go through the
        # union member o->v.<cons>.<field>.
        if product:
            value = "o->%s" % field.name
        else:
            value = "o->v.%s.%s" % (name, field.name)
        self.set(field, value, depth)
        emit("if (!value) goto failed;", 0)
        emit('if (PyObject_SetAttrString(result, "%s", value) == -1)' % field.name, 0)
        emit("goto failed;", 1)
        emit("Py_DECREF(value);", 0)
    def emitSeq(self, field, value, depth, emit):
        # NOTE(review): set() takes (field, value, depth); the call below
        # passes four arguments in a different order and would raise
        # TypeError if ever reached. Nothing in this class calls emitSeq,
        # so this looks like dead code -- confirm before relying on it.
        emit("seq = %s;" % value, 0)
        emit("n = asdl_seq_LEN(seq);", 0)
        emit("value = PyList_New(n);", 0)
        emit("if (!value) goto failed;", 0)
        emit("for (i = 0; i < n; i++) {", 0)
        self.set("value", field, "asdl_seq_GET(seq, i)", depth + 1)
        emit("if (!value1) goto failed;", 1)
        emit("PyList_SET_ITEM(value, i, value1);", 1)
        emit("value1 = NULL;", 1)
        emit("}", 0)
    def set(self, field, value, depth):
        if field.seq:
            # XXX should really check for is_simple, but that requires a symbol table
            if field.type.value == "cmpop":
                # While the sequence elements are stored as void*,
                # ast2obj_cmpop expects an enum
                self.emit("{", depth)
                self.emit("int i, n = asdl_seq_LEN(%s);" % value, depth+1)
                self.emit("value = PyList_New(n);", depth+1)
                self.emit("if (!value) goto failed;", depth+1)
                self.emit("for(i = 0; i < n; i++)", depth+1)
                # This cannot fail, so no need for error handling
                self.emit("PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(%s, i)));" % value,
                          depth+2, reflow=False)
                self.emit("}", depth)
            else:
                self.emit("value = ast2obj_list(%s, ast2obj_%s);" % (value, field.type), depth)
        else:
            ctype = get_c_type(field.type)
            self.emit("value = ast2obj_%s(%s);" % (field.type, value), depth, reflow=False)
class PartingShots(StaticVisitor):
    # Static epilogue appended to the generated .c file: the public entry
    # point wrapping the generated ast2obj_mod converter.
    CODE = """
PyObject* PyAST_mod2obj(mod_ty t)
{
    init_types();
    return ast2obj_mod(t);
}
"""
class ChainOfVisitors:
    """Apply a sequence of visitors to the same object, in order.

    A blank line is emitted through each visitor after it runs, so the
    output sections stay separated.
    """

    def __init__(self, *visitors):
        self.visitors = visitors

    def visit(self, object):
        for visitor in self.visitors:
            visitor.visit(object)
            visitor.emit("", 0)
def main(srcfile):
    """Parse the ASDL description in srcfile and emit the C header and/or
    source file, depending on which of INC_DIR / SRC_DIR is set."""
    # Shorten argv[0] to its last two path components for the banner.
    argv0 = sys.argv[0]
    components = argv0.split(os.sep)
    argv0 = os.sep.join(components[-2:])
    auto_gen_msg = '/* File automatically generated by %s */\n' % argv0
    mod = asdl.parse(srcfile)
    if not asdl.check(mod):
        sys.exit(1)
    if INC_DIR:
        # Emit the <name>-ast.h header: typedefs, structs, prototypes.
        p = "%s/%s-ast.h" % (INC_DIR, mod.name)
        f = open(p, "wb")
        print >> f, auto_gen_msg
        print >> f, '#include "asdl.h"\n'
        c = ChainOfVisitors(TypeDefVisitor(f),
                            StructVisitor(f),
                            PrototypeVisitor(f),
                            )
        c.visit(mod)
        print >>f, "PyObject* PyAST_mod2obj(mod_ty t);"
        f.close()
    if SRC_DIR:
        # Emit the <name>-ast.c implementation file.
        p = os.path.join(SRC_DIR, str(mod.name) + "-ast.c")
        f = open(p, "wb")
        print >> f, auto_gen_msg
        print >> f, '#include "Python.h"'
        print >> f, '#include "%s-ast.h"' % mod.name
        print >> f
        print >>f, "static PyTypeObject* AST_type;"
        v = ChainOfVisitors(
            PyTypesDeclareVisitor(f),
            PyTypesVisitor(f),
            FunctionVisitor(f),
            ObjVisitor(f),
            ASTModuleVisitor(f),
            PartingShots(f),
            )
        v.visit(mod)
        f.close()
if __name__ == "__main__":
    import sys
    import getopt

    INC_DIR = ''
    SRC_DIR = ''
    # -h <dir>: directory for the generated header
    # -c <dir>: directory for the generated C source
    opts, args = getopt.getopt(sys.argv[1:], "h:c:")
    if len(opts) != 1:
        print "Must specify exactly one output file"
        sys.exit(1)
    for o, v in opts:
        if o == '-h':
            INC_DIR = v
        if o == '-c':
            SRC_DIR = v
    if len(args) != 1:
        print "Must specify single input file"
        sys.exit(1)
    main(args[0])
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test descendant package tracking code
from test_framework.test_framework import GtacoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
# Default mempool policy limits being exercised by this test.
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25

class MempoolPackagesTest(GtacoinTestFramework):
    """Functional test of mempool ancestor/descendant package tracking.

    Node 0 runs with default package limits; node 1 additionally runs with
    -limitancestorcount=5 so the two mempools can diverge under reorgs.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False
    def setup_network(self):
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-limitancestorcount=5", "-debug"]))
        connect_nodes(self.nodes[0], 1)
        self.is_network_split = False
        self.sync_all()
    # Build a transaction that spends parent_txid:vout
    # Return amount sent
    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
        send_value = satoshi_round((value - fee)/num_outputs)
        inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
        outputs = {}
        for i in range(num_outputs):
            outputs[node.getnewaddress()] = send_value
        rawtx = node.createrawtransaction(inputs, outputs)
        signedtx = node.signrawtransaction(rawtx)
        txid = node.sendrawtransaction(signedtx['hex'])
        fulltx = node.getrawtransaction(txid, 1)
        assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
        return (txid, send_value)
    def run_test(self):
        ''' Mine some blocks and have them mature. '''
        self.nodes[0].generate(101)
        utxo = self.nodes[0].listunspent(10)
        txid = utxo[0]['txid']
        vout = utxo[0]['vout']
        value = utxo[0]['amount']
        fee = Decimal("0.0001")
        # MAX_ANCESTORS transactions off a confirmed tx should be fine
        chain = []
        for i in range(MAX_ANCESTORS):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
            value = sent_value
            chain.append(txid)
        # Check mempool has MAX_ANCESTORS transactions in it, and descendant
        # count and fees should look correct
        mempool = self.nodes[0].getrawmempool(True)
        assert_equal(len(mempool), MAX_ANCESTORS)
        descendant_count = 1
        descendant_fees = 0
        descendant_size = 0
        descendants = []
        ancestors = list(chain)
        # Walk the chain tip-to-root, accumulating the expected descendant
        # statistics as we go.
        for x in reversed(chain):
            # Check that getmempoolentry is consistent with getrawmempool
            entry = self.nodes[0].getmempoolentry(x)
            assert_equal(entry, mempool[x])
            # Check that the descendant calculations are correct
            assert_equal(mempool[x]['descendantcount'], descendant_count)
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
            descendant_size += mempool[x]['size']
            assert_equal(mempool[x]['descendantsize'], descendant_size)
            descendant_count += 1
            # Check that getmempooldescendants is correct
            assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
            descendants.append(x)
            # Check that getmempoolancestors is correct
            ancestors.remove(x)
            assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
        # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
        v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
        assert_equal(len(v_ancestors), len(chain)-1)
        for x in v_ancestors.keys():
            assert_equal(mempool[x], v_ancestors[x])
        assert(chain[-1] not in v_ancestors.keys())
        v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
        assert_equal(len(v_descendants), len(chain)-1)
        for x in v_descendants.keys():
            assert_equal(mempool[x], v_descendants[x])
        assert(chain[0] not in v_descendants.keys())
        # Check that descendant modified fees includes fee deltas from
        # prioritisetransaction
        self.nodes[0].prioritisetransaction(chain[-1], 0, 1000)
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
        # Adding one more transaction on to the chain should fail.
        try:
            self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
        except JSONRPCException as e:
            print("too-long-ancestor-chain successfully rejected")
        # Check that prioritising a tx before it's added to the mempool works
        # First clear the mempool by mining a block.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        # Prioritise a transaction that has been mined, then add it back to the
        # mempool by using invalidateblock.
        self.nodes[0].prioritisetransaction(chain[-1], 0, 2000)
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Keep node1's tip synced with node0
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        # Now check that the transaction is in the mempool, with the right modified fee
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            if (x == chain[-1]):
                assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
        # TODO: check that node1's mempool is as expected
        # TODO: test ancestor size limits
        # Now test descendant chain limits
        txid = utxo[1]['txid']
        value = utxo[1]['amount']
        vout = utxo[1]['vout']
        transaction_package = []
        # First create one parent tx with 10 children
        (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
        parent_transaction = txid
        for i in range(10):
            transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
        # Fan out breadth-first until the descendant limit is hit; the final
        # iteration must be the one that gets rejected.
        for i in range(MAX_DESCENDANTS):
            utxo = transaction_package.pop(0)
            try:
                (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
                for j in range(10):
                    transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
                if i == MAX_DESCENDANTS - 2:
                    mempool = self.nodes[0].getrawmempool(True)
                    assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
            except JSONRPCException as e:
                print(e.error['message'])
                assert_equal(i, MAX_DESCENDANTS - 1)
                print("tx that would create too large descendant package successfully rejected")
        # TODO: check that node1's mempool is as expected
        # TODO: test descendant size limits
        # Test reorg handling
        # First, the basics:
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
        self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
        # Now test the case where node1 has a transaction T in its mempool that
        # depends on transactions A and B which are in a mined block, and the
        # block containing A and B is disconnected, AND B is not accepted back
        # into node1's mempool because its ancestor count is too high.
        # Create 8 transactions, like so:
        # Tx0 -> Tx1 (vout0)
        #   \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
        #
        # Mine them in the next block, then generate a new tx8 that spends
        # Tx1 and Tx7, and add to node1's mempool, then disconnect the
        # last block.
        # Create tx0 with 2 outputs
        utxo = self.nodes[0].listunspent()
        txid = utxo[0]['txid']
        value = utxo[0]['amount']
        vout = utxo[0]['vout']
        send_value = satoshi_round((value - fee)/2)
        inputs = [ {'txid' : txid, 'vout' : vout} ]
        outputs = {}
        for i in range(2):
            outputs[self.nodes[0].getnewaddress()] = send_value
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        tx0_id = txid
        value = send_value
        # Create tx1
        (tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
        # Create tx2-7
        vout = 1
        txid = tx0_id
        for i in range(6):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
            vout = 0
            value = sent_value
        # Mine these in a block
        self.nodes[0].generate(1)
        self.sync_all()
        # Now generate tx8, with a big fee
        inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
        outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        sync_mempools(self.nodes)
        # Now try to disconnect the tip on each node...
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        sync_blocks(self.nodes)
# Entry point: run the mempool package-tracking test when invoked directly.
if __name__ == '__main__':
    MempoolPackagesTest().main()
| |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import xml.etree.ElementTree as ET
import pickle
import os
def voc_ap(rec, prec, use_07_metric=True):
    """ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:True).
    """
    if use_07_metric:
        # VOC-07: average the maximum precision at 11 evenly spaced
        # recall thresholds (0.0, 0.1, ..., 1.0).
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            reachable = rec >= threshold
            ap += (np.max(prec[reachable]) if np.sum(reachable) else 0) / 11.
    else:
        # Exact area under the precision/recall curve.
        # Append sentinel values at both ends first.
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        # Make the precision envelope monotonically non-increasing,
        # sweeping from right to left.
        for idx in range(mpre.size - 1, 0, -1):
            mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])
        # Indices where recall changes value; sum (delta recall) * prec.
        steps = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])
    return ap
def parse_rec(filename):
    """Parse a PASCAL VOC xml annotation file.

    Returns a list of dicts, one per annotated object, with keys
    'name', 'pose', 'truncated', 'difficult' and 'bbox'.
    """
    objects = []
    for obj in ET.parse(filename).findall('object'):
        bndbox = obj.find('bndbox')
        # Convert 1-based VOC pixel coordinates to 0-based.
        box = [int(bndbox.find(tag).text) - 1
               for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        objects.append({
            'name': obj.find('name').text,
            'pose': obj.find('pose').text,
            'truncated': int(obj.find('truncated').text),
            'difficult': int(obj.find('difficult').text),
            'bbox': box,
        })
    return objects
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=True):
    """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                                cachedir, [ovthresh], [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path template for the xml annotation files.
        NOTE(review): it is expanded with the old-style operator,
        annopath % imagename, although the historical docstring said
        .format -- callers must pass a %-style template; confirm.
    imagesetfile: Text file containing the list of images, one per line.
    classname: Category name to evaluate.
    cachedir: Directory used to cache parsed annotations (pickle).
    ovthresh: IoU overlap threshold for a detection to count as a match
        (default 0.5).
    use_07_metric: Whether to use VOC07's 11 point AP computation
        (default True).

    Returns (rec, prec, ap); all three are -1. when the detection file
    contains no detections.
    """
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # Parse every annotation file once and cache the result.
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath % (imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # save
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # NOTE(review): loading a pickle trusts the cache file; fine for a
        # locally written cache, but never point cachedir at untrusted data.
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)
    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented, behavior-identical replacement.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # Difficult objects do not count toward the positive total.
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}
    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    # Fix: the original tested `any(lines) == 1`; comparing the bool to 1
    # was redundant and obscured the intent (any non-empty line present).
    if any(lines):
        splitlines = [x.strip().split(' ') for x in lines]
        image_ids = [x[0] for x in splitlines]
        confidence = np.array([float(x[1]) for x in splitlines])
        BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
        # sort detections by decreasing confidence
        # (the original also computed an unused sorted_scores -- removed)
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]
        # go down dets and mark TPs and FPs
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in range(nd):
            R = class_recs[image_ids[d]]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # compute overlaps: intersection ...
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin, 0.)
                ih = np.maximum(iymax - iymin, 0.)
                inters = iw * ih
                # ... over union
                uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
                       (BBGT[:, 2] - BBGT[:, 0]) *
                       (BBGT[:, 3] - BBGT[:, 1]) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            if ovmax > ovthresh:
                if not R['difficult'][jmax]:
                    if not R['det'][jmax]:
                        tp[d] = 1.   # first match of this ground truth
                        R['det'][jmax] = 1
                    else:
                        fp[d] = 1.   # duplicate detection of a matched gt
            else:
                fp[d] = 1.           # insufficient overlap
        # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = voc_ap(rec, prec, use_07_metric)
    else:
        rec = -1.
        prec = -1.
        ap = -1.
    return rec, prec, ap
| |
#!/usr/local/bin/python
"""
Copyright (c) 2010-2013, GhostBSD. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistribution's of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistribution's in binary form must reproduce the above
copyright notice,this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import re
from subprocess import Popen, PIPE, STDOUT, call
import pickle
from time import sleep
# Scratch directory shared between the installer front-end and back-end.
tmp = "/tmp/.gbi/"
if not os.path.exists(tmp):
    os.makedirs(tmp)
installer = "/usr/local/lib/gbi/"
sysinstall = "/usr/local/sbin/pc-sysinstall"
# Directory holding pickled partition layouts, one file per disk and per slice.
partitiondb = "%spartitiondb/" % tmp
# Backend query shell scripts, invoked through sh.
query = "sh /usr/local/lib/gbi/backend-query/"
query_disk = '%sdisk-list.sh' % query
detect_sheme = '%sdetect-sheme.sh' % query  # NOTE: 'sheme' spelling matches the script's filename
diskdb = "%sdisk" % partitiondb  # pickled list of all disks
query_partition = '%sdisk-part.sh' % query
query_label = '%sdisk-label.sh' % query
disk_info = '%sdisk-info.sh' % query
nl = "\n"
memory = 'sysctl hw.physmem'  # used to size swap from physical RAM
# Hand-off files read later by the pc-sysinstall driver.
disk_file = '%sdisk' % tmp
dslice = '%sslice' % tmp
Part_label = '%spartlabel' % tmp
part_schem = '%sscheme' % tmp
boot_file = '%sboot' % tmp
def disk_query():
    """Return the pickled disk list from the on-disk disk database.

    Each entry is a list like ``[device, size, '', scheme]``.

    Fix: the original leaked the open file handle; use a context manager.
    """
    with open(diskdb, 'rb') as df:
        return pickle.load(df)
def zfs_disk_query():
    """Return the raw output lines of ``pc-sysinstall disk-list``."""
    proc = Popen(sysinstall + " disk-list", shell=True, stdin=PIPE,
                 stdout=PIPE, stderr=STDOUT, close_fds=True)
    return proc.stdout.readlines()
def zfs_disk_size_query(disk):
    """Return the size field of ``pc-sysinstall disk-info <disk>``.

    The value is the text after '=' on the fourth output line.
    """
    proc = Popen(sysinstall + " disk-info " + disk, shell=True,
                 stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    fourth_line = proc.stdout.readlines()[3]
    return fourth_line.partition('=')[2]
def how_partition(path):
    """Return the number of partition entries recorded for the disk selected
    by ``path[0]``, or 0 when no layout file exists yet."""
    disk = disk_query()[path[0]][0]
    if not os.path.exists(partitiondb + disk):
        return 0
    return len(partition_query(disk))
def partition_query(disk):
    """Return the pickled partition list stored for *disk*.

    Fix: the original leaked the open file handle; use a context manager.
    """
    with open(partitiondb + disk, 'rb') as plist:
        return pickle.load(plist)
def label_query(pslice):
    """Return the pickled label (bsdlabel) list stored for *pslice*.

    Fix: the original leaked the open file handle; use a context manager.
    """
    with open(partitiondb + pslice, 'rb') as llist:
        return pickle.load(llist)
def scheme_query(path):
    """Return the partition scheme (last field) of the disk entry at
    ``path[0]`` in the disk database."""
    entry = disk_query()[path[0]]
    return entry[-1]
def find_scheme(disk):
    """Ask the backend detect script for *disk*'s partition scheme string."""
    proc = Popen("%s %s" % (detect_sheme, disk), shell=True, stdin=PIPE,
                 stdout=PIPE, stderr=STDOUT, close_fds=True)
    first_line = proc.stdout.readlines()[0]
    return first_line.rstrip()
def int_size(size):
    """Coerce a size value (string or number) to int."""
    return int(size)
def rpartslice(part):
    """Return the disk portion of a partition/slice name.

    'ada0s1' -> 'ada0', 'ada0p2' -> 'ada0' (text before the first 'p' or
    's' marker; 'p' is checked first, matching the original).

    Fix: the original fell through with an undefined ``drive`` (NameError)
    when neither marker was present; raise a clear ValueError instead.
    """
    if 'p' in part:
        return part.partition('p')[0]
    if 's' in part:
        return part.partition('s')[0]
    raise ValueError("no 'p' or 's' partition marker in %r" % (part,))
def sliceNum(part):
    """Return the numeric index after the 'p' or 's' marker of a
    partition/slice name ('ada0s1' -> 1, 'ada0p12' -> 12).

    Fix: the original fell through with an undefined ``num`` (NameError)
    when neither marker was present; raise a clear ValueError instead.
    """
    if 'p' in part:
        return int(part.partition('p')[2])
    if 's' in part:
        return int(part.partition('s')[2])
    raise ValueError("no 'p' or 's' partition marker in %r" % (part,))
def slicePartition(part):
    """Return which marker *part* uses: 'p' (GPT partition) or 's' (MBR
    slice); None when neither character appears ('p' wins if both do)."""
    for marker in ('p', 's'):
        if marker in part:
            return marker
    return None
class diskSchemeChanger():
    """Record a partition-scheme change for a disk.

    Updates the pickled disk database, queues the disk in the pending
    ``destroy`` list (consumed later by destroyParttion), and seeds an
    all-freespace layout file when the disk has none yet.

    Fix: the original leaked every file handle it opened for writing and
    reading; all file access now uses context managers.
    """

    def __init__(self, schm, path, disk, size):
        dlist = disk_query()
        dselected = dlist[path[0]]
        # Default to GPT when no scheme was supplied.
        dselected[-1] = 'GPT' if schm is None else schm
        dlist[path[0]] = dselected
        # Real device name comes from the DB entry (shadows the argument,
        # as in the original).
        disk = dselected[0]
        with open(diskdb, 'wb') as df:
            pickle.dump(dlist, df)
        # Append [disk, scheme] to the pending-destroy list.
        mdsl = []
        if os.path.exists(tmp + 'destroy'):
            with open(tmp + 'destroy', 'rb') as df:
                mdsl = pickle.load(df)
        mdsl.append([disk, schm])
        with open(tmp + 'destroy', 'wb') as cf:
            pickle.dump(mdsl, cf)
        # First time we see this disk: record one big freespace entry.
        if not os.path.exists(partitiondb + disk):
            with open(partitiondb + disk, 'wb') as psf:
                pickle.dump([['freespace', size, '', '']], psf)
class partition_repos():
    """Scan every disk with the backend query scripts and (re)build the
    pickled partition database under ``partitiondb``.

    One file is written per disk (entries ``[name, size, mountpoint, type]``)
    and, for MBR disks, one additional file per slice with its labels.
    Sizes are the numeric prefix of the script's ``<n>M`` output —
    presumably megabytes (TODO confirm against the backend scripts).
    """

    def disk_list(self):
        # Each line of disk-list.sh becomes a list of its whitespace fields.
        disk_output = Popen(query_disk, shell=True, stdin=PIPE, stdout=PIPE,
                            stderr=STDOUT, close_fds=True)
        dlist = []
        for disk in disk_output.stdout:
            dlist.append(disk.split())
        return dlist

    def disk_size(self, disk):
        # First line of disk-info.sh output is the disk size.
        cmd = "%s %s" % (disk_info, disk)
        ds = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,
                   stderr=STDOUT, close_fds=True)
        diskSize = ds.stdout.readlines()[0].rstrip()
        return diskSize

    def find_Scheme(self, disk):
        # Scheme string reported by detect-sheme.sh (e.g. "GPT" or "MBR").
        cmd = "%s %s" % (detect_sheme, disk)
        shm_out = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,
                        stderr=STDOUT, close_fds=True)
        scheme = shm_out.stdout.readlines()[0].rstrip()
        return scheme

    def mbr_partition_slice_list(self, disk):
        # Pickle [slice, size, '', type] entries for an MBR disk, and
        # recurse into each slice's labels via mbr_partition_list.
        partition_outpput = Popen('%s %s' % (query_partition, disk),
                                  shell=True, stdin=PIPE, stdout=PIPE,
                                  stderr=STDOUT, close_fds=True)
        plist = []
        mplist = []
        dpsf = open(partitiondb + disk, 'wb')
        for line in partition_outpput.stdout:
            info = line.split()
            plist.extend((info[0], info[1].partition('M')[0], '', info[2]))
            mplist.append(plist)
            plist = []
            self.mbr_partition_list(info[0])
        pickle.dump(mplist, dpsf)
        dpsf.close()

    def mbr_partition_list(self, pslice):
        # Pickle the labels of one slice; label names are the slice name
        # plus consecutive letters starting at 'a'.
        slice_outpput = Popen('%s %s' % (query_label, pslice), shell=True,
                              stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
        alph = ord('a')
        if pslice == 'freespace':
            # Free space has no labels; nothing to record.
            pass
        else:
            llist = []
            mllist = []
            plf = open(partitiondb + pslice, 'wb')
            for line in slice_outpput.stdout:
                info = line.split()
                letter = chr(alph)
                alph = alph + 1
                if info[0] == 'freespace':
                    llist.extend(([info[0], info[1].partition('M')[0], '', '']))
                else:
                    llist.extend((
                        [pslice + letter, info[0].partition('M')[0],
                         '', info[1]]))
                mllist.append(llist)
                llist = []
            pickle.dump(mllist, plf)
            plf.close()

    def gpt_partition_list(self, disk):
        # Pickle [partition, size, '', type] entries for a GPT disk.
        partition_outpput = Popen('%s %s' % (query_partition, disk),
                                  shell=True, stdin=PIPE, stdout=PIPE,
                                  stderr=STDOUT, close_fds=True)
        plist = []
        mplist = []
        psf = open(partitiondb + disk, 'wb')
        for line in partition_outpput.stdout:
            info = line.split()
            plist.extend((info[0], info[1].partition('M')[0], '', info[2]))
            mplist.append(plist)
            plist = []
        pickle.dump(mplist, psf)
        psf.close()

    def __init__(self):
        # Build the top-level disk DB: [device, size, '', scheme-or-None].
        if not os.path.exists(partitiondb):
            os.makedirs(partitiondb)
        df = open(diskdb, 'wb')
        dlist = []
        mdlist = []
        for disk in self.disk_list():
            if self.find_Scheme(disk[0]) == "GPT":
                dlist.extend(([disk[0], self.disk_size(disk[0]), '', 'GPT']))
                self.gpt_partition_list(disk[0])
                mdlist.append(dlist)
            elif self.find_Scheme(disk[0]) == "MBR":
                dlist.extend(([disk[0], self.disk_size(disk[0]), '', 'MBR']))
                self.mbr_partition_slice_list(disk[0])
                mdlist.append(dlist)
            else:
                # Unpartitioned disk: scheme recorded as None.
                dlist.extend(([disk[0], self.disk_size(disk[0]), '', None]))
                mdlist.append(dlist)
            dlist = []
        pickle.dump(mdlist, df)
        df.close()
class Delete_partition():
    """Delete a label or a slice/partition from the pickled layout DB.

    The deleted entry is replaced by a 'freespace' entry whose size absorbs
    any adjacent freespace neighbours.  Deleted slices are also queued in
    the ``delete`` file, consumed later by rDeleteParttion.
    """

    def find_if_lable(seff, part):
        # NOTE(review): 'seff' is a typo for 'self' but still works as the
        # instance parameter.  Returns True when the name ends in a letter
        # (a bsdlabel like 'ada0s1a'); returns None otherwise.
        last = part[-1]
        if re.search('[a-z]', last):
            return True

    def delete_label(self, part, spart, path):
        # Replace the label at index path[2] of slice *spart* with
        # freespace, merging neighbouring freespace entries.
        llist = open(partitiondb + spart, 'rb')
        ll = pickle.load(llist)
        last_num = len(ll) - 1
        lnum = path[2]
        if last_num == lnum:
            # Deleting the last label: merge with preceding freespace.
            free = int_size(ll[last_num][1])
            if lnum != 0 and ll[lnum - 1][0] == 'freespace':
                free = free + int_size(ll[lnum - 1][1])
                ll[lnum] = ['freespace', free, '', '']
                ll.remove(ll[lnum - 1])
            else:
                ll[lnum] = ['freespace', free, '', '']
        elif lnum == 0:
            # Deleting the first label: merge with following freespace.
            free = int_size(ll[lnum][1])
            if ll[lnum + 1][0] == 'freespace':
                free = free + int_size(ll[lnum + 1][1])
                ll.remove(ll[lnum + 1])
            ll[lnum] = ['freespace', free, '', '']
        else:
            # Middle label: merge with freespace on either side.
            free = int_size(ll[lnum][1])
            if ll[lnum + 1][0] == 'freespace':
                free = free + int_size(ll[lnum + 1][1])
                ll.remove(ll[lnum + 1])
            if lnum != 0 and ll[lnum - 1][0] == 'freespace':
                free = free + int_size(ll[lnum - 1][1])
                ll[lnum] = ['freespace', free, '', '']
                ll.remove(ll[lnum - 1])
            else:
                ll[lnum] = ['freespace', free, '', '']
        # NOTE(review): pickle files are rewritten in text mode ('w') here
        # but read back with 'rb'; fine on Python 2 (this file's target),
        # would break on Python 3 — confirm intended interpreter.
        savepl = open(partitiondb + spart, 'w')
        pickle.dump(ll, savepl)
        savepl.close()
        # Regenerate the partlabel hand-off file from the updated labels.
        llist = open(partitiondb + spart, 'rb')
        lablelist = pickle.load(llist)
        pfile = open(Part_label, 'w')
        for partlist in lablelist:
            if partlist[2] != '':
                pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
        pfile.close()

    def __init__(self, part, path):
        # Dispatch: freespace is a no-op, a trailing letter means a label,
        # anything else is a whole slice/partition.
        if part == "freespace":
            pass
        elif self.find_if_lable(part) is True:
            spart = part[:-1]
            self.delete_label(part, spart, path)
        else:
            drive = rpartslice(part)
            self.delete_slice(drive, part, path)

    def delete_slice(self, drive, part, path):
        # Replace the slice at index path[1] of *drive* with freespace,
        # merging neighbouring freespace entries, and queue it for the
        # real gpart delete.
        slist = open(partitiondb + drive, 'rb')
        sl = pickle.load(slist)
        last_num = len(sl) - 1
        snum = path[1]
        if os.path.exists(dslice):
            # NOTE(review): ptnum is computed but never used afterwards.
            sfile = open(dslice, 'r')
            slf = sfile.readlines()[0].rstrip()
            if slf == 'all':
                ptnum = snum - 1
            else:
                slnum = int(re.sub("[^0-9]", "", slf))
                ptnum = snum - slnum
        if last_num == snum:
            free = int_size(sl[last_num][1])
            if snum != 0 and sl[snum - 1][0] == 'freespace':
                free = free + int_size(sl[snum - 1][1])
                sl[snum] = ['freespace', free, '', '']
                sl.remove(sl[snum - 1])
            else:
                sl[snum] = ['freespace', free, '', '']
        elif snum == 0:
            free = int_size(sl[snum][1])
            if sl[snum + 1][0] == 'freespace':
                free = free + int_size(sl[snum + 1][1])
                sl.remove(sl[snum + 1])
                sl[snum] = ['freespace', free, '', '']
            else:
                sl[snum] = ['freespace', free, '', '']
        else:
            free = int_size(sl[snum][1])
            if sl[snum + 1][0] == 'freespace' and sl[snum - 1][0] == 'freespace':
                free = free + int_size(sl[snum + 1][1]) + int_size(sl[snum - 1][1])
                sl[snum] = ['freespace', free, '', '']
                sl.remove(sl[snum + 1])
                sl.remove(sl[snum - 1])
            elif sl[snum + 1][0] == 'freespace':
                free = free + int_size(sl[snum + 1][1])
                sl[snum] = ['freespace', free, '', '']
                sl.remove(sl[snum + 1])
            elif snum != 0 and sl[snum - 1][0] == 'freespace':
                free = free + int_size(sl[snum - 1][1])
                sl[snum] = ['freespace', free, '', '']
                sl.remove(sl[snum - 1])
            else:
                sl[snum] = ['freespace', free, '', '']
        # Making delete file
        dl = []
        mdl = []
        data = True
        # if delete exist chek if slice is in delete.
        if os.path.exists(tmp + 'delete'):
            df = open(tmp + 'delete', 'rb')
            mdl = pickle.load(df)
            for line in mdl:
                if part in line:
                    data = False
                    break
        if data is True:
            dl.extend(([part, free]))
            mdl.append(dl)
            cf = open(tmp + 'delete', 'wb')
            pickle.dump(mdl, cf)
            cf.close()
        # Drop the slice's own label DB and persist the updated slice list.
        if os.path.exists(partitiondb + part):
            os.remove(partitiondb + part)
        saveps = open(partitiondb + drive, 'w')
        pickle.dump(sl, saveps)
        saveps.close()
        # GPT partitions ('p' names) also refresh the partlabel hand-off.
        if "p" in part:
            pfile = open(Part_label, 'w')
            for partlist in partition_query(drive):
                if partlist[2] != '':
                    pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
            pfile.close()
class autoDiskPartition():
    """Automatic whole-disk layout: boot (GPT only) + root + swap.

    Swap is sized from ``sysctl hw.physmem`` (bytes -> MB); the rest of the
    disk becomes the UFS root.  Writes the pickled layout files plus the
    disk/scheme/slice/partlabel hand-off files for pc-sysinstall.
    """

    def delete_mbr_partition(self, disk):
        # Remove each slice's label DB file before re-laying out the disk.
        plist = partition_query(disk)
        for part in plist:
            if part[0] == 'freespace':
                pass
            else:
                os.remove(partitiondb + part[0])

    def create_mbr_partiton(self, disk, size):
        file_disk = open(disk_file, 'w')
        file_disk.writelines('%s\n' % disk)
        file_disk.close()
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=MBR')
        sfile.close()
        # One full-disk slice (<disk>s1) holding all partitions.
        plist = []
        mplist = []
        dpsf = open(partitiondb + disk, 'wb')
        plist.extend((disk + "s1", size, '', 'freebsd'))
        mplist.append(plist)
        pickle.dump(mplist, dpsf)
        dpsf.close()
        number = int(size.partition('M')[0])
        slice_file = open(dslice, 'w')
        slice_file.writelines('all\n')
        slice_file.writelines('%s\n' % number)
        slice_file.close()
        # Swap = physical RAM in MB; root gets the remainder.
        ram = Popen(memory, shell=True, stdin=PIPE, stdout=PIPE,
                    stderr=STDOUT, close_fds=True)
        mem = ram.stdout.read()
        swap = int(mem.partition(':')[2].strip()) / (1024 * 1024)
        rootNum = number - swap
        llist = []
        mllist = []
        plf = open(partitiondb + disk + 's1', 'wb')
        llist.extend(([disk + 's1a', rootNum, '/', 'UFS+SUJ']))
        mllist.append(llist)
        llist = []
        llist.extend(([disk + 's1b', swap, 'none', 'SWAP']))
        mllist.append(llist)
        pickle.dump(mllist, plf)
        plf.close()
        pfile = open(Part_label, 'w')
        pfile.writelines('UFS+SUJ %s /\n' % rootNum)
        # Size 0 for swap means "use the rest" — presumably; confirm
        # against the pc-sysinstall partlabel format.
        pfile.writelines('SWAP 0 none\n')
        pfile.close()

    def __init__(self, disk, size, schm):
        # Dispatch on the requested scheme; MBR re-layout clears any
        # previously recorded slice DBs first.
        if schm == 'GPT':
            self.create_gpt_partiton(disk, size)
        elif schm == 'MBR':
            if os.path.exists(partitiondb + disk):
                self.delete_mbr_partition(disk)
            self.create_mbr_partiton(disk, size)

    def create_gpt_partiton(self, disk, size):
        file_disk = open(disk_file, 'w')
        file_disk.writelines('%s\n' % disk)
        file_disk.close()
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=GPT')
        sfile.close()
        number = int(size.partition('M')[0])
        slice_file = open(dslice, 'w')
        slice_file.writelines('all\n')
        slice_file.writelines('%s\n' % number)
        slice_file.close()
        ram = Popen(memory, shell=True, stdin=PIPE, stdout=PIPE,
                    stderr=STDOUT, close_fds=True)
        mem = ram.stdout.read()
        swap = int(mem.partition(':')[2].strip()) / (1024 * 1024)
        # Boot partition: 100 (MB, presumably) for an EFI system partition,
        # 1 for a BIOS/GRUB boot partition.
        if bios_or_uefi() == "UEFI":
            bnum = 100
        else:
            bnum = 1
        rootNum = number - swap
        rnum = rootNum - bnum
        plist = []
        mplist = []
        plf = open(partitiondb + disk, 'wb')
        # Boot loader choice (e.g. "GRUB") was written earlier to boot_file.
        read = open(boot_file, 'r')
        line = read.readlines()
        boot = line[0].strip()
        if bios_or_uefi() == "UEFI":
            plist.extend(([disk + 'p1', bnum, 'none', 'UEFI']))
        elif boot == "GRUB":
            plist.extend(([disk + 'p1', bnum, 'none', 'BIOS']))
        else:
            plist.extend(([disk + 'p1', bnum, 'none', 'BOOT']))
        mplist.append(plist)
        plist = []
        plist.extend(([disk + 'p2', rnum, '/', 'UFS+SUJ']))
        mplist.append(plist)
        plist = []
        plist.extend(([disk + 'p3', swap, 'none', 'SWAP']))
        mplist.append(plist)
        pickle.dump(mplist, plf)
        plf.close()
        pfile = open(Part_label, 'w')
        if bios_or_uefi() == "UEFI":
            pfile.writelines('UEFI %s none\n' % bnum)
        elif boot == "GRUB":
            pfile.writelines('BIOS %s none\n' % bnum)
        else:
            pfile.writelines('BOOT %s none\n' % bnum)
        pfile.writelines('UFS+SUJ %s /\n' % rnum)
        pfile.writelines('SWAP 0 none\n')
        pfile.close()
class autoFreeSpace():
    """Automatic layout inside an existing freespace entry.

    Like autoDiskPartition but targets the freespace entry at ``path[1]``
    of the selected disk instead of the whole disk, and records the new
    partitions in the pending ``create`` file.
    """

    def create_mbr_partiton(self, disk, size, sl, path):
        file_disk = open(disk_file, 'w')
        file_disk.writelines('%s\n' % disk)
        file_disk.close()
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=MBR')
        sfile.close()
        # Replace the freespace entry at *path* with the new slice.
        plist = []
        mplist = partition_query(disk)
        dpsf = open(partitiondb + disk, 'wb')
        plist.extend((disk + "s%s" % sl, size, '', 'freebsd'))
        mplist[path] = plist
        pickle.dump(mplist, dpsf)
        dpsf.close()
        number = int(size)
        slice_file = open(dslice, 'w')
        slice_file.writelines('s%s\n' % sl)
        slice_file.writelines('%s\n' % number)
        slice_file.close()
        # Swap = physical RAM in MB; root gets the remainder.
        ram = Popen(memory, shell=True, stdin=PIPE, stdout=PIPE,
                    stderr=STDOUT, close_fds=True)
        mem = ram.stdout.read()
        swap = int(mem.partition(':')[2].strip()) / (1024 * 1024)
        rootNum = number - swap
        llist = []
        mllist = []
        plf = open(partitiondb + disk + 's%s' % sl, 'wb')
        llist.extend(([disk + 's%sa' % sl, rootNum, '/', 'UFS+SUJ']))
        mllist.append(llist)
        llist = []
        llist.extend(([disk + 's%sb' % sl, swap, 'none', 'SWAP']))
        mllist.append(llist)
        pickle.dump(mllist, plf)
        plf.close()
        pfile = open(Part_label, 'w')
        pfile.writelines('UFS+SUJ %s /\n' % rootNum)
        pfile.writelines('SWAP %s none\n' % int(swap - 1))
        pfile.close()
        # Append the new slice to the pending-create list.
        pl = []
        mpl = []
        if os.path.exists(tmp + 'create'):
            pf = open(tmp + 'create', 'rb')
            mpl = pickle.load(pf)
        pl.extend(([disk + "s%s" % sl, size]))
        mpl.append(pl)
        cf = open(tmp + 'create', 'wb')
        pickle.dump(mpl, cf)
        cf.close()

    def __init__(self, path, size):
        # path[1] indexes the freespace entry; partition numbers are 1-based.
        disk = disk_query()[path[0]][0]
        schm = disk_query()[path[0]][3]
        sl = path[1] + 1
        lv = path[1]
        if schm == "GPT":
            self.create_gpt_partiton(disk, size, sl, lv)
        elif schm == "MBR":
            self.create_mbr_partiton(disk, size, sl, lv)

    def create_gpt_partiton(self, disk, size, sl, path):
        file_disk = open(disk_file, 'w')
        file_disk.writelines('%s\n' % disk)
        file_disk.close()
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=GPT')
        sfile.close()
        number = int(size.partition('M')[0])
        slice_file = open(dslice, 'w')
        slice_file.writelines('p%s\n' % sl)
        slice_file.writelines('%s\n' % number)
        slice_file.close()
        ram = Popen(memory, shell=True, stdin=PIPE, stdout=PIPE,
                    stderr=STDOUT, close_fds=True)
        mem = ram.stdout.read()
        swap = int(mem.partition(':')[2].strip()) / (1024 * 1024)
        rootNum = number - swap
        # Boot partition size: 100 for EFI, 1 otherwise (MB, presumably).
        if bios_or_uefi() == "UEFI":
            bs = 100
        else:
            bs = 1
        rootNum = rootNum - bs
        # Boot goes at *path*; root and swap are appended after it.
        plist = []
        mplist = partition_query(disk)
        plf = open(partitiondb + disk, 'wb')
        read = open(boot_file, 'r')
        line = read.readlines()
        boot = line[0].strip()
        if bios_or_uefi() == "UEFI":
            plist.extend(([disk + 'p%s' % sl, bs, 'none', 'UEFI']))
        elif boot == "GRUB":
            plist.extend(([disk + 'p%s' % sl, bs, 'none', 'BIOS']))
        else:
            plist.extend(([disk + 'p%s' % sl, bs, 'none', 'BOOT']))
        mplist[path] = plist
        plist = []
        plist.extend((
            [disk + 'p%s' % int(sl + 1), rootNum, '/', 'UFS+SUJ']))
        mplist.append(plist)
        plist = []
        plist.extend((
            [disk + 'p%s' % int(sl + 2), swap, 'none', 'SWAP']))
        mplist.append(plist)
        pickle.dump(mplist, plf)
        plf.close()
        pfile = open(Part_label, 'w')
        if bios_or_uefi() == "UEFI":
            pfile.writelines('UEFI %s none\n' % bs)
        elif boot == "GRUB":
            pfile.writelines('BIOS %s none\n' % bs)
        else:
            pfile.writelines('BOOT %s none\n' % bs)
        pfile.writelines('UFS+SUJ %s /\n' % rootNum)
        pfile.writelines('SWAP %s none\n' % int(swap - 1))
        pfile.close()
        # NOTE(review): unlike the MBR path, this only records the create
        # entry when no create file exists yet — confirm intended.
        pl = []
        mpl = []
        if not os.path.exists(tmp + 'create'):
            pl.extend(([disk + "p%s" % sl, size]))
            mpl.append(pl)
            cf = open(tmp + 'create', 'wb')
            pickle.dump(mpl, cf)
            cf.close()
class createLabel():
    """Create a bsdlabel inside an MBR slice at position ``path[2]``.

    ``lnumb`` is the leftover freespace after the new label, ``cnumb`` the
    label size, ``lb`` the mountpoint, ``fs`` the filesystem type.  Label
    letters are derived from the index ('a' + path[2]).

    NOTE(review): this class is byte-for-byte identical to modifyLabel —
    presumably kept separate for the two GUI actions; consider sharing.
    """

    def __init__(self, path, lnumb, cnumb, lb, fs, data):
        disk = disk_query()[path[0]][0]
        if not os.path.exists(disk_file):
            file_disk = open(disk_file, 'w')
            file_disk.writelines('%s\n' % disk)
            file_disk.close()
        sl = path[1] + 1
        lv = path[2]
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=MBR')
        sfile.close()
        slice_file = open(dslice, 'w')
        slice_file.writelines('s%s\n' % sl)
        slice_file.close()
        # Label letter: 'a' for index 0, 'b' for 1, ...
        alph = ord('a')
        alph += lv
        letter = chr(alph)
        llist = []
        mllist = label_query(disk + 's%s' % sl)
        plf = open(partitiondb + disk + 's%s' % sl, 'wb')
        # When the label consumes all freespace, shave 1 off its size.
        if lnumb == 0:
            cnumb -= 1
        llist.extend(([disk + 's%s' % sl + letter, cnumb, lb, fs]))
        mllist[lv] = llist
        llist = []
        if lnumb > 0:
            llist.extend((['freespace', lnumb, '', '']))
            mllist.append(llist)
        pickle.dump(mllist, plf)
        plf.close()
        # Regenerate the partlabel hand-off from the updated label list.
        llist = open(partitiondb + disk + 's%s' % sl, 'rb')
        labellist = pickle.load(llist)
        pfile = open(Part_label, 'w')
        for partlist in labellist:
            if partlist[2] != '':
                pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
        pfile.close()
class modifyLabel():
    """Modify the bsdlabel at position ``path[2]`` of an MBR slice.

    NOTE(review): byte-for-byte identical to createLabel; see that class
    for parameter meanings.
    """

    def __init__(self, path, lnumb, cnumb, lb, fs, data):
        disk = disk_query()[path[0]][0]
        if not os.path.exists(disk_file):
            file_disk = open(disk_file, 'w')
            file_disk.writelines('%s\n' % disk)
            file_disk.close()
        sl = path[1] + 1
        lv = path[2]
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=MBR')
        sfile.close()
        slice_file = open(dslice, 'w')
        slice_file.writelines('s%s\n' % sl)
        slice_file.close()
        alph = ord('a')
        alph += lv
        letter = chr(alph)
        llist = []
        mllist = label_query(disk + 's%s' % sl)
        plf = open(partitiondb + disk + 's%s' % sl, 'wb')
        if lnumb == 0:
            cnumb -= 1
        llist.extend(([disk + 's%s' % sl + letter, cnumb, lb, fs]))
        mllist[lv] = llist
        llist = []
        if lnumb > 0:
            llist.extend((['freespace', lnumb, '', '']))
            mllist.append(llist)
        pickle.dump(mllist, plf)
        plf.close()
        llist = open(partitiondb + disk + 's%s' % sl, 'rb')
        labellist = pickle.load(llist)
        pfile = open(Part_label, 'w')
        for partlist in labellist:
            if partlist[2] != '':
                pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
        pfile.close()
class createSlice():
    """Create an MBR slice of *size* at ``path[1]``, leaving *rs* freespace.

    The new slice starts as a single freespace label list, and is appended
    to the pending ``create`` file.
    """

    def __init__(self, size, rs, path):
        disk = disk_query()[path[0]][0]
        file_disk = open(disk_file, 'w')
        file_disk.writelines('%s\n' % disk)
        file_disk.close()
        if len(path) == 1:
            sl = 1
        else:
            sl = path[1] + 1
        sfile = open(part_schem, 'w')
        sfile.writelines('partscheme=MBR')
        sfile.close()
        slice_file = open(dslice, 'w')
        slice_file.writelines('s%s\n' % sl)
        slice_file.close()
        plist = partition_query(disk)
        pslice = '%ss%s' % (disk, path[1] + 1)
        # Slice fills the whole freespace: shave 1 off its size.
        if rs == 0:
            size -= 1
        plist[path[1]] = [pslice, size, '', 'freebsd']
        if rs > 0:
            plist.append(['freespace', rs, '', ''])
        psf = open(partitiondb + disk, 'wb')
        pickle.dump(plist, psf)
        psf.close()
        # New slice starts out as one big freespace label entry.
        llist = []
        mllist = []
        llist.extend((['freespace', size, '', '']))
        mllist.append(llist)
        plf = open(partitiondb + pslice, 'wb')
        pickle.dump(mllist, plf)
        plf.close()
        # NOTE(review): this writes 's<full-slice-name>' (e.g. 'sada0s1'),
        # unlike the 's<number>' written above — confirm which the backend
        # expects.
        slice_file = open(dslice, 'w')
        slice_file.writelines('s%s\n' % pslice)
        slice_file.close()
        # Append [slice, size] to the pending-create list.
        pl = []
        mpl = []
        if os.path.exists(tmp + 'create'):
            pf = open(tmp + 'create', 'rb')
            mpl = pickle.load(pf)
        pl.extend(([pslice, size]))
        mpl.append(pl)
        cf = open(tmp + 'create', 'wb')
        pickle.dump(mpl, cf)
        cf.close()
class createPartition():
    """Create a GPT partition at ``path[1]`` of the selected disk.

    ``cnumb`` is the partition size, ``lnumb`` the leftover freespace,
    ``lb`` the mountpoint, ``fs`` the filesystem type; ``data`` controls
    whether the pending ``create`` file is written.  ``inumb`` is unused.

    NOTE(review): differs from modifyPartition only in the
    ``lnumb == 0 and cnumb > 1`` guard — consider sharing.
    """

    def __init__(self, path, lnumb, inumb, cnumb, lb, fs, data):
        disk = disk_query()[path[0]][0]
        if not os.path.exists(disk_file):
            file_disk = open(disk_file, 'w')
            file_disk.writelines('%s\n' % disk)
            file_disk.close()
        if len(path) == 1:
            pl = 1
            lv = 0
        else:
            pl = path[1] + 1
            lv = path[1]
        if not os.path.exists(part_schem):
            sfile = open(part_schem, 'w')
            sfile.writelines('partscheme=GPT')
            sfile.close()
        if not os.path.exists(dslice):
            slice_file = open(dslice, 'w')
            slice_file.writelines('p%s\n' % pl)
            # slice_file.writelines('%s\n' % number)
            slice_file.close()
        plist = []
        pslice = '%sp%s' % (disk, pl)
        mplist = partition_query(disk)
        # Partition fills the freespace: shave 1 off its size.
        if lnumb == 0 and cnumb > 1:
            cnumb -= 1
        pf = open(partitiondb + disk, 'wb')
        plist.extend(([disk + 'p%s' % pl, cnumb, lb, fs]))
        mplist[lv] = plist
        plist = []
        if lnumb > 0:
            plist.extend((['freespace', lnumb, '', '']))
            mplist.append(plist)
        pickle.dump(mplist, pf)
        pf.close()
        # Regenerate the partlabel hand-off from the updated layout.
        pfile = open(Part_label, 'w')
        for partlist in partition_query(disk):
            if partlist[2] != '':
                pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
        pfile.close()
        # Only the first created partition seeds the create file.
        if data is True:
            plst = []
            mplst = []
            if not os.path.exists(tmp + 'create'):
                plst.extend(([pslice, cnumb]))
                mplst.append(plst)
                cf = open(tmp + 'create', 'wb')
                pickle.dump(mplst, cf)
                cf.close()
class modifyPartition():
    """Modify the GPT partition at ``path[1]`` of the selected disk.

    NOTE(review): near-identical to createPartition; the only code
    difference is the plain ``lnumb == 0`` guard here.
    """

    def __init__(self, path, lnumb, inumb, cnumb, lb, fs, data):
        disk = disk_query()[path[0]][0]
        if not os.path.exists(disk_file):
            file_disk = open(disk_file, 'w')
            file_disk.writelines('%s\n' % disk)
            file_disk.close()
        if len(path) == 1:
            pl = 1
            lv = 0
        else:
            pl = path[1] + 1
            lv = path[1]
        if not os.path.exists(part_schem):
            sfile = open(part_schem, 'w')
            sfile.writelines('partscheme=GPT')
            sfile.close()
        if not os.path.exists(dslice):
            slice_file = open(dslice, 'w')
            slice_file.writelines('p%s\n' % pl)
            # slice_file.writelines('%s\n' % number)
            slice_file.close()
        plist = []
        pslice = '%sp%s' % (disk, pl)
        mplist = partition_query(disk)
        if lnumb == 0:
            cnumb -= 1
        pf = open(partitiondb + disk, 'wb')
        plist.extend(([disk + 'p%s' % pl, cnumb, lb, fs]))
        mplist[lv] = plist
        plist = []
        if lnumb > 0:
            plist.extend((['freespace', lnumb, '', '']))
            mplist.append(plist)
        pickle.dump(mplist, pf)
        pf.close()
        pfile = open(Part_label, 'w')
        for partlist in partition_query(disk):
            if partlist[2] != '':
                pfile.writelines('%s %s %s\n' % (partlist[3], partlist[1], partlist[2]))
        pfile.close()
        if data is True:
            plst = []
            mplst = []
            if not os.path.exists(tmp + 'create'):
                plst.extend(([pslice, cnumb]))
                mplst.append(plst)
                cf = open(tmp + 'create', 'wb')
                pickle.dump(mplst, cf)
                cf.close()
class rDeleteParttion():
    """Replay the pending ``delete`` file: run ``gpart delete`` for each
    queued partition (entries are [name, size])."""

    def __init__(self):
        if os.path.exists(tmp + 'delete'):
            df = open(tmp + 'delete', 'rb')
            dl = pickle.load(df)
            for line in dl:
                part = line[0]
                num = sliceNum(part)
                hd = rpartslice(part)
                call('gpart delete -i %s %s' % (num, hd), shell=True)
                # Pause between gpart invocations (kernel re-taste delay,
                # presumably — confirm why 2s was chosen).
                sleep(2)
class destroyParttion():
    """Replay the pending ``destroy`` file: for each [disk, scheme] entry,
    force-destroy the existing table and create a fresh one."""

    def __init__(self):
        if os.path.exists(tmp + 'destroy'):
            dsf = open(tmp + 'destroy', 'rb')
            ds = pickle.load(dsf)
            for line in ds:
                drive = line[0]
                call('gpart destroy -F %s' % drive, shell=True)
                scheme = line[1]
                # Pause between destroy and create (device settle time,
                # presumably).
                sleep(2)
                call('gpart create -s %s %s' % (scheme,
                                                drive), shell=True)
                sleep(2)
def bios_or_uefi():
    """Detect the firmware boot method.

    Prefers the GRUB-provided ``grub.platform`` kenv variable when present
    ("efi" -> "UEFI", anything else -> "BIOS"); otherwise returns the value
    of ``sysctl -n machdep.bootmethod`` verbatim.
    """
    kenv_dump = Popen("kenv", shell=True, stdout=PIPE, close_fds=True)
    if "grub.platform" in kenv_dump.stdout.read():
        plat = Popen("kenv grub.platform", shell=True, stdout=PIPE,
                     close_fds=True)
        platform = plat.stdout.readlines()[0].rstrip()
        return "UEFI" if platform == "efi" else "BIOS"
    probe = Popen("sysctl -n machdep.bootmethod", shell=True, stdout=PIPE,
                  close_fds=True)
    return probe.stdout.readlines()[0].rstrip()
class makingParttion():
    """Replay the pending ``create`` file: run ``gpart add`` (and, for EFI,
    ``newfs_msdos``) for each queued [name, size] entry.

    'p' entries are treated as boot partitions (efi / bios-boot /
    freebsd-boot); 's' entries become freebsd slices sized in 512-byte
    blocks (MB * 2048).
    """

    def __init__(self):
        if os.path.exists(tmp + 'create'):
            pf = open(tmp + 'create', 'rb')
            pl = pickle.load(pf)
            # Boot loader choice (e.g. "GRUB") written earlier to boot_file.
            read = open(boot_file, 'r')
            boot = read.readlines()[0].strip()
            size = 0
            for line in pl:
                part = line[0]
                drive = rpartslice(part)
                sl = sliceNum(part)
                if slicePartition(part) == 'p':
                    if bios_or_uefi() == 'UEFI':
                        cmd = 'gpart add -s 100M -t efi -i %s %s' % (sl, drive)
                        sleep(2)
                        cmd2 = 'newfs_msdos -F 16 %sp%s' % (drive, sl)
                        call(cmd, shell=True)
                        call(cmd2, shell=True)
                    else:
                        if boot == "GRUB":
                            cmd = 'gpart add -a 4k -s 1M -t bios-boot -i %s %s' % (sl, drive)
                        else:
                            cmd = 'gpart add -a 4k -s 512M -t freebsd-boot -i %s %s' % (sl, drive)
                        call(cmd, shell=True)
                elif slicePartition(part) == 's':
                    # size is in MB (presumably); 2048 blocks of 512B per MB.
                    size = int(line[1])
                    block = int(size * 2048)
                    cmd = 'gpart add -a 4k -s %s -t freebsd -i %s %s' % (block, sl, drive)
                    call(cmd, shell=True)
            sleep(2)
| |
""" Transformer in Transformer (TNT) in PyTorch
A PyTorch implement of TNT as described in
'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
The official mindspore code is released and available at
https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
"""
import math
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import Mlp, DropPath, trunc_normal_
from timm.models.layers.helpers import to_2tuple
from timm.models.layers import _assert
from timm.models.registry import register_model
from timm.models.vision_transformer import resize_pos_embed
def _cfg(url='', **kwargs):
    """Build a default pretrained-weight config dict for TNT variants;
    *kwargs* override the defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'pixel_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint configs; tnt_b_patch16_224 has no released weights
# (empty url). Both variants were trained with 0.5 mean/std normalization.
default_cfgs = {
    'tnt_s_patch16_224': _cfg(
        url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
    ),
    'tnt_b_patch16_224': _cfg(
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
    ),
}
class Attention(nn.Module):
    """Multi-head self-attention with a factored projection.

    Queries and keys are projected to ``hidden_dim`` (split across
    ``num_heads``) while values keep the input width ``dim``, so the
    output width equals the input width.
    """

    def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop, inplace=True)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop, inplace=True)

    def forward(self, x):
        B, N, _ = x.shape
        # Fused q/k projection -> (2, B, heads, N, head_dim), then split.
        qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k = qk.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
        v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(B, N, -1)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """ TNT Block

    Runs an inner transformer over each patch's pixel tokens, folds the
    updated pixel tokens back into the patch tokens (skipping the class
    token), then runs an outer transformer over the patch tokens.
    Submodule attribute names must stay stable for checkpoint loading.
    """
    def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4.,
                 qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        # Inner transformer (pixel tokens, width in_dim)
        self.norm_in = norm_layer(in_dim)
        self.attn_in = Attention(
            in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.norm_mlp_in = norm_layer(in_dim)
        # Inner MLP uses a fixed x4 expansion, independent of mlp_ratio.
        self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4),
                          out_features=in_dim, act_layer=act_layer, drop=drop)
        # Projects each patch's num_pixel pixel tokens into the outer width.
        self.norm1_proj = norm_layer(in_dim)
        self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True)
        # Outer transformer (patch tokens, width dim)
        self.norm_out = norm_layer(dim)
        self.attn_out = Attention(
            dim, dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm_mlp = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       out_features=dim, act_layer=act_layer, drop=drop)

    def forward(self, pixel_embed, patch_embed):
        # inner: pre-norm attention + MLP with residuals on pixel tokens
        pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
        pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
        # outer: add projected pixel tokens to every patch token except cls
        B, N, C = patch_embed.size()
        patch_embed = torch.cat(
            [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))],
            dim=1)
        patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
        patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
        return pixel_embed, patch_embed
class PixelEmbed(nn.Module):
    """ Image to Pixel Embedding

    A strided conv produces a feature map which is unfolded so that each
    image patch yields a group of ``in_dim``-channel pixel tokens.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        # grid_size property necessary for resizing positional embedding
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.in_dim = in_dim
        # Patch extent after the strided conv.
        self.new_patch_size = [math.ceil(ps / stride) for ps in patch_size]
        self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
        self.unfold = nn.Unfold(kernel_size=self.new_patch_size, stride=self.new_patch_size)

    def forward(self, x, pixel_pos):
        B, C, H, W = x.shape
        _assert(H == self.img_size[0],
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
        _assert(W == self.img_size[1],
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
        ph, pw = self.new_patch_size
        # Conv features, unfolded into one column per patch.
        feats = self.unfold(self.proj(x))
        feats = feats.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, ph, pw)
        feats = feats + pixel_pos
        # -> (B * num_patches, ph*pw, in_dim): pixel tokens per patch.
        return feats.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2)
class TNT(nn.Module):
    """ Transformer in Transformer - https://arxiv.org/abs/2103.00112

    Maintains two token streams: per-patch pixel tokens (width ``in_dim``)
    refined by inner transformers, and per-image patch tokens (width
    ``embed_dim``) refined by outer transformers, with the pixel stream
    folded into the patch stream in every block.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12,
                 num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.pixel_embed = PixelEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride)
        num_patches = self.pixel_embed.num_patches
        self.num_patches = num_patches
        new_patch_size = self.pixel_embed.new_patch_size
        num_pixel = new_patch_size[0] * new_patch_size[1]
        # Projection from each patch's flattened pixel tokens to one patch token.
        self.norm1_proj = norm_layer(num_pixel * in_dim)
        self.proj = nn.Linear(num_pixel * in_dim, embed_dim)
        self.norm2_proj = norm_layer(embed_dim)
        # Class token plus learned position embeddings for both streams.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1]))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        blocks = []
        for i in range(depth):
            blocks.append(Block(
                dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head,
                mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,
                drop_path=dpr[i], norm_layer=norm_layer))
        self.blocks = nn.ModuleList(blocks)
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.cls_token, std=.02)
        trunc_normal_(self.patch_pos, std=.02)
        trunc_normal_(self.pixel_pos, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights, zero bias; ones/zeros
        for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by timm's optimizer factory.
        return {'patch_pos', 'pixel_pos', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Swap the classification head for a new class count (0 removes it)."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        """Return the final class-token features, shape (B, embed_dim)."""
        B = x.shape[0]
        pixel_embed = self.pixel_embed(x, self.pixel_pos)
        # Fold pixel tokens into initial patch tokens, prepend cls token.
        patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1))))
        patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1)
        patch_embed = patch_embed + self.patch_pos
        patch_embed = self.pos_drop(patch_embed)
        for blk in self.blocks:
            pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
        patch_embed = self.norm(patch_embed)
        return patch_embed[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
def checkpoint_filter_fn(state_dict, model):
    """Adapt a pretrained checkpoint so it can be loaded into ``model``.

    Resizes the patch positional embedding when the checkpoint was trained
    at a different grid size than the target model.  (The original docstring
    described ViT patch-embed conversion, which this function does not do.)

    Args:
        state_dict: checkpoint state dict, possibly from a different input size.
        model: target model instance providing the reference ``patch_pos`` shape.

    Returns:
        The (possibly modified) state dict.
    """
    patch_pos = state_dict.get('patch_pos')
    # Guard against checkpoints that lack 'patch_pos' entirely (original
    # raised KeyError); only resize when the stored shape differs.
    if patch_pos is not None and patch_pos.shape != model.patch_pos.shape:
        state_dict['patch_pos'] = resize_pos_embed(
            patch_pos, model.patch_pos, getattr(model, 'num_tokens', 1),
            model.pixel_embed.grid_size)
    return state_dict
def _create_tnt(variant, pretrained=False, **kwargs):
    """Instantiate a TNT model for the named config variant.

    Raises RuntimeError for ``features_only`` requests, which this
    architecture does not support.
    """
    if kwargs.get('features_only'):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    return build_model_with_cfg(
        TNT, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
@register_model
def tnt_s_patch16_224(pretrained=False, **kwargs):
    """TNT-Small: 16x16 patches, 224x224 input, embed_dim 384, depth 12."""
    cfg = dict(
        patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6,
        in_num_head=4, qkv_bias=False, **kwargs)
    return _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **cfg)
@register_model
def tnt_b_patch16_224(pretrained=False, **kwargs):
    """TNT-Base: 16x16 patches, 224x224 input, embed_dim 640, depth 12."""
    cfg = dict(
        patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10,
        in_num_head=4, qkv_bias=False, **kwargs)
    return _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **cfg)
| |
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import json
import salt.modules.nftables as nftables
import salt.utils.files
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, mock_open, patch
from tests.support.unit import TestCase
class NftablesTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.nftables
"""
    def setup_loader_modules(self):
        """Return the modules (with empty loader dunders) for LoaderModuleMockMixin to set up."""
        return {nftables: {}}
# 'version' function tests: 1
def test_version(self):
"""
Test if it return version from nftables --version
"""
mock = MagicMock(return_value="nf_tables 0.3-1")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.version(), "0.3-1")
# 'build_rule' function tests: 1
def test_build_rule(self):
"""
Test if it build a well-formatted nftables rule based on kwargs.
"""
self.assertEqual(
nftables.build_rule(full="True"),
{"result": False, "rule": "", "comment": "Table needs to be specified"},
)
self.assertEqual(
nftables.build_rule(table="filter", full="True"),
{"result": False, "rule": "", "comment": "Chain needs to be specified"},
)
self.assertEqual(
nftables.build_rule(table="filter", chain="input", full="True"),
{"result": False, "rule": "", "comment": "Command needs to be specified"},
)
self.assertEqual(
nftables.build_rule(
table="filter",
chain="input",
command="insert",
position="3",
full="True",
),
{
"result": True,
"rule": "nft insert rule ip filter input position 3 ",
"comment": "Successfully built rule",
},
)
self.assertEqual(
nftables.build_rule(
table="filter", chain="input", command="insert", full="True"
),
{
"result": True,
"rule": "nft insert rule ip filter input ",
"comment": "Successfully built rule",
},
)
self.assertEqual(
nftables.build_rule(
table="filter", chain="input", command="halt", full="True"
),
{
"result": True,
"rule": "nft halt rule ip filter input ",
"comment": "Successfully built rule",
},
)
self.assertEqual(
nftables.build_rule(
table="filter",
chain="input",
command="insert",
position="3",
full="True",
connstate="related,established",
saddr="10.0.0.1",
daddr="10.0.0.2",
jump="accept",
),
{
"result": True,
"rule": (
"nft insert rule ip filter input position 3 ct state {"
" related,established } ip saddr 10.0.0.1 ip daddr 10.0.0.2 accept"
),
"comment": "Successfully built rule",
},
)
self.assertEqual(
nftables.build_rule(), {"result": True, "rule": "", "comment": ""}
)
# 'get_saved_rules' function tests: 1
def test_get_saved_rules(self):
"""
Test if it return a data structure of the rules in the conf file
"""
with patch.dict(nftables.__grains__, {"os_family": "Debian"}):
with patch.object(salt.utils.files, "fopen", MagicMock(mock_open())):
self.assertListEqual(nftables.get_saved_rules(), [])
# 'list_tables' function tests: 1
    def test_list_tables(self):
        """
        Test if it return a data structure of the current, in-memory tables
        """
        # NOTE(review): this patches nftables.list_tables itself and then
        # calls it, so it only asserts the mock's return value -- the real
        # implementation is never exercised. Consider mocking cmd.run instead.
        list_tables = [{"family": "inet", "name": "filter", "handle": 2}]
        list_tables_mock = MagicMock(return_value=list_tables)
        with patch.object(nftables, "list_tables", list_tables_mock):
            self.assertListEqual(nftables.list_tables(), list_tables)
        # Empty result path.
        list_tables_mock = MagicMock(return_value=[])
        with patch.object(nftables, "list_tables", list_tables_mock):
            self.assertListEqual(nftables.list_tables(), [])
# 'get_rules' function tests: 1
def test_get_rules(self):
"""
Test if it return a data structure of the current, in-memory rules
"""
list_tables_mock = MagicMock(
return_value=[{"family": "inet", "name": "filter", "handle": 2}]
)
list_rules_return = """table inet filter {
chain input {
type filter hook input priority 0; policy accept;
}
chain forward {
type filter hook forward priority 0; policy accept;
}
chain output {
type filter hook output priority 0; policy accept;
}
}"""
list_rules_mock = MagicMock(return_value=list_rules_return)
expected = [list_rules_return]
with patch.object(nftables, "list_tables", list_tables_mock):
with patch.dict(nftables.__salt__, {"cmd.run": list_rules_mock}):
self.assertListEqual(nftables.get_rules(), expected)
list_tables_mock = MagicMock(return_value=[])
with patch.object(nftables, "list_tables", list_tables_mock):
self.assertListEqual(nftables.get_rules(), [])
# 'get_rules_json' function tests: 1
def test_get_rules_json(self):
"""
Test if it return a data structure of the current, in-memory rules
"""
list_rules_return = """
{
"nftables": [
{
"table": {
"family": "ip",
"name": "filter",
"handle": 47
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "input",
"handle": 1,
"type": "filter",
"hook": "input",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "forward",
"handle": 2,
"type": "filter",
"hook": "forward",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "output",
"handle": 3,
"type": "filter",
"hook": "output",
"prio": 0,
"policy": "accept"
}
}
]
}
"""
list_rules_mock = MagicMock(return_value=list_rules_return)
expected = json.loads(list_rules_return)["nftables"]
with patch.dict(nftables.__salt__, {"cmd.run": list_rules_mock}):
self.assertListEqual(nftables.get_rules_json(), expected)
list_rules_mock = MagicMock(return_value=[])
with patch.dict(nftables.__salt__, {"cmd.run": list_rules_mock}):
self.assertListEqual(nftables.get_rules_json(), [])
# 'save' function tests: 1
def test_save(self):
"""
Test if it save the current in-memory rules to disk
"""
with patch.dict(nftables.__grains__, {"os_family": "Debian"}):
mock = MagicMock(return_value=False)
with patch.dict(nftables.__salt__, {"file.directory_exists": mock}):
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
with patch.object(
salt.utils.files, "fopen", MagicMock(mock_open())
):
self.assertEqual(nftables.save(), "#! nft -f\n\n")
with patch.object(
salt.utils.files, "fopen", MagicMock(side_effect=IOError)
):
self.assertRaises(CommandExecutionError, nftables.save)
# 'get_rule_handle' function tests: 1
def test_get_rule_handle(self):
"""
Test if it get the handle for a particular rule
"""
self.assertEqual(
nftables.get_rule_handle(),
{"result": False, "comment": "Chain needs to be specified"},
)
self.assertEqual(
nftables.get_rule_handle(chain="input"),
{"result": False, "comment": "Rule needs to be specified"},
)
_ru = "input tcp dport 22 log accept"
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.get_rule_handle(chain="input", rule=_ru), ret)
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 does not exist",
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.get_rule_handle(chain="input", rule=_ru), ret)
ret = {
"result": False,
"comment": (
"Rule input tcp dport 22 log accept chain"
" input in table filter in family ipv4 does not exist"
),
}
ret1 = {
"result": False,
"comment": "Could not find rule input tcp dport 22 log accept",
}
with patch.object(
nftables,
"check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
with patch.object(
nftables,
"check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
):
_ret1 = {
"result": False,
"comment": (
"Rule input tcp dport 22 log accept"
" chain input in table filter in"
" family ipv4 does not exist"
),
}
_ret2 = {"result": True, "comment": ""}
with patch.object(
nftables, "check", MagicMock(side_effect=[_ret1, _ret2])
):
self.assertEqual(
nftables.get_rule_handle(chain="input", rule=_ru), ret
)
_ru = "input tcp dport 22 log accept"
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(
nftables.get_rule_handle(chain="input", rule=_ru), ret1
)
# 'check' function tests: 1
def test_check(self):
"""
Test if it check for the existence of a rule in the table and chain
"""
self.assertEqual(
nftables.check(),
{"result": False, "comment": "Chain needs to be specified"},
)
self.assertEqual(
nftables.check(chain="input"),
{"result": False, "comment": "Rule needs to be specified"},
)
_ru = "tcp dport 22 log accept"
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check(chain="input", rule=_ru), ret)
mock = MagicMock(return_value="table ip filter")
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 does not exist",
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check(chain="input", rule=_ru), ret)
mock = MagicMock(return_value="table ip filter chain input {{")
ret = {
"result": False,
"comment": (
"Rule tcp dport 22 log accept in chain input in table filter in family"
" ipv4 does not exist"
),
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check(chain="input", rule=_ru), ret)
r_val = "table ip filter chain input {{ input tcp dport 22 log accept #"
mock = MagicMock(return_value=r_val)
ret = {
"result": True,
"comment": (
"Rule tcp dport 22 log accept in chain input in table filter in family"
" ipv4 exists"
),
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check(chain="input", rule=_ru), ret)
# 'check_chain' function tests: 1
def test_check_chain(self):
"""
Test if it check for the existence of a chain in the table
"""
self.assertEqual(
nftables.check_chain(),
{"result": False, "comment": "Chain needs to be specified"},
)
mock = MagicMock(return_value="")
ret = {
"comment": "Chain input in table filter in family ipv4 does not exist",
"result": False,
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check_chain(chain="input"), ret)
mock = MagicMock(return_value="chain input {{")
ret = {
"comment": "Chain input in table filter in family ipv4 exists",
"result": True,
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check_chain(chain="input"), ret)
# 'check_table' function tests: 1
def test_check_table(self):
"""
Test if it check for the existence of a table
"""
self.assertEqual(
nftables.check_table(),
{"result": False, "comment": "Table needs to be specified"},
)
mock = MagicMock(return_value="")
ret = {"comment": "Table nat in family ipv4 does not exist", "result": False}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check_table(table="nat"), ret)
mock = MagicMock(return_value="table ip nat")
ret = {"comment": "Table nat in family ipv4 exists", "result": True}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.check_table(table="nat"), ret)
# 'new_table' function tests: 1
def test_new_table(self):
"""
Test if it create new custom table.
"""
self.assertEqual(
nftables.new_table(table=None),
{"result": False, "comment": "Table needs to be specified"},
)
mock = MagicMock(return_value="")
ret = {"comment": "Table nat in family ipv4 created", "result": True}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.new_table(table="nat"), ret)
mock = MagicMock(return_value="table ip nat")
ret = {"comment": "Table nat in family ipv4 exists", "result": True}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.new_table(table="nat"), ret)
# 'delete_table' function tests: 1
def test_delete_table(self):
"""
Test if it delete custom table.
"""
self.assertEqual(
nftables.delete_table(table=None),
{"result": False, "comment": "Table needs to be specified"},
)
mock_ret = {
"result": False,
"comment": "Table nat in family ipv4 does not exist",
}
with patch(
"salt.modules.nftables.check_table", MagicMock(return_value=mock_ret)
):
ret = nftables.delete_table(table="nat")
self.assertEqual(
ret,
{"result": False, "comment": "Table nat in family ipv4 does not exist"},
)
mock = MagicMock(return_value="table ip nat")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
self.assertEqual(
nftables.delete_table(table="nat"),
{
"comment": "Table nat in family ipv4 could not be deleted",
"result": False,
},
)
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
self.assertEqual(
nftables.delete_table(table="nat"),
{"comment": "Table nat in family ipv4 deleted", "result": True},
)
# 'new_chain' function tests: 2
def test_new_chain(self):
"""
Test if it create new chain to the specified table.
"""
self.assertEqual(
nftables.new_chain(),
{"result": False, "comment": "Chain needs to be specified"},
)
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.new_chain(chain="input"), ret)
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 already exists",
}
mock = MagicMock(return_value="table ip filter chain input {{")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.new_chain(chain="input"), ret)
def test_new_chain_variable(self):
"""
Test if it create new chain to the specified table.
"""
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": False, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
self.assertEqual(
nftables.new_chain(chain="input", table_type="filter"),
{
"result": False,
"comment": "Table_type, hook, and priority required.",
},
)
self.assertTrue(
nftables.new_chain(
chain="input", table_type="filter", hook="input", priority=0
)
)
# 'delete_chain' function tests: 1
def test_delete_chain(self):
"""
Test if it delete the chain from the specified table.
"""
self.assertEqual(
nftables.delete_chain(),
{"result": False, "comment": "Chain needs to be specified"},
)
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.delete_chain(chain="input"), ret)
ret = {
"result": False,
"comment": (
"Chain input in table filter in family ipv4 could not be deleted"
),
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
):
self.assertEqual(nftables.delete_chain(chain="input"), ret)
ret = {
"result": True,
"comment": "Chain input in table filter in family ipv4 deleted",
}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
):
self.assertEqual(nftables.delete_chain(chain="input"), ret)
def test_delete_chain_variables(self):
"""
Test if it delete the chain from the specified table.
"""
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
_expected = {
"comment": "Chain input in table filter in family ipv4 deleted",
"result": True,
}
self.assertEqual(nftables.delete_chain(chain="input"), _expected)
# 'append' function tests: 2
def test_append(self):
"""
Test if it append a rule to the specified table & chain.
"""
self.assertEqual(
nftables.append(),
{"result": False, "comment": "Chain needs to be specified"},
)
self.assertEqual(
nftables.append(chain="input"),
{"result": False, "comment": "Rule needs to be specified"},
)
_ru = "input tcp dport 22 log accept"
ret = {"comment": "Table filter in family ipv4 does not exist", "result": False}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.append(chain="input", rule=_ru), ret)
ret = {
"comment": "Chain input in table filter in family ipv4 does not exist",
"result": False,
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.append(chain="input", rule=_ru), ret)
r_val = "table ip filter chain input {{ input tcp dport 22 log accept #"
mock = MagicMock(return_value=r_val)
_expected = {
"comment": (
"Rule input tcp dport 22 log accept chain input in table filter in"
" family ipv4 already exists"
),
"result": False,
}
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.append(chain="input", rule=_ru), _expected)
def test_append_rule(self):
"""
Test if it append a rule to the specified table & chain.
"""
_ru = "input tcp dport 22 log accept"
mock = MagicMock(side_effect=["1", ""])
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check",
MagicMock(return_value={"result": False, "comment": ""}),
), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
_expected = {
"comment": (
'Failed to add rule "{}" chain input in table filter in family'
" ipv4.".format(_ru)
),
"result": False,
}
self.assertEqual(nftables.append(chain="input", rule=_ru), _expected)
_expected = {
"comment": (
'Added rule "{}" chain input in table filter in family ipv4.'.format(
_ru
)
),
"result": True,
}
self.assertEqual(nftables.append(chain="input", rule=_ru), _expected)
# 'insert' function tests: 2
def test_insert(self):
"""
Test if it insert a rule into the specified table & chain,
at the specified position.
"""
self.assertEqual(
nftables.insert(),
{"result": False, "comment": "Chain needs to be specified"},
)
self.assertEqual(
nftables.insert(chain="input"),
{"result": False, "comment": "Rule needs to be specified"},
)
_ru = "input tcp dport 22 log accept"
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.insert(chain="input", rule=_ru), ret)
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 does not exist",
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.insert(chain="input", rule=_ru), ret)
r_val = "table ip filter chain input {{ input tcp dport 22 log accept #"
mock = MagicMock(return_value=r_val)
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
res = nftables.insert(chain="input", rule=_ru)
import logging
log = logging.getLogger(__name__)
log.debug("=== res %s ===", res)
self.assertTrue(nftables.insert(chain="input", rule=_ru))
def test_insert_rule(self):
"""
Test if it insert a rule into the specified table & chain,
at the specified position.
"""
_ru = "input tcp dport 22 log accept"
mock = MagicMock(side_effect=["1", ""])
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check",
MagicMock(return_value={"result": False, "comment": ""}),
), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
_expected = {
"result": False,
"comment": (
'Failed to add rule "{}" chain input in table filter in family'
" ipv4.".format(_ru)
),
}
self.assertEqual(nftables.insert(chain="input", rule=_ru), _expected)
_expected = {
"result": True,
"comment": (
'Added rule "{}" chain input in table filter in family ipv4.'.format(
_ru
)
),
}
self.assertEqual(nftables.insert(chain="input", rule=_ru), _expected)
# 'delete' function tests: 2
def test_delete(self):
"""
Test if it delete a rule from the specified table & chain,
specifying either the rule in its entirety, or
the rule's position in the chain.
"""
_ru = "input tcp dport 22 log accept"
ret = {
"result": False,
"comment": "Only specify a position or a rule, not both",
}
self.assertEqual(
nftables.delete(table="filter", chain="input", position="3", rule=_ru), ret
)
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(
nftables.delete(table="filter", chain="input", rule=_ru), ret
)
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 does not exist",
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(
nftables.delete(table="filter", chain="input", rule=_ru), ret
)
mock = MagicMock(return_value="table ip filter chain input {{")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertTrue(nftables.delete(table="filter", chain="input", rule=_ru))
def test_delete_rule(self):
"""
Test if it delete a rule from the specified table & chain,
specifying either the rule in its entirety, or
the rule's position in the chain.
"""
mock = MagicMock(side_effect=["1", ""])
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
_expected = {
"result": False,
"comment": (
'Failed to delete rule "None" in chain input table filter in'
" family ipv4"
),
}
self.assertEqual(
nftables.delete(table="filter", chain="input", position="3"), _expected
)
_expected = {
"result": True,
"comment": (
'Deleted rule "None" in chain input in table filter in family ipv4.'
),
}
self.assertEqual(
nftables.delete(table="filter", chain="input", position="3"), _expected
)
# 'flush' function tests: 2
def test_flush(self):
"""
Test if it flush the chain in the specified table, flush all chains
in the specified table if chain is not specified.
"""
ret = {"result": False, "comment": "Table filter in family ipv4 does not exist"}
mock = MagicMock(return_value="")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.flush(table="filter", chain="input"), ret)
ret = {
"result": False,
"comment": "Chain input in table filter in family ipv4 does not exist",
}
mock = MagicMock(return_value="table ip filter")
with patch.dict(nftables.__salt__, {"cmd.run": mock}):
self.assertEqual(nftables.flush(table="filter", chain="input"), ret)
def test_flush_chain(self):
"""
Test if it flush the chain in the specified table, flush all chains
in the specified table if chain is not specified.
"""
mock = MagicMock(side_effect=["1", ""])
with patch.dict(nftables.__salt__, {"cmd.run": mock}), patch(
"salt.modules.nftables.check_chain",
MagicMock(return_value={"result": True, "comment": ""}),
), patch(
"salt.modules.nftables.check_table",
MagicMock(return_value={"result": True, "comment": ""}),
):
_expected = {
"result": False,
"comment": (
"Failed to flush rules from chain input in table filter in family"
" ipv4."
),
}
self.assertEqual(nftables.flush(table="filter", chain="input"), _expected)
_expected = {
"result": True,
"comment": (
"Flushed rules from chain input in table filter in family ipv4."
),
}
self.assertEqual(nftables.flush(table="filter", chain="input"), _expected)
# 'get_policy' function tests: 1
def test_get_policy(self):
"""
Test the current policy for the specified table/chain
"""
list_rules_return = """
{
"nftables": [
{
"table": {
"family": "ip",
"name": "filter",
"handle": 47
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "input",
"handle": 1,
"type": "filter",
"hook": "input",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "forward",
"handle": 2,
"type": "filter",
"hook": "forward",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "output",
"handle": 3,
"type": "filter",
"hook": "output",
"prio": 0,
"policy": "accept"
}
}
]
}
"""
expected = json.loads(list_rules_return)
self.assertEqual(
nftables.get_policy(table="filter", chain=None, family="ipv4"),
"Error: Chain needs to be specified",
)
with patch.object(nftables, "get_rules_json", MagicMock(return_value=expected)):
self.assertEqual(
nftables.get_policy(table="filter", chain="input", family="ipv4"),
"accept",
)
with patch.object(nftables, "get_rules_json", MagicMock(return_value=expected)):
self.assertIsNone(
nftables.get_policy(table="filter", chain="missing", family="ipv4")
)
# 'set_policy' function tests: 1
def test_set_policy(self):
"""
Test set the current policy for the specified table/chain
"""
list_rules_return = """
{
"nftables": [
{
"table": {
"family": "ip",
"name": "filter",
"handle": 47
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "input",
"handle": 1,
"type": "filter",
"hook": "input",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "forward",
"handle": 2,
"type": "filter",
"hook": "forward",
"prio": 0,
"policy": "accept"
}
},
{
"chain": {
"family": "ip",
"table": "filter",
"name": "output",
"handle": 3,
"type": "filter",
"hook": "output",
"prio": 0,
"policy": "accept"
}
}
]
}
"""
expected = json.loads(list_rules_return)["nftables"]
self.assertEqual(
nftables.set_policy(table="filter", chain=None, policy=None, family="ipv4"),
"Error: Chain needs to be specified",
)
self.assertEqual(
nftables.set_policy(
table="filter", chain="input", policy=None, family="ipv4"
),
"Error: Policy needs to be specified",
)
mock = MagicMock(return_value={"retcode": 0})
with patch.object(nftables, "get_rules_json", MagicMock(return_value=expected)):
with patch.dict(nftables.__salt__, {"cmd.run_all": mock}):
self.assertTrue(
nftables.set_policy(
table="filter", chain="input", policy="accept", family="ipv4"
)
)
| |
"""Test UniFi config flow."""
import aiounifi
from homeassistant import data_entry_flow
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from .test_controller import setup_unifi_integration
from tests.async_mock import patch
from tests.common import MockConfigEntry
CLIENTS = [{"mac": "00:00:00:00:00:01"}]
DEVICES = [
{
"board_rev": 21,
"device_id": "mock-id",
"ip": "10.0.1.1",
"last_seen": 0,
"mac": "00:00:00:00:01:01",
"model": "U7PG2",
"name": "access_point",
"state": 1,
"type": "uap",
"version": "4.0.80.10875",
"wlan_overrides": [
{
"name": "SSID 3",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"name": "",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
],
}
]
WLANS = [
{"name": "SSID 1"},
{"name": "SSID 2", "name_combine_enabled": False, "name_combine_suffix": "_IOT"},
]
async def test_flow_works(hass, aioclient_mock, mock_discovery):
    """Test config flow."""
    # Discovery returns a hit so the form pre-fills the default host "unifi".
    mock_discovery.return_value = "1"
    result = await hass.config_entries.flow.async_init(
        UNIFI_DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Schema defaults: discovered host, port 8443, SSL verification off.
    assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
        CONF_HOST: "unifi",
        CONF_USERNAME: "",
        CONF_PASSWORD: "",
        CONF_PORT: 8443,
        CONF_VERIFY_SSL: False,
    }
    # Mock a reachable controller: redirect probe, login, and a single site.
    aioclient_mock.get("https://1.2.3.4:1234", status=302)
    aioclient_mock.post(
        "https://1.2.3.4:1234/api/login",
        json={"data": "login successful", "meta": {"rc": "ok"}},
        headers={"content-type": "application/json"},
    )
    aioclient_mock.get(
        "https://1.2.3.4:1234/api/self/sites",
        json={
            "data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
            "meta": {"rc": "ok"},
        },
        headers={"content-type": "application/json"},
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={
            CONF_HOST: "1.2.3.4",
            CONF_USERNAME: "username",
            CONF_PASSWORD: "password",
            CONF_PORT: 1234,
            CONF_VERIFY_SSL: True,
        },
    )
    # Single site: the flow finishes immediately with an entry titled after
    # the site description and the controller config under CONF_CONTROLLER.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Site name"
    assert result["data"] == {
        CONF_CONTROLLER: {
            CONF_HOST: "1.2.3.4",
            CONF_USERNAME: "username",
            CONF_PASSWORD: "password",
            CONF_PORT: 1234,
            CONF_SITE_ID: "site_id",
            CONF_VERIFY_SSL: True,
        }
    }
async def test_flow_works_multiple_sites(hass, aioclient_mock):
"""Test config flow works when finding multiple sites."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": "application/json"},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"name": "default", "role": "admin", "desc": "site name"},
{"name": "site2", "role": "admin", "desc": "site2 name"},
],
"meta": {"rc": "ok"},
},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "site"
assert result["data_schema"]({"site": "site name"})
assert result["data_schema"]({"site": "site2 name"})
async def test_flow_fails_site_already_configured(hass, aioclient_mock):
    """Test the flow aborts when the controller site is already configured."""
    # Pre-register a config entry for the same controller host and site.
    existing = MockConfigEntry(
        domain=UNIFI_DOMAIN, data={"controller": {"host": "1.2.3.4", "site": "site_id"}}
    )
    existing.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        UNIFI_DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Mock a controller exposing exactly that site.
    json_headers = {"content-type": "application/json"}
    aioclient_mock.get("https://1.2.3.4:1234", status=302)
    aioclient_mock.post(
        "https://1.2.3.4:1234/api/login",
        json={"data": "login successful", "meta": {"rc": "ok"}},
        headers=json_headers,
    )
    aioclient_mock.get(
        "https://1.2.3.4:1234/api/self/sites",
        json={
            "data": [{"desc": "Site name", "name": "site_id", "role": "admin"}],
            "meta": {"rc": "ok"},
        },
        headers=json_headers,
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={
            CONF_HOST: "1.2.3.4",
            CONF_USERNAME: "username",
            CONF_PASSWORD: "password",
            CONF_PORT: 1234,
            CONF_VERIFY_SSL: True,
        },
    )
    # Duplicate site -> flow aborts instead of creating a second entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
    """Test the flow reports an error when the controller rejects the login."""
    result = await hass.config_entries.flow.async_init(
        UNIFI_DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    aioclient_mock.get("https://1.2.3.4:1234", status=302)
    controller_input = {
        CONF_HOST: "1.2.3.4",
        CONF_USERNAME: "username",
        CONF_PASSWORD: "password",
        CONF_PORT: 1234,
        CONF_VERIFY_SSL: True,
    }
    # Simulate the controller rejecting the credentials.
    with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.Unauthorized):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input=controller_input
        )
    # The form is shown again with a credentials error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "faulty_credentials"}
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
    """Test the flow reports an error when the controller is unreachable."""
    result = await hass.config_entries.flow.async_init(
        UNIFI_DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    aioclient_mock.get("https://1.2.3.4:1234", status=302)
    controller_input = {
        CONF_HOST: "1.2.3.4",
        CONF_USERNAME: "username",
        CONF_PASSWORD: "password",
        CONF_PORT: 1234,
        CONF_VERIFY_SSL: True,
    }
    # Simulate a connection-level failure while logging in.
    with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.RequestError):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input=controller_input
        )
    # The form is shown again with a service availability error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "service_unavailable"}
async def test_flow_fails_unknown_problem(hass, aioclient_mock):
    """Test the flow aborts on an unexpected exception during login."""
    result = await hass.config_entries.flow.async_init(
        UNIFI_DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    aioclient_mock.get("https://1.2.3.4:1234", status=302)
    controller_input = {
        CONF_HOST: "1.2.3.4",
        CONF_USERNAME: "username",
        CONF_PASSWORD: "password",
        CONF_PORT: 1234,
        CONF_VERIFY_SSL: True,
    }
    # Any unclassified exception must abort the flow entirely.
    with patch("aiounifi.Controller.login", side_effect=Exception):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input=controller_input
        )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_advanced_option_flow(hass):
    """Test advanced config flow options."""
    controller = await setup_unifi_integration(
        hass, clients_response=CLIENTS, devices_response=DEVICES, wlans_response=WLANS
    )
    result = await hass.config_entries.options.async_init(
        controller.config_entry.entry_id, context={"show_advanced_options": True}
    )
    # Step 1: device tracker options; the SSID filter must offer the
    # SSIDs reported by the mocked WLANs.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "device_tracker"
    assert set(
        result["data_schema"].schema[CONF_SSID_FILTER].options.keys()
    ).intersection(("SSID 1", "SSID 2", "SSID 2_IOT", "SSID 3"))
    tracker_options = {
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_WIRED_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
        CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
        CONF_DETECTION_TIME: 100,
    }
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=tracker_options
    )
    # Step 2: client control options.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "client_control"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]], CONF_POE_CLIENTS: False},
    )
    # Step 3: statistics sensors options.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "statistics_sensors"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_ALLOW_BANDWIDTH_SENSORS: True}
    )
    # All three steps are merged into the final options payload.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_WIRED_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
        CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
        CONF_DETECTION_TIME: 100,
        CONF_IGNORE_WIRED_BUG: False,
        CONF_POE_CLIENTS: False,
        CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
        CONF_ALLOW_BANDWIDTH_SENSORS: True,
    }
async def test_simple_option_flow(hass):
    """Test simple config flow options."""
    controller = await setup_unifi_integration(
        hass, clients_response=CLIENTS, wlans_response=WLANS
    )
    # Without advanced options the flow is a single "simple_options" step.
    result = await hass.config_entries.options.async_init(
        controller.config_entry.entry_id, context={"show_advanced_options": False}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "simple_options"
    chosen_options = {
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
        CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
    }
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=chosen_options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
        CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
    }
| |
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
import ast
import os
import sys
from copy import deepcopy
from pyqgl2.ast_qgl2 import is_concur, is_seq
from pyqgl2.ast_util import ast2str, NodeError
from pyqgl2.find_channels import find_all_channels
class SingleSequence(object):
    """
    Create a sequence list for a single QBIT

    Note: this assumes that the AST is for one function
    definition that has already been inlined, successfully
    flattened, grouped, and sequenced already.
    """

    def __init__(self, importer):
        # used to resolve function names to the modules that define them
        self.importer = importer

        # channels/qbits referenced by the function being translated
        self.qbits = set()

        # (sym_name, use_name, node) tuples for qbit-creation statements.
        # NOTE(review): these come from self.is_qbit_create(), which is
        # not defined in this class here -- confirm it is provided
        # elsewhere (e.g. a mixin or monkey patch).
        self.qbit_creates = list()

        self.sequences = dict()  # From channel to list of pulses

        # the imports we need to make in order to satisfy the stubs
        #
        # the key is the name of the module (i.e. something like
        # 'QGL.PulsePrimitives') and the values are sets of import
        # clauses (i.e. 'foo' or 'foo as bar')
        #
        self.stub_imports = dict()

    def find_imports(self, node):
        """Record the stub imports needed by every call in the AST.

        Walks the tree under node; for each call to a plain name it
        resolves the name via the importer and accumulates the
        module -> import-clause mapping in self.stub_imports.
        Emits NodeError messages for unresolvable or non-stub calls.
        Always returns True.
        """

        default_namespace = node.qgl_fname

        for subnode in ast.walk(node):
            if (isinstance(subnode, ast.Call) and
                    isinstance(subnode.func, ast.Name)):
                funcname = subnode.func.id

                # If we created a node without an qgl_fname,
                # then use the default namespace instead.
                # FIXME: This is a hack, but it will work for now.
                #
                if not hasattr(subnode, 'qgl_fname'):
                    namespace = default_namespace
                else:
                    namespace = subnode.qgl_fname

                fdef = self.importer.resolve_sym(namespace, funcname)
                if not fdef:
                    print('ERROR %s funcname' % funcname)
                    NodeError.error_msg(subnode,
                            'cannot find import info for [%s]' % funcname)
                elif not fdef.qgl_stub_import:
                    NodeError.error_msg(subnode,
                            'not a stub: [%s]' % funcname)
                else:
                    # print('FI AST %s' % ast.dump(fdef))
                    (sym_name, module_name, orig_name) = fdef.qgl_stub_import

                    if orig_name:
                        import_str = '%s as %s' % (orig_name, sym_name)
                    else:
                        import_str = sym_name

                    if module_name not in self.stub_imports:
                        self.stub_imports[module_name] = set()

                    self.stub_imports[module_name].add(import_str)

        return True

    def create_imports_list(self):
        """Return sorted 'from M import X' lines for the stubs found.

        Modules and symbols are both sorted so the output is stable.
        """

        import_list = list()

        for module_name in sorted(self.stub_imports.keys()):
            for sym_name in sorted(self.stub_imports[module_name]):
                import_list.append(
                        'from %s import %s' % (module_name, sym_name))

        return import_list

    def find_sequence(self, node):
        """Collect per-channel pulse sequences from a FunctionDef node.

        Populates self.qbits, self.qbit_creates and self.sequences.
        Returns False (after emitting an error) if node is not a
        FunctionDef or references no channels; True otherwise.
        """

        if not isinstance(node, ast.FunctionDef):
            NodeError.fatal_msg(node, 'not a function definition')
            return False

        self.qbits = find_all_channels(node)

        if len(self.qbits) == 0:
            NodeError.error_msg(node, 'no channels found')
            return False
        else:
            NodeError.diag_msg(node, "Found channels %s" % self.qbits)

        lineNo = -1
        while lineNo + 1 < len(node.body):
            lineNo += 1
            stmnt = node.body[lineNo]

            assignment = self.is_qbit_create(stmnt)
            if assignment:
                self.qbit_creates.append(assignment)
                continue
            elif is_concur(stmnt):
                # each seq sub-block carries the pulses for one channel
                # list; bucket them by str(qgl_chan_list)
                for s in stmnt.body:
                    if is_seq(s):
                        if str(s.qgl_chan_list) not in self.sequences:
                            self.sequences[str(s.qgl_chan_list)] = list()

                        thisSeq = self.sequences[str(s.qgl_chan_list)]
                        thisSeq += s.body
                    else:
                        NodeError.error_msg(s,
                                "Not seq next at line %d: %s" % (lineNo + 1, s))
            elif isinstance(stmnt, ast.Expr):
                # a bare expression only makes sense when there is a
                # single qbit to attach it to.
                # NOTE(review): this branch keys self.sequences by the
                # qbit itself while the concur branch keys by
                # str(chan_list) -- confirm the two never need to merge.
                if len(self.qbits) == 1:
                    if len(self.sequences) == 0:
                        self.sequences[list(self.qbits)[0]] = list()
                    self.sequences[list(self.qbits)[0]].append(stmnt)
                else:
                    NodeError.error_msg(stmnt,
                            'orphan statement %s' % ast.dump(stmnt))
            else:
                NodeError.error_msg(stmnt,
                        'orphan statement %s' % ast.dump(stmnt))

        return True

    def emit_function(self, func_name='qgl1_main', setup=None):
        """
        Create a function that, when run, creates the context
        in which the sequence is evaluated, and evaluate it.

        func_name is the name for the function, if provided.
        I'm not certain that the name has any significance
        or whether this function will be, for all practical
        purposes, a lambda.

        setup, if given, is a list of AST statements whose source is
        emitted (at function scope) after the qbit definitions.

        Returns the source text of the generated function.
        """

        # assumes a standard 4-space indent; if the surrounding
        # context uses some other indentation scheme, the interpreter
        # may gripe, and pep8 certainly will
        #
        indent = '    '

        # FIXME: Ugliness
        # In the proper namespace we need to import all the QGL1 functions
        # that this method is using / might use
        # Here we include the imports matching stuff in qgl2.qgl1.py
        # Can we perhaps annotate all the stubs with the proper
        # import statement and use that to figure out what to include here?
        base_imports = """    from QGL.PulseSequencePlotter import plot_pulse_files
"""

        found_imports = ('\n' + indent).join(self.create_imports_list())

        # allow QBIT parameters to be overridden
        #
        preamble = 'def %s(**kwargs):\n' % func_name
        preamble += base_imports
        preamble += indent + found_imports
        preamble += '\n\n'

        # each qbit symbol can be supplied via kwargs, otherwise the
        # original creation statement is re-emitted
        for (sym_name, _use_name, node) in self.qbit_creates:
            preamble += indent + 'if \'' + sym_name + '\' in kwargs:\n'
            preamble += (2 * indent) + sym_name
            preamble += ' = kwargs[\'%s\']\n' % sym_name
            preamble += indent + 'else:\n'
            preamble += (2 * indent) + ast2str(node).strip() + '\n'

        for (sym_name, use_name, _node) in self.qbit_creates:
            preamble += indent + '%s = %s\n' % (use_name, sym_name)

        if setup:
            for stmnt in setup:
                preamble += indent + ('%s\n' % ast2str(stmnt).strip())

        seqs_def = indent + 'seqs = list()\n'
        seqs_str = ''
        seq_strs = list()

        for seq in self.sequences.values():
            sequence = [ast2str(item).strip() for item in seq]

            # TODO: check that this is always OK.
            #
            # HACK ALERT: this might not always be the right thing to do
            # but right now extraneous calls to Sync at the start of
            # program appear to cause a problem, and they don't serve
            # any known purpose, so skip them.
            #
            # BUGFIX: also guard against an empty (or all-Sync) sequence
            # so we don't index past the end of the list.
            while sequence and sequence[0] == 'Sync()':
                sequence = sequence[1:]

            # TODO there must be a more elegant way to indent this properly
            seq_str = indent + 'seq = [\n' + 2 * indent
            seq_str += (',\n' + 2 * indent).join(sequence)
            seq_str += '\n' + indent + ']\n'
            seq_strs.append(seq_str)

        for seq_str in seq_strs:
            seqs_str += seq_str
            # That was a single sequence. We want a list of sequences
            # FIXME: Really, we want a new sequence every time the source code used Init()
            seqs_str += indent + 'seqs += [seq]\n'

        postamble = indent + 'return seqs\n'
        res = preamble + seqs_def + seqs_str + postamble
        return res
def single_sequence(node, func_name, importer, setup=None):
    """
    Create a function that encapsulates the QGL code (for a single
    sequence) from the given AST node, which is presumed to already
    be fully pre-processed.

    node: the preprocessed ast.FunctionDef to translate
    func_name: name bound to the generated function
    importer: importer used by SingleSequence to resolve stub imports
    setup: optional extra statements forwarded to emit_function()

    Returns the compiled function object, or None if the node could
    not be turned into a single sequence.

    TODO: we don't test that the node is fully pre-processed.
    TODO: each step of the preprocessor should mark the nodes
    so that we know whether or not they've been processed.
    """

    builder = SingleSequence(importer)

    if builder.find_sequence(node) and builder.find_imports(node):
        code = builder.emit_function(func_name, setup=setup)

        NodeError.diag_msg(
                node, 'generated code:\n#start\n%s\n#end code' % code)

        # TODO: we might want to pass in elements of the local scope
        # NOTE: this compiles and executes generated source; it is
        # trusted here because it was produced by this compiler, not
        # taken from external input.
        scratch_scope = dict()
        eval(compile(code, '<none>', mode='exec'), globals(), scratch_scope)

        return scratch_scope[func_name]
    else:
        NodeError.fatal_msg(
                node, 'find_sequence failed: not a single sequence')
        return None
| |
from datetime import datetime
import logging
import shutil
from subprocess import Popen, PIPE
import os
import pandas as pd
import numpy as np
from pandas.errors import EmptyDataError
import re
# Column names requested from blastn via '-outfmt 6 ...' (see
# BlastRunner.blast_against_query) and applied to the parsed result
# DataFrame (see BlastReader).  Order must match the -outfmt spec.
BLAST_TABLE_COLS = '''
qseqid
stitle
pident
length
mismatch
gapopen
qstart
qend
sstart
send
evalue
bitscore
qlen
slen
sseq
'''.strip().split('\n')
class BlastRunner:
    """Run makeblastdb and blastn against a genome FASTA file.

    Workflow: prep_blast() creates a working directory, copies the
    FASTA into it (sanitizing the filename) and builds a nucleotide
    BLAST DB; blast_against_query() then runs blastn and returns the
    path of the tabular output file.
    """

    # True once makeblastdb has produced a DB for the working FASTA copy.
    blast_db_created = False

    def __init__(self, fasta_path, tmp_work_dir):
        """Remember the source FASTA path and the desired work dir."""
        self.tmp_work_dir = tmp_work_dir
        self.fasta_path = fasta_path

    def _create_tmp_folder(self):
        """Create the analysis dir, appending _1, _2, ... on collision.

        Updates self.tmp_work_dir to the directory actually created
        and returns it.
        """
        count = 1
        tmp_dir = self.tmp_work_dir
        while True:
            try:
                logging.info('Trying to create analysis directory at: %s', tmp_dir)
                os.makedirs(tmp_dir)
                break
            except OSError as e:
                logging.warning('Error on creation of tmp analysis directory "{}"! {}'.format(
                    tmp_dir,
                    e
                ))
                tmp_dir = '{}_{}'.format(self.tmp_work_dir, count)
                count += 1

        self.tmp_work_dir = tmp_dir
        return self.tmp_work_dir

    def _copy_fasta_to_work_dir(self):
        """Copy the FASTA into the work dir under a sanitized name.

        Non-word characters in the filename are replaced by '_' so the
        path is safe to hand to command-line tools.  Sets and returns
        self.tmp_fasta_path.
        """
        filename = os.path.basename(self.fasta_path)
        filename_no_spaces = re.sub(r'\W', '_', filename)
        dest_path = os.path.join(self.tmp_work_dir, filename_no_spaces)
        if self.fasta_path == dest_path:
            self.tmp_fasta_path = dest_path
            return dest_path
        shutil.copyfile(self.fasta_path, dest_path)
        self.tmp_fasta_path = dest_path
        return dest_path

    @staticmethod
    def _run_command(cmd, log_label):
        """Run *cmd*, log its output, and return decoded (stdout, stderr).

        BUGFIX: uses Popen.communicate() instead of wait() followed by
        stdout/stderr reads -- the old pattern can deadlock when the
        child fills an OS pipe buffer.  Output is decoded so the
        emptiness checks below actually work on text.
        """
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        stdout = stdout.decode(errors='replace') if stdout else ''
        stderr = stderr.decode(errors='replace') if stderr else ''
        if stdout != '':
            logging.debug('{0} STDOUT: {1}'.format(log_label, stdout))
        if stderr != '':
            logging.debug('{0} STDERR: {1}'.format(log_label, stderr))
        return stdout, stderr

    def _run_makeblastdb(self):
        """Build a nucleotide BLAST DB for the working FASTA copy.

        Skips the build when a .nin index already exists.  Returns the
        FASTA path on success; raises Exception when makeblastdb fails
        to produce the index.
        """
        work_dir = os.path.dirname(self.tmp_fasta_path)
        filename = os.path.basename(self.tmp_fasta_path)
        nin_filepath = os.path.join(work_dir, filename + '.nin')
        if os.path.exists(nin_filepath):
            self.blast_db_created = True
            return self.tmp_fasta_path

        _stdout, stderr = self._run_command(
            ['makeblastdb',
             '-in', '{}'.format(self.tmp_fasta_path),
             '-dbtype', 'nucl'],
            'makeblastdb on {0}'.format(self.tmp_fasta_path))

        if os.path.exists(nin_filepath):
            self.blast_db_created = True
            return self.tmp_fasta_path
        else:
            ex_msg = 'makeblastdb was not able to create a BLAST DB for {0}. STDERR: {1}'.format(filename, stderr)
            logging.error(ex_msg)
            raise Exception(ex_msg)

    def blast_against_query(self, query_fasta_path, blast_task='megablast', evalue=1e-20, min_pid=85):
        """Run blastn of the query FASTA against the prepared DB.

        Args:
            query_fasta_path: path of the query (gene) FASTA
            blast_task: blastn -task value (default 'megablast')
            evalue: E-value cutoff
            min_pid: -perc_identity cutoff

        Returns the path of the tabular (-outfmt 6) output file;
        raises Exception when blastn produces no output file.
        """
        if not self.blast_db_created:
            self.prep_blast()
        gene_filename = os.path.basename(query_fasta_path)
        genome_filename = os.path.basename(self.tmp_fasta_path)
        # timestamped output name keeps repeated runs from clobbering
        timestamp = '{:%Y%b%d_%H_%M_%S}'.format(datetime.now())
        outfile = os.path.join(self.tmp_work_dir, '{}-{}-{}.blast'.format(gene_filename,
                                                                          genome_filename,
                                                                          timestamp))
        _stdout, _stderr = self._run_command(
            ['blastn',
             '-task', blast_task,
             '-query', query_fasta_path,
             '-db', '{}'.format(self.tmp_fasta_path),
             '-evalue', '{}'.format(evalue),
             '-dust', 'no',
             '-perc_identity', '{}'.format(min_pid),
             '-out', outfile,
             '-outfmt', '6 {}'.format(' '.join(BLAST_TABLE_COLS))],
            'blastn on db {} and query {}'.format(genome_filename, gene_filename))

        if os.path.exists(outfile):
            return outfile
        else:
            ex_msg = 'blastn on db {} and query {} did not produce expected output file at {}'.format(genome_filename,
                                                                                                      gene_filename,
                                                                                                      outfile)
            logging.error(ex_msg)
            raise Exception(ex_msg)

    def cleanup(self):
        """Remove the work dir and reset the DB-created flag."""
        self.blast_db_created = False
        shutil.rmtree(self.tmp_work_dir)

    def prep_blast(self):
        """Create the work dir, copy the FASTA and build the BLAST DB."""
        self._create_tmp_folder()
        self._copy_fasta_to_work_dir()
        self._run_makeblastdb()

    def run_blast(self, query_fasta_path):
        """Prep the DB and run the query; returns the outfile path."""
        self.prep_blast()
        blast_outfile = self.blast_against_query(query_fasta_path)
        return blast_outfile
class BlastReader:
    """Parse and query blastn tabular output (-outfmt 6).

    Attributes (class defaults, overridden per instance):
        is_missing: True when no results could be parsed
        is_perfect_match: True once top_result() finds a 100% identity
            and 100% coverage hit
        is_trunc: True when the top result is truncated by the end of
            the subject sequence
        df: pandas.DataFrame of results sorted by descending bitscore,
            or None when parsing failed
    """
    is_missing = True
    is_perfect_match = False
    is_trunc = False
    df = None

    def __init__(self, blast_outfile, filter=()):
        """Read BLASTN output file into a pandas DataFrame

        Sort the DataFrame by BLAST bitscore.
        If the output file is empty (EmptyDataError), is_missing is
        set to True instead of raising.

        Args:
            blast_outfile (str): `blastn` output file path
            filter: iterable of qseqid substrings to drop from the
                results.  BUGFIX: the default was a mutable list
                (shared across calls); an empty tuple is equivalent
                and safe.
        """
        self.blast_outfile = blast_outfile
        try:
            self.df = pd.read_csv(self.blast_outfile, header=None, sep='\t')
            self.df.columns = BLAST_TABLE_COLS
            # calculate the coverage for when results need to be validated
            self.df.loc[:, 'coverage'] = self.df.length / self.df.qlen
            self.df.sort_values(by='bitscore', ascending=False, inplace=True)
            self.df.loc[:, 'is_trunc'] = BlastReader.trunc(qstart=self.df.qstart,
                                                           qend=self.df.qend,
                                                           qlen=self.df.qlen,
                                                           sstart=self.df.sstart,
                                                           send=self.df.send,
                                                           slen=self.df.slen)
            logging.debug(self.df.head())
            self.is_missing = False
            self.filter_rows(filter)
        except EmptyDataError:
            # no hits is a legal outcome, not an error condition
            logging.warning('No BLASTN results to parse from file %s', blast_outfile)
            self.is_missing = True

    def filter_rows(self, filter):
        """Drop rows whose qseqid contains any of the given substrings."""
        for f in filter:
            self.df = self.df[~self.df['qseqid'].str.contains(f)]

    def df_dict(self):
        """Return the results as a dict, or None when results are missing."""
        if not self.is_missing:
            return self.df.to_dict()

    @staticmethod
    def df_first_row_to_dict(df):
        """First DataFrame row to list of dict

        Args:
            df (pandas.DataFrame): A DataFrame with at least one row

        Returns:
            A list of dict that looks like:

                [{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]

            from a DataFrame that looks like:

                    C1  C2  C3
                1   x   y   z

            Else if `df` is `None`, returns `None`
        """
        if df is not None and not df.empty:
            return [dict(r) for i, r in df.head(1).iterrows()][0]

    @staticmethod
    def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):
        """Check if a query sequence is truncated by the end of a subject sequence

        A result is truncated when the match is shorter than the query
        and the subject coordinates touch either end of the subject.

        Args:
            qstart (int): Query sequence start index
            qend (int): Query sequence end index
            sstart (int): Subject sequence start index
            send (int): Subject sequence end index
            qlen (int): Query sequence length
            slen (int): Subject sequence length

        Returns:
            bool: Result truncated by subject sequence end?
        """
        q_match_len = abs(qstart - qend) + 1
        s_max = max(sstart, send)
        s_min = min(sstart, send)
        return (q_match_len < qlen) and (s_max >= slen or s_min <= 1)

    @staticmethod
    def trunc(qstart, qend, sstart, send, qlen, slen):
        """Vectorized version of is_blast_result_trunc for pandas Series.

        Args:
            qstart (int pandas.Series): Query sequence start index
            qend (int pandas.Series): Query sequence end index
            sstart (int pandas.Series): Subject sequence start index
            send (int pandas.Series): Subject sequence end index
            qlen (int pandas.Series): Query sequence length
            slen (int pandas.Series): Subject sequence length

        Returns:
            Boolean pandas.Series: Result truncated by subject sequence end?
        """
        # branch-free elementwise max/min of sstart and send
        ssum2 = (send + sstart) / 2.0
        sabs2 = np.abs(send - sstart) / 2.0
        smax = ssum2 + sabs2
        smin = ssum2 - sabs2
        q_match_len = np.abs(qstart - qend) + 1
        return (q_match_len < qlen) & ((smax >= slen) | (smin <= 1))

    def perfect_matches(self):
        """
        Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)

        Returns:
            pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist
        """
        if self.is_missing:
            return None

        df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
        if df_perfect_matches.shape[0] == 0:
            return None
        return df_perfect_matches

    def top_result(self):
        """Return top `blastn` result

        Try to find a 100% identity and coverage result (perfect match).
        If one does not exist, then retrieve the result with the highest bitscore.
        Sets is_perfect_match / is_trunc as side effects.

        Returns:
            Ordered dict of BLASTN results or None if no BLASTN results generated
        """
        if self.is_missing:
            return None

        df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
        if df_perfect_matches.shape[0]:
            self.is_perfect_match = True
            return BlastReader.df_first_row_to_dict(df_perfect_matches)

        # Return the result with the highest bitscore.
        # This is the first result in dataframe since the df is ordered by
        # bitscore in descending order.
        result_dict = BlastReader.df_first_row_to_dict(self.df)
        if result_dict is None:
            return None
        result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
                                                         qend=result_dict['qend'],
                                                         sstart=result_dict['sstart'],
                                                         send=result_dict['send'],
                                                         qlen=result_dict['qlen'],
                                                         slen=result_dict['slen'])
        self.is_trunc = result_trunc
        return result_dict
| |
#! /usr/bin/env python
## NB: not all commands work ##
"""Cascaded queue administration.
londiste.py INI pause [NODE [CONS]]
setadm.py INI pause NODE [CONS]
"""
import optparse
import os.path
import Queue
import sys
import threading
import time
import skytools
from skytools import UsageError, DBError
from pgq.cascade.nodeinfo import *
# Public API of this module.
__all__ = ['CascadeAdmin']

# File name used to dump events during resurrect/takeover handling.
RESURRECT_DUMP_FILE = "resurrect-lost-events.json"

# Usage text for the cascade admin commands shared by londiste/setadm.
command_usage = """\
%prog [options] INI CMD [subcmd args]
Node Initialization:
create-root NAME [PUBLIC_CONNSTR]
create-branch NAME [PUBLIC_CONNSTR] --provider=<public_connstr>
create-leaf NAME [PUBLIC_CONNSTR] --provider=<public_connstr>
All of the above initialize a node
Node Administration:
pause Pause node worker
resume Resume node worker
wait-root Wait until node has caught up with root
wait-provider Wait until node has caught up with provider
status Show cascade state
node-status Show status of local node
members Show members in set
Cascade layout change:
change-provider --provider NEW_NODE
Change where worker reads from
takeover FROM_NODE [--all] [--dead]
Take other node position
drop-node NAME
Remove node from cascade
tag-dead NODE ..
Tag node as dead
tag-alive NODE ..
Tag node as alive
"""

# Extra usage text appended when run standalone as setadm.
standalone_usage = """
setadm extra switches:
pause/resume/change-provider:
--node=NODE_NAME | --consumer=CONSUMER_NAME
create-root/create-branch/create-leaf:
--worker=WORKER_NAME
"""
class CascadeAdmin(skytools.AdminScript):
    """Cascaded PgQ administration."""
    # Cascaded queue this admin operates on; filled in reload().
    queue_name = None
    # Cascade-wide queue info; presumably filled by load_queue_info() -- confirm.
    queue_info = None
    # Extra DB objects for subclasses; installed by install_code().
    extra_objs = []
    # Name of the local node; presumably filled by load_local_info() -- confirm.
    local_node = None
    # Root node name, cached by find_root_db().
    root_node_name = None

    # Commands that may run without a pidfile lock (read-only status ops).
    commands_without_pidfile = ['status', 'node-status', 'node-info']
def __init__(self, svc_name, dbname, args, worker_setup = False):
skytools.AdminScript.__init__(self, svc_name, args)
self.initial_db_name = dbname
if worker_setup:
self.options.worker = self.job_name
self.options.consumer = self.job_name
    def init_optparse(self, parser = None):
        """Add SetAdmin switches to parser.

        Extends the base AdminScript parser with the cascade command
        usage text and an option group covering queue/node selection
        and node-creation parameters.
        """
        p = skytools.AdminScript.init_optparse(self, parser)

        usage = command_usage + standalone_usage
        p.set_usage(usage.strip())

        g = optparse.OptionGroup(p, "actual queue admin options")
        g.add_option("--connstr", action="store_true",
                help = "initial connect string")
        g.add_option("--provider",
                help = "init: connect string for provider")
        g.add_option("--queue",
                help = "specify queue name")
        g.add_option("--worker",
                help = "create: specify worker name")
        g.add_option("--node",
                help = "specify node name")
        g.add_option("--consumer",
                help = "specify consumer name")
        g.add_option("--target",
                help = "takeover: specify node to take over")
        g.add_option("--merge",
                help = "create-node: combined queue name")
        g.add_option("--dead", action="append",
                help = "tag some node as dead")
        # NOTE(review): the next two options reuse --dead's help text;
        # looks like copy-paste -- confirm the intended wording.
        g.add_option("--dead-root", action="store_true",
                help = "tag some node as dead")
        g.add_option("--dead-branch", action="store_true",
                help = "tag some node as dead")
        g.add_option("--sync-watermark",
                help = "list of node names to sync with")
        p.add_option_group(g)

        return p
def reload(self):
"""Reload config."""
skytools.AdminScript.reload(self)
if self.options.queue:
self.queue_name = self.options.queue
else:
self.queue_name = self.cf.get('queue_name', '')
if not self.queue_name:
self.queue_name = self.cf.get('pgq_queue_name', '')
if not self.queue_name:
raise Exception('"queue_name" not specified in config')
#
# Node initialization.
#
    def cmd_install(self):
        """Install the cascading schema into the initial database."""
        db = self.get_database(self.initial_db_name)
        self.install_code(db)

    # The three create-* commands are thin wrappers that delegate to
    # the shared create_node() with the appropriate node type.
    def cmd_create_root(self, *args):
        return self.create_node('root', args)

    def cmd_create_branch(self, *args):
        return self.create_node('branch', args)

    def cmd_create_leaf(self, *args):
        return self.create_node('leaf', args)
    def create_node(self, node_type, args):
        """Generic node init.

        node_type: 'root', 'branch' or 'leaf'
        args: (node_name, public_connstr) -- both optional, falling
        back to 'node_name' / 'public_node_location' config entries.

        Registers the node's location and creates the node record in
        pgq_node, wiring it to a provider for non-root nodes.  No-op
        if the local database is already initialized as a node.
        """
        if node_type not in ('root', 'branch', 'leaf'):
            raise Exception('unknown node type')

        # load node name
        if len(args) > 0:
            node_name = args[0]
        else:
            node_name = self.cf.get('node_name', '')
        if not node_name:
            raise UsageError('Node name must be given either in command line or config')

        # load node public location
        if len(args) > 1:
            node_location = args[1]
        else:
            node_location = self.cf.get('public_node_location', '')
        if not node_location:
            raise UsageError('Node public location must be given either in command line or config')

        if len(args) > 2:
            raise UsageError('Too many args, only node name and public connect string allowed')

        # load provider
        provider_loc = self.options.provider
        if not provider_loc:
            provider_loc = self.cf.get('initial_provider_location', '')

        # check if sane
        # NOTE(review): parse_connect_string presumably yields
        # (key, value) pairs -- confirm against skytools API.
        ok = 0
        for k, v in skytools.parse_connect_string(node_location):
            if k in ('host', 'service'):
                ok = 1
                break
        if not ok:
            self.log.warning('No host= in public connect string, bad idea')

        # connect to database
        db = self.get_database(self.initial_db_name)

        # check if code is installed
        self.install_code(db)

        # query current status
        res = self.exec_query(db, "select * from pgq_node.get_node_info(%s)", [self.queue_name])
        info = res[0]
        if info['node_type'] is not None:
            # already initialized; creation is idempotent from here
            self.log.info("Node is already initialized as %s", info['node_type'])
            return

        # check if public connstr is sane
        self.check_public_connstr(db, node_location)

        self.log.info("Initializing node")

        node_attrs = {}

        worker_name = self.options.worker
        if not worker_name:
            raise Exception('--worker required')
        combined_queue = self.options.merge
        if combined_queue and node_type != 'leaf':
            raise Exception('--merge can be used only for leafs')

        if self.options.sync_watermark:
            if node_type != 'branch':
                raise UsageError('--sync-watermark can be used only for branch nodes')
            node_attrs['sync_watermark'] = self.options.sync_watermark

        # register member
        if node_type == 'root':
            # root node: register itself locally, no provider involved
            global_watermark = None
            combined_queue = None
            provider_name = None
            self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, false)",
                          [self.queue_name, node_name, node_location])
            self.exec_cmd(db, "select * from pgq_node.create_node(%s, %s, %s, %s, %s, %s, %s)",
                          [self.queue_name, node_type, node_name, worker_name, provider_name, global_watermark, combined_queue])
            provider_db = None
        else:
            if not provider_loc:
                raise Exception('Please specify --provider')

            root_db = self.find_root_db(provider_loc)
            queue_info = self.load_queue_info(root_db)

            # check if member already exists
            if queue_info.get_member(node_name) is not None:
                self.log.error("Node '%s' already exists", node_name)
                sys.exit(1)

            combined_set = None

            provider_db = self.get_database('provider_db', connstr = provider_loc, profile = 'remote')
            q = "select node_type, node_name from pgq_node.get_node_info(%s)"
            res = self.exec_query(provider_db, q, [self.queue_name])
            row = res[0]
            if not row['node_name']:
                raise Exception("provider node not found")
            provider_name = row['node_name']

            # register member on root
            self.exec_cmd(root_db, "select * from pgq_node.register_location(%s, %s, %s, false)",
                          [self.queue_name, node_name, node_location])

            # lookup provider
            provider = queue_info.get_member(provider_name)
            if not provider:
                self.log.error("Node %s does not exist", provider_name)
                sys.exit(1)

            # register on provider
            self.exec_cmd(provider_db, "select * from pgq_node.register_location(%s, %s, %s, false)",
                          [self.queue_name, node_name, node_location])
            rows = self.exec_cmd(provider_db, "select * from pgq_node.register_subscriber(%s, %s, %s, null)",
                                 [self.queue_name, node_name, worker_name])
            global_watermark = rows[0]['global_watermark']

            # initialize node itself

            # insert members
            self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, false)",
                          [self.queue_name, node_name, node_location])
            for m in queue_info.member_map.values():
                self.exec_cmd(db, "select * from pgq_node.register_location(%s, %s, %s, %s)",
                              [self.queue_name, m.name, m.location, m.dead])

            # real init
            self.exec_cmd(db, "select * from pgq_node.create_node(%s, %s, %s, %s, %s, %s, %s)",
                          [ self.queue_name, node_type, node_name, worker_name,
                            provider_name, global_watermark, combined_queue ])

        # subclass hook, then persist any collected node attributes
        self.extra_init(node_type, db, provider_db)

        if node_attrs:
            s_attrs = skytools.db_urlencode(node_attrs)
            self.exec_cmd(db, "select * from pgq_node.set_node_attrs(%s, %s)",
                          [self.queue_name, s_attrs])

        self.log.info("Done")
    def check_public_connstr(self, db, pub_connstr):
        """Look if public and local connect strings point to same db's.

        Strategy: take a txid snapshot on the local connection, run
        txid_current() through the public connection, then take a
        second local snapshot.  If both connect strings reach the same
        database, the public txid must be invisible in the first
        snapshot and visible in the second; the oid/datname pair must
        also match.  Failure raises UsageError with a bitmask of which
        checks failed (1 = oid/datname, 2 = txid already visible,
        4 = txid never became visible).
        """
        pub_db = self.get_database("pub_db", connstr = pub_connstr, profile = 'remote')
        curs1 = db.cursor()
        curs2 = pub_db.cursor()

        q = "select oid, datname, txid_current() as txid, txid_current_snapshot() as snap"\
            " from pg_catalog.pg_database where datname = current_database()"

        # snapshot 1 on the local connection
        curs1.execute(q)
        res1 = curs1.fetchone()
        db.commit()

        # transaction id taken via the public connection
        curs2.execute(q)
        res2 = curs2.fetchone()
        pub_db.commit()

        # snapshot 2 on the local connection
        curs1.execute(q)
        res3 = curs1.fetchone()
        db.commit()

        self.close_database("pub_db")

        failure = 0
        if (res1['oid'], res1['datname']) != (res2['oid'], res2['datname']):
            failure += 1

        sn1 = skytools.Snapshot(res1['snap'])
        tx = res2['txid']
        sn2 = skytools.Snapshot(res3['snap'])
        if sn1.contains(tx):
            failure += 2
        elif not sn2.contains(tx):
            failure += 4

        if failure:
            raise UsageError("Public connect string points to different database than local connect string (fail=%d)" % failure)
    def extra_init(self, node_type, node_db, provider_db):
        """Callback to do specific init.

        Hook for subclasses; called at the end of create_node() with
        the new node's db handle and the provider db handle (None for
        root nodes).
        """
        pass
    def find_root_db(self, initial_loc = None):
        """Find root node, having start point.

        Starting from initial_loc (or the configured database),
        follows provider_location links upwards until a root or
        combined-root node is reached.  Caches the root node name in
        self.root_node_name and returns an open db handle to it.
        Exits the process when a node along the way is uninitialized;
        raises when the provider chain loops.
        """
        if initial_loc:
            loc = initial_loc
            db = self.get_database('root_db', connstr = loc, profile = 'remote')
        else:
            loc = self.cf.get(self.initial_db_name)
            db = self.get_database('root_db', connstr = loc)

        while 1:
            # query current status
            res = self.exec_query(db, "select * from pgq_node.get_node_info(%s)", [self.queue_name])
            info = res[0]
            node_type = info['node_type']
            if node_type is None:
                self.log.info("Root node not initialized?")
                sys.exit(1)

            self.log.debug("db='%s' -- type='%s' provider='%s'", loc, node_type, info['provider_location'])
            # configured db may not be root anymore, walk upwards then
            if node_type in ('root', 'combined-root'):
                db.commit()
                self.root_node_name = info['node_name']
                return db

            self.close_database('root_db')
            # a node that is its own provider means a broken cascade
            if loc == info['provider_location']:
                raise Exception("find_root_db: got loop: %s" % loc)
            loc = info['provider_location']
            if loc is None:
                self.log.error("Sub node provider not initialized?")
                sys.exit(1)
            db = self.get_database('root_db', connstr = loc, profile = 'remote')

        # unreachable: the loop above always returns or raises
        raise Exception('process canceled')
def find_root_node(self):
self.find_root_db()
return self.root_node_name
def find_consumer_check(self, node, consumer):
cmap = self.get_node_consumer_map(node)
return (consumer in cmap)
    def find_consumer(self, node = None, consumer = None):
        """Resolve (node, consumer) pair from args or command-line options.

        With only a node given, picks that node's worker consumer; with
        only a consumer given, searches all live member nodes for it.
        Raises when neither is supplied or the consumer cannot be found.
        """
        if not node and not consumer:
            # fall back to command-line options
            node = self.options.node
            consumer = self.options.consumer
            if not node and not consumer:
                raise Exception('Need either --node or --consumer')

        # specific node given
        if node:
            if consumer:
                if not self.find_consumer_check(node, consumer):
                    raise Exception('Consumer not found')
            else:
                # default to the node's worker consumer
                state = self.get_node_info(node)
                consumer = state.worker_name
            return (node, consumer)

        # global consumer search: try local node first, then all members
        if self.find_consumer_check(self.local_node, consumer):
            return (self.local_node, consumer)

        # fixme: dead node handling?
        nodelist = self.queue_info.member_map.keys()
        for node in nodelist:
            if node == self.local_node:
                continue
            if self.find_consumer_check(node, consumer):
                return (node, consumer)

        raise Exception('Consumer not found')
    def install_code(self, db):
        """Install cascading code to db.

        Installs plpgsql plus the pgq / pgq_ext / pgq_node schemas (and
        any subclass-provided extras in self.extra_objs), then commits.
        """
        objs = [
            skytools.DBLanguage("plpgsql"),
            #skytools.DBFunction("txid_current_snapshot", 0, sql_file="txid.sql"),
            skytools.DBSchema("pgq", sql_file="pgq.sql"),
            # upgrade helper in case an older pgq schema is present
            skytools.DBFunction("pgq.get_batch_cursor", 3, sql_file = "pgq.upgrade.2to3.sql"),
            skytools.DBSchema("pgq_ext", sql_file="pgq_ext.sql"), # not needed actually
            skytools.DBSchema("pgq_node", sql_file="pgq_node.sql"),
        ]
        objs += self.extra_objs
        skytools.db_install(db.cursor(), objs, self.log)
        db.commit()
#
# Print status of whole set.
#
    def cmd_status(self):
        """Show set status.

        Queries every member node in parallel (up to 100 threads) and
        prints the cascade as a tree.
        """
        self.load_local_info()

        # prepare data for workers: (name, connstr) pairs
        members = Queue.Queue()
        for m in self.queue_info.member_map.itervalues():
            cstr = self.add_connect_string_profile(m.location, 'remote')
            members.put( (m.name, cstr) )
        nodes = Queue.Queue()

        # launch workers and wait
        num_nodes = len(self.queue_info.member_map)
        # Python 2 integer division: roughly one thread per 4 nodes,
        # clamped to [1, 100]
        num_threads = max (min (num_nodes / 4, 100), 1)
        tlist = []
        for i in range(num_threads):
            t = threading.Thread (target = self._cmd_status_worker, args = (members, nodes))
            t.daemon = True
            t.start()
            tlist.append(t)
        #members.join()
        for t in tlist:
            t.join()

        # collect results pushed by workers
        while True:
            try:
                node = nodes.get_nowait()
            except Queue.Empty:
                break
            self.queue_info.add_node(node)

        self.queue_info.print_tree()
    def _cmd_status_worker (self, members, nodes):
        """Worker thread body for cmd_status().

        Pulls (name, connstr) pairs from *members* until empty, loads each
        node's status and pushes the result onto *nodes*.
        """
        # members in, nodes out, both thread-safe
        while True:
            try:
                node_name, node_connstr = members.get_nowait()
            except Queue.Empty:
                break
            node = self.load_node_status (node_name, node_connstr)
            nodes.put(node)
            members.task_done()
    def load_node_status (self, name, location):
        """ Load node info & status.

        Opens its own short-lived autocommit connection so it can be run
        from status worker threads.  On any DB failure (or a dead node)
        returns a placeholder NodeInfo instead of raising.
        """
        # must be thread-safe (!)
        if not self.node_alive(name):
            node = NodeInfo(self.queue_name, None, node_name = name)
            return node
        try:
            # db pre-set to None so the finally clause is safe even if
            # connect_database() itself fails
            db = None
            db = skytools.connect_database (location)
            db.set_isolation_level (skytools.I_AUTOCOMMIT)
            curs = db.cursor()
            curs.execute("select * from pgq_node.get_node_info(%s)", [self.queue_name])
            node = NodeInfo(self.queue_name, curs.fetchone())
            node.load_status(curs)
            self.load_extra_status(curs, node)
        except DBError, d:
            # keep only the first line of the error for display
            msg = str(d).strip().split('\n', 1)[0].strip()
            print('Node %r failure: %s' % (name, msg))
            node = NodeInfo(self.queue_name, None, node_name = name)
        finally:
            if db: db.close()
        return node
def cmd_node_status(self):
"""
Show status of a local node.
"""
self.load_local_info()
db = self.get_node_database(self.local_node)
curs = db.cursor()
node = self.queue_info.local_node
node.load_status(curs)
self.load_extra_status(curs, node)
subscriber_nodes = self.get_node_subscriber_list(self.local_node)
offset=4*' '
print node.get_title()
print offset+'Provider: %s' % node.provider_node
print offset+'Subscribers: %s' % ', '.join(subscriber_nodes)
for l in node.get_infolines():
print offset+l
def load_extra_status(self, curs, node):
"""Fetch extra info."""
# must be thread-safe (!)
pass
#
# Normal commands.
#
def cmd_change_provider(self):
"""Change node provider."""
self.load_local_info()
self.change_provider(
node = self.options.node,
consumer = self.options.consumer,
new_provider = self.options.provider)
    def node_change_provider(self, node, new_provider):
        """Shortcut: move *node*'s worker under *new_provider*.

        The worker consumer is resolved by change_provider() itself.
        """
        self.change_provider(node, new_provider = new_provider)
    def change_provider(self, node = None, consumer = None, new_provider = None):
        """Re-point *consumer* on *node* to pull from *new_provider*.

        Pauses the consumer, registers it on the new provider, switches
        the provider on the target node, resumes it, then best-effort
        unregisters it from the old provider.  Statement order matters:
        the consumer must be paused before registration moves so no
        events are lost.
        """
        old_provider = None

        if not new_provider:
            raise Exception('Please give --provider')

        if not node or not consumer:
            node, consumer = self.find_consumer(node = node, consumer = consumer)

        if node == new_provider:
            raise UsageError ("cannot subscribe to itself")

        cmap = self.get_node_consumer_map(node)
        cinfo = cmap[consumer]
        old_provider = cinfo['provider_node']

        if old_provider == new_provider:
            self.log.info("Consumer '%s' at node '%s' has already '%s' as provider",
                          consumer, node, new_provider)
            return

        # pause target node
        self.pause_consumer(node, consumer)

        # reload node info (state may have changed while pausing)
        node_db = self.get_node_database(node)
        qinfo = self.load_queue_info(node_db)
        ninfo = qinfo.local_node
        node_location = qinfo.get_member(node).location

        # reload consumer info
        cmap = self.get_node_consumer_map(node)
        cinfo = cmap[consumer]

        # is it node worker or plain consumer?
        is_worker = (ninfo.worker_name == consumer)

        # fixme: expect the node to be described already
        q = "select * from pgq_node.register_location(%s, %s, %s, false)"
        self.node_cmd(new_provider, q, [self.queue_name, node, node_location])

        # subscribe on new provider; workers go through pgq_node,
        # plain consumers through plain pgq registration
        if is_worker:
            q = 'select * from pgq_node.register_subscriber(%s, %s, %s, %s)'
            self.node_cmd(new_provider, q, [self.queue_name, node, consumer, cinfo['last_tick_id']])
        else:
            q = 'select * from pgq.register_consumer_at(%s, %s, %s)'
            self.node_cmd(new_provider, q, [self.queue_name, consumer, cinfo['last_tick_id']])

        # change provider on target node
        q = 'select * from pgq_node.change_consumer_provider(%s, %s, %s)'
        self.node_cmd(node, q, [self.queue_name, consumer, new_provider])

        # done
        self.resume_consumer(node, consumer)

        # unsubscribe from old provider -- best effort only, the switch
        # itself is already complete at this point
        try:
            if is_worker:
                q = "select * from pgq_node.unregister_subscriber(%s, %s)"
                self.node_cmd(old_provider, q, [self.queue_name, node])
            else:
                q = "select * from pgq.unregister_consumer(%s, %s)"
                self.node_cmd(old_provider, q, [self.queue_name, consumer])
        except skytools.DBError, d:
            self.log.warning("failed to unregister from old provider (%s): %s", old_provider, str(d))
    def cmd_rename_node(self, old_name, new_name):
        """Rename node.

        Two-phase rename: step1 creates copies of member/subscriber info
        under the new name on all affected nodes, step2 renames the node
        itself and drops the copies.  The node is paused for the duration.
        """
        self.load_local_info()

        root_db = self.find_root_db()

        # pause target node
        self.pause_node(old_name)

        node = self.load_node_info(old_name)
        provider_node = node.provider_node
        subscriber_list = self.get_node_subscriber_list(old_name)

        # create copy of member info / subscriber+queue info
        step1 = 'select * from pgq_node.rename_node_step1(%s, %s, %s)'
        # rename node itself, drop copies
        step2 = 'select * from pgq_node.rename_node_step2(%s, %s, %s)'

        # step1
        self.exec_cmd(root_db, step1, [self.queue_name, old_name, new_name])
        self.node_cmd(provider_node, step1, [self.queue_name, old_name, new_name])
        self.node_cmd(old_name, step1, [self.queue_name, old_name, new_name])
        for child in subscriber_list:
            self.node_cmd(child, step1, [self.queue_name, old_name, new_name])

        # step2
        self.node_cmd(old_name, step2, [self.queue_name, old_name, new_name])
        # NOTE(review): the provider gets step1 again here while every
        # other peer gets step2 -- looks like it should be step2; confirm
        # against upstream skytools history before changing.
        self.node_cmd(provider_node, step1, [self.queue_name, old_name, new_name])
        for child in subscriber_list:
            self.node_cmd(child, step2, [self.queue_name, old_name, new_name])
        self.exec_cmd(root_db, step2, [self.queue_name, old_name, new_name])

        # resume node
        self.resume_node(old_name)
    def cmd_drop_node(self, node_name):
        """Drop a node.

        Refuses if the node still has subscribers.  Then unregisters the
        node's location at the root, drops the node locally, and finally
        brute-force removes its traces from every member node; each of
        these steps is best-effort (warn and continue on DB errors).
        """
        self.load_local_info()

        node = None
        try:
            node = self.load_node_info(node_name)
            if node:
                # see if we can safely drop
                subscriber_list = self.get_node_subscriber_list(node_name)
                if subscriber_list:
                    raise UsageError('node still has subscribers')
        except skytools.DBError:
            # node may already be unreachable; proceed with removal
            pass

        try:
            # unregister node location from root node (event will be added to queue)
            if node and node.type == 'root':
                # the root cannot unregister itself
                pass
            else:
                root_db = self.find_root_db()
                q = "select * from pgq_node.unregister_location(%s, %s)"
                self.exec_cmd(root_db, q, [self.queue_name, node_name])
        except skytools.DBError, d:
            self.log.warning("Unregister from root failed: %s", str(d))

        try:
            # drop node info
            db = self.get_node_database(node_name)
            q = "select * from pgq_node.drop_node(%s, %s)"
            self.exec_cmd(db, q, [self.queue_name, node_name])
        except skytools.DBError, d:
            self.log.warning("Local drop failure: %s", str(d))

        # brute force removal on all remaining members
        for n in self.queue_info.member_map.values():
            try:
                q = "select * from pgq_node.drop_node(%s, %s)"
                self.node_cmd(n.name, q, [self.queue_name, node_name])
            except skytools.DBError, d:
                self.log.warning("Failed to remove from '%s': %s", n.name, str(d))
def node_depends(self, sub_node, top_node):
cur_node = sub_node
# walk upstream
while 1:
info = self.get_node_info(cur_node)
if cur_node == top_node:
# yes, top_node is sub_node's provider
return True
if info.type == 'root':
# found root, no dependancy
return False
# step upwards
cur_node = info.provider_node
def demote_node(self, oldnode, step, newnode):
"""Downgrade old root?"""
q = "select * from pgq_node.demote_root(%s, %s, %s)"
res = self.node_cmd(oldnode, q, [self.queue_name, step, newnode])
if res:
return res[0]['last_tick']
def promote_branch(self, node):
"""Promote old branch as root."""
q = "select * from pgq_node.promote_branch(%s)"
self.node_cmd(node, q, [self.queue_name])
    def wait_for_catchup(self, new, last_tick):
        """Wait until node *new* has processed tick *last_tick*.

        Resumes the node first if it is paused, then polls once a second.
        Returns the node's NodeInfo once caught up.
        """
        # wait for it on subscriber
        info = self.load_node_info(new)
        if info.completed_tick >= last_tick:
            self.log.info('tick already exists')
            return info
        if info.paused:
            # a paused worker would never catch up
            self.log.info('new node seems paused, resuming')
            self.resume_node(new)
        while 1:
            self.log.debug('waiting for catchup: need=%d, cur=%d', last_tick, info.completed_tick)
            time.sleep(1)
            info = self.load_node_info(new)
            if info.completed_tick >= last_tick:
                return info
    def takeover_root(self, old_node_name, new_node_name, failover = False):
        """Root switchover.

        Live path: demote the old root, wait for the new node to catch
        up.  Dead path: find the member with the most ticks and pull
        remaining batches from it first.  Then promote the new node,
        swap subscriber registrations and resume.
        NOTE(review): the *failover* flag is not used inside this method;
        liveness is re-checked via node_alive() instead.
        """
        new_info = self.get_node_info(new_node_name)
        old_info = None

        if self.node_alive(old_node_name):
            # old root works, switch properly
            old_info = self.get_node_info(old_node_name)
            self.pause_node(old_node_name)
            self.demote_node(old_node_name, 1, new_node_name)
            last_tick = self.demote_node(old_node_name, 2, new_node_name)
            self.wait_for_catchup(new_node_name, last_tick)
        else:
            # find latest tick on local node
            q = "select * from pgq.get_queue_info(%s)"
            db = self.get_node_database(new_node_name)
            curs = db.cursor()
            curs.execute(q, [self.queue_name])
            row = curs.fetchone()
            last_tick = row['last_tick_id']
            db.commit()

            # find if any other node has more ticks
            other_node = None
            other_tick = last_tick
            sublist = self.find_subscribers_for(old_node_name)
            for n in sublist:
                q = "select * from pgq_node.get_node_info(%s)"
                rows = self.node_cmd(n, q, [self.queue_name])
                info = rows[0]
                if info['worker_last_tick'] > other_tick:
                    other_tick = info['worker_last_tick']
                    other_node = n

            # if yes, load batches from there before promoting
            if other_node:
                self.change_provider(new_node_name, new_provider = other_node)
                self.wait_for_catchup(new_node_name, other_tick)
                last_tick = other_tick

        # promote new root
        self.pause_node(new_node_name)
        self.promote_branch(new_node_name)

        # register old root on new root as subscriber; for a dead old
        # root a "gravestone" consumer records the failover position
        if self.node_alive(old_node_name):
            old_worker_name = old_info.worker_name
        else:
            old_worker_name = self.failover_consumer_name(old_node_name)
        q = 'select * from pgq_node.register_subscriber(%s, %s, %s, %s)'
        self.node_cmd(new_node_name, q, [self.queue_name, old_node_name, old_worker_name, last_tick])

        # unregister new root from old root
        q = "select * from pgq_node.unregister_subscriber(%s, %s)"
        self.node_cmd(new_info.provider_node, q, [self.queue_name, new_node_name])

        # launch new node
        self.resume_node(new_node_name)

        # demote & launch old node
        if self.node_alive(old_node_name):
            self.demote_node(old_node_name, 3, new_node_name)
            self.resume_node(old_node_name)
    def takeover_nonroot(self, old_node_name, new_node_name, failover):
        """Non-root switchover.

        If the new node currently receives data (transitively) from the
        old node, swap their positions in the cascade.  Otherwise nothing
        is done here -- the caller moves remaining subscribers.
        """
        if self.node_depends(new_node_name, old_node_name):
            # yes, old_node is new_nodes provider,
            # switch it around
            pnode = self.find_provider(old_node_name)
            self.node_change_provider(new_node_name, pnode)
        self.node_change_provider(old_node_name, new_node_name)
    def cmd_takeover(self, old_node_name):
        """Generic node switchover.

        The new node is taken from --node, or defaults to the local node
        when --consumer names the local worker.  --dead-root/--dead-branch
        force failover mode for an unreachable old node.
        """
        self.log.info("old: %s", old_node_name)
        self.load_local_info()
        new_node_name = self.options.node
        if not new_node_name:
            worker = self.options.consumer
            if not worker:
                raise UsageError('old node not given')
            if self.queue_info.local_node.worker_name != worker:
                raise UsageError('old node not given')
            new_node_name = self.local_node

        if not old_node_name:
            raise UsageError('old node not given')

        if old_node_name not in self.queue_info.member_map:
            raise UsageError('Unknown node: %s' % old_node_name)

        # determine old node type and whether this is a failover
        if self.options.dead_root:
            otype = 'root'
            failover = True
        elif self.options.dead_branch:
            otype = 'branch'
            failover = True
        else:
            onode = self.get_node_info(old_node_name)
            otype = onode.type
            failover = False

        if failover:
            # mark the node dead everywhere before taking over
            self.cmd_tag_dead(old_node_name)

        new_node = self.get_node_info(new_node_name)
        if old_node_name == new_node.name:
            self.log.info("same node?")
            return

        if otype == 'root':
            self.takeover_root(old_node_name, new_node_name, failover)
        else:
            self.takeover_nonroot(old_node_name, new_node_name, failover)

        # switch subscribers around
        if self.options.all or failover:
            for n in self.find_subscribers_for(old_node_name):
                if n != new_node_name:
                    self.node_change_provider(n, new_node_name)
    def find_provider(self, node_name):
        """Return the provider of *node_name*.

        For a live node, ask it directly.  For a dead node, search live
        members for one that lists it as subscriber; fall back to the
        root node.
        """
        if self.node_alive(node_name):
            info = self.get_node_info(node_name)
            return info.provider_node
        nodelist = self.queue_info.member_map.keys()
        for n in nodelist:
            if n == node_name:
                continue
            if not self.node_alive(n):
                continue
            if node_name in self.get_node_subscriber_list(n):
                return n
        return self.find_root_node()
def find_subscribers_for(self, parent_node_name):
"""Find subscribers for particular node."""
# use dict to eliminate duplicates
res = {}
nodelist = self.queue_info.member_map.keys()
for node_name in nodelist:
if node_name == parent_node_name:
continue
if not self.node_alive(node_name):
continue
n = self.get_node_info(node_name)
if not n:
continue
if n.provider_node == parent_node_name:
res[n.name] = 1
return res.keys()
    def cmd_tag_dead(self, dead_node_name):
        """Mark *dead_node_name* as dead: in memory, on the local node
        and best-effort on every other live member node.
        """
        self.load_local_info()

        # tag node dead in memory
        self.log.info("Tagging node '%s' as dead", dead_node_name)
        self.queue_info.tag_dead(dead_node_name)

        # tag node dead in local node
        q = "select * from pgq_node.register_location(%s, %s, null, true)"
        self.node_cmd(self.local_node, q, [self.queue_name, dead_node_name])

        # tag node dead in other nodes
        nodelist = self.queue_info.member_map.keys()
        for node_name in nodelist:
            if not self.node_alive(node_name):
                continue
            if node_name == dead_node_name:
                continue
            if node_name == self.local_node:
                continue
            try:
                q = "select * from pgq_node.register_location(%s, %s, null, true)"
                self.node_cmd(node_name, q, [self.queue_name, dead_node_name])
            except DBError, d:
                # report first line of the error and drop the connection
                msg = str(d).strip().split('\n', 1)[0]
                print('Node %s failure: %s' % (node_name, msg))
                self.close_node_database(node_name)
def cmd_pause(self):
"""Pause a node"""
self.load_local_info()
node, consumer = self.find_consumer()
self.pause_consumer(node, consumer)
def cmd_resume(self):
"""Resume a node from pause."""
self.load_local_info()
node, consumer = self.find_consumer()
self.resume_consumer(node, consumer)
def cmd_members(self):
"""Show member list."""
self.load_local_info()
db = self.get_database(self.initial_db_name)
desc = 'Member info on %s@%s:' % (self.local_node, self.queue_name)
q = "select node_name, dead, node_location"\
" from pgq_node.get_queue_locations(%s) order by 1"
self.display_table(db, desc, q, [self.queue_name])
    def cmd_node_info(self):
        """Print a summary of the local node: type, status flags,
        provider and connect strings.
        """
        self.load_local_info()
        q = self.queue_info
        n = q.local_node
        m = q.get_member(n.name)

        # collect status flags; empty list means everything is fine
        stlist = []
        if m.dead:
            stlist.append('DEAD')
        if n.paused:
            stlist.append("PAUSED")
        if not n.uptodate:
            stlist.append("NON-UP-TO-DATE")
        st = ', '.join(stlist)
        if not st:
            st = 'OK'
        print('Node: %s  Type: %s  Queue: %s' % (n.name, n.type, q.queue_name))
        print('Status: %s' % st)
        if n.type != 'root':
            print('Provider: %s' % n.provider_node)
        else:
            print('Provider: --')
        print('Connect strings:')
        print('  Local   : %s' % self.cf.get('db'))
        print('  Public  : %s' % m.location)
        if n.type != 'root':
            print('  Provider: %s' % n.provider_location)
        if n.combined_queue:
            print('Combined Queue: %s  (node type: %s)' % (n.combined_queue, n.combined_type))
def cmd_wait_root(self):
"""Wait for next tick from root."""
self.load_local_info()
if self.queue_info.local_node.type == 'root':
self.log.info("Current node is root, no need to wait")
return
self.log.info("Finding root node")
root_node = self.find_root_node()
self.log.info("Root is %s", root_node)
dst_db = self.get_database(self.initial_db_name)
self.wait_for_node(dst_db, root_node)
def cmd_wait_provider(self):
"""Wait for next tick from provider."""
self.load_local_info()
if self.queue_info.local_node.type == 'root':
self.log.info("Current node is root, no need to wait")
return
dst_db = self.get_database(self.initial_db_name)
node = self.queue_info.local_node.provider_node
self.log.info("Provider is %s", node)
self.wait_for_node(dst_db, node)
def wait_for_node(self, dst_db, node_name):
"""Core logic for waiting."""
self.log.info("Fetching last tick for %s", node_name)
node_info = self.load_node_info(node_name)
tick_id = node_info.last_tick
self.log.info("Waiting for tick > %d", tick_id)
q = "select * from pgq_node.get_node_info(%s)"
dst_curs = dst_db.cursor()
while 1:
dst_curs.execute(q, [self.queue_name])
row = dst_curs.fetchone()
dst_db.commit()
if row['ret_code'] >= 300:
self.log.warning("Problem: %s", row['ret_code'], row['ret_note'])
return
if row['worker_last_tick'] > tick_id:
self.log.info("Got tick %d, exiting", row['worker_last_tick'])
break
self.sleep(2)
def cmd_resurrect(self):
"""Convert out-of-sync old root to branch and sync queue contents.
"""
self.load_local_info()
db = self.get_database(self.initial_db_name)
curs = db.cursor()
# stop if leaf
if self.queue_info.local_node.type == 'leaf':
self.log.info("Current node is leaf, nothing to do")
return
# stop if dump file exists
if os.path.lexists(RESURRECT_DUMP_FILE):
self.log.error("Dump file exists, cannot perform resurrection: %s", RESURRECT_DUMP_FILE)
sys.exit(1)
#
# Find failover position
#
self.log.info("** Searching for gravestone **")
# load subscribers
sub_list = []
q = "select * from pgq_node.get_subscriber_info(%s)"
curs.execute(q, [self.queue_name])
for row in curs.fetchall():
sub_list.append(row['node_name'])
db.commit()
# find backup subscription
this_node = self.queue_info.local_node.name
failover_cons = self.failover_consumer_name(this_node)
full_list = self.queue_info.member_map.keys()
done_nodes = { this_node: 1 }
prov_node = None
root_node = None
for node_name in sub_list + full_list:
if node_name in done_nodes:
continue
done_nodes[node_name] = 1
if not self.node_alive(node_name):
self.log.info('Node %s is dead, skipping', node_name)
continue
self.log.info('Looking on node %s', node_name)
node_db = None
try:
node_db = self.get_node_database(node_name)
node_curs = node_db.cursor()
node_curs.execute("select * from pgq.get_consumer_info(%s, %s)", [self.queue_name, failover_cons])
cons_rows = node_curs.fetchall()
node_curs.execute("select * from pgq_node.get_node_info(%s)", [self.queue_name])
node_info = node_curs.fetchone()
node_db.commit()
if len(cons_rows) == 1:
if prov_node:
raise Exception('Unexpected situation: there are two gravestones - on nodes %s and %s' % (prov_node, node_name))
prov_node = node_name
failover_tick = cons_rows[0]['last_tick']
self.log.info("Found gravestone on node: %s", node_name)
if node_info['node_type'] == 'root':
self.log.info("Found new root node: %s", node_name)
root_node = node_name
self.close_node_database(node_name)
node_db = None
if root_node and prov_node:
break
except skytools.DBError:
self.log.warning("failed to check node %s", node_name)
if node_db:
self.close_node_database(node_name)
node_db = None
if not root_node:
self.log.error("Cannot find new root node", failover_cons)
sys.exit(1)
if not prov_node:
self.log.error("Cannot find failover position (%s)", failover_cons)
sys.exit(1)
# load worker state
q = "select * from pgq_node.get_worker_state(%s)"
rows = self.exec_cmd(db, q, [self.queue_name])
state = rows[0]
# demote & pause
self.log.info("** Demote & pause local node **")
if self.queue_info.local_node.type == 'root':
self.log.info('Node %s is root, demoting', this_node)
q = "select * from pgq_node.demote_root(%s, %s, %s)"
self.exec_cmd(db, q, [self.queue_name, 1, prov_node])
self.exec_cmd(db, q, [self.queue_name, 2, prov_node])
# change node type and set worker paused in same TX
curs = db.cursor()
self.exec_cmd(curs, q, [self.queue_name, 3, prov_node])
q = "select * from pgq_node.set_consumer_paused(%s, %s, true)"
self.exec_cmd(curs, q, [self.queue_name, state['worker_name']])
db.commit()
elif not state['paused']:
# pause worker, don't wait for reaction, as it may be dead
self.log.info('Node %s is branch, pausing worker: %s', this_node, state['worker_name'])
q = "select * from pgq_node.set_consumer_paused(%s, %s, true)"
self.exec_cmd(db, q, [self.queue_name, state['worker_name']])
else:
self.log.info('Node %s is branch and worker is paused', this_node)
#
# Drop old consumers and subscribers
#
self.log.info("** Dropping old subscribers and consumers **")
# unregister subscriber nodes
q = "select pgq_node.unregister_subscriber(%s, %s)"
for node_name in sub_list:
self.log.info("Dropping old subscriber node: %s", node_name)
curs.execute(q, [self.queue_name, node_name])
# unregister consumers
q = "select consumer_name from pgq.get_consumer_info(%s)"
curs.execute(q, [self.queue_name])
for row in curs.fetchall():
cname = row['consumer_name']
if cname[0] == '.':
self.log.info("Keeping consumer: %s", cname)
continue
self.log.info("Dropping old consumer: %s", cname)
q = "pgq.unregister_consumer(%s, %s)"
curs.execute(q, [self.queue_name, cname])
db.commit()
# dump events
self.log.info("** Dump & delete lost events **")
stats = self.resurrect_process_lost_events(db, failover_tick)
self.log.info("** Subscribing %s to %s **", this_node, prov_node)
# set local position
self.log.info("Reset local completed pos")
q = "select * from pgq_node.set_consumer_completed(%s, %s, %s)"
self.exec_cmd(db, q, [self.queue_name, state['worker_name'], failover_tick])
# rename gravestone
self.log.info("Rename gravestone to worker: %s", state['worker_name'])
prov_db = self.get_node_database(prov_node)
prov_curs = prov_db.cursor()
q = "select * from pgq_node.unregister_subscriber(%s, %s)"
self.exec_cmd(prov_curs, q, [self.queue_name, this_node], quiet = True)
q = "select ret_code, ret_note, global_watermark"\
" from pgq_node.register_subscriber(%s, %s, %s, %s)"
res = self.exec_cmd(prov_curs, q, [self.queue_name, this_node, state['worker_name'], failover_tick], quiet = True)
global_wm = res[0]['global_watermark']
prov_db.commit()
# import new global watermark
self.log.info("Reset global watermark")
q = "select * from pgq_node.set_global_watermark(%s, %s)"
self.exec_cmd(db, q, [self.queue_name, global_wm], quiet = True)
# show stats
if stats:
self.log.info("** Statistics **")
klist = stats.keys()
klist.sort()
for k in klist:
v = stats[k]
self.log.info(" %s: %s", k, v)
self.log.info("** Resurrection done, worker paused **")
    def resurrect_process_lost_events(self, db, failover_tick):
        """Dump to file, then delete, all events after *failover_tick*.

        Registers a temporary consumer at the failover position, walks
        all remaining batches dumping each event as JSON, then deletes
        the dumped events and the now-obsolete ticks from the queue
        tables.  Returns a stats dict, or None when there was nothing
        after the failover tick.
        """
        curs = db.cursor()
        this_node = self.queue_info.local_node.name
        cons_name = this_node + '.dumper'

        self.log.info("Dumping lost events")

        # register temp consumer on queue
        q = "select pgq.register_consumer_at(%s, %s, %s)"
        curs.execute(q, [self.queue_name, cons_name, failover_tick])
        db.commit()

        # process events as usual
        total_count = 0
        final_tick_id = -1
        stats = {}
        while 1:
            q = "select * from pgq.next_batch_info(%s, %s)"
            curs.execute(q, [self.queue_name, cons_name])
            b = curs.fetchone()
            batch_id = b['batch_id']
            if batch_id is None:
                # no more batches
                break
            final_tick_id = b['cur_tick_id']
            q = "select * from pgq.get_batch_events(%s)"
            curs.execute(q, [batch_id])
            cnt = 0
            for ev in curs.fetchall():
                cnt += 1
                total_count += 1
                self.resurrect_dump_event(ev, stats, b)

            q = "select pgq.finish_batch(%s)"
            curs.execute(q, [batch_id])
            if cnt > 0:
                db.commit()

        stats['dumped_count'] = total_count
        self.resurrect_dump_finish()
        self.log.info("%s events dumped", total_count)

        # unregister consumer
        q = "select pgq.unregister_consumer(%s, %s)"
        curs.execute(q, [self.queue_name, cons_name])
        db.commit()

        if failover_tick == final_tick_id:
            self.log.info("No batches found")
            return None

        #
        # Delete the events from queue
        #
        # This is done snapshots, to make sure we delete only events
        # that were dumped out previously. This uses the long-tx
        # resistant logic described in pgq.batch_event_sql().
        #

        # find snapshots
        q = "select t1.tick_snapshot as s1, t2.tick_snapshot as s2"\
            " from pgq.tick t1, pgq.tick t2"\
            " where t1.tick_id = %s"\
            "   and t2.tick_id = %s"
        curs.execute(q, [failover_tick, final_tick_id])
        ticks = curs.fetchone()
        s1 = skytools.Snapshot(ticks['s1'])
        s2 = skytools.Snapshot(ticks['s2'])

        # txids active at s1 but committed by s2
        xlist = []
        for tx in s1.txid_list:
            if s2.contains(tx):
                xlist.append(str(tx))

        # create where clauses
        W1 = None
        if len(xlist) > 0:
            W1 = "ev_txid in (%s)" % (",".join(xlist),)
        W2 = "ev_txid >= %d AND ev_txid <= %d"\
             " and not txid_visible_in_snapshot(ev_txid, '%s')"\
             " and txid_visible_in_snapshot(ev_txid, '%s')" % (
             s1.xmax, s2.xmax, ticks['s1'], ticks['s2'])

        # loop over all queue data tables
        q = "select * from pgq.queue where queue_name = %s"
        curs.execute(q, [self.queue_name])
        row = curs.fetchone()
        ntables = row['queue_ntables']
        tbl_pfx = row['queue_data_pfx']
        schema, table = tbl_pfx.split('.')

        # delete the events from each rotation table
        total_del_count = 0
        self.log.info("Deleting lost events")
        for i in range(ntables):
            del_count = 0
            self.log.debug("Deleting events from table %d", i)
            qtbl = "%s.%s" % (skytools.quote_ident(schema),
                              skytools.quote_ident(table + '_' + str(i)))
            q = "delete from " + qtbl + " where "
            if W1:
                self.log.debug(q + W1)
                curs.execute(q + W1)
                if curs.rowcount and curs.rowcount > 0:
                    del_count += curs.rowcount
            self.log.debug(q + W2)
            curs.execute(q + W2)
            if curs.rowcount and curs.rowcount > 0:
                del_count += curs.rowcount
            total_del_count += del_count
            self.log.debug('%d events deleted', del_count)
        self.log.info('%d events deleted', total_del_count)
        stats['deleted_count'] = total_del_count

        # delete new ticks
        q = "delete from pgq.tick t using pgq.queue q"\
            " where q.queue_name = %s"\
            "   and t.tick_queue = q.queue_id"\
            "   and t.tick_id > %s"\
            "   and t.tick_id <= %s"
        curs.execute(q, [self.queue_name, failover_tick, final_tick_id])
        self.log.info("%s ticks deleted", curs.rowcount)

        db.commit()

        return stats
    # handle to the JSON dump file; opened lazily on first event
    _json_dump_file = None
    def resurrect_dump_event(self, ev, stats, batch_info):
        """Append one lost event as JSON to the resurrect dump file.

        Opens the dump file (and the JSON array) on first call; each
        event is written prefixed with ',' or '[' so the file forms one
        JSON list once resurrect_dump_finish() closes it.
        """
        if self._json_dump_file is None:
            self._json_dump_file = open(RESURRECT_DUMP_FILE, 'w')
            sep = '['
        else:
            sep = ','

        # create ordinary dict to avoid problems with row class and datetime
        d = {
            'ev_id': ev.ev_id,
            'ev_type': ev.ev_type,
            'ev_data': ev.ev_data,
            'ev_extra1': ev.ev_extra1,
            'ev_extra2': ev.ev_extra2,
            'ev_extra3': ev.ev_extra3,
            'ev_extra4': ev.ev_extra4,
            'ev_time': ev.ev_time.isoformat(),
            'ev_txid': ev.ev_txid,
            'ev_retry': ev.ev_retry,
            'tick_id': batch_info['cur_tick_id'],
            'prev_tick_id': batch_info['prev_tick_id'],
        }
        jsev = skytools.json_encode(d)
        s = sep + '\n' + jsev
        self._json_dump_file.write(s)
def resurrect_dump_finish(self):
if self._json_dump_file:
self._json_dump_file.write('\n]\n')
self._json_dump_file.close()
self._json_dump_file = None
def failover_consumer_name(self, node_name):
return node_name + ".gravestone"
#
# Shortcuts for operating on nodes.
#
def load_local_info(self):
"""fetch set info from local node."""
db = self.get_database(self.initial_db_name)
self.queue_info = self.load_queue_info(db)
self.local_node = self.queue_info.local_node.name
    def get_node_database(self, node_name):
        """Connect to node.

        Returns a cached connection for the node, or None when the node
        is marked dead.  Exits the process for unknown node names.
        """
        if node_name == self.queue_info.local_node.name:
            # local node uses the configured connection
            db = self.get_database(self.initial_db_name)
        else:
            m = self.queue_info.get_member(node_name)
            if not m:
                self.log.error("get_node_database: cannot resolve %s", node_name)
                sys.exit(1)
            #self.log.info("%s: dead=%s", m.name, m.dead)
            if m.dead:
                return None
            loc = m.location
            db = self.get_database('node.' + node_name, connstr = loc, profile = 'remote')
        return db
def node_alive(self, node_name):
m = self.queue_info.get_member(node_name)
if not m:
res = False
elif m.dead:
res = False
else:
res = True
#self.log.warning('node_alive(%s) = %s', node_name, res)
return res
def close_node_database(self, node_name):
"""Disconnect node's connection."""
if node_name == self.queue_info.local_node.name:
self.close_database(self.initial_db_name)
else:
self.close_database("node." + node_name)
def node_cmd(self, node_name, sql, args, quiet = False):
"""Execute SQL command on particular node."""
db = self.get_node_database(node_name)
if not db:
self.log.warning("ignoring cmd for dead node '%s': %s",
node_name, skytools.quote_statement(sql, args))
return None
return self.exec_cmd(db, sql, args, quiet = quiet, prefix=node_name)
#
# Various operation on nodes.
#
    def set_paused(self, node, consumer, pause_flag):
        """Set node pause flag and wait for confirmation.

        Sends the pause/resume request, then polls once a second until
        the consumer reports uptodate with the requested flag.  Raises
        if the flag is changed underneath us by someone else.
        """
        q = "select * from pgq_node.set_consumer_paused(%s, %s, %s)"
        self.node_cmd(node, q, [self.queue_name, consumer, pause_flag])

        self.log.info('Waiting for worker to accept')
        while 1:
            q = "select * from pgq_node.get_consumer_state(%s, %s)"
            stat = self.node_cmd(node, q, [self.queue_name, consumer], quiet = 1)[0]
            if stat['paused'] != pause_flag:
                # somebody else changed the flag concurrently
                raise Exception('operation canceled? %s <> %s' % (repr(stat['paused']), repr(pause_flag)))

            if stat['uptodate']:
                op = pause_flag and "paused" or "resumed"
                self.log.info("Consumer '%s' on node '%s' %s", consumer, node, op)
                return
            time.sleep(1)
        # unreachable: loop exits only via return/raise
        raise Exception('process canceled')
def pause_consumer(self, node, consumer):
"""Shortcut for pausing by name."""
self.set_paused(node, consumer, True)
def resume_consumer(self, node, consumer):
"""Shortcut for resuming by name."""
self.set_paused(node, consumer, False)
def pause_node(self, node):
"""Shortcut for pausing by name."""
state = self.get_node_info(node)
self.pause_consumer(node, state.worker_name)
def resume_node(self, node):
"""Shortcut for resuming by name."""
state = self.get_node_info(node)
if state:
self.resume_consumer(node, state.worker_name)
def subscribe_node(self, target_node, subscriber_node, tick_pos):
"""Subscribing one node to another."""
q = "select * from pgq_node.subscribe_node(%s, %s, %s)"
self.node_cmd(target_node, q, [self.queue_name, subscriber_node, tick_pos])
def unsubscribe_node(self, target_node, subscriber_node):
"""Unsubscribing one node from another."""
q = "select * from pgq_node.unsubscribe_node(%s, %s)"
self.node_cmd(target_node, q, [self.queue_name, subscriber_node])
    # NOTE(review): class-level mutable dict, so the cache is shared by
    # all instances in the process -- presumably intentional for this
    # one-shot admin tool, but worth confirming.
    _node_cache = {}
    def get_node_info(self, node_name):
        """Cached node info lookup.

        Returns the cached NodeInfo for *node_name*, loading and caching
        it on first use.  Dead-node lookups (None) are cached too.
        """
        if node_name in self._node_cache:
            return self._node_cache[node_name]
        inf = self.load_node_info(node_name)
        self._node_cache[node_name] = inf
        return inf
    def load_node_info(self, node_name):
        """Non-cached node info lookup.

        Returns a NodeInfo for *node_name*, or None when the node is
        marked dead (no connection available).
        """
        db = self.get_node_database(node_name)
        if not db:
            self.log.warning('load_node_info(%s): ignoring dead node', node_name)
            return None
        q = "select * from pgq_node.get_node_info(%s)"
        rows = self.exec_query(db, q, [self.queue_name])
        return NodeInfo(self.queue_name, rows[0])
    def load_queue_info(self, db):
        """Non-cached set info lookup.

        Builds a QueueInfo from the node info and member list fetched
        over *db*, applying any --dead overrides from the command line.
        """
        res = self.exec_query(db, "select * from pgq_node.get_node_info(%s)", [self.queue_name])
        info = res[0]
        q = "select * from pgq_node.get_queue_locations(%s)"
        member_list = self.exec_query(db, q, [self.queue_name])
        qinf = QueueInfo(self.queue_name, info, member_list)
        if self.options.dead:
            # nodes forced dead via --dead on the command line
            for node in self.options.dead:
                self.log.info("Assuming node '%s' as dead", node)
                qinf.tag_dead(node)
        return qinf
def get_node_subscriber_list(self, node_name):
"""Fetch subscriber list from a node."""
q = "select node_name, node_watermark from pgq_node.get_subscriber_info(%s)"
db = self.get_node_database(node_name)
rows = self.exec_query(db, q, [self.queue_name])
return [r['node_name'] for r in rows]
def get_node_consumer_map(self, node_name):
"""Fetch consumer list from a node."""
q = "select consumer_name, provider_node, last_tick_id from pgq_node.get_consumer_info(%s)"
db = self.get_node_database(node_name)
rows = self.exec_query(db, q, [self.queue_name])
res = {}
for r in rows:
res[r['consumer_name']] = r
return res
if __name__ == '__main__':
    # Run the cascade admin tool directly; worker_setup=False because this
    # is an interactive admin script, not a worker daemon.
    script = CascadeAdmin('setadm', 'node_db', sys.argv[1:], worker_setup = False)
    script.start()
| |
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.rc('text', usetex=True)
DIR = '/Users/jltoole/Documents/Projects/geo_social/'
class Model():
    """Agent-based mobility model with social influence.

    Agents on a social graph choose locations via a preferential-return /
    exploration process; with probability ``a`` (per-agent, drawn from an
    exponential) an agent copies a friend's location instead.
    Python 2 / networkx 1.x code (print statements, G.node, nodes_iter).
    """
    def __init__(self, u, l, c, s, a):
        # u: #users, l: #locations, c: mean #contacts, s: #steps,
        # a: scale of the per-agent social-influence probability.
        self.NUSERS = u
        self.NLOCS = l
        self.NCONTACTS = c
        self.NSTEPS = s
        self.G = nx.Graph()
        # CONSTANTS (exploration probability rho*S^-gamma, etc.)
        self.rho = 0.6
        self.gamma = 0.21
        self.alpha = a
        self.beta = 0.8
        self.tau = 17.0; # exponential cutoff on time between calls
        self.xmin = 1.0;
    def reset(self):
        """Rebuild the graph: lognormal degrees, random initial locations."""
        print 'Initializing Graph...'
        self.G = nx.Graph()
        nodes = np.arange(self.NUSERS)
        degs = np.random.lognormal(np.log(self.NCONTACTS),0.3, self.NUSERS)
        # add nodes
        # assign them random locations to start
        a = np.arange(self.NLOCS,dtype=float)
        p = a/np.sum(a)
        p = np.cumsum(p)
        for i in nodes:
            # S counts distinct visited locations; 'a' is the social weight.
            self.G.add_node(i, lvec=np.zeros(self.NLOCS,dtype=int), a=np.random.exponential(scale=self.alpha), S=3)
            for l in xrange(3):
                r = np.random.rand(3)
                l = np.digitize(r, p)
                #self.G.node[i]['lvec'][l] = 1
                self.G.node[i]['lvec'][np.random.randint(self.NLOCS)] = 1
        # connect the network (configuration-model style stub matching,
        # bounded retries to avoid infinite loops on exhausted stubs)
        for i in nodes:
            stubs_left = degs[i] - self.G.degree(i)
            if stubs_left > 0:
                nbrs = []
                while len(nbrs) < stubs_left:
                    tries = 0
                    j = nodes[np.random.randint(self.NUSERS)]
                    while (((degs[j] - self.G.degree(j) <= 0) or (i==j)) and (tries < 1000)) :
                        j = nodes[np.random.randint(self.NUSERS)]
                        tries += 1
                    nbrs.append(j)
                edges = [ (i,j, {'sim':None}) for j in nbrs ]
                self.G.add_edges_from(edges)
            if i%(self.NUSERS/10) == 0:
                print i, self.NUSERS
        '''
        self.G = nx.newman_watts_strogatz_graph(self.NUSERS,self.NCONTACTS,0.9)
        for i in self.G.nodes_iter():
            self.G.add_node(i, lvec=np.zeros(self.NLOCS,dtype=int), S=2)
            for l in xrange(2):
                self.G.node[i]['lvec'][np.random.randint(self.NLOCS)] = 1
        for e in self.G.edges_iter():
            self.G[e[0]][e[1]]['sim'] = None
        '''
    def get_return_location(self, u):
        ''' choose a location to return to based on a preferential attachment model
        '''
        lvec = self.G.node[u]['lvec']
        p = np.cumsum(lvec)/float(np.sum(lvec))
        r = np.random.rand()
        return np.digitize( [r], p )[0]
    def get_randomuser_location(self, u):
        # Uniformly pick one of u's already-visited locations.
        I = np.where( self.G.node[u]['lvec'] > 0)[0]
        return I[np.random.randint(len(I))]
    def get_friend_location(self, u):
        ''' pick a friend and choose one of their locations both based on pref. attachment
        '''
        p = np.arange(1,self.G.degree(u))
        p = np.cumsum(p)/float(np.sum(p))
        r = np.random.rand()
        f = np.digitize( [r], p )[0]
        return self.get_return_location(self.G.neighbors(u)[f])
        #return self.get_randomuser_location(self.G.neighbors(u)[f])
    def get_citywide_location(self,p):
        # Sample a location from the citywide cumulative distribution p.
        r = np.random.rand()
        return np.digitize( [r], p )[0]
    def get_random_location(self):
        return np.random.randint(self.NLOCS)
    def get_citywide_visits(self):
        # Fraction of all visits falling on each location.
        lvecs = np.array(nx.get_node_attributes(self.G,'lvec').values())
        p = np.sum(lvecs,0).astype(float)/np.sum(lvecs)
        return p
    def run(self):
        """Advance the model NSTEPS steps, updating each agent's lvec."""
        print 'Running Model...'
        for t in xrange(self.NSTEPS):
            nextlocs = np.zeros(self.NUSERS)
            p = np.cumsum(self.get_citywide_visits())
            for u in xrange(self.NUSERS):
                r = np.random.rand()
                tries = 0
                l = None
                if r > self.rho*self.G.node[u]['S']**(-self.gamma):
                    # preferential return (possibly via a friend)
                    r = np.random.rand()
                    if r > self.G.node[u]['a']:
                        l = self.get_return_location(u)
                    else:
                        l = self.get_friend_location(u)
                        # retry until a location u has already visited
                        while (self.G.node[u]['lvec'][l] == 0) and (tries < 100):
                            l = self.get_friend_location(u)
                            tries += 1
                else:
                    # explore: retry until a location u has NOT visited
                    if r > self.G.node[u]['a']:
                        #l = self.get_citywide_location(p)
                        l = self.get_random_location()
                        while (self.G.node[u]['lvec'][l] > 0) and (tries < 100):
                            #l = self.get_citywide_location(p)
                            l = self.get_random_location()
                    else:
                        l = self.get_friend_location(u)
                        while (self.G.node[u]['lvec'][l] > 0) and (tries < 100):
                            #l = self.get_citywide_location(p)
                            l = self.get_friend_location(u)
                            tries += 1
                    # exploration found a new place -> grow distinct count
                    self.G.node[u]['S'] += 1
                nextlocs[u] = l
            # update users
            for u in xrange(self.NUSERS):
                self.G.node[u]['lvec'][nextlocs[u]] += 1
            print "Step: ", t
    def calculate_similarity(self):
        """Store cosine similarity of visit vectors on every edge."""
        print 'Calculating Similarity...'
        for i in self.G.nodes_iter():
            l1 = self.G.node[i]['lvec']
            for j in self.G.neighbors(i):
                l2 = self.G.node[j]['lvec']
                self.G.edge[i][j]['sim'] = cosine_similarity( l1,l2 )
def cosine_similarity( u, v ):
    """Cosine of the angle between vectors *u* and *v*."""
    a = np.asarray(u).astype(float)
    b = np.asarray(v).astype(float)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denom
def figure1(nets, leg=None, outfile=None):
    """Plot edge cosine-similarity distributions and visit-rank curves.

    :param nets: iterable of graphs whose edges carry 'sim' and whose
                 nodes carry 'lvec' attributes
    :param leg: legend labels, one per graph (default ['Data', 'Randomized'])
    :param outfile: basename under DIR/figures/ to save the figure to

    Fix: the original used a mutable default ``leg=['Data','Randomized']``
    and then mutated it via ``leg.append('Randomized')``, so the default
    legend grew an extra 'Randomized' entry on every call (and a
    caller-supplied list was mutated).  We now default to None and copy.
    """
    if leg is None:
        leg = ['Data', 'Randomized']
    leg = list(leg)  # never mutate the caller's list
    plt.close()
    f1 = plt.figure(1)
    for G in nets:
        data = np.array(nx.get_edge_attributes(G,'sim').values())
        # Null model: similarity between random node pairs.
        rand_data = []
        for u in G.nodes_iter():
            for k in xrange(2):
                j = np.random.randint(G.number_of_nodes())
                rand_data.append( cosine_similarity(G.node[u]['lvec'], G.node[j]['lvec'] ))
        #rand_data.append(np.mean(temp))
        data = np.array(data)
        rand_data = np.array(rand_data)
        # similarity histogram (log-spaced bins)
        xbins = np.logspace(-2,0,num=30)
        #xbins = np.linspace(0,1,num=30)
        x = xbins[1:] - (xbins[1:]-xbins[:-1])/2.
        y, edges = np.histogram(data,xbins, density=True)
        plt.subplot(2,1,1)
        plt.loglog( x, y, '.-',color=np.random.rand(3))
        #plt.legend(['Data','Randomized'], loc='best', fontsize=6)
        # rank-ordered visit frequency per node
        plt.subplot(2,1,2)
        data = np.array(nx.get_node_attributes(G,'lvec').values()).astype('float')
        data = np.sort(data,1)[:,::-1]
        data = data/np.tile(np.sum(data,1),(data.shape[1],1)).T
        y = np.mean(data,0)
        x = np.arange(len(y))+1
        plt.loglog(x, y, 'k.-')
    # overlay the randomized baseline from the last graph
    plt.subplot(2,1,1)
    y, edges = np.histogram(rand_data,xbins, density=True)
    x = edges[1:] - (edges[1:]-edges[:-1])/2.
    plt.loglog( x, y, 'k.-')
    plt.xlim([0,1])
    plt.ylim([10**-2, 10**3])
    plt.xlabel('$\cos\phi$')
    plt.ylabel('$P(\cos\phi)$')
    plt.title('Cosine Similarity')
    leg.append('Randomized')
    plt.legend(leg, loc='best', fontsize=8)
    plt.subplot(2,1,2)
    plt.xlabel('$k$')
    plt.ylabel('$f_k$')
    f1.set_size_inches(4,6)
    f1.tight_layout()
    if outfile != None:
        plt.savefig( DIR+'figures/'+outfile+'_model.png',fmt='png')
        plt.close()
'''
import chaomodel_graph as cm
m.figure1(m.G, outfile='figure1')
m = cm.Model(10000,250,10,55,0.1)
reload(cm)
alpha = [0.0,0.25,0.5,0.75,1.0]
nets = []
for a in alpha:
m = cm.Model(10000, 250, 20, 50, a);
m.reset();
m.run();
m.calculate_similarity();
nets.append(m.G)
cm.figure1(nets, leg=[str(a) for a in alpha], outfile='chao_alpha')
reload(cm)
m = cm.Model(10000,250,20,100,0.10); m.reset(); m.run(); m.calculate_similarity()
cm.figure1([m.G], leg=['Data'], outfile='chao')
'''
| |
r"""
django-rollbar middleware
There are two options for installing the Rollbar middleware. Both options
require modifying your settings.py file.
The first option is to use
'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' which will
report all exceptions to Rollbar, including 404s. This middleware should be
placed as the last item in your middleware list, which is:
* MIDDLEWARE_CLASSES in Django 1.9 and earlier
* MIDDLEWARE in Django 1.10 and up
The other option is to use the two separate middlewares:
* 'rollbar.contrib.django.middleware.RollbarNotifierMiddlewareExcluding404'
* 'rollbar.contrib.django.middleware.RollbarNotifierMiddlewareOnly404'
The Excluding404 middleware should be placed as the last item in your middleware
list, and the Only404 middleware should be placed as the first item in your
middleware list. This allows 404s to be processed by your other middlewares
before sending an item to Rollbar. Therefore, if you handle the 404 differently
in a way that returns a response early, you won't end up with a Rollbar item.
Regardless of which method you use, you also should add a section to settings.py
like this:
ROLLBAR = {
'access_token': 'tokengoeshere',
}
This can be used for passing configuration options to Rollbar. Additionally,
you can use the key 'ignorable_404_urls' to set an iterable of regular expression
patterns to use to determine whether a 404 exception should be ignored based
on the full url path for the request. For example,
import re
ROLLBAR = {
'access_token': 'YOUR_TOKEN',
'ignorable_404_urls': (
re.compile(r'/index\.php'),
re.compile('/foobar'),
),
}
To get more control of middleware and enrich it with custom data
you can subclass any of the middleware classes described above
and optionally override the methods:
def get_extra_data(self, request, exc):
''' May be defined. Must return a dict or None. Use it to put some custom extra data on rollbar event. '''
return
def get_payload_data(self, request, exc):
''' May be defined. Must return a dict or None. Use it to put some custom payload data on rollbar event. '''
return
You would then insert your custom subclass into your middleware
configuration in the same place as the base class as described above.
For example:
1. create a 'middleware.py' file on your project (name is up to you)
2. import the rollbar default middleware: 'from rollbar.contrib.django.middleware import RollbarNotifierMiddleware'
3. create your own middleware like this:
class CustomRollbarNotifierMiddleware(RollbarNotifierMiddleware):
def get_extra_data(self, request, exc):
''' May be defined. Must return a dict or None. Use it to put some custom extra data on rollbar event. '''
return
def get_payload_data(self, request, exc):
''' May be defined. Must return a dict or None. Use it to put some custom payload data on rollbar event. '''
return
4. add 'path.to.your.CustomRollbarNotifierMiddleware' in your settings.py to
a. MIDDLEWARE_CLASSES in Django 1.9 and earlier
b. MIDDLEWARE in Django 1.10 and up
5. add a section like this in your settings.py:
ROLLBAR = {
'access_token': 'tokengoeshere',
}
See README.rst for full installation and configuration instructions.
"""
import logging
import sys
import rollbar
from django.core.exceptions import MiddlewareNotUsed
from django.conf import settings
from django.http import Http404
from six import reraise
try:
from django.urls import resolve
except ImportError:
from django.core.urlresolvers import resolve
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
from rollbar.contrib.django.utils import MiddlewareMixin
log = logging.getLogger(__name__)
# Default ROLLBAR settings; any key present in settings.ROLLBAR overrides
# these (see RollbarNotifierMiddleware._get_setting).
DEFAULTS = {
    'web_base': 'https://rollbar.com',
    'enabled': True,
    'patch_debugview': True,
    # Report Http404 as a warning instead of an error item.
    'exception_level_filters': [
        (Http404, 'warning')
    ]
}
def _patch_debugview(rollbar_web_base):
    """Monkeypatch Django's debug 500 page to add a 'View in Rollbar' link.

    Supports three generations of Django: the in-code
    TECHNICAL_500_TEMPLATE, the on-disk template (CURRENT_DIR era), and
    4.0+ (html_template_path).  Also patches get_traceback_data so the
    template sees ``view_in_rollbar_url`` built from the request's
    ``rollbar.uuid``.
    """
    try:
        from django.views import debug
    except ImportError:
        return
    if rollbar_web_base.endswith('/'):
        rollbar_web_base = rollbar_web_base[:-1]
    # modify the TECHNICAL_500_TEMPLATE
    new_data = """
{% if view_in_rollbar_url %}
  <h3 style="margin-bottom:15px;"><a href="{{ view_in_rollbar_url }}" target="_blank">View in Rollbar</a></h3>
{% endif %}
"""
    insert_before = '<table class="meta">'
    replacement = new_data + insert_before
    if hasattr(debug, 'TECHNICAL_500_TEMPLATE'):
        if new_data in debug.TECHNICAL_500_TEMPLATE:
            return
        debug.TECHNICAL_500_TEMPLATE = debug.TECHNICAL_500_TEMPLATE.replace(insert_before, replacement, 1)
    elif hasattr(debug, 'CURRENT_DIR'):
        # patch ExceptionReporter.get_traceback_html if this version of Django is using
        # the file system templates rather than the ones in code
        # This code comes from:
        # https://github.com/django/django/blob/d79cf1e9e2887aa12567c8f27e384195253cb847/django/views/debug.py#L329,L334
        # There are theoretical issues with the code below, for example t.render could throw because
        # t might be None, but this is the code from Django
        from pathlib import Path
        from django.template import Context
        def new_get_traceback_html(exception_reporter):
            """Return HTML version of debug 500 HTTP error page."""
            with Path(debug.CURRENT_DIR, 'templates', 'technical_500.html').open() as fh:
                template_string = fh.read()
            template_string = template_string.replace(insert_before, replacement, 1)
            t = debug.DEBUG_ENGINE.from_string(template_string)
            c = Context(exception_reporter.get_traceback_data(), use_l10n=False)
            return t.render(c)
        debug.ExceptionReporter.get_traceback_html = new_get_traceback_html
    else:
        # patch ExceptionReporter.get_traceback_html for Django versions 4.0+
        # NOTE(review): this branch defines new_get_traceback_html but no
        # assignment to debug.ExceptionReporter.get_traceback_html appears
        # here — confirm against upstream pyrollbar.
        def new_get_traceback_html(self):
            """Return HTML version of debug 500 HTTP error page."""
            with self.html_template_path.open(encoding='utf-8') as fh:
                t = debug.DEBUG_ENGINE.from_string(fh.read())
            c = Context(self.get_traceback_data(), use_l10n=False)
            return t.render(c)
    # guard against double-patching (module may be imported twice)
    if hasattr(debug.ExceptionReporter, '__rollbar__patched'):
        return
    # patch ExceptionReporter.get_traceback_data
    old_get_traceback_data = debug.ExceptionReporter.get_traceback_data
    def new_get_traceback_data(exception_reporter):
        data = old_get_traceback_data(exception_reporter)
        try:
            item_uuid = exception_reporter.request.META.get('rollbar.uuid')
            if item_uuid:
                url = '%s/item/uuid/?uuid=%s' % (rollbar_web_base, item_uuid)
                data['view_in_rollbar_url'] = url
        except:
            log.exception("Exception while adding view-in-rollbar link to technical_500_template.")
        return data
    debug.ExceptionReporter.get_traceback_data = new_get_traceback_data
    debug.ExceptionReporter.__rollbar__patched = True
def _should_ignore_404(url):
    """True if *url* matches any pattern in ROLLBAR['ignorable_404_urls']."""
    patterns = getattr(settings, 'ROLLBAR', {}).get('ignorable_404_urls', ())
    for pattern in patterns:
        if pattern.search(url):
            return True
    return False
class RollbarNotifierMiddleware(MiddlewareMixin):
    """Django middleware that reports all unhandled exceptions to Rollbar.

    Reads configuration from settings.ROLLBAR; disables itself
    (MiddlewareNotUsed) when no access token is set or 'enabled' is False.
    Side effects at init: calls rollbar.init(), installs a global
    rollbar.BASE_DATA_HOOK, and optionally monkeypatches the debug view.
    """
    def __init__(self, get_response=None):
        super(RollbarNotifierMiddleware, self).__init__(get_response)
        self.settings = getattr(settings, 'ROLLBAR', {})
        if not self.settings.get('access_token'):
            raise MiddlewareNotUsed
        if not self._get_setting('enabled'):
            raise MiddlewareNotUsed
        self._ensure_log_handler()
        # Everything in settings.ROLLBAR except the keys consumed here is
        # forwarded to rollbar.init() as keyword arguments.
        kw = self.settings.copy()
        access_token = kw.pop('access_token')
        environment = kw.pop('environment', 'development' if settings.DEBUG else 'production')
        kw.setdefault('exception_level_filters', DEFAULTS['exception_level_filters'])
        # ignorable_404_urls is only relevant for this middleware not as an argument to init
        kw.pop('ignorable_404_urls', None)
        rollbar.init(access_token, environment, **kw)
        def hook(request, data):
            # Enrich every rollbar payload with the resolved url name and
            # stash the item uuid on the request for the debug view link.
            try:
                # try django 1.5 method for getting url_name
                url_name = request.resolver_match.url_name
            except:
                # fallback to older method
                try:
                    url_name = resolve(request.path_info).url_name
                except:
                    url_name = None
            if url_name:
                data['context'] = url_name
            data['framework'] = 'django'
            if request and hasattr(request, 'META'):
                request.META['rollbar.uuid'] = data['uuid']
        rollbar.BASE_DATA_HOOK = hook
        # monkeypatch debug module
        if self._get_setting('patch_debugview'):
            try:
                _patch_debugview(self._get_setting('web_base'))
            except Exception as e:
                log.error(
                    "Rollbar - unable to monkeypatch debugview to add 'View in Rollbar' link."
                    " To disable, set `ROLLBAR['patch_debugview'] = False` in settings.py."
                    " Exception was: %r", e
                )
    def _ensure_log_handler(self):
        """
        If there's no log configuration, set up a default handler.
        """
        if log.handlers:
            return
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s')
        handler.setFormatter(formatter)
        log.addHandler(handler)
    def _get_setting(self, name, default=None):
        """Look up *name* in settings.ROLLBAR, falling back to DEFAULTS."""
        try:
            return self.settings[name]
        except KeyError:
            if name in DEFAULTS:
                default_val = DEFAULTS[name]
                # callable defaults are evaluated lazily
                if hasattr(default_val, '__call__'):
                    return default_val()
                return default_val
            return default
    def get_extra_data(self, request, exc):
        """Hook for subclasses: return a dict of extra data or None."""
        return
    def get_payload_data(self, request, exc):
        """Hook for subclasses: return a dict of payload data or None."""
        return
    def process_response(self, request, response):
        return response
    def process_exception(self, request, exc):
        """Report the exception to Rollbar unless it is an ignorable 404.

        Returns None so Django's normal exception handling continues.
        """
        if isinstance(exc, Http404) and _should_ignore_404(request.get_full_path()):
            return
        rollbar.report_exc_info(
            sys.exc_info(),
            request,
            extra_data=self.get_extra_data(request, exc),
            payload_data=self.get_payload_data(request, exc),
        )
class RollbarNotifierMiddlewareOnly404(MiddlewareMixin):
    """Middleware that reports only 404 responses to Rollbar.

    Intended to be placed FIRST in the middleware list so it sees the
    final response, paired with RollbarNotifierMiddlewareExcluding404
    placed last (which stashes the original Http404 exc_info).
    """
    def get_extra_data(self, request, exc):
        """Hook for subclasses: return a dict of extra data or None."""
        return
    def get_payload_data(self, request, exc):
        """Hook for subclasses: return a dict of payload data or None."""
        return
    def process_response(self, request, response):
        if response.status_code != 404:
            return response
        if _should_ignore_404(request.get_full_path()):
            return response
        try:
            # Prefer the original Http404 (with its traceback) captured by
            # the Excluding404 middleware; otherwise synthesize one so
            # report_exc_info has real exc_info to work with.
            if hasattr(request, '_rollbar_notifier_original_http404_exc_info'):
                exc_type, exc_value, exc_traceback = request._rollbar_notifier_original_http404_exc_info
                reraise(exc_type, exc_value, exc_traceback)
            else:
                raise Http404()
        except Exception as exc:
            rollbar.report_exc_info(
                sys.exc_info(),
                request,
                extra_data=self.get_extra_data(request, exc),
                payload_data=self.get_payload_data(request, exc),
            )
        return response
class RollbarNotifierMiddlewareExcluding404(RollbarNotifierMiddleware):
    """Variant of RollbarNotifierMiddleware that skips reporting 404s.

    Instead of reporting, it stores the Http404 exc_info on the request so
    RollbarNotifierMiddlewareOnly404 can report it later if no other
    middleware turned the 404 into an early response.
    """
    def process_exception(self, request, exc):
        if isinstance(exc, Http404):
            request._rollbar_notifier_original_http404_exc_info = sys.exc_info()
        else:
            super(RollbarNotifierMiddlewareExcluding404, self).process_exception(request, exc)
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import warnings
from datetime import date, time
from sqlalchemy import and_, or_
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import contains_eager, joinedload, load_only
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.custom import static_array
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.protection import ProtectionManagersMixin, ProtectionMode
from indico.core.db.sqlalchemy.util.queries import db_dates_overlap
from indico.core.errors import NoReportError
from indico.modules.rb.models.blocked_rooms import BlockedRoom
from indico.modules.rb.models.blockings import Blocking
from indico.modules.rb.models.equipment import EquipmentType, RoomEquipmentAssociation
from indico.modules.rb.models.favorites import favorite_room_table
from indico.modules.rb.models.principals import RoomPrincipal
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import Reservation
from indico.modules.rb.models.room_attributes import RoomAttribute, RoomAttributeAssociation
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
from indico.modules.rb.util import rb_is_admin
from indico.util.i18n import _
from indico.util.serializer import Serializer
from indico.util.string import format_repr
from indico.web.flask.util import url_for
class Room(ProtectionManagersMixin, db.Model, Serializer):
__tablename__ = 'rooms'
__table_args__ = (db.UniqueConstraint('id', 'location_id'), # useless but needed for the LocationMixin fkey
db.CheckConstraint("verbose_name != ''", 'verbose_name_not_empty'),
{'schema': 'roombooking'})
default_protection_mode = ProtectionMode.public
disallowed_protection_modes = frozenset({ProtectionMode.inheriting})
__api_public__ = (
'id', 'building', 'name', 'floor', 'longitude', 'latitude', ('number', 'roomNr'), ('location_name', 'location'),
('full_name', 'fullName')
)
__api_minimal_public__ = (
'id', ('full_name', 'fullName')
)
id = db.Column(
db.Integer,
primary_key=True
)
location_id = db.Column(
db.Integer,
db.ForeignKey('roombooking.locations.id'),
nullable=False
)
photo_id = db.Column(
db.Integer,
db.ForeignKey('roombooking.photos.id')
)
#: Verbose name for the room (long)
verbose_name = db.Column(
db.String,
nullable=True,
default=None
)
site = db.Column(
db.String,
default=''
)
division = db.Column(
db.String
)
building = db.Column(
db.String,
nullable=False
)
floor = db.Column(
db.String,
default='',
nullable=False
)
number = db.Column(
db.String,
default='',
nullable=False
)
notification_emails = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
notification_before_days = db.Column(
db.Integer
)
notification_before_days_weekly = db.Column(
db.Integer
)
notification_before_days_monthly = db.Column(
db.Integer
)
end_notification_daily = db.Column(
db.Integer,
nullable=True
)
end_notification_weekly = db.Column(
db.Integer,
nullable=True
)
end_notification_monthly = db.Column(
db.Integer,
nullable=True
)
reservations_need_confirmation = db.Column(
db.Boolean,
nullable=False,
default=False
)
notifications_enabled = db.Column(
db.Boolean,
nullable=False,
default=True
)
end_notifications_enabled = db.Column(
db.Boolean,
nullable=False,
default=True
)
telephone = db.Column(
db.String,
nullable=False,
default=''
)
key_location = db.Column(
db.String,
nullable=False,
default=''
)
capacity = db.Column(
db.Integer,
default=20
)
surface_area = db.Column(
db.Integer
)
longitude = db.Column(
db.Float
)
latitude = db.Column(
db.Float
)
comments = db.Column(
db.String,
nullable=False,
default=''
)
owner_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=False
)
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False,
)
is_reservable = db.Column(
db.Boolean,
nullable=False,
default=True
)
max_advance_days = db.Column(
db.Integer
)
booking_limit_days = db.Column(
db.Integer
)
location = db.relationship(
'Location',
back_populates='rooms',
lazy=True
)
acl_entries = db.relationship(
'RoomPrincipal',
lazy=True,
backref='room',
cascade='all, delete-orphan',
collection_class=set
)
attributes = db.relationship(
'RoomAttributeAssociation',
backref='room',
cascade='all, delete-orphan',
lazy='dynamic'
)
blocked_rooms = db.relationship(
'BlockedRoom',
backref='room',
cascade='all, delete-orphan',
lazy='dynamic'
)
bookable_hours = db.relationship(
'BookableHours',
backref='room',
order_by=BookableHours.start_time,
cascade='all, delete-orphan',
lazy='dynamic'
)
available_equipment = db.relationship(
'EquipmentType',
secondary=RoomEquipmentAssociation,
backref='rooms',
lazy=True
)
nonbookable_periods = db.relationship(
'NonBookablePeriod',
backref='room',
order_by=NonBookablePeriod.end_dt.desc(),
cascade='all, delete-orphan',
lazy='dynamic'
)
photo = db.relationship(
'Photo',
backref='room',
cascade='all, delete-orphan',
single_parent=True,
lazy=True
)
reservations = db.relationship(
'Reservation',
backref='room',
cascade='all, delete-orphan',
lazy='dynamic'
)
favorite_of = db.relationship(
'User',
secondary=favorite_room_table,
lazy=True,
collection_class=set,
backref=db.backref('favorite_rooms', lazy=True, collection_class=set),
)
#: The owner of the room. This is purely informational and does not grant
#: any permissions on the room.
owner = db.relationship(
'User',
# subquery load since a normal joinedload breaks `get_with_data`
lazy='subquery',
backref=db.backref(
'owned_rooms',
lazy='dynamic'
)
)
# relationship backrefs:
# - breaks (Break.own_room)
# - contributions (Contribution.own_room)
# - events (Event.own_room)
# - session_blocks (SessionBlock.own_room)
# - sessions (Session.own_room)
    @hybrid_property
    def is_auto_confirm(self):
        # A room auto-confirms bookings iff reservations do not need
        # explicit confirmation (inverse of reservations_need_confirmation).
        return not self.reservations_need_confirmation
    @is_auto_confirm.expression
    def is_auto_confirm(self):
        # SQL-level counterpart of the python property above.
        return ~self.reservations_need_confirmation
@property
def details_url(self):
if self.id is None:
return None
return url_for('rb.room_link', room_id=self.id)
    @property
    def map_url(self):
        """Map URL built from the location's template, or None if unset.

        The template may reference {id}, {building}, {floor}, {number},
        {lat} and {lng} placeholders.
        """
        if not self.location.map_url_template:
            return None
        return self.location.map_url_template.format(
            id=self.id,
            building=self.building,
            floor=self.floor,
            number=self.number,
            lat=self.latitude,
            lng=self.longitude,
        )
    @property
    def has_photo(self):
        # True iff a photo row is linked to this room.
        return self.photo_id is not None
    @hybrid_property
    def name(self):
        # Python side: format building/floor/number via the location's
        # room_name_format (see generate_name()).
        return self.generate_name()
    @name.expression
    def name(cls):
        # SQL side: apply the location's room_name_format with postgres
        # format() through a correlated scalar subquery.
        q = (db.session.query(db.m.Location.room_name_format)
             .filter(db.m.Location.id == cls.location_id)
             .correlate(Room)
             .scalar_subquery())
        return db.func.format(q, cls.building, cls.floor, cls.number)
    @hybrid_property
    def full_name(self):
        # "<generated name> - <verbose name>" when a verbose name is set,
        # otherwise just the generated name.
        if self.verbose_name:
            return f'{self.generate_name()} - {self.verbose_name}'
        else:
            return f'{self.generate_name()}'
    @full_name.expression
    def full_name(cls):
        # SQL counterpart; NULL verbose_name falls back to plain name.
        return db.case([
            [cls.verbose_name.isnot(None), cls.name + ' - ' + cls.verbose_name]
        ], else_=cls.name)
    @property
    def location_name(self):
        # Convenience accessor used by the public API serializer.
        return self.location.name
    @property
    def sprite_position(self):
        """Index of this room's thumbnail in the cached photo sprite.

        Falls back to 0 (the placeholder slot) when the mapping is not
        cached or the room is not in it.
        """
        from indico.modules.rb import rb_cache
        sprite_mapping = rb_cache.get('rooms-sprite-mapping')
        return sprite_mapping.get(self.id, 0) if sprite_mapping else 0  # placeholder at position 0
    def __repr__(self):
        # e.g. <Room(1, 'B/1-001', is_deleted=True)>; is_deleted shown only
        # when it differs from the default False.
        return format_repr(self, 'id', 'full_name', is_deleted=False)
def has_equipment(self, *names):
available = {x.name for x in self.available_equipment}
return bool(available & set(names))
    def get_attribute_by_name(self, attribute_name):
        """Return the RoomAttributeAssociation for *attribute_name* or None."""
        return (self.attributes
                .join(RoomAttribute)
                .filter(RoomAttribute.name == attribute_name)
                .first())
def has_attribute(self, attribute_name):
return self.get_attribute_by_name(attribute_name) is not None
def get_attribute_value(self, name, default=None):
attr = self.get_attribute_by_name(name)
return attr.value if attr else default
    def set_attribute_value(self, name, value):
        """Set, update or delete the room attribute *name*.

        A falsy *value* removes the association; setting a value for an
        attribute that is not declared globally raises ValueError.
        Flushes the session so the change is visible immediately.
        """
        attr = self.get_attribute_by_name(name)
        if attr:
            if value:
                attr.value = value
            else:
                # falsy value -> delete the association row
                self.attributes.filter(RoomAttributeAssociation.attribute_id == attr.attribute_id) \
                    .delete(synchronize_session='fetch')
        elif value:
            attr = RoomAttribute.query.filter_by(name=name).first()
            if not attr:
                raise ValueError(f"Attribute {name} does not exist")
            attr_assoc = RoomAttributeAssociation()
            attr_assoc.value = value
            attr_assoc.attribute = attr
            self.attributes.append(attr_assoc)
        db.session.flush()
    def generate_name(self):
        """Build the room name from building/floor/number.

        Uses the location's room_name_format; if the room has no location
        yet, warns and falls back to the 'building/floor-number' format.
        """
        if self.location is None:
            warnings.warn('Room has no location; using default name format')
            return f'{self.building}/{self.floor}-{self.number}'
        return self.location.room_name_format.format(
            building=self.building,
            floor=self.floor,
            number=self.number
        )
    @classmethod
    def find_with_attribute(cls, attribute):
        """Search rooms which have a specific attribute.

        Returns a list of (Room, attribute value) tuples.
        """
        return (Room.query
                .with_entities(Room, RoomAttributeAssociation.value)
                .join(RoomAttributeAssociation)
                .join(RoomAttribute)
                .filter(RoomAttribute.name == attribute)
                .all())
    @staticmethod
    def get_with_data(*args, **kwargs):
        """Yield dicts with rooms and optional extra data.

        :param args: extra data to fetch; currently only 'equipment'
                     (aggregated equipment type names) is supported
        :param only_active: skip deleted rooms (default True)
        :param filters: extra filter criteria
        :param order: ORDER BY columns (default: location/building/floor/
                      number/verbose_name)

        Each yielded dict has a 'room' key plus one key per *args* entry.
        """
        from indico.modules.rb.models.locations import Location
        only_active = kwargs.pop('only_active', True)
        filters = kwargs.pop('filters', None)
        order = kwargs.pop('order', [Location.name, Room.building, Room.floor, Room.number, Room.verbose_name])
        if kwargs:
            raise ValueError(f'Unexpected kwargs: {kwargs}')
        query = Room.query
        entities = [Room]
        if 'equipment' in args:
            entities.append(static_array.array_agg(EquipmentType.name))
            query = query.outerjoin(RoomEquipmentAssociation).outerjoin(EquipmentType)
        query = (query.with_entities(*entities)
                 .outerjoin(Location, Location.id == Room.location_id)
                 .group_by(Location.name, Room.id))
        if only_active:
            query = query.filter(~Room.is_deleted)
        if filters:  # pragma: no cover
            query = query.filter(*filters)
        if order:  # pragma: no cover
            query = query.order_by(*order)
        keys = ('room',) + tuple(args)
        return (dict(zip(keys, row if args else [row])) for row in query)
    @staticmethod
    def filter_available(start_dt, end_dt, repetition, include_blockings=True, include_pre_bookings=True,
                         include_pending_blockings=False):
        """Return a SQLAlchemy filter criterion ensuring that the room is available during the given time."""
        # Check availability against reservation occurrences: build the
        # hypothetical occurrence series for the requested period and
        # require that no valid (optionally: accepted-only) occurrence of
        # this room overlaps it.
        dummy_occurrences = ReservationOccurrence.create_series(start_dt, end_dt, repetition)
        overlap_criteria = ReservationOccurrence.filter_overlap(dummy_occurrences)
        reservation_criteria = [Reservation.room_id == Room.id,
                                ReservationOccurrence.is_valid,
                                overlap_criteria]
        if not include_pre_bookings:
            reservation_criteria.append(Reservation.is_accepted)
        occurrences_filter = (Reservation.query
                              .join(ReservationOccurrence.reservation)
                              .filter(and_(*reservation_criteria)))
        # Check availability against blockings
        filters = ~occurrences_filter.exists()
        if include_blockings:
            if include_pending_blockings:
                valid_states = (BlockedRoom.State.accepted, BlockedRoom.State.pending)
            else:
                valid_states = (BlockedRoom.State.accepted,)
            # TODO: only take blockings into account which the user cannot override
            blocking_criteria = [Room.id == BlockedRoom.room_id,
                                 BlockedRoom.state.in_(valid_states),
                                 db_dates_overlap(Blocking, 'start_date', end_dt.date(), 'end_date', start_dt.date(),
                                                  inclusive=True)]
            blockings_filter = (BlockedRoom.query
                                .join(Blocking.blocked_rooms)
                                .filter(and_(*blocking_criteria)))
            return filters & ~blockings_filter.exists()
        return filters
    @staticmethod
    def filter_bookable_hours(start_time, end_time):
        """Criterion: the time range fits within the room's bookable hours.

        Rooms with no bookable-hours rows are bookable at any time.
        Midnight (time(0)) as an end time is treated as end-of-day.
        """
        if end_time == time(0):
            end_time = time(23, 59, 59)
        # Map a stored end_time of midnight to 23:59:59 on the SQL side too.
        period_end_time = db.case({time(0): time(23, 59, 59)}, else_=BookableHours.end_time,
                                  value=BookableHours.end_time)
        bookable_hours_filter = Room.bookable_hours.any(
            (BookableHours.start_time <= start_time) & (period_end_time >= end_time)
        )
        return ~Room.bookable_hours.any() | bookable_hours_filter
    @staticmethod
    def filter_nonbookable_periods(start_dt, end_dt):
        """Criterion: no nonbookable period overlaps [start_dt, end_dt]."""
        return ~Room.nonbookable_periods.any(and_(NonBookablePeriod.start_dt <= end_dt,
                                                  NonBookablePeriod.end_dt >= start_dt))
def get_blocked_rooms(self, *dates, **kwargs):
states = kwargs.get('states', (BlockedRoom.State.accepted,))
return (self.blocked_rooms
.join(BlockedRoom.blocking)
.options(contains_eager(BlockedRoom.blocking))
.filter(or_(Blocking.is_active_at(d) for d in dates),
BlockedRoom.state.in_(states))
.all())
    @property
    def protection_parent(self):
        # Rooms have no parent object to inherit protection from.
        return None
    @staticmethod
    def is_user_admin(user):
        # Room-booking admin check, delegated to the rb module helper.
        return rb_is_admin(user)
    @classmethod
    def get_permissions_for_user(cls, user, allow_admin=True):
        """Get the permissions for all rooms for a user.

        In case of multipass-based groups it will try to get a list of
        all groups the user is in, and if that's not possible check the
        permissions one by one for each room (which may result in many
        group membership lookups).

        It is recommended to not call this in any place where performance
        matters and to memoize the result.

        Returns {room_id: {'book'|'prebook'|'override'|'moderate'|'manage': bool}}.
        """
        # XXX: When changing the logic in here, make sure to update can_* as well!
        all_rooms_query = (Room.query
                           .filter(~Room.is_deleted)
                           .options(load_only('id', 'protection_mode', 'reservations_need_confirmation',
                                              'is_reservable', 'owner_id'),
                                    joinedload('owner').load_only('id'),
                                    joinedload('acl_entries')))
        is_admin = allow_admin and cls.is_user_admin(user)
        # (allow_admin is already folded into is_admin above, so the extra
        # `and allow_admin` checks below are redundant but harmless)
        if (is_admin and allow_admin) or not user.can_get_all_multipass_groups:
            # check one by one if we can't get a list of all groups the user is in
            return {r.id: {
                'book': r.can_book(user, allow_admin=allow_admin),
                'prebook': r.can_prebook(user, allow_admin=allow_admin),
                'override': r.can_override(user, allow_admin=allow_admin),
                'moderate': r.can_moderate(user, allow_admin=allow_admin),
                'manage': r.can_manage(user, allow_admin=allow_admin),
            } for r in all_rooms_query}
        # Build ACL match criteria for the user itself and all its groups.
        criteria = [db.and_(RoomPrincipal.type == PrincipalType.user, RoomPrincipal.user_id == user.id)]
        for group in user.local_groups:
            criteria.append(db.and_(RoomPrincipal.type == PrincipalType.local_group,
                                    RoomPrincipal.local_group_id == group.id))
        for group in user.iter_all_multipass_groups():
            criteria.append(db.and_(RoomPrincipal.type == PrincipalType.multipass_group,
                                    RoomPrincipal.multipass_group_provider == group.provider.name,
                                    db.func.lower(RoomPrincipal.multipass_group_name) == group.name.lower()))
        data = {}
        permissions = {'book', 'prebook', 'override', 'moderate', 'manage'}
        prebooking_required_rooms = set()
        non_reservable_rooms = set()
        # First pass: permissions implied by ownership / public rooms.
        for room in all_rooms_query:
            is_owner = user == room.owner
            data[room.id] = {x: False for x in permissions}
            if room.reservations_need_confirmation:
                prebooking_required_rooms.add(room.id)
            if not room.is_reservable:
                non_reservable_rooms.add(room.id)
            if (room.is_reservable and (room.is_public or is_owner)) or (is_admin and allow_admin):
                if not room.reservations_need_confirmation or is_owner or (is_admin and allow_admin):
                    data[room.id]['book'] = True
                if room.reservations_need_confirmation:
                    data[room.id]['prebook'] = True
            if is_owner or (is_admin and allow_admin):
                data[room.id]['override'] = True
                data[room.id]['moderate'] = True
                data[room.id]['manage'] = True
        # Second pass: permissions granted explicitly via ACL entries.
        query = (RoomPrincipal.query
                 .join(Room)
                 .filter(~Room.is_deleted, db.or_(*criteria))
                 .options(load_only('room_id', 'full_access', 'permissions')))
        for principal in query:
            is_reservable = principal.room_id not in non_reservable_rooms
            for permission in permissions:
                if not is_reservable and not (is_admin and allow_admin) and permission in ('book', 'prebook'):
                    continue
                # prebook on a room that doesn't use prebooking must be
                # granted explicitly in the ACL to count.
                explicit = permission == 'prebook' and principal.room_id not in prebooking_required_rooms
                check_permission = None if permission == 'manage' else permission
                if principal.has_management_permission(check_permission, explicit=explicit):
                    data[principal.room_id][permission] = True
        return data
def can_access(self, user, allow_admin=True):
    # Rooms are never access-restricted, so an access check is meaningless
    # here; fail loudly if anything calls it by mistake.
    raise NotImplementedError
def can_manage(self, user, permission=None, allow_admin=True, check_parent=True, explicit_permission=False):
    """Check whether `user` may manage this room.

    The room owner always passes, except when a specific permission is
    requested with ``explicit_permission=True`` (then only an explicit ACL
    entry counts). Every other case defers to the default protection logic.
    """
    is_owner = bool(user) and user == self.owner
    if is_owner and (permission is None or not explicit_permission):
        return True
    return super().can_manage(user, permission=permission, allow_admin=allow_admin,
                              check_parent=check_parent, explicit_permission=explicit_permission)
def can_book(self, user, allow_admin=True):
    """Check whether `user` may directly book this room."""
    # XXX: When changing the logic in here, make sure to update get_permissions_for_user as well!
    if not user:
        return False
    if not self.is_reservable:
        # Non-reservable rooms are only bookable through admin privileges.
        if not (allow_admin and self.is_user_admin(user)):
            return False
    # Public rooms without moderation are bookable by anyone.
    if self.is_public and not self.reservations_need_confirmation:
        return True
    return self.can_manage(user, permission='book', allow_admin=allow_admin)
def can_prebook(self, user, allow_admin=True):
    """Check whether `user` may pre-book (request) this room."""
    # XXX: When changing the logic in here, make sure to update get_permissions_for_user as well!
    if not user:
        return False
    if not self.is_reservable:
        # Non-reservable rooms only allow admin access.
        if not (allow_admin and self.is_user_admin(user)):
            return False
    if self.is_public and self.reservations_need_confirmation:
        return True
    # When the room does not use prebookings, we do not want the prebook option to show
    # up for admins or room managers unless they are actually in the ACL with the prebook
    # permission.
    needs_explicit_acl_entry = not self.reservations_need_confirmation
    return self.can_manage(user, permission='prebook', allow_admin=allow_admin,
                           explicit_permission=needs_explicit_acl_entry)
def can_override(self, user, allow_admin=True):
    """Check whether `user` may override booking restrictions."""
    # XXX: When changing the logic in here, make sure to update get_permissions_for_user as well!
    return self.can_manage(user, allow_admin=allow_admin, permission='override')
def can_moderate(self, user, allow_admin=True):
    """Check whether `user` may moderate (approve/reject) prebookings."""
    # XXX: When changing the logic in here, make sure to update get_permissions_for_user as well!
    return self.can_manage(user, allow_admin=allow_admin, permission='moderate')
def can_edit(self, user):
    """Check whether `user` may edit the room's data (RB admins only)."""
    return bool(user) and rb_is_admin(user)
def can_delete(self, user):
    """Check whether `user` may delete the room (RB admins only)."""
    return bool(user) and rb_is_admin(user)
def check_advance_days(self, end_date, user=None, quiet=False):
    """Ensure `end_date` is within the room's max-advance-days window.

    Admins and room managers bypass the limit.  Returns a boolean when the
    limit is respected or `quiet` is set; otherwise raises NoReportError.
    """
    if not self.max_advance_days:
        # No limit configured for this room.
        return True
    if user and (rb_is_admin(user) or self.can_manage(user)):
        return True
    within_limit = (end_date - date.today()).days < self.max_advance_days
    if within_limit or quiet:
        return within_limit
    raise NoReportError(_('You cannot book this room more than {} days in advance')
                        .format(self.max_advance_days))
def check_bookable_hours(self, start_time, end_time, user=None, quiet=False):
    """Ensure the requested period fits one of the room's bookable-hours slots.

    Admins/managers bypass the check; rooms with no configured slots are
    always bookable.  Returns a boolean on success or when `quiet` is set;
    otherwise raises NoReportError.
    """
    if user and (rb_is_admin(user) or self.can_manage(user)):
        return True
    slots = self.bookable_hours.all()
    if not slots:
        return True
    if any(slot.fits_period(start_time, end_time) for slot in slots):
        return True
    if quiet:
        return False
    raise NoReportError('Room cannot be booked at this time')
Room.register_protection_events()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: iworkflow_iapp_template
short_description: Manages iApp templates
description:
  - Manages TCL iApp templates on iWorkflow. These templates can then be
    consumed by iWorkflow tenant services.
version_added: "2.4"
options:
  name:
    description:
      - The name of the iApp template that you want to create on the
        device. This is usually included in the template itself. This
        option is typically used in cases where the template no longer
        exists on disk (to reference) and the C(state) is C(absent).
  template_content:
    description:
      - The contents of a valid iApp template in a tmpl file. This iApp
        Template should be versioned and tested for compatibility with
        iWorkflow Tenant Services and a BIG-IP version of 11.5.3.2 or later.
        This option is only required when creating new template in iWorkflow.
        When you are deleting iApp templates, you will need to specify either
        one of C(name) or C(template_content).
  device:
    description:
      - Managed BIG-IP that you want to get template JSON from. This option
        is only required when C(state) is C(present).
  state:
    description:
      - When C(present), ensures that the iApp template is created.
        When C(absent), ensures that the iApp template has been removed.
    default: present
    choices:
      - present
      - absent
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
    install f5-sdk.
extends_documentation_fragment: f5
requirements:
  - f5-sdk >= 2.3.0
  - iWorkflow >= 2.1.0
author:
  - Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add AppSvcs Integration to iWorkflow
  iworkflow_iapp_template:
    device: "my-bigip-1"
    template_content: "{{ lookup('file', 'appsvcs_integration_v2.0_001.tmpl') }}"
    password: "secret"
    server: "lb.mydomain.com"
    state: "present"
    user: "admin"
  delegate_to: localhost

- name: Remove AppSvcs Integration from iWorkflow
  iworkflow_iapp_template:
    name: "appsvcs_integration_v2.0_001"
    password: "secret"
    server: "lb.mydomain.com"
    state: "absent"
    user: "admin"
  delegate_to: localhost
'''

RETURN = '''
'''
import re
import time
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
defaultdict,
F5ModuleError,
HAS_F5SDK,
iControlUnexpectedHTTPError,
iteritems,
)
from f5.utils.iapp_parser import (
IappParser,
NonextantTemplateNameException
)
class Parameters(AnsibleF5Parameters):
    """Parameter mapping for the iApp template resource.

    Translates between the Ansible module's argument names and the
    iWorkflow REST API's attribute names.
    """

    # REST attribute name -> module parameter name
    api_map = {
        'templateContent': 'template_content'
    }
    # Attributes included in the payload sent to the REST API.
    api_attributes = [
        'templateContent', 'deviceForJSONTransformation'
    ]
    returnables = []
    updatables = [
        'template_content',
    ]

    def __init__(self, params=None):
        self._values = defaultdict(lambda: None)
        if params:
            self.update(params=params)

    def update(self, params=None):
        """Merge `params` into the internal store, honoring api_map and setters."""
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k
                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def to_return(self):
        """Return the dict of returnable values with empties filtered out."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        """Build the REST API payload dict from api_attributes."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    def _squash_template_name_prefix(self):
        """Strip a /Common/ partition prefix from the template declaration.

        The iApp parser does not handle partition prefixes, so rewrite
        'sys application template /Common/<name>' to the bare form.
        """
        name = self._get_template_name()
        pattern = r'sys\s+application\s+template\s+/Common/{0}'.format(name)
        replace = 'sys application template {0}'.format(name)
        self._values['template_content'] = re.sub(pattern, replace, self._values['template_content'])

    def _get_template_name(self):
        # Parse the template body and extract the declared name.
        parser = IappParser(self._values['template_content'])
        tmpl = parser.parse_template()
        return tmpl['name']

    def _get_device_collection(self):
        dg = self.client.api.shared.resolver.device_groups
        result = dg.cm_cloud_managed_devices.devices_s.get_collection()
        return result

    def _get_device_selflink(self, device, collection):
        """Resolve `device` (hostname, address or mgmt address) to a selfLink."""
        for resource in collection:
            if str(resource.product) != "BIG-IP":
                continue
            # The supplied device can be in several formats.
            if str(resource.hostname) == device:
                return str(resource.selfLink)
            elif str(resource.address) == device:
                return str(resource.selfLink)
            elif str(resource.managementAddress) == device:
                return str(resource.selfLink)
        raise F5ModuleError(
            "Device {0} was not found".format(device)
        )

    @property
    def name(self):
        """Template name, either given explicitly or parsed from the content."""
        if self._values['name']:
            return self._values['name']
        if self._values['template_content']:
            try:
                self._squash_template_name_prefix()
                name = self._get_template_name()
                self._values['name'] = name
                return name
            except NonextantTemplateNameException:
                # BUGFIX: this previously `return`ed the error object instead
                # of raising it, silently handing an exception to callers.
                raise F5ModuleError(
                    "No template name was found in the template"
                )
        return None

    @property
    def device(self):
        # NOTE: `basestring` is Python 2 only; this module targets the
        # Python 2 Ansible runtime it was written for.
        if isinstance(self._values['device'], basestring):
            collection = self._get_device_collection()
            result = self._get_device_selflink(str(self._values['device']), collection)
            return result
        elif 'deviceForJSONTransformation' in self._values['device']:
            # Case for the REST API
            item = self._values['device']['deviceForJSONTransformation']
            return str(item['deviceReference']['link'])

    @device.setter
    def device(self, value):
        self._values['device'] = value

    @property
    def deviceForJSONTransformation(self):
        result = dict(
            link=self.device
        )
        return result
class ModuleManager(object):
    """Orchestrates the create/delete lifecycle of an iApp template."""

    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters()
        self.want.client = self.client
        self.want.update(self.client.module.params)
        self.changes = Parameters()
        self.changes.client = self.client

    def _set_changed_options(self):
        # Record every desired returnable option as "changed".
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters()
            self.changes.client = self.client
            self.changes.update(changed)

    def _update_changed_options(self):
        # Diff wanted vs existing values; True when something differs.
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Parameters()
            self.changes.client = self.client
            self.changes.update(changed)
            return True
        return False

    def exec_module(self):
        """Run the module and return the result dict for exit_json()."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.changes.to_return())
        result.update(dict(changed=changed))
        return result

    def exists(self):
        return self.client.api.cm.cloud.templates.iapps.iapp.exists(
            name=self.want.name
        )

    def present(self):
        if self.exists():
            return False
        else:
            return self.create()

    def create(self):
        if self.client.check_mode:
            return True
        if self.want.template_content is None:
            # BUGFIX: this previously raised an error with an empty message,
            # leaving the user with no hint about what went wrong.
            raise F5ModuleError(
                "A 'template_content' must be provided when creating a new iApp template"
            )
        self.create_on_device()
        return True

    def create_on_device(self):
        params = self.want.api_params()
        params['name'] = self.want.name
        self.client.api.cm.cloud.templates.iapps.iapp.create(
            **params
        )
        # Give iWorkflow a moment to finish processing the new template.
        time.sleep(5)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the iApp template")
        return True

    def remove_from_device(self):
        resource = self.client.api.cm.cloud.templates.iapps.iapp.load(
            name=self.want.name
        )
        if resource:
            resource.delete()
class ArgumentSpec(object):
    """Declares the Ansible argument spec for this module."""

    def __init__(self):
        self.supports_check_mode = True
        state_spec = dict(
            default='present',
            choices=['absent', 'present']
        )
        self.argument_spec = dict(
            name=dict(),
            template_content=dict(required=False),
            device=dict(),
            state=state_spec,
        )
        self.f5_product_name = 'iworkflow'
def main():
    """Module entry point: build the client, run the manager, report results."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        results = ModuleManager(client).exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))
# Standard Ansible module entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from preprocessing import preprocess_utility as ult
from datasets.imagenet_dataset import ImagenetData
# from datasets import imagenet_dataset
from models import mobilenet_model
from models import vgg_model
FLAGS = tf.app.flags.FLAGS

# Dataset geometry, re-exported from the preprocessing utility module.
IMAGE_SIZE = ult.IMAGE_SIZE
NUM_CLASSES = ult.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = ult.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = ult.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 1.0        # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.75 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# Op-name prefix used when training with multiple GPU towers; stripped from
# summary names by _activation_summary().
TOWER_NAME = 'tower'
def _activation_summary(x):
    """Attach a histogram and a sparsity summary to activation tensor `x`.

    Args:
        x: Tensor of activations.

    Returns:
        nothing
    """
    # Strip any 'tower_<i>/' prefix so multi-GPU sessions aggregate cleanly
    # on tensorboard.
    clean_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(clean_name + '/activations', x)
    tf.summary.scalar(clean_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a variable pinned to CPU memory.

    Args:
        name: name of the variable.
        shape: list of ints.
        initializer: initializer for the variable.

    Returns:
        Variable Tensor.
    """
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal-initialized variable with optional L2 decay.

    Args:
        name: name of the variable.
        shape: list of ints.
        stddev: standard deviation of the truncated Gaussian initializer.
        wd: L2 weight-decay multiplier; no decay term is added when None.

    Returns:
        Variable Tensor.
    """
    initializer = tf.truncated_normal_initializer(stddev=stddev)
    var = _variable_on_cpu(name, shape, initializer)
    if wd is not None:
        # Register the decay term in 'losses' so loss() sums it into the total.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def distorted_inputs():
    """Build distorted training inputs plus (undistorted) evaluation inputs.

    Returns:
        Tuple (imgs_train, labels_train, imgs_test, labels_test) of batch
        tensors.

    Raises:
        ValueError: if FLAGS.data_dir is not set.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')

    train_set = ImagenetData(subset='train')
    eval_set = ImagenetData(subset='validation')
    assert train_set.data_files()
    assert eval_set.data_files()

    imgs_train, labels_train = ult.distorted_inputs(
        train_set,
        isTrain=True,
        batch_size=FLAGS.batch_size,
        num_preprocess_threads=FLAGS.num_preprocess_threads)
    imgs_test, labels_test = ult.inputs(
        eval_set,
        batch_size=FLAGS.batch_size,
        num_preprocess_threads=FLAGS.num_preprocess_threads)

    return (imgs_train, labels_train, imgs_test, labels_test)
def inference(images, isTrain, isLoad):
    """Build the model selected by FLAGS.model_name and return its logits.

    Args:
        images: image batch from distorted_inputs() or inputs().
        isTrain: boolean tensor; selects dropout keep_prob 0.5 (train) vs 1.0.
        isLoad: passed through to the model constructor (weight loading).

    Returns:
        Logits.

    Raises:
        ValueError: if FLAGS.model_name is not a supported model name.
    """
    if FLAGS.model_name == 'vggnet':
        model = vgg_model.vggnet(isLoad, isTrain)
    elif FLAGS.model_name == 'mobilenet':
        model = mobilenet_model.mobilenet(isLoad, isTrain)
    else:
        # Previously an unknown model name crashed later with a confusing
        # NameError on `model`; fail fast with a clear message instead.
        raise ValueError('Unsupported model_name: %s' % FLAGS.model_name)
    # Disable dropout at evaluation time.
    keep_prob = tf.cond(isTrain, lambda: 0.5, lambda: 1.0)
    pred = model.conv_network(images, keep_prob)
    return pred
def eval(logits, labels):
    """Compute top-1 and top-5 accuracy for a batch.

    NOTE: the name shadows the built-in eval(); kept for caller
    compatibility.

    Args:
        logits: prediction scores, shape [batch, num_classes].
        labels: integer class labels, shape [batch].

    Returns:
        Tuple (top1_accuracy, top5_accuracy) of scalar tensors.
    """
    labels = tf.cast(labels, tf.int64)
    top1_hits = tf.equal(tf.argmax(logits, 1), labels)
    acc = tf.reduce_mean(tf.cast(top1_hits, tf.float32))
    top5_hits = tf.nn.in_top_k(logits, labels, 5)
    top5_acc = tf.reduce_mean(tf.cast(top5_hits, tf.float32))
    return (acc, top5_acc)
def loss(logits, labels):
    """Total loss: mean cross-entropy plus any registered L2 decay terms.

    Args:
        logits: logits from inference().
        labels: 1-D int labels of shape [batch_size].

    Returns:
        Loss tensor of type float.
    """
    labels = tf.cast(labels, tf.int64)
    # Average the per-example cross entropy across the batch.
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    batch_mean = tf.reduce_mean(per_example, name='cross_entropy')
    tf.add_to_collection('losses', batch_mean)
    # 'losses' also holds weight-decay terms from _variable_with_weight_decay.
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def pickle_save(sess):
    """Persist the weights of the model selected by FLAGS.model_name."""
    savers = {
        'vggnet': vgg_model.save_model,
        'mobilenet': mobilenet_model.save_model,
    }
    saver = savers.get(FLAGS.model_name)
    if saver is not None:
        saver(sess)
def _add_loss_summaries(total_loss):
    """Track moving averages of all losses and attach scalar summaries.

    Args:
        total_loss: Total loss from loss().

    Returns:
        loss_averages_op: op that updates the loss moving averages.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    # BUGFIX: use tf.summary.scalar - the legacy tf.scalar_summary API was
    # removed in TF 1.0 and the rest of this file already uses tf.summary.*.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    return loss_averages_op
| |
import abc
import inspect
import itertools
import os
import yaml
from termcolor import cprint
from gym import spaces
from gym_mupen64plus.envs.mupen64plus_env \
import Mupen64PlusEnv, ControllerState, IMAGE_HELPER
import numpy as np
###############################################
class MarioKartEnv(Mupen64PlusEnv):
    """Abstract Gym environment wrapping Mario Kart 64 time trials.

    Concrete subclasses pick a course via the MAP_SERIES/MAP_CHOICE class
    attributes.  Rewards come from finishing the race, completing laps and
    (optionally) hitting progress checkpoints sampled from the on-screen
    HUD map.
    """
    __metaclass__ = abc.ABCMeta

    # Indicates the color value of the pixel at point (203, 51)
    # This is where the lap number is present in the default HUD
    END_RACE_PIXEL_COLORS = {"mupen64plus-video-rice.so": (66, 49, 66),
                             "mupen64plus-video-glide64mk2.so": (214, 148, 214),
                             "mupen64plus-video-glide64.so": (157, 112, 158)}

    # HUD progress-bar color -> the lap number it indicates.
    # (`000` is a Python 2 octal-zero literal; value is 0.)
    HUD_PROGRESS_COLOR_VALUES = {(000, 000, 255): 1,  # Blue: Lap 1
                                 (255, 255, 000): 2,  # Yellow: Lap 2
                                 (255, 000, 000): 3}  # Red: Lap 3

    # Reward shaping constants (see _get_reward):
    DEFAULT_STEP_REWARD = -0.1   # small per-step time penalty
    LAP_REWARD = 100             # granted when a new lap is detected
    CHECKPOINT_REWARD = 0.5      # granted per newly reached checkpoint
    BACKWARDS_PUNISHMENT = -1    # applied when progress moves backwards
    END_REWARD = 1000            # base reward for finishing the race

    END_EPISODE_THRESHOLD = 0

    # Character position in the selection grid (see _set_character).
    PLAYER_ROW = 0
    PLAYER_COL = 0

    # Course position in the map-select screens (see _set_course).
    MAP_SERIES = 0
    MAP_CHOICE = 0

    # When True, grant/revoke rewards for HUD-map progress checkpoints.
    ENABLE_CHECKPOINTS = False

    def __init__(self, character='mario', course='LuigiRaceway'):
        self._set_character(character)
        self._set_course(course)
        super(MarioKartEnv, self).__init__()
        # The end-of-race pixel color depends on the active video plugin.
        self.end_race_pixel_color = self.END_RACE_PIXEL_COLORS[self.config["GFX_PLUGIN"]]
        # Old-style gym MultiDiscrete: a list of [min, max] ranges.
        self.action_space = spaces.MultiDiscrete([[-80, 80],  # Joystick X-axis
                                                  [-80, 80],  # Joystick Y-axis
                                                  [0, 1],     # A Button
                                                  [0, 1],     # B Button
                                                  [0, 1]])    # RB Button

    def _load_config(self):
        # Merge the Mario Kart specific YAML config (located next to this
        # source file) into the base environment config.
        self.config.update(yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "mario_kart_config.yml"))))

    def _validate_config(self):
        # Fail fast when the configured video plugin has no known
        # end-of-race pixel color.
        print("validate sub")
        gfx_plugin = self.config["GFX_PLUGIN"]
        if gfx_plugin not in self.END_RACE_PIXEL_COLORS:
            raise AssertionError("Video Plugin '" + gfx_plugin + "' not currently supported by MarioKart environment")

    def _step(self, action):
        # Interpret the action choice and get the actual controller state for
        # this step (pad the 5 used controls with zeros for the rest).
        controls = action + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        return super(MarioKartEnv, self)._step(controls)

    def _reset_after_race(self):
        # Navigate from the post-race screens back into a freshly loaded race.
        self._wait(count=275, wait_for='times screen')
        self._navigate_post_race_menu()
        self._wait(count=40, wait_for='map select screen')
        self._navigate_map_select()
        self._wait(count=50, wait_for='race to load')

    def _reset_during_race(self):
        # Can't pause the race until the light turns green
        if (self.step_count * self.controller_server.frame_skip) < 120:
            # NOTE(review): the guard checks < 120 frames but the wait is
            # computed from 100 - confirm whether these constants should match.
            steps_to_wait = 100 - (self.step_count * self.controller_server.frame_skip)
            self._wait(count=steps_to_wait, wait_for='green light so we can pause')
        # Pause menu: START, joystick down to 'retry', A to confirm.
        self._press_button(ControllerState.START_BUTTON)
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=76, wait_for='race to load')

    def _reset(self):
        # Reset per-episode bookkeeping.
        self.lap = 1
        self.step_count_at_lap = 0
        self.last_known_lap = -1
        # Sample points around the HUD map border used as progress checkpoints.
        self.CHECKPOINT_LOCATIONS = list(self._generate_checkpoints(64, 36, 584, 444))
        if self.ENABLE_CHECKPOINTS:
            # One achieved-flag per checkpoint, for each of the 3 laps.
            self._checkpoint_tracker = [[False for i in range(len(self.CHECKPOINT_LOCATIONS))] for j in range(3)]
            self.last_known_ckpt = -1
        # Nothing to do on the first call to reset()
        if self.reset_count > 0:
            # Make sure we don't skip frames while navigating the menus
            with self.controller_server.frame_skip_disabled():
                if self.episode_over:
                    self._reset_after_race()
                    self.episode_over = False
                else:
                    self._reset_during_race()
        return super(MarioKartEnv, self)._reset()

    def _get_reward(self):
        """Compute the reward for the current step (see class constants)."""
        #cprint('Get Reward called!','yellow')
        reward_to_return = 0
        cur_lap = self._get_lap()

        if self.ENABLE_CHECKPOINTS:
            cur_ckpt = self._get_current_checkpoint()

        if self.episode_over:
            # Scale out the end reward based on the total steps to get here; the fewer steps, the higher the reward
            reward_to_return = 5 * (1250 - self.step_count) + self.END_REWARD #self.END_REWARD * (5000 / self.step_count) - 3000
        else:
            if cur_lap > self.lap:
                self.lap = cur_lap
                cprint('Lap %s!' % self.lap, 'green')
                # Scale out the lap reward based on the steps to get here; the fewer steps, the higher the reward
                # NOTE(review): steps_this_lap is computed but currently unused.
                steps_this_lap = self.step_count - self.step_count_at_lap
                reward_to_return = self.LAP_REWARD # TODO: Figure out a good scale here... number of steps required per lap will vary depending on the course; don't want negative reward for completing a lap
                self.step_count_at_lap = self.step_count
            elif (self.ENABLE_CHECKPOINTS and cur_ckpt > -1 and
                  not self._checkpoint_tracker[self.last_known_lap - 1][cur_ckpt]):
                # NOTE(review): indexes with last_known_lap, which starts at -1;
                # verify this cannot address the tracker with index -2 early on.
                # TODO: Backwards across a lap boundary incorrectly grants a checkpoint reward
                # Need to investigate further. Might need to restore check for sequential checkpoints
                #cprint(str(self.step_count) + ': CHECKPOINT achieved!', 'green')
                self._checkpoint_tracker[self.lap - 1][cur_ckpt] = True
                reward_to_return = self.CHECKPOINT_REWARD # TODO: This should reward per progress made. It seems as though currently, by going too fast, you could end up skipping over some progress rewards, which would encourage driving around a bit to achieve those rewards. Should reward whatever progress was achieved during the step (perhaps multiple 'checkpoints')
            elif (self.ENABLE_CHECKPOINTS and (cur_lap < self.last_known_lap or
                  cur_ckpt < self.last_known_ckpt)):
                #cprint(str(self.step_count) + ': BACKWARDS!!', 'red')
                self._checkpoint_tracker[self.lap - 1][self.last_known_ckpt] = False
                reward_to_return = self.BACKWARDS_PUNISHMENT
            else:
                reward_to_return = self.DEFAULT_STEP_REWARD

            if self.ENABLE_CHECKPOINTS:
                self.last_known_ckpt = cur_ckpt
            self.last_known_lap = cur_lap

        return reward_to_return

    def _get_lap(self):
        # The first checkpoint is the upper left corner. It's value should tell us the lap.
        ckpt_val = self._evaluate_checkpoint(self.CHECKPOINT_LOCATIONS[0])
        # If it is unknown, assume same lap (character icon is likely covering the corner)
        return ckpt_val if ckpt_val != -1 else self.lap

    def _generate_checkpoints(self, min_x, min_y, max_x, max_y):
        """Yield 2x2 pixel-sample groups along the HUD map border (clockwise)."""
        # TODO: I'm sure this can/should be more pythonic somehow
        # Sample 4 pixels for each checkpoint to reduce the
        # likelihood of a pixel matching the color by chance
        # Top
        for i in range((max_x - min_x) // 2):
            x_val = min_x + i*2
            y_val = min_y
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Right-side
        for i in range((max_y - min_y) // 2):
            x_val = max_x
            y_val = min_y + i*2
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Bottom
        for i in range((max_x - min_x) // 2):
            if i == 0: # Skip the bottom right corner (for some reason MK doesn't draw it)
                continue
            x_val = max_x - i*2
            y_val = max_y
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Left-side
        for i in range((max_y - min_y) // 2):
            x_val = min_x
            y_val = max_y - i*2
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]

    def _get_current_checkpoint(self):
        """Return the index of the most recently achieved checkpoint, or -1."""
        checkpoint_values = [self._evaluate_checkpoint(points)
                             for points in self.CHECKPOINT_LOCATIONS]

        # Check if we have achieved any checkpoints
        if any(val > -1 for val in checkpoint_values):
            # argmin tells us the first index with the lowest value
            index_of_lowest_val = np.argmin(checkpoint_values)
            if index_of_lowest_val != 0:
                # If the argmin is anything but 0, we have achieved
                # all the checkpoints up through the prior index
                checkpoint = index_of_lowest_val - 1
            else:
                # If the argmin is at index 0, they are all the same value,
                # which means we've hit all the checkpoints for this lap
                checkpoint = len(checkpoint_values) - 1

            #if self.last_known_ckpt != checkpoint:
            #    cprint('--------------------------------------------','red')
            #    cprint('Checkpoints: %s' % checkpoint_values, 'yellow')
            #    cprint('Checkpoint: %s' % checkpoint, 'cyan')
            return checkpoint
        else:
            # We haven't hit any checkpoint yet :(
            return -1

    # https://stackoverflow.com/a/3844948
    # Efficiently determines if all items in a list are equal by
    # counting the occurrences of the first item in the list and
    # checking if the count matches the length of the list:
    def all_equal(self, some_list):
        return some_list.count(some_list[0]) == len(some_list)

    def _evaluate_checkpoint(self, checkpoint_points):
        """Map a 2x2 pixel sample to a lap number, or -1 when not achieved."""
        checkpoint_pixels = [IMAGE_HELPER.GetPixelColor(self.pixel_array, point[0], point[1])
                             for point in checkpoint_points]
        #print(checkpoint_pixels)
        # If the first pixel is not a valid color, no need to check the other three
        if not checkpoint_pixels[0] in self.HUD_PROGRESS_COLOR_VALUES:
            return -1
        # If the first pixel is good, make sure the other three match
        elif not self.all_equal(checkpoint_pixels):
            return -1
        # If all are good, return the corresponding value
        else:
            return self.HUD_PROGRESS_COLOR_VALUES[checkpoint_pixels[0]]

    def _evaluate_end_state(self):
        #cprint('Evaluate End State called!','yellow')
        # The race is over once the lap-counter pixel shows the plugin's
        # known end-of-race color.
        return self.end_race_pixel_color == IMAGE_HELPER.GetPixelColor(self.pixel_array, 203, 51)

    def _navigate_menu(self):
        """Drive from the boot screens all the way into a loaded race."""
        self._wait(count=10, wait_for='Nintendo screen')
        self._press_button(ControllerState.A_BUTTON)

        self._wait(count=68, wait_for='Mario Kart splash screen')
        self._press_button(ControllerState.A_BUTTON)

        self._wait(count=68, wait_for='Game Select screen')
        self._navigate_game_select()

        self._wait(count=14, wait_for='Player Select screen')
        self._navigate_player_select()

        self._wait(count=31, wait_for='Map Select screen')
        self._navigate_map_select()

        self._wait(count=46, wait_for='race to load')

        # Change HUD View twice to get to the one we want:
        self._cycle_hud_view(times=2)

        # Now that we have the HUD as needed, reset the race so we have a consistent starting frame:
        self._reset_during_race()

    def _navigate_game_select(self):
        # Select number of players (1 player highlighted by default)
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=3, wait_for='animation')

        # Select GrandPrix or TimeTrials (GrandPrix highlighted by default - down to switch to TimeTrials)
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._wait(count=3, wait_for='animation')

        # Select TimeTrials
        self._press_button(ControllerState.A_BUTTON)

        # Select Begin
        self._press_button(ControllerState.A_BUTTON)

        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _navigate_player_select(self):
        print('Player row: ' + str(self.PLAYER_ROW))
        print('Player col: ' + str(self.PLAYER_COL))

        # Character selection is remembered each time, so ensure upper-left-most is selected
        self._press_button(ControllerState.JOYSTICK_UP)
        self._press_button(ControllerState.JOYSTICK_LEFT, times=3)

        # Navigate to character
        self._press_button(ControllerState.JOYSTICK_DOWN, times=self.PLAYER_ROW)
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=self.PLAYER_COL)

        # Select character
        self._press_button(ControllerState.A_BUTTON)

        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _navigate_map_select(self):
        print('Map series: ' + str(self.MAP_SERIES))
        print('Map choice: ' + str(self.MAP_CHOICE))

        # Map series selection is remembered each time, so ensure left-most is selected
        self._press_button(ControllerState.JOYSTICK_LEFT, times=3)

        # Select map series
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=self.MAP_SERIES)
        self._press_button(ControllerState.A_BUTTON)

        # Map choice selection is remembered each time, so ensure top-most is selected
        self._press_button(ControllerState.JOYSTICK_UP, times=3)

        # Select map choice
        self._press_button(ControllerState.JOYSTICK_DOWN, times=self.MAP_CHOICE)
        self._press_button(ControllerState.A_BUTTON)

        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _cycle_hud_view(self, times=1):
        # C-right cycles between the available HUD layouts.
        for _ in itertools.repeat(None, times):
            self._press_button(ControllerState.CR_BUTTON)

    def _navigate_post_race_menu(self):
        # Times screen
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=13, wait_for='Post race menu')

        # Post race menu (previous choice selected by default)
        # - Retry
        # - Course Change
        # - Driver Change
        # - Quit
        # - Replay
        # - Save Ghost

        # Because the previous choice is selected by default, we navigate to the top entry so our
        # navigation is consistent. The menu doesn't cycle top to bottom or bottom to top, so we can
        # just make sure we're at the top by hitting up a few times
        self._press_button(ControllerState.JOYSTICK_UP, times=5)

        # Now we are sure to have the top entry selected
        # Go down to 'course change'
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.A_BUTTON)

    def _set_character(self, character):
        # Map a character name to its (row, col) on the selection screen.
        characters = {'mario'  : (0, 0),
                      'luigi'  : (0, 1),
                      'peach'  : (0, 2),
                      'toad'   : (0, 3),
                      'yoshi'  : (1, 0),
                      'd.k.'   : (1, 1),
                      'wario'  : (1, 2),
                      'bowser' : (1, 3)}

        self.PLAYER_ROW, self.PLAYER_COL = characters[character]

    def _set_course(self, course):
        # Map a course name to its (series screen, choice index) position.
        courses = {'LuigiRaceway'     : (0, 0),
                   'MooMooFarm'       : (0, 1),
                   'KoopaTroopaBeach' : (0, 2),
                   'KalimariDesert'   : (0, 3),
                   'ToadsTurnpike'    : (1, 0),
                   'FrappeSnowland'   : (1, 1),
                   'ChocoMountain'    : (1, 2),
                   'MarioRaceway'     : (1, 3),
                   'WarioStadium'     : (2, 0),
                   'SherbetLand'      : (2, 1),
                   'RoyalRaceway'     : (2, 2),
                   'BowsersCastle'    : (2, 3),
                   'DKsJungleParkway' : (3, 0),
                   'YoshiValley'      : (3, 1),
                   'BansheeBoardwalk' : (3, 2),
                   'RainbowRoad'      : (3, 3)}

        self.MAP_SERIES, self.MAP_CHOICE = courses[course]
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RMSprop optimizer for Tensorflow.
rmsprop algorithm [tieleman2012rmsprop]
A detailed description of rmsprop.
- maintain a moving (discounted) average of the square of gradients
- divide gradient by the root of this average
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square + epsilon)
delta = - mom
This implementation of RMSProp uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving (discounted) average of the
gradients, and uses that average to estimate the variance:
mean_grad = decay * mean_grad{t-1} + (1-decay) * gradient
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t /
sqrt(mean_square - mean_grad**2 + epsilon)
delta = - mom
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.training import training_ops
class RMSPropOptimizer(optimizer_v2.OptimizerV2):
  """Optimizer that implements the RMSProp algorithm.

  See the
  [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
  """

  def __init__(self,
               learning_rate,
               decay=0.9,
               momentum=0.0,
               epsilon=1e-10,
               use_locking=False,
               centered=False,
               name="RMSProp"):
    """Construct a new RMSProp optimizer.

    Note that in the dense implementation of this algorithm, variables and their
    corresponding accumulators (momentum, gradient moving average, square
    gradient moving average) will be updated even if the gradient is zero
    (i.e. accumulators will decay, momentum will be applied). The sparse
    implementation (used when the gradient is an `IndexedSlices` object,
    typically because of `tf.gather` or an embedding lookup in the forward pass)
    will not update variable slices or their accumulators unless those slices
    were used in the forward pass (nor is there an "eventual" correction to
    account for these omitted updates). This leads to more efficient updates for
    large embedding lookup tables (where most of the slices are not accessed in
    a particular graph execution), but differs from the published algorithm.

    Some of the args below are hyperparameters, where a hyperparameter is
    defined as a scalar Tensor, a regular Python value or a callable (which
    will be evaluated when `apply_gradients` is called) returning a scalar
    Tensor or a Python value.

    Args:
      learning_rate: A float hyperparameter. The learning rate.
      decay: A float hyperparameter. Discounting factor for the history/coming
        gradient.
      momentum: A float hyperparameter.
      epsilon: A float hyperparameter. Small value to avoid zero denominator.
      use_locking: If True use locks for update operation.
      centered: If True, gradients are normalized by the estimated variance of
        the gradient; if False, by the uncentered second moment. Setting this to
        True may help with training, but is slightly more expensive in terms of
        computation and memory. Defaults to False.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "RMSProp".
    """
    super(RMSPropOptimizer, self).__init__(use_locking, name)
    self._set_hyper("learning_rate", learning_rate)
    self._set_hyper("decay", decay)
    self._set_hyper("momentum", momentum)
    self._set_hyper("epsilon", epsilon)
    self._centered = centered

  def _hyper_args(self, state, var):
    """Return (learning_rate, decay, momentum, epsilon) cast to `var`'s dtype.

    Centralizes the hyperparameter lookups that every `_apply_*` method
    repeats, so the argument order passed to `training_ops` is defined in
    exactly one place.
    """
    dtype = var.dtype.base_dtype
    return (state.get_hyper("learning_rate", dtype),
            state.get_hyper("decay", dtype),
            state.get_hyper("momentum", dtype),
            state.get_hyper("epsilon", dtype))

  def _create_vars(self, var_list, state):
    """Create the rms/momentum (and optionally mg) accumulator slots."""
    for v in var_list:
      # The rms accumulator starts at one, not zero, so the very first
      # updates do not divide by a near-zero denominator.
      if v.get_shape().is_fully_defined():
        init_rms = init_ops.ones_initializer(dtype=v.dtype.base_dtype)
      else:
        init_rms = array_ops.ones_like(v)
      state.create_slot_with_initializer(v, init_rms, v.get_shape(),
                                         v.dtype.base_dtype, "rms")
      if self._centered:
        # Moving average of the (uncentered) gradient, used to estimate
        # variance in the centered variant.
        state.zeros_slot(v, "mg")
      state.zeros_slot(v, "momentum")

  def _apply_dense(self, grad, var, state):
    """Dense update for ref variables."""
    rms = state.get_slot(var, "rms")
    mom = state.get_slot(var, "momentum")
    lr, decay, momentum, epsilon = self._hyper_args(state, var)
    if self._centered:
      mg = state.get_slot(var, "mg")
      return training_ops.apply_centered_rms_prop(
          var, mg, rms, mom, lr, decay, momentum, epsilon, grad,
          use_locking=self._use_locking).op
    return training_ops.apply_rms_prop(
        var, rms, mom, lr, decay, momentum, epsilon, grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, var, state):
    """Dense update for resource variables."""
    rms = state.get_slot(var, "rms")
    mom = state.get_slot(var, "momentum")
    lr, decay, momentum, epsilon = self._hyper_args(state, var)
    if self._centered:
      mg = state.get_slot(var, "mg")
      return training_ops.resource_apply_centered_rms_prop(
          var.handle, mg.handle, rms.handle, mom.handle,
          lr, decay, momentum, epsilon, grad,
          use_locking=self._use_locking)
    return training_ops.resource_apply_rms_prop(
        var.handle, rms.handle, mom.handle,
        lr, decay, momentum, epsilon, grad,
        use_locking=self._use_locking)

  def _apply_sparse(self, grad, var, state):
    """Sparse update for ref variables; only touched slices are updated."""
    rms = state.get_slot(var, "rms")
    mom = state.get_slot(var, "momentum")
    lr, decay, momentum, epsilon = self._hyper_args(state, var)
    if self._centered:
      mg = state.get_slot(var, "mg")
      return training_ops.sparse_apply_centered_rms_prop(
          var, mg, rms, mom, lr, decay, momentum, epsilon,
          grad.values, grad.indices,
          use_locking=self._use_locking)
    return training_ops.sparse_apply_rms_prop(
        var, rms, mom, lr, decay, momentum, epsilon,
        grad.values, grad.indices,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, state):
    """Sparse update for resource variables; only touched slices updated."""
    rms = state.get_slot(var, "rms")
    mom = state.get_slot(var, "momentum")
    lr, decay, momentum, epsilon = self._hyper_args(state, var)
    if self._centered:
      # BUG FIX: previously read the slot via `self.get_slot`, unlike every
      # other method here which reads slots from `state`.
      mg = state.get_slot(var, "mg")
      return training_ops.resource_sparse_apply_centered_rms_prop(
          var.handle, mg.handle, rms.handle, mom.handle,
          lr, decay, momentum, epsilon, grad, indices,
          use_locking=self._use_locking)
    return training_ops.resource_sparse_apply_rms_prop(
        var.handle, rms.handle, mom.handle,
        lr, decay, momentum, epsilon, grad, indices,
        use_locking=self._use_locking)
| |
"""Support for HomematicIP Cloud devices."""
from __future__ import annotations
import logging
from pathlib import Path
from homematicip.aio.device import AsyncSwitchMeasuring
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.aio.home import AsyncHome
from homematicip.base.helpers import handle_config
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import DOMAIN as HMIPC_DOMAIN
_LOGGER = logging.getLogger(__name__)
# Service-call attribute names.
ATTR_ACCESSPOINT_ID = "accesspoint_id"
ATTR_ANONYMIZE = "anonymize"
ATTR_CLIMATE_PROFILE_INDEX = "climate_profile_index"
ATTR_CONFIG_OUTPUT_FILE_PREFIX = "config_output_file_prefix"
ATTR_CONFIG_OUTPUT_PATH = "config_output_path"
ATTR_DURATION = "duration"
ATTR_ENDTIME = "endtime"

# File-name prefix used by the dump_hap_config service.
DEFAULT_CONFIG_FILE_PREFIX = "hmip-config"

# Names of the services registered under the HomematicIP Cloud domain.
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION = "activate_eco_mode_with_duration"
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD = "activate_eco_mode_with_period"
SERVICE_ACTIVATE_VACATION = "activate_vacation"
SERVICE_DEACTIVATE_ECO_MODE = "deactivate_eco_mode"
SERVICE_DEACTIVATE_VACATION = "deactivate_vacation"
SERVICE_DUMP_HAP_CONFIG = "dump_hap_config"
SERVICE_RESET_ENERGY_COUNTER = "reset_energy_counter"
SERVICE_SET_ACTIVE_CLIMATE_PROFILE = "set_active_climate_profile"

# All services of this integration, used for bulk removal on unload.
HMIPC_SERVICES = [
    SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
    SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
    SERVICE_ACTIVATE_VACATION,
    SERVICE_DEACTIVATE_ECO_MODE,
    SERVICE_DEACTIVATE_VACATION,
    SERVICE_DUMP_HAP_CONFIG,
    SERVICE_RESET_ENERGY_COUNTER,
    SERVICE_SET_ACTIVE_CLIMATE_PROFILE,
]

# Validation schemas for each service call.  The optional access point id
# is a fixed 24-character string (the access point's SGTIN).
SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION = vol.Schema(
    {
        vol.Required(ATTR_DURATION): cv.positive_int,
        vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
    }
)

SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD = vol.Schema(
    {
        vol.Required(ATTR_ENDTIME): cv.datetime,
        vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
    }
)

SCHEMA_ACTIVATE_VACATION = vol.Schema(
    {
        vol.Required(ATTR_ENDTIME): cv.datetime,
        # Target temperature during vacation, bounded to a plausible range.
        vol.Required(ATTR_TEMPERATURE, default=18.0): vol.All(
            vol.Coerce(float), vol.Range(min=0, max=55)
        ),
        vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
    }
)

SCHEMA_DEACTIVATE_ECO_MODE = vol.Schema(
    {vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24))}
)

SCHEMA_DEACTIVATE_VACATION = vol.Schema(
    {vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24))}
)

SCHEMA_SET_ACTIVE_CLIMATE_PROFILE = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
        vol.Required(ATTR_CLIMATE_PROFILE_INDEX): cv.positive_int,
    }
)

SCHEMA_DUMP_HAP_CONFIG = vol.Schema(
    {
        vol.Optional(ATTR_CONFIG_OUTPUT_PATH): cv.string,
        vol.Optional(
            ATTR_CONFIG_OUTPUT_FILE_PREFIX, default=DEFAULT_CONFIG_FILE_PREFIX
        ): cv.string,
        vol.Optional(ATTR_ANONYMIZE, default=True): cv.boolean,
    }
)

SCHEMA_RESET_ENERGY_COUNTER = vol.Schema(
    {vol.Required(ATTR_ENTITY_ID): comp_entity_ids}
)
async def async_setup_services(hass: HomeAssistant) -> None:
    """Set up the HomematicIP Cloud services."""
    # Services are registered once, globally for the domain.
    if hass.services.async_services().get(HMIPC_DOMAIN):
        return

    # Map service name -> handler coroutine for dispatching calls.
    handlers = {
        SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION: (
            _async_activate_eco_mode_with_duration
        ),
        SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD: (
            _async_activate_eco_mode_with_period
        ),
        SERVICE_ACTIVATE_VACATION: _async_activate_vacation,
        SERVICE_DEACTIVATE_ECO_MODE: _async_deactivate_eco_mode,
        SERVICE_DEACTIVATE_VACATION: _async_deactivate_vacation,
        SERVICE_DUMP_HAP_CONFIG: _async_dump_hap_config,
        SERVICE_RESET_ENERGY_COUNTER: _async_reset_energy_counter,
        SERVICE_SET_ACTIVE_CLIMATE_PROFILE: _set_active_climate_profile,
    }

    @verify_domain_control(hass, HMIPC_DOMAIN)
    async def async_call_hmipc_service(service: ServiceCall):
        """Call correct HomematicIP Cloud service."""
        handler = handlers.get(service.service)
        if handler:
            await handler(hass, service)

    for service_name, schema in (
        (SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
         SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION),
        (SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
         SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD),
        (SERVICE_ACTIVATE_VACATION, SCHEMA_ACTIVATE_VACATION),
        (SERVICE_DEACTIVATE_ECO_MODE, SCHEMA_DEACTIVATE_ECO_MODE),
        (SERVICE_DEACTIVATE_VACATION, SCHEMA_DEACTIVATE_VACATION),
        (SERVICE_SET_ACTIVE_CLIMATE_PROFILE, SCHEMA_SET_ACTIVE_CLIMATE_PROFILE),
    ):
        hass.services.async_register(
            domain=HMIPC_DOMAIN,
            service=service_name,
            service_func=async_call_hmipc_service,
            schema=schema,
        )

    # Dumping the config and resetting energy counters are admin-only.
    for service_name, schema in (
        (SERVICE_DUMP_HAP_CONFIG, SCHEMA_DUMP_HAP_CONFIG),
        (SERVICE_RESET_ENERGY_COUNTER, SCHEMA_RESET_ENERGY_COUNTER),
    ):
        async_register_admin_service(
            hass=hass,
            domain=HMIPC_DOMAIN,
            service=service_name,
            service_func=async_call_hmipc_service,
            schema=schema,
        )
async def async_unload_services(hass: HomeAssistant):
    """Unload HomematicIP Cloud services."""
    # Keep the services while at least one access point is still configured.
    if hass.data[HMIPC_DOMAIN]:
        return

    for service_name in HMIPC_SERVICES:
        hass.services.async_remove(domain=HMIPC_DOMAIN, service=service_name)
async def _async_activate_eco_mode_with_duration(
    hass: HomeAssistant, service: ServiceCall
) -> None:
    """Service to activate eco mode with duration."""
    duration = service.data[ATTR_DURATION]
    hapid = service.data.get(ATTR_ACCESSPOINT_ID)
    if hapid:
        # A specific access point was requested; target only that one.
        home = _get_home(hass, hapid)
        if home:
            await home.activate_absence_with_duration(duration)
        return
    # No access point given: apply to every configured one.
    for hap in hass.data[HMIPC_DOMAIN].values():
        await hap.home.activate_absence_with_duration(duration)
async def _async_activate_eco_mode_with_period(
    hass: HomeAssistant, service: ServiceCall
) -> None:
    """Service to activate eco mode with period."""
    endtime = service.data[ATTR_ENDTIME]
    hapid = service.data.get(ATTR_ACCESSPOINT_ID)
    if hapid:
        # A specific access point was requested; target only that one.
        home = _get_home(hass, hapid)
        if home:
            await home.activate_absence_with_period(endtime)
        return
    # No access point given: apply to every configured one.
    for hap in hass.data[HMIPC_DOMAIN].values():
        await hap.home.activate_absence_with_period(endtime)
async def _async_activate_vacation(hass: HomeAssistant, service: ServiceCall) -> None:
    """Service to activate vacation."""
    endtime = service.data[ATTR_ENDTIME]
    temperature = service.data[ATTR_TEMPERATURE]
    hapid = service.data.get(ATTR_ACCESSPOINT_ID)
    if hapid:
        # A specific access point was requested; target only that one.
        home = _get_home(hass, hapid)
        if home:
            await home.activate_vacation(endtime, temperature)
        return
    # No access point given: apply to every configured one.
    for hap in hass.data[HMIPC_DOMAIN].values():
        await hap.home.activate_vacation(endtime, temperature)
async def _async_deactivate_eco_mode(hass: HomeAssistant, service: ServiceCall) -> None:
    """Service to deactivate eco mode."""
    hapid = service.data.get(ATTR_ACCESSPOINT_ID)
    if hapid:
        home = _get_home(hass, hapid)
        if home:
            await home.deactivate_absence()
        return
    for hap in hass.data[HMIPC_DOMAIN].values():
        await hap.home.deactivate_absence()
async def _async_deactivate_vacation(hass: HomeAssistant, service: ServiceCall) -> None:
    """Service to deactivate vacation."""
    hapid = service.data.get(ATTR_ACCESSPOINT_ID)
    if hapid:
        home = _get_home(hass, hapid)
        if home:
            await home.deactivate_vacation()
        return
    for hap in hass.data[HMIPC_DOMAIN].values():
        await hap.home.deactivate_vacation()
async def _set_active_climate_profile(
    hass: HomeAssistant, service: ServiceCall
) -> None:
    """Service to set the active climate profile."""
    entity_ids = service.data[ATTR_ENTITY_ID]
    # The service exposes a 1-based profile index; the API is 0-based.
    profile_index = service.data[ATTR_CLIMATE_PROFILE_INDEX] - 1

    for hap in hass.data[HMIPC_DOMAIN].values():
        if entity_ids == "all":
            candidates = hap.home.groups
        else:
            candidates = [
                hap.hmip_device_by_entity_id.get(entity_id)
                for entity_id in entity_ids
            ]
        # Only heating groups support climate profiles; skip anything else
        # (including unknown entity ids, which resolve to None).
        for group in candidates:
            if isinstance(group, AsyncHeatingGroup):
                await group.set_active_profile(profile_index)
async def _async_dump_hap_config(hass: HomeAssistant, service: ServiceCall) -> None:
    """Service to dump the configuration of a Homematic IP Access Point."""
    # Fall back to the HA config dir, then the current directory.
    config_path: str = (
        service.data.get(ATTR_CONFIG_OUTPUT_PATH) or hass.config.config_dir or "."
    )
    file_prefix = service.data[ATTR_CONFIG_OUTPUT_FILE_PREFIX]
    anonymize = service.data[ATTR_ANONYMIZE]
    target_dir = Path(config_path)

    for hap in hass.data[HMIPC_DOMAIN].values():
        sgtin = hap.config_entry.unique_id
        if anonymize:
            # Keep only the last four characters of the SGTIN in file names.
            sgtin = sgtin[-4:]
        config_file = target_dir / f"{file_prefix}_{sgtin}.json"
        raw_state = await hap.home.download_configuration()
        config_file.write_text(handle_config(raw_state, anonymize), encoding="utf8")
async def _async_reset_energy_counter(hass: HomeAssistant, service: ServiceCall) -> None:
    """Service to reset the energy counter."""
    # Either a list of entity ids or the literal string "all".
    entity_id_list = service.data[ATTR_ENTITY_ID]
    for hap in hass.data[HMIPC_DOMAIN].values():
        if entity_id_list != "all":
            for entity_id in entity_id_list:
                device = hap.hmip_device_by_entity_id.get(entity_id)
                # Only measuring switches support resetting the counter;
                # unknown entity ids resolve to None and are skipped.
                if device and isinstance(device, AsyncSwitchMeasuring):
                    await device.reset_energy_counter()
        else:
            for device in hap.home.devices:
                if isinstance(device, AsyncSwitchMeasuring):
                    await device.reset_energy_counter()
def _get_home(hass: HomeAssistant, hapid: str) -> AsyncHome | None:
    """Return a HmIP home."""
    hap = hass.data[HMIPC_DOMAIN].get(hapid)
    if hap:
        return hap.home
    _LOGGER.info("No matching access point found for access point id %s", hapid)
    return None
| |
from tilequeue.process import Source
from tilequeue.query.fixture import make_fixture_data_fetcher
from tilequeue.query.pool import DBConnectionPool
from tilequeue.query.postgres import make_db_data_fetcher
from tilequeue.query.rawr import make_rawr_data_fetcher
from tilequeue.query.split import make_split_data_fetcher
from tilequeue.store import make_s3_tile_key_generator
from tilequeue.utils import AwsSessionHelper
# Public API of this package: make_data_fetcher is defined below, the
# rest are re-exported from the submodules imported above.
__all__ = [
    'DBConnectionPool',
    'make_db_data_fetcher',
    'make_fixture_data_fetcher',
    'make_data_fetcher',
]
def make_data_fetcher(cfg, layer_data, query_cfg, io_pool,
                      s3_role_arn=None,
                      s3_role_session_duration_s=None):
    """Make data fetcher from RAWR store and PostgreSQL database.

    When s3_role_arn and s3_role_session_duration_s are available
    the RAWR store will use the s3_role_arn to access the RAWR S3 bucket.
    """
    db_fetcher = make_db_data_fetcher(
        cfg.postgresql_conn_info, cfg.template_path, cfg.reload_templates,
        query_cfg, io_pool)

    # Without RAWR tiles, everything comes from the database.
    if not cfg.yml.get('use-rawr-tiles'):
        return db_fetcher

    rawr_fetcher = _make_rawr_fetcher(
        cfg, layer_data, s3_role_arn, s3_role_session_duration_s)
    group_by_zoom = cfg.yml.get('rawr').get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    # Split fetcher routes low zooms to the DB and high zooms to RAWR.
    return make_split_data_fetcher(group_by_zoom, db_fetcher, rawr_fetcher)
class _NullRawrStorage(object):
def __init__(self, data_source, table_sources):
self.data_source = data_source
self.table_sources = table_sources
def __call__(self, tile):
# returns a "tables" object, which responds to __call__(table_name)
# with tuples for that table.
data = {}
for location in self.data_source(tile):
data[location.name] = location.records
def _tables(table_name):
from tilequeue.query.common import Table
source = self.table_sources[table_name]
return Table(source, data.get(table_name, []))
return _tables
def _make_rawr_fetcher(cfg, layer_data,
                       s3_role_arn=None,
                       s3_role_session_duration_s=None):
    """Build a RAWR-tile data fetcher from the 'rawr' section of cfg.yml.

    When s3_role_arn and s3_role_session_duration_s are available
    the RAWR store will use the s3_role_arn to access the RAWR S3
    bucket
    """
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    rawr_source_yaml = rawr_yaml.get('source')
    assert rawr_source_yaml, 'Missing rawr source config'
    table_sources = rawr_source_yaml.get('table-sources')
    assert table_sources, 'Missing definitions of source per table'
    # map text for table source onto Source objects
    # NOTE: this rewrites the config dict entries in place.
    for tbl, data in table_sources.items():
        source_name = data['name']
        source_value = data['value']
        table_sources[tbl] = Source(source_name, source_value)
    label_placement_layers = rawr_yaml.get('label-placement-layers', {})
    # Normalise each geometry type's layer list to a set, in place.
    for geom_type, layers in label_placement_layers.items():
        assert geom_type in ('point', 'polygon', 'linestring'), \
            'Geom type %r not understood, expecting point, polygon or ' \
            'linestring.' % (geom_type,)
        label_placement_layers[geom_type] = set(layers)
    indexes_cfg = rawr_yaml.get('indexes')
    assert indexes_cfg, 'Missing definitions of table indexes.'
    # source types are:
    #   s3 - to fetch RAWR tiles from S3
    #   store - to fetch RAWR tiles from any tilequeue tile source
    #   generate - to generate RAWR tiles directly, rather than trying to load
    #              them from S3. this can be useful for standalone use and
    #              testing. provide a postgresql subkey for database connection
    #              settings.
    source_type = rawr_source_yaml.get('type')
    if source_type == 's3':
        rawr_source_s3_yaml = rawr_source_yaml.get('s3')
        bucket = rawr_source_s3_yaml.get('bucket')
        assert bucket, 'Missing rawr source s3 bucket'
        region = rawr_source_s3_yaml.get('region')
        assert region, 'Missing rawr source s3 region'
        prefix = rawr_source_s3_yaml.get('prefix')
        assert prefix, 'Missing rawr source s3 prefix'
        extension = rawr_source_s3_yaml.get('extension')
        assert extension, 'Missing rawr source s3 extension'
        allow_missing_tiles = rawr_source_s3_yaml.get(
            'allow-missing-tiles', False)
        # Imported lazily so the dependency is only needed for this source.
        import boto3
        from tilequeue.rawr import RawrS3Source
        if s3_role_arn:
            # use provided role to access S3
            assert s3_role_session_duration_s, \
                's3_role_session_duration_s is either None or 0'
            aws_helper = AwsSessionHelper('tilequeue_dataaccess',
                                          s3_role_arn,
                                          region,
                                          s3_role_session_duration_s)
            s3_client = aws_helper.get_client('s3')
        else:
            s3_client = boto3.client('s3', region_name=region)
        tile_key_gen = make_s3_tile_key_generator(rawr_source_s3_yaml)
        storage = RawrS3Source(
            s3_client, bucket, prefix, extension, table_sources, tile_key_gen,
            allow_missing_tiles)
    elif source_type == 'generate':
        from raw_tiles.source.conn import ConnectionContextManager
        from raw_tiles.source.osm import OsmSource
        postgresql_cfg = rawr_source_yaml.get('postgresql')
        assert postgresql_cfg, 'Missing rawr postgresql config'
        conn_ctx = ConnectionContextManager(postgresql_cfg)
        rawr_osm_source = OsmSource(conn_ctx)
        storage = _NullRawrStorage(rawr_osm_source, table_sources)
    elif source_type == 'store':
        from tilequeue.store import make_store
        from tilequeue.rawr import RawrStoreSource
        store_cfg = rawr_source_yaml.get('store')
        store = make_store(store_cfg)
        storage = RawrStoreSource(store, table_sources)
    else:
        assert False, 'Source type %r not understood. ' \
            'Options are s3, generate and store.' % (source_type,)
    # TODO: this needs to be configurable, everywhere! this is a long term
    # refactor - it's hard-coded in a bunch of places :-(
    max_z = 16
    layers = _make_layer_info(layer_data, cfg.process_yaml_cfg)
    return make_rawr_data_fetcher(
        group_by_zoom, max_z, storage, layers, indexes_cfg,
        label_placement_layers)
def _make_layer_info(layer_data, process_yaml_cfg):
    """Return a mapping of layer name -> LayerInfo built from layer_data."""
    from tilequeue.query.common import LayerInfo, ShapeType

    functions = _parse_yaml_functions(process_yaml_cfg)

    layers = {}
    for layer_datum in layer_data:
        layer_name = layer_datum['name']
        min_zoom_fn, props_fn = functions[layer_name]
        geom_types = ShapeType.parse_set(layer_datum['geometry_types'])
        layers[layer_name] = LayerInfo(min_zoom_fn, props_fn, geom_types)
    return layers
def _parse_yaml_functions(process_yaml_cfg):
    """Return {layer: (min_zoom_fn, output_fn)} parsed from the process yaml."""
    from tilequeue.command import make_output_calc_mapping
    from tilequeue.command import make_min_zoom_calc_mapping

    output_fns = make_output_calc_mapping(process_yaml_cfg)
    min_zoom_fns = make_min_zoom_calc_mapping(process_yaml_cfg)

    # Both mappings must describe exactly the same set of layers.
    assert set(output_fns.keys()) == set(min_zoom_fns.keys())

    return {
        layer: (min_zoom_fns[layer], output_fns[layer])
        for layer in output_fns
    }
| |
import socket
import errno
import sys
import os
import json
import time
from joueur.serializer import serialize, deserialize
import joueur.error_code as error_code
from joueur.game_manager import GameManager
import joueur.ansi_color_coder as color
# ASCII End-Of-Transmission character; delimits JSON messages on the socket.
EOT_CHAR = chr(4)
# Client: A singleton module that talks to the server receiving game
# information and sending commands to execute. Clients perform no game logic
class _Client:
    """Holds the connection state shared by the module-level functions."""
    # TCP socket to the game server; set by connect(), closed by disconnect().
    socket = None

# Module-level singleton instance used by every function below.
_client = _Client()
def connect(hostname='localhost', port=3000, print_io=False):
    """Open the TCP connection to the game server and initialize state."""
    _client.hostname = hostname
    _client.port = int(port)
    _client._print_io = print_io
    _client._received_buffer = ""
    _client._events_stack = []
    _client._buffer_size = 1024
    _client._timeout_time = 1.0

    print(color.text('cyan') + 'Connecting to:',
          _client.hostname + ':' + str(_client.port) + color.reset())

    try:
        _client.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Silly Windows
        _client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # A finite timeout keeps recv() from hanging forever so other
        # system interrupts (e.g. keyboard) can be handled.
        _client.socket.settimeout(_client._timeout_time)
        _client.socket.connect((_client.hostname, _client.port))
    except socket.error as err:
        error_code.handle_error(
            error_code.COULD_NOT_CONNECT,
            err,
            'Could not connect to {}:{}'.format(_client.hostname, _client.port),
        )
def setup(game, ai, manager):
    """Attach the game, AI and game manager to the client singleton."""
    _client.game, _client.ai, _client.manager = game, ai, manager
def _send_raw(string):
    """Write raw bytes to the server socket, echoing them when print_io is on."""
    if _client._print_io:
        echo = 'TO SERVER --> ' + str(string)
        print(color.text('magenta') + echo + color.reset())
    _client.socket.send(string)
# sends the server an event via socket
def send(event, data):
    """Serialize an event/data pair and send it to the server."""
    payload = json.dumps({
        'sentTime': int(time.time()),
        'event': event,
        'data': serialize(data)
    })
    # Messages are EOT-terminated UTF-8 JSON.
    _send_raw((payload + EOT_CHAR).encode('utf-8'))
def disconnect(exit_code=None):
    """Close the server socket if one was opened.

    `exit_code` is accepted for call-site compatibility but is not used here.
    """
    if _client.socket:
        _client.socket.close()
def run_on_server(caller, function_name, args=None):
    """Ask the server to run a member function and return its deserialized result."""
    payload = {
        'caller': caller,
        'functionName': function_name,
        'args': args,
    }
    send('run', payload)
    # Block until the server reports the function has run.
    return deserialize(wait_for_event('ran'), _client.game)
def play():
    """Enter the event loop: auto-handle server events until the game ends.

    Passing None means no event ever matches, so every incoming event is
    auto-handled; the 'over' handler eventually exits the process.
    """
    wait_for_event(None)
def wait_for_event(event):
    """Block until `event` arrives, auto-handling every other event.

    Returns the matching event's data payload (or loops forever when
    `event` is None, auto-handling everything).
    """
    while True:
        wait_for_events()
        while _client._events_stack:
            received = _client._events_stack.pop()
            data = received.get('data')
            if event is not None and received['event'] == event:
                return data
            _auto_handle(received['event'], data)
# loops to check the socket for incoming data and ends once some events
# get found
def wait_for_events():
    """Poll the socket until at least one complete event has been received.

    Received bytes are buffered across calls; complete EOT-terminated JSON
    messages are parsed and pushed onto the events stack.
    """
    if len(_client._events_stack) > 0:
        return  # as we already have events to handle, no need to wait for more
    try:
        while True:
            sent = None
            try:
                sent = _client.socket.recv(_client._buffer_size) \
                    .decode('utf-8')
            except socket.timeout:
                pass  # timed out so keyboard/system interrupts can be handled,
                # hence the while true loop above
            except socket.error as e:
                error_code.handle_error(
                    error_code.CANNOT_READ_SOCKET, e,
                    'Error reading socket while waiting for events')
            if not sent:
                continue
            elif _client._print_io:
                print(color.text('magenta') + 'FROM SERVER <-- ' + str(
                    sent) + color.reset())
            split = (_client._received_buffer + sent).split(EOT_CHAR)
            # the last item will either be "" if the last char was an EOT_CHAR,
            # or a partial data we need to buffer anyways
            _client._received_buffer = split.pop()
            for json_str in reversed(split):
                try:
                    parsed = json.loads(json_str)
                except ValueError as e:
                    # BUG FIX: the message previously used '""' with no format
                    # placeholder, so the offending payload was never shown.
                    error_code.handle_error(
                        error_code.MALFORMED_JSON, e,
                        'Could not parse json "{}"'.format(json_str))
                else:
                    # Only push successfully-parsed messages; previously a
                    # stale/unbound `parsed` could be appended after a
                    # parse failure (if handle_error ever returned).
                    _client._events_stack.append(parsed)
            if len(_client._events_stack) > 0:
                return
    except (KeyboardInterrupt, SystemExit):
        disconnect()
# called via the client run loop when data is sent
def _auto_handle(event, data=None):
    """Dispatch a server event to its module-level `_auto_handle_<event>` handler.

    Reports UNKNOWN_EVENT_FROM_SERVER when no handler exists.
    """
    # BUG FIX: indexing globals() with [] raised KeyError for unknown events,
    # making the error branch below unreachable; .get() returns None instead.
    handler = globals().get('_auto_handle_' + event)
    if handler:
        return handler(data)
    error_code.handle_error(error_code.UNKNOWN_EVENT_FROM_SERVER, message=(
        'Could not auto handle event "{}".'.format(event)))
def _auto_handle_delta(data):
    """Merge a delta state from the server into the local game state."""
    try:
        _client.manager.apply_delta_state(data)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        error_code.handle_error(error_code.DELTA_MERGE_FAILURE, sys.exc_info(),
                                'Error merging delta')
    if _client.ai.player:  # then the AI is ready for updates
        _client.ai.game_updated()
def _auto_handle_order(data):
    """Run an order from the server on the AI and report the result back."""
    args = deserialize(data['args'], _client.game)
    returned = None  # defined even if the order errors below
    try:
        returned = _client.ai._do_order(data['name'], args)
    except Exception:
        # BUG FIX: the message used `data.name`, but `data` is a dict (see
        # data['args'] / data['index']), so formatting itself raised
        # AttributeError. Also removed a leftover debug print and narrowed
        # the bare `except:`.
        error_code.handle_error(error_code.AI_ERRORED, sys.exc_info(),
                                'AI errored executing order "{}"'.format(
                                    data['name']))
    send("finished", {
        'orderIndex': data['index'],
        'returned': returned
    })
def _auto_handle_invalid(data):
    """Notify the AI that it sent an invalid command."""
    try:
        _client.ai.invalid(data['message'])
    except Exception:
        # Narrowed from a bare `except:` to avoid swallowing interrupts.
        error_code.handle_error(error_code.AI_ERRORED, sys.exc_info(),
                                'AI errored while handling invalid data.')
def _auto_handle_fatal(data):
    """Abort with an error when the server reports a fatal event."""
    message = 'Got a fatal event from the server: ' + data['message']
    error_code.handle_error(error_code.FATAL_EVENT, message=message)
def _auto_handle_over(data):
    """Handle the game-over event: report the result, notify the AI, exit."""
    player = _client.ai.player
    won = player.won
    reason = player.reason_won if won else player.reason_lost
    print('{}Game is Over. {} because {}{}'.format(
        color.text('green'),
        'I Won!' if won else 'I Lost :(',
        reason,
        color.reset()
    ))
    try:
        _client.ai.end(won, reason)
    except Exception:
        # Narrowed from a bare `except:` to avoid swallowing interrupts.
        error_code.handle_error(error_code.AI_ERRORED, sys.exc_info(),
                                'AI errored during end.')
    if 'message' in data:
        # Servers may include a farewell message with a hostname placeholder.
        message = data['message'].replace('__HOSTNAME__', _client.hostname)
        print(color.text('cyan') + message + color.reset())
    disconnect()
    # Hard exit: skips cleanup handlers, matching the original behavior.
    os._exit(0)
| |
from decimal import Decimal
from six import text_type
from six.moves.urllib.parse import urlencode
from datetime import datetime
from ssl import SSLError
from suds import WebFault
from suds.client import Client
from authorize.data import Address, CreditCard
from authorize.apis.transaction import parse_response
from authorize.exceptions import AuthorizeConnectionError, \
AuthorizeError, AuthorizeResponseError, AuthorizeInvalidError
PROD_URL = 'https://api.authorize.net/soap/v1/Service.asmx?WSDL'
TEST_URL = 'https://apitest.authorize.net/soap/v1/Service.asmx?WSDL'
class CustomerAPI(object):
def __init__(self, login_id, transaction_key, debug=True, test=False):
    """Initialize the API wrapper with merchant credentials.

    `debug` selects the Authorize.net sandbox endpoint; `test` marks
    requests as test transactions via the pre-encoded options below.
    """
    self.url = TEST_URL if debug else PROD_URL
    self.login_id = login_id
    self.transaction_key = transaction_key
    # Pre-encoded AIM-style transaction options.
    # NOTE(review): Authorize.net documents x_test_request as TRUE/FALSE;
    # confirm the gateway actually accepts the 'Y'/'F' values used here.
    self.transaction_options = urlencode({
        'x_version': '3.1',
        'x_test_request': 'Y' if test else 'F',
        'x_delim_data': 'TRUE',
        'x_delim_char': ';',
    })
@property
def client(self):
    """Lazily-created SOAP client; first access fetches the WSDL."""
    try:
        return self._client
    except AttributeError:
        self._client = Client(self.url)
        return self._client
@property
def client_auth(self):
    """Lazily-created merchant authentication object for SOAP calls."""
    try:
        return self._client_auth
    except AttributeError:
        auth = self.client.factory.create('MerchantAuthenticationType')
        auth.name = self.login_id
        auth.transactionKey = self.transaction_key
        self._client_auth = auth
        return self._client_auth
def _make_call(self, service, *args):
    """Invoke a SOAP service method with standard error handling.

    Raises AuthorizeConnectionError on transport failures and
    AuthorizeResponseError (with .full_response attached) when the
    gateway reports a non-Ok result.
    """
    method = getattr(self.client.service, service)
    try:
        response = method(self.client_auth, *args)
    except (WebFault, SSLError):
        raise AuthorizeConnectionError('Error contacting SOAP API.')
    if response.resultCode == 'Ok':
        return response
    # The first message describes the failure.
    error = response.messages[0][0]
    err = AuthorizeResponseError('%s: %s' % (error.code, error.text))
    err.full_response = {
        'response_code': error.code,
        'response_text': error.text,
    }
    raise err
def create_saved_profile(self, internal_id, payments=None, email=None):
    """
    Creates a user profile to which you can attach saved payments.
    Requires an internal_id to uniquely identify this user. If a list of
    saved payments is provided, as generated by create_saved_payment,
    these will be automatically added to the user profile. Returns the
    user profile id (and any created payment profile ids).
    """
    profile = self.client.factory.create('CustomerProfileType')
    profile.merchantCustomerId = internal_id
    profile.email = email
    if payments:
        payment_array = self.client.factory.create(
            'ArrayOfCustomerPaymentProfileType')
        payment_array.CustomerPaymentProfileType = payments
        profile.paymentProfiles = payment_array
    response = self._make_call('CreateCustomerProfile', profile, 'none')
    payment_ids = response.customerPaymentProfileIdList[0] if payments else None
    return response.customerProfileId, payment_ids
@staticmethod
def _address_to_profile(address, payment_profile):
if address and address.street:
payment_profile.billTo.address = address.street
if address and address.city:
payment_profile.billTo.city = address.city
if address and address.state:
payment_profile.billTo.state = address.state
if address and address.zip_code:
payment_profile.billTo.zip = address.zip_code
if address and address.country:
payment_profile.billTo.country = address.country
return payment_profile
    def create_saved_payment(self, credit_card, address=None, profile_id=None):
        """
        Creates a payment profile. If profile_id is provided, this payment
        profile will be created in Authorize.net attached to that profile.
        If it is not provided, the payment profile will be returned and can
        be provided in a list to the create_profile call.

        ``credit_card`` must expose card_number, exp_year, exp_month, cvv,
        first_name and last_name attributes; ``address`` is passed through
        to _address_to_profile.
        """
        # Create the basic payment profile with credit card details
        payment_profile = self.client.factory.create(
            'CustomerPaymentProfileType')
        customer_type_enum = self.client.factory.create('CustomerTypeEnum')
        payment_profile.customerType = customer_type_enum.individual
        payment_type = self.client.factory.create('PaymentType')
        credit_card_type = self.client.factory.create('CreditCardType')
        credit_card_type.cardNumber = credit_card.card_number
        # Gateway expects "YYYY-MM" with a zero-padded month.
        credit_card_type.expirationDate = '{0.exp_year}-{0.exp_month:0>2}' \
            .format(credit_card)
        credit_card_type.cardCode = credit_card.cvv
        payment_type.creditCard = credit_card_type
        payment_profile.payment = payment_type
        # Customer billing name and address are optional fields
        if credit_card.first_name:
            payment_profile.billTo.firstName = credit_card.first_name
        if credit_card.last_name:
            payment_profile.billTo.lastName = credit_card.last_name
        payment_profile = self._address_to_profile(address, payment_profile)
        # If a profile id is provided, create saved payment on that profile
        # Otherwise, return an object for a later call to create_saved_profile
        if profile_id:
            response = self._make_call('CreateCustomerPaymentProfile',
                                       profile_id, payment_profile, 'none')
            return response.customerPaymentProfileId
        else:
            return payment_profile
    def retrieve_saved_payment(self, profile_id, payment_id):
        """Fetch one saved payment profile and return its details as a dict.

        The dict carries 'email', 'number' (card number as returned by the
        gateway), 'first_name', 'last_name' and an Address built from the
        billTo fields that are present.  Raises AuthorizeError when
        payment_id is not attached to the profile.
        """
        # NOTE(review): the gateway's customerPaymentProfileId is compared
        # with == against an int here — confirm suds deserializes it as an
        # int rather than a string.
        payment_id = int(payment_id)
        profile = self._make_call(
            'GetCustomerProfile', profile_id).profile
        payment_info = {}
        email = None
        if hasattr(profile, 'email'):
            email = text_type(profile.email)
        payment_info['email'] = email
        saved_payment = None
        for payment in profile.paymentProfiles[0]:
            if payment.customerPaymentProfileId == payment_id:
                saved_payment = payment
                break
        if not saved_payment:
            raise AuthorizeError("Payment ID does not exist for this profile.")
        payment_info['number'] = text_type(
            saved_payment.payment.creditCard.cardNumber)
        data = saved_payment.billTo
        # billTo fields are optional on the gateway side, hence getattr
        # defaults throughout.
        payment_info['first_name'] = text_type(getattr(data, 'firstName', ''))
        payment_info['last_name'] = text_type(getattr(data, 'lastName', ''))
        kwargs = {
            'street': getattr(data, 'address', None),
            'city': getattr(data, 'city', None),
            'state': getattr(data, 'state', None),
            'zip_code': getattr(data, 'zip', None),
            'country': getattr(data, 'country', None)}
        # Drop absent fields and coerce the rest to text before building
        # the Address value object.
        kwargs = dict(
            [(key, text_type(value)) for key, value in kwargs.items() if value])
        payment_info['address'] = Address(**kwargs)
        return payment_info
def update_saved_payment(self, profile_id, payment_id, **kwargs):
payment_profile = self.client.factory.create(
'CustomerPaymentProfileExType')
customer_type_enum = self.client.factory.create('CustomerTypeEnum')
payment_profile.customerType = customer_type_enum.individual
payment_simple_type = self.client.factory.create('PaymentType')
card_simple_type = self.client.factory.create('CreditCardSimpleType')
number = kwargs['number']
# Authorize.net uses this constant to indicate that we want to keep
# the existing expiration date.
date = 'XXXX'
card_simple_type.cardNumber = number
if kwargs['exp_month'] and kwargs['exp_year']:
exp = CreditCard.exp_time(kwargs['exp_month'], kwargs['exp_year'])
if exp <= datetime.now():
raise AuthorizeInvalidError('This credit card has expired.')
card_simple_type.expirationDate =\
'{0}-{1:0>2}'.format(kwargs['exp_year'], kwargs['exp_month'])
else:
card_simple_type.expirationDate = date
payment_simple_type.creditCard = card_simple_type
payment_profile.payment = payment_simple_type
payment_profile.payment.creditCard = card_simple_type
payment_profile.customerPaymentProfileId = payment_id
if kwargs['first_name']:
payment_profile.billTo.firstName = kwargs['first_name']
if kwargs['last_name']:
payment_profile.billTo.lastName = kwargs['last_name']
payment_profile = self._address_to_profile(
kwargs['address'], payment_profile)
self._make_call(
'UpdateCustomerPaymentProfile', profile_id,
payment_profile, 'none')
if not kwargs['email']:
return
profile = self.client.factory.create('CustomerProfileExType')
profile.email = kwargs['email']
profile.customerProfileId = profile_id
self._make_call('UpdateCustomerProfile', profile)
    def delete_saved_profile(self, profile_id):
        # Permanently removes the customer profile (and any payment
        # profiles attached to it) from Authorize.net.
        self._make_call('DeleteCustomerProfile', profile_id)
    def delete_saved_payment(self, profile_id, payment_id):
        # Removes a single saved payment profile from the given customer
        # profile; the customer profile itself is left intact.
        self._make_call('DeleteCustomerPaymentProfile',
                        profile_id, payment_id)
def auth(self, profile_id, payment_id, amount, cvv=None):
if cvv is not None:
try:
int(cvv)
except ValueError:
raise AuthorizeInvalidError("CVV Must be a number.")
transaction = self.client.factory.create('ProfileTransactionType')
auth = self.client.factory.create('ProfileTransAuthOnlyType')
amount = Decimal(str(amount)).quantize(Decimal('0.01'))
auth.amount = str(amount)
auth.customerProfileId = profile_id
auth.customerPaymentProfileId = payment_id
auth.cardCode = cvv
transaction.profileTransAuthOnly = auth
response = self._make_call('CreateCustomerProfileTransaction',
transaction, self.transaction_options)
return parse_response(response.directResponse)
def capture(self, profile_id, payment_id, amount, cvv=None):
if cvv is not None:
try:
int(cvv)
except ValueError:
raise AuthorizeInvalidError("CVV Must be a number.")
transaction = self.client.factory.create('ProfileTransactionType')
capture = self.client.factory.create('ProfileTransAuthCaptureType')
amount = Decimal(str(amount)).quantize(Decimal('0.01'))
capture.amount = str(amount)
capture.customerProfileId = profile_id
capture.customerPaymentProfileId = payment_id
capture.cardCode = cvv
transaction.profileTransAuthCapture = capture
response = self._make_call('CreateCustomerProfileTransaction',
transaction, self.transaction_options)
return parse_response(response.directResponse)
def credit(self, profile_id, payment_id, amount):
# Creates an "unlinked credit" (as opposed to refunding a previous transaction)
transaction = self.client.factory.create('ProfileTransactionType')
credit = self.client.factory.create('ProfileTransRefundType')
amount = Decimal(str(amount)).quantize(Decimal('0.01'))
credit.amount = str(amount)
credit.customerProfileId = profile_id
credit.customerPaymentProfileId = payment_id
transaction.profileTransRefund = credit
response = self._make_call('CreateCustomerProfileTransaction',
transaction, self.transaction_options)
return parse_response(response.directResponse)
| |
# -*- coding: utf-8 -*-
from sir.helpers.SQLite3Helper import SQLite3Helper
from sir.helpers.FSHelper import FSHelper
import sir.variables.api
import sir.variables.views
import sir.analytics.api
import sir.analytics.views
import sir.api.views
import sir.pmacct_data.api
from flask import Flask, request, g, jsonify, render_template
import time
import pkg_resources
app = Flask(__name__)
# Optional settings come from the file named by the SIR_SETTINGS env var;
# silent=True means a missing/unset variable is ignored rather than fatal.
app.config.from_envvar('SIR_SETTINGS', silent=True)
###################
###################
# BASIC #########
###################
###################
@app.before_request
def before_request():
    # Open a per-request SQLite connection and stash it on flask.g so
    # views and helpers can reach it.
    g.db = SQLite3Helper(app.config['DATABASE'])
    g.db.connect()
    # g.request_time() returns elapsed seconds for this request,
    # formatted to 5 decimal places and cast back to float.
    g.request_start_time = time.time()
    g.request_time = lambda: float("%.5f" %
                                   (time.time() - g.request_start_time))
    # Filesystem helper rooted at the BGP data folder.
    g.fs = FSHelper(app.config['BGP_FOLDER'])
@app.teardown_request
def teardown_request(exception):
    # Close the per-request DB connection opened in before_request;
    # getattr guards against requests that failed before it was set.
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/', strict_slashes=False)
def start_page():
    # Static landing page for the application.
    return render_template('basic/start_page.html')
###################
###################
# ANALYTICS #####
###################
###################
# Thin route wrappers: HTML views delegate to sir.analytics.views, JSON
# endpoints (/api/v1.0/...) delegate to sir.analytics.api.
@app.route('/analytics', strict_slashes=False)
def analytics_view_help():
    # Landing page for the analytics section.
    return sir.analytics.views.start_page(request)
@app.route('/analytics/offloaded_traffic', methods=['GET', 'POST'])
def analytics_view_offloaded_traffic():
    return sir.analytics.views.offloaded_traffic(request)
@app.route('/analytics/aggregate_per_as', methods=['GET', 'POST'])
def analytics_view_aggregate_per_as():
    # Aggregation keyed by origin AS.
    return sir.analytics.views.aggregate(request, 'as')
@app.route('/analytics/aggregate_per_prefix', methods=['GET', 'POST'])
def analytics_view_aggregate_per_prefix():
    # Aggregation keyed by prefix.
    return sir.analytics.views.aggregate(request, 'prefix')
@app.route('/analytics/simulate', methods=['GET', 'POST'])
def analytics_view_simulate():
    return sir.analytics.views.simulate(request)
@app.route('/api/v1.0/analytics/top_prefixes', methods=['GET'])
def analytics_api_top_prefixes():
    return jsonify(sir.analytics.api.top_prefixes(request))
@app.route('/api/v1.0/analytics/top_asns', methods=['GET'])
def analytics_api_top_asns():
    return jsonify(sir.analytics.api.top_asns(request))
@app.route('/api/v1.0/analytics/find_prefix/<prefix>/<pl>', methods=['GET'])
def analytics_api_find_prefix(prefix, pl):
    # Prefix and length arrive as separate URL segments; rejoin them
    # into the usual "prefix/len" CIDR notation for the API layer.
    return jsonify(sir.analytics.api.find_prefix(request, u'{}/{}'.format(prefix, pl)))
@app.route('/analytics/find_prefix', methods=['GET', 'POST'])
def analytics_view_find_prefix():
    return sir.analytics.views.find_prefix(request)
@app.route('/api/v1.0/analytics/find_prefixes_asn/<asn>', methods=['GET'])
def analytics_api_find_prefixes_asn(asn):
    return jsonify(sir.analytics.api.find_prefixes_asn(request, asn))
@app.route('/analytics/find_prefixes_asn', methods=['GET', 'POST'])
def analytics_view_find_prefix_asn():
    return sir.analytics.views.find_prefix_asn(request)
###################
###################
# API ###########
###################
###################
@app.route('/api/documentation', strict_slashes=False)
def api_help():
    # Human-readable documentation page for the JSON API.
    return sir.api.views.start_page(request)
###################
###################
# VARIABLES #####
###################
###################
@app.route('/variables/browse', methods=['GET'])
def browse_view_variables():
    # HTML browser for stored variables.
    return sir.variables.views.browse_variables(request)
# NOTE: the edit route below is intentionally disabled; it is kept as a
# module-level string literal (no runtime effect) for future reference.
'''
@app.route('/variables/edit/<category>/<name>', methods=['GET', 'POST', 'DELETE'])
def edit_variable(category, name):
    return variables.views.edit_variable(request, category, name)
'''
@app.route('/api/v1.0/variables', methods=['GET', 'POST'])
def variables_api_variables():
    # List (GET) or create (POST) variables.
    return jsonify(sir.variables.api.variables(request))
@app.route('/api/v1.0/variables/categories', methods=['GET'])
def variables_api_category():
    return jsonify(sir.variables.api.variables_category(request))
@app.route('/api/v1.0/variables/categories/<category>', methods=['GET'])
def variables_api_filter_by_category(category):
    return jsonify(sir.variables.api.variables_filter_by_category(request, category))
@app.route('/api/v1.0/variables/categories/<category>/<name>', methods=['GET', 'PUT', 'DELETE'])
def variables_api_name(category, name):
    # Read, replace or delete a single named variable in a category.
    return jsonify(sir.variables.api.api_variables_name(request, category, name))
###################
###################
# PMACCT_DATA ###
###################
###################
# JSON endpoints over pmacct flow/BGP data; all logic lives in
# sir.pmacct_data.api.
@app.route('/api/v1.0/pmacct/dates', methods=['GET'])
def pmacct_data_api_get_dates():
    return jsonify(sir.pmacct_data.api.get_dates(request))
@app.route('/api/v1.0/pmacct/flows', methods=['GET'])
def pmacct_data_api_get_flows():
    return jsonify(sir.pmacct_data.api.get_flows(request))
@app.route('/api/v1.0/pmacct/raw_bgp', methods=['GET'])
def pmacct_data_api_get_raw_bgp():
    return jsonify(sir.pmacct_data.api.get_raw_bgp(request))
@app.route('/api/v1.0/pmacct/bgp_prefixes', methods=['GET'])
def pmacct_data_api_get_bgp_prefixes():
    return jsonify(sir.pmacct_data.api.get_bgp_prefixes(request))
# The purge endpoints are destructive despite being GET; presumably for
# cron-style invocation — confirm before exposing publicly.
@app.route('/api/v1.0/pmacct/purge_bgp', methods=['GET'])
def pmacct_data_api_purge_bgp():
    return jsonify(sir.pmacct_data.api.purge_bgp(request))
@app.route('/api/v1.0/pmacct/purge_flows', methods=['GET'])
def pmacct_data_api_purge_flows():
    return jsonify(sir.pmacct_data.api.purge_flows(request))
###################
###################
# SIR #####
###################
###################
@app.route('/api/v1.0/sir/version', methods=['GET'])
def sir_api_version():
    """Return the installed SIR version and the active configuration."""
    # Imported locally because the module-level imports never pull in
    # sir.helpers.api; without this, the attribute access below raises
    # AttributeError at request time.
    import sir.helpers.api
    sir_version = pkg_resources.require("sir")[0].version
    config_copy = app.config.copy()
    # Dropped before serialization — presumably because its value (a
    # timedelta in stock Flask) is not JSON serializable; confirm.
    config_copy.pop('PERMANENT_SESSION_LIFETIME')
    return jsonify(sir.helpers.api.build_api_response({'version': sir_version,
                                                       'configuration': config_copy}, error=False))
if __name__ == '__main__':
    # Development entry point; bind address and port come from SIR_SETTINGS.
    app.run(app.config['BIND_IP'], app.config['PORT'])
| |
import sys
import argparse
import numpy as np
def parseArgument():
    """Parse the command-line options for the differential-matrix script."""
    parser = argparse.ArgumentParser(
        description="Make a differential interaction matrix for regions in a bed file based on 2 long-distance interaction matrix files")
    parser.add_argument("--bedFileName", required=True,
                        help='Bed file with bins for interactions, sorted by chromosome, start, end')
    parser.add_argument("--interactionOneFileName", required=True,
                        help='First long-distance interactions file, sorted by 1st loop chromosome, 1st loop start, 1st loop end, 2nd loop start, 2nd loop end')
    parser.add_argument("--interactionTwoFileName", required=True,
                        help='Second long-distance interactions file, sorted by 1st loop chromosome, 1st loop start, 1st loop end, 2nd loop start, 2nd loop end')
    parser.add_argument("--outputFileNamePrefix", required=True,
                        help='Prefix of name of file where interaction matrix will be recorded, should not end with _')
    return parser.parse_args()
def getCoordinates(bedLine):
    """Convert one tab-separated bed line into a (chrom, start, end) tuple."""
    fields = bedLine.strip().split("\t")
    chrom = fields[0]
    start, end = int(fields[1]), int(fields[2])
    return (chrom, start, end)
def getAllCoordinates(options):
    """Read every line of options.bedFileName as a (chrom, start, end) tuple.

    Uses a context manager so the file handle is closed even when a line
    fails to parse (the previous version leaked the handle on errors),
    and iterates the file directly instead of materializing readlines().
    """
    with open(options.bedFileName) as bedFile:
        return [getCoordinates(line) for line in bedFile]
def initializeInteractionMats(numMats, bedCoordinates):
    """Create numMats square zero matrices sized by the first chromosome.

    Each matrix is binCount x binCount, where binCount is the number of
    bed entries whose chromosome equals the first entry's chromosome.
    The count is now computed once instead of rebuilding a filtered list
    twice per matrix (behavior, including the [] result for numMats <= 0
    and the IndexError on an empty bed list, is unchanged).
    """
    if numMats <= 0:
        return []
    firstChrom = bedCoordinates[0][0]
    binCount = sum(1 for coordinate in bedCoordinates
                   if coordinate[0] == firstChrom)
    return [np.zeros((binCount, binCount)) for _ in range(numMats)]
def getInteractionInfo(interactionLine):
    """Parse one interaction line into [loopEndOne, loopEndTwo, strength].

    An empty string (end of file) yields sentinel loop ends and a
    strength of -1.0, which callers use as a stop signal.
    """
    if not interactionLine:
        return [("", 0, 0), ("", 0, 0), -1.0]
    fields = interactionLine.strip().split("\t")
    loopEndOne = (fields[0], int(fields[1]), int(fields[2]))
    loopEndTwo = (fields[3], int(fields[4]), int(fields[5]))
    return [loopEndOne, loopEndTwo, float(fields[6])]
def initializeIncrementedValues():
    """Return the starting [lastCoordinateIndex, lastChrom, totalInteractions]."""
    return [0, "", 0]
def interactionTwoFirst(firstLoopEndOne, secondLoopEndOne, firstLoopEndTwo, secondLoopEndTwo):
    """Return True when the 2nd interaction sorts earlier in the genome.

    The ordering is lexicographic on (chromosome, 1st-loop start,
    2nd-loop start, 1st-loop end, 2nd-loop end); exact ties return
    False, and an exhausted 2nd file (empty chromosome sentinel) never
    sorts first.
    """
    if firstLoopEndTwo[0] == "":
        # End of the 2nd interaction file: the 1st interaction wins.
        return False
    keyOne = (firstLoopEndOne[0], firstLoopEndOne[1], secondLoopEndOne[1],
              firstLoopEndOne[2], secondLoopEndOne[2])
    keyTwo = (firstLoopEndTwo[0], firstLoopEndTwo[1], secondLoopEndTwo[1],
              firstLoopEndTwo[2], secondLoopEndTwo[2])
    return keyOne > keyTwo
def saveDifferentialInteractionFile(options, lastChrom, interactionMatOne, interactionMatTwo):
    """Write the per-chromosome log2 fold-change matrix to a text file."""
    # log2(x + 1) keeps zero-count bins finite before differencing.
    logDifference = np.log2(interactionMatOne + 1) - np.log2(interactionMatTwo + 1)
    outputFileName = "{0}_{1}.txt".format(options.outputFileNamePrefix, lastChrom)
    np.savetxt(outputFileName, logDifference, fmt='%.4f')
def recordOldDiffInitializeNewChrom(options, lastChrom, interactionMatOne, interactionMatTwo, newChrom, bedCoordinates, totalInteractions):
    """Flush the finished chromosome and allocate matrices for newChrom.

    When a previous chromosome exists, its differential matrix is written
    out and totalInteractions advances by its bin count (callers use it
    as a global-to-local bin index offset).  Returns the fresh
    (interactionMatOne, interactionMatTwo, totalInteractions, newChrom).

    The bin count for the new chromosome is now computed once instead of
    four times via the previous opaque enumerate comprehensions.
    """
    if lastChrom != "":
        # Record the previous chromosome before discarding its matrices.
        saveDifferentialInteractionFile(options, lastChrom, interactionMatOne, interactionMatTwo)
        totalInteractions = totalInteractions + interactionMatOne.shape[0]
    binCount = len([coordinate for coordinate in bedCoordinates
                    if coordinate[0] == newChrom])
    interactionMatOne = np.zeros((binCount, binCount))
    interactionMatTwo = np.zeros((binCount, binCount))
    # Progress output: one line per chromosome.
    print(newChrom)
    return interactionMatOne, interactionMatTwo, totalInteractions, newChrom
def peakOverlapsInteraction(loopEnd, bedCoordinate):
    """Return True when the loop end overlaps the bed bin.

    Overlap means: the loop start falls in the half-open bin
    [binStart, binEnd), the loop end falls in (binStart, binEnd], or the
    loop spans the whole bin.  Chromosomes are assumed to match already.
    """
    binStart, binEnd = bedCoordinate[1], bedCoordinate[2]
    startInsideBin = binStart <= loopEnd[1] < binEnd
    endInsideBin = binStart < loopEnd[2] <= binEnd
    spansBin = loopEnd[1] <= binStart and loopEnd[2] >= binEnd
    return startInsideBin or endInsideBin or spansBin
def incrementInteractionMat(interactionMat, currentCoordinateIndex, secondCoordinateIndex, interactionStrength, totalInteractions):
    """Add interactionStrength symmetrically at the two (offset) indices.

    totalInteractions is the global index of this chromosome's first
    bin; subtracting it translates the global bin indices into matrix
    coordinates.  (As before, equal indices would be incremented twice.)
    """
    row = currentCoordinateIndex - totalInteractions
    col = secondCoordinateIndex - totalInteractions
    interactionMat[row, col] += interactionStrength
    interactionMat[col, row] += interactionStrength
    return interactionMat
def incrementApproriateInteractionMat(interactionMatOne, interactionMatTwo, interactionOne, currentCoordinateIndex, secondCoordinateIndex, interactionStrength, totalInteractions):
    """Add the interaction to matrix one or two, then advance the 2nd index.

    interactionOne selects which matrix receives the strength; the
    returned list is [interactionMatOne, interactionMatTwo,
    secondCoordinateIndex + 1].
    """
    if interactionOne:
        interactionMatOne = incrementInteractionMat(
            interactionMatOne, currentCoordinateIndex, secondCoordinateIndex,
            interactionStrength, totalInteractions)
    else:
        interactionMatTwo = incrementInteractionMat(
            interactionMatTwo, currentCoordinateIndex, secondCoordinateIndex,
            interactionStrength, totalInteractions)
    return [interactionMatOne, interactionMatTwo, secondCoordinateIndex + 1]
def makeDifferentialInteractionMatForBed(options):
    # Make an interaction matrix for regions in a bed file based on a long-distance interaction matrix file
    # ASSUMES THAT ALL CHROMOSOMES IN THE INTERACTION AND PEAK FILES ARE THE SAME
    # Merge-walks the two sorted interaction files against the sorted bed
    # bins, accumulating strengths into a per-chromosome matrix for each
    # file; recordOldDiffInitializeNewChrom writes the log2 difference
    # whenever the chromosome changes, and the final chromosome is
    # written after the loop.
    bedCoordinates = getAllCoordinates(options)
    [interactionMatOne, interactionMatTwo] = initializeInteractionMats(2, bedCoordinates)
    [interactionOneFile, interactionTwoFile] = [open(interactionFileName) for interactionFileName in [options.interactionOneFileName, options.interactionTwoFileName]]
    [firstLoopEndOne, secondLoopEndOne, interactionStrengthOne] = getInteractionInfo(interactionOneFile.readline())
    [firstLoopEndTwo, secondLoopEndTwo, interactionStrengthTwo] = getInteractionInfo(interactionTwoFile.readline())
    [lastCoordinateIndex, lastChrom, totalInteractions] = initializeIncrementedValues()
    # NOTE(review): the loop stops as soon as EITHER file is exhausted
    # (strength sentinel -1.0), so trailing interactions in the longer
    # file are never counted — confirm this is intended.
    while (interactionStrengthOne >= 0) and (interactionStrengthTwo >= 0):
        # Iterate through the interactions and get the interaction strength of each
        interactionOne = True
        [firstLoopEnd, secondLoopEnd, interactionStrength] = [firstLoopEndOne, secondLoopEndOne, interactionStrengthOne]
        # NOTE(review): given the loop condition above,
        # interactionStrengthOne < 0 can never be true here; only the
        # interactionTwoFirst() test selects file two.
        if (interactionStrengthOne < 0) or interactionTwoFirst(firstLoopEndOne, secondLoopEndOne, firstLoopEndTwo, secondLoopEndTwo):
            # The current interaction is interaction two
            [firstLoopEnd, secondLoopEnd, interactionStrength] = [firstLoopEndTwo, secondLoopEndTwo, interactionStrengthTwo]
            interactionOne = False
        currentCoordinateIndex = lastCoordinateIndex
        if firstLoopEnd[0] != lastChrom:
            # At a new choromosome
            interactionMatOne, interactionMatTwo, totalInteractions, lastChrom = recordOldDiffInitializeNewChrom(options, lastChrom, interactionMatOne, interactionMatTwo, firstLoopEnd[0], bedCoordinates, totalInteractions)
        while firstLoopEnd[0] > bedCoordinates[currentCoordinateIndex][0]:
            # Go to the next bin in the bed file until the interaction's chromosome has been reached
            currentCoordinateIndex = currentCoordinateIndex + 1
            if currentCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                # At the end of the bed file, so stop
                currentCoordinateIndex = currentCoordinateIndex - 1
                break
        while firstLoopEnd[1] > bedCoordinates[currentCoordinateIndex][2]:
            # Go to the next bin in the bed file until the interaction's location has been reached
            currentCoordinateIndex = currentCoordinateIndex + 1
            if currentCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                # At the end of the bed file, so stop
                currentCoordinateIndex = currentCoordinateIndex - 1
                break
        # Remember the cursor so the next interaction resumes here rather
        # than rescanning from the start of the chromosome.
        lastCoordinateIndex = currentCoordinateIndex
        while peakOverlapsInteraction(firstLoopEnd, bedCoordinates[currentCoordinateIndex]):
            # Iterate through the interactions that overlap with the first loop end
            # ASSUMES THAT INTERACTIONS ARE ON THE SAME CHROMOSOME
            # ASSUMES THAT LOOP ENDS ARE AT DIFFERENT PEAKS
            secondCoordinateIndex = currentCoordinateIndex + 1
            if secondCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                # At the end of the bed file, so stop
                secondCoordinateIndex = secondCoordinateIndex - 1
                break
            while secondLoopEnd[1] > bedCoordinates[secondCoordinateIndex][2]:
                # Go to the next bin in the bed file until the interaction's location has been reached
                secondCoordinateIndex = secondCoordinateIndex + 1
                if secondCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                    # At the end of the bed file, so stop
                    secondCoordinateIndex = secondCoordinateIndex - 1
                    break
            while peakOverlapsInteraction(secondLoopEnd, bedCoordinates[secondCoordinateIndex]):
                # Iterate through the interactions that overlap with the second loop
                [interactionMatOne, interactionMatTwo, secondCoordinateIndex] = incrementApproriateInteractionMat(interactionMatOne, interactionMatTwo, interactionOne, currentCoordinateIndex, secondCoordinateIndex, interactionStrength, totalInteractions)
                if secondCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                    # At the end of the bed file, so stop
                    secondCoordinateIndex = secondCoordinateIndex - 1
                    break
            currentCoordinateIndex = currentCoordinateIndex + 1
            if currentCoordinateIndex == interactionMatOne.shape[0] + totalInteractions:
                # At the end of the bed file, so stop
                currentCoordinateIndex = currentCoordinateIndex - 1
                break
        if interactionOne:
            # Go to the next line of the 1st interaction file
            [firstLoopEndOne, secondLoopEndOne, interactionStrengthOne] = getInteractionInfo(interactionOneFile.readline())
        else:
            # Go to the next line of the 2nd interaction file
            [firstLoopEndTwo, secondLoopEndTwo, interactionStrengthTwo] = getInteractionInfo(interactionTwoFile.readline())
    [interactionFile.close() for interactionFile in [interactionOneFile, interactionTwoFile]]
    # Write out the last chromosome, which never triggers the
    # chromosome-change path inside the loop.
    saveDifferentialInteractionFile(options, lastChrom, interactionMatOne, interactionMatTwo)
if __name__=="__main__":
    # Script entry point: parse CLI options, then build and save the
    # per-chromosome differential matrices.
    options = parseArgument()
    makeDifferentialInteractionMatForBed(options)
| |
#!/usr/bin/env python3
# Copyright 2021 The CFU-Playground Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Disable pylint's E1101, which breaks completely on migen
# pylint:disable=E1101
from hps_proto2_platform import Platform
from litex.soc.cores.clock import S7PLL
from litex.soc.integration.common import get_mem_data
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import Builder, builder_args, builder_argdict
from litex.soc.integration.soc import LiteXSoC, SoCRegion
from litex.soc.cores.led import LedChaser
from litex import get_data_mod
from litex.build.lattice.radiant import radiant_build_args, radiant_build_argdict
from litex.build.lattice.oxide import oxide_args, oxide_argdict
from litespi.modules import GD25LQ128D
from litespi.opcodes import SpiNorFlashOpCodes as Codes
from litespi.phy.generic import LiteSPIPHY
from litespi import LiteSPI
from migen import Module, Instance, Signal, Record
from patch import Patch
# from cam_control import CameraControl
from patch_cpu_variant import patch_cpu_variant, copy_cpu_variant_if_needed
import argparse
import os
KB = 1024
MB = 1024 * KB
UART_SPEED = 115200
RAM_SIZE = 320 * KB
SOC_DIR = os.path.dirname(os.path.realpath(__file__))
class HpsSoC(LiteXSoC):
    """LiteX SoC targeting the HPS platform with a VexRiscv CPU.

    Memory is split between on-chip LRAM (sram + optional tensor
    "arena") and SPI flash (gateware at the start, ROM at rom_offset).
    The class constants below define the fixed address map.
    """
    # Memory layout
    csr_origin = 0xf0000000
    spiflash_region = SoCRegion(0x20000000, 16*MB, cached=True)
    # The start of the SPI Flash contains the FPGA gateware. Our ROM is after
    # that.
    rom_offset = 2*MB
    sram_origin = 0x40000000
    arena_origin = 0x60000000
    # Small window exposing the VexRiscv debug bus when debug is enabled.
    vexriscv_region = SoCRegion(origin=0xf00f0000, size=0x100)
    mem_map = {
        "sram": sram_origin,
        "arena": arena_origin,
        "csr": csr_origin,
    }
    cpu_type = "vexriscv"
    def __init__(self, platform, debug, variant=None,
                 cpu_cfu=None, execute_from_lram=False,
                 separate_arena=False,
                 with_led_chaser=False,
                 integrated_rom_init=[],
                 build_bios=False):
        """Assemble the SoC: clocking, CPU, RAM/arena, flash, ROM, UARTs.

        debug enables the wishbone UART + CPU debug bus; cpu_cfu names a
        Verilog CFU file; execute_from_lram places the ROM in LRAM
        instead of flash; integrated_rom_init preloads that LRAM ROM.
        """
        LiteXSoC.__init__(self,
                          platform=platform,
                          sys_clk_freq=platform.sys_clk_freq,
                          csr_data_width=32)
        if variant == None:
            variant = "full+debug" if debug else "full"
        # Clock, Controller, CPU
        self.submodules.crg = platform.create_crg()
        self.add_controller("ctrl")
        if execute_from_lram:
            reset_address = 0x00000000
        else:
            reset_address = self.spiflash_region.origin + self.rom_offset
        self.add_cpu(self.cpu_type,
                     variant=variant,
                     reset_address=reset_address,
                     cfu=cpu_cfu)
        # RAM
        if separate_arena:
            ram_size = 64*KB
            arena_size = RAM_SIZE - ram_size
        elif execute_from_lram:
            # Leave one LRAM free for ROM
            ram_size = RAM_SIZE - 64*KB
            arena_size = 0
        else:
            ram_size = RAM_SIZE
            arena_size = 0
        self.setup_ram(size=ram_size)
        self.setup_arena(size=arena_size)
        # SPI Flash
        self.setup_litespi_flash()
        # ROM (either part of SPI Flash, or embedded)
        if execute_from_lram:
            self.setup_rom_in_lram()
            if integrated_rom_init:
                # ROM LRAM is 64 KB of 32-bit words.
                assert len(integrated_rom_init) <= 64 * KB / 4
                self.integrated_rom_initialized = True
                self.rom.add_init(integrated_rom_init)
        else:
            self.setup_rom_in_flash()
        # "LEDS" - Just one LED on JTAG port
        if with_led_chaser:
            self.submodules.leds = LedChaser(
                pads=platform.request_all("user_led"),
                sys_clk_freq=platform.sys_clk_freq)
            self.csr.add("leds")
        # UART
        self.add_serial()
        # Wishbone UART and CPU debug - JTAG must be disabled to use serial2
        if debug:
            self.add_uartbone("serial2", baudrate=UART_SPEED)
            self.bus.add_slave(
                "vexriscv_debug", self.cpu.debug_bus, self.vexriscv_region)
        if build_bios:
            # Timer (required for the BIOS build only)
            self.add_timer(name="timer0")
            self.timer0.add_uptime()
    def setup_ram(self, size):
        """Attach an LRAM-backed sram region of the given size."""
        region = SoCRegion(self.sram_origin, size, cached=True, linker=True)
        self.submodules.lram = self.platform.create_ram(32, size)
        self.bus.add_slave("sram_lram", self.lram.bus, region)
        self.bus.add_region("sram", region)
    # define the "arena" region even if it's length-zero
    def setup_arena(self, size):
        """Attach the (dual-port) tensor-arena LRAM; region always exists."""
        region = SoCRegion(self.arena_origin, size, cached=True, linker=True)
        self.bus.add_region("arena", region)
        if size > 0:
            # Dual-port so the CFU can read it directly
            # (see connect_cfu_to_lram).
            self.submodules.arena = self.platform.create_ram(32, size, dual_port=True)
            self.bus.add_slave("arena_lram", self.arena.bus, region)
            self.add_config('SOC_SEPARATE_ARENA')
    def setup_rom_in_lram(self):
        """Back the ROM with a 64 KB LRAM at the CPU reset address."""
        region = SoCRegion(self.cpu.reset_address, 64 * KB, mode='r',
                           cached=True, linker=True)
        self.submodules.rom = self.platform.create_ram(32, region.size)
        self.bus.add_slave("rom_lram", self.rom.bus, region)
        self.bus.add_region("rom", region)
        self.integrated_rom_initialized = False
        self.integrated_rom_size = region.size
    def setup_litespi_flash(self):
        """Instantiate the LiteSPI PHY + memory-mapped flash controller."""
        self.submodules.spiflash_phy = LiteSPIPHY(
            self.platform.request("spiflash4x"),
            GD25LQ128D(Codes.READ_1_1_4),
            default_divisor=0,
            rate='1:2',
            extra_latency=1)
        self.submodules.spiflash_mmap = LiteSPI(phy=self.spiflash_phy,
                                                mmap_endianness = self.cpu.endianness)
        self.csr.add("spiflash_mmap")
        self.csr.add("spiflash_phy")
        self.bus.add_slave(name="spiflash", slave=self.spiflash_mmap.bus, region=self.spiflash_region)
    def setup_rom_in_flash(self):
        """Map the ROM as the tail of the SPI flash (after the gateware)."""
        region = SoCRegion(self.spiflash_region.origin + self.rom_offset,
                           self.spiflash_region.size - self.rom_offset,
                           mode='r', cached=True, linker=True)
        self.bus.add_region("rom", region)
        self.integrated_rom_initialized = False
        self.integrated_rom_size = region.size
    def add_serial(self):
        """Attach the primary console UART."""
        self.add_uart("uart", baudrate=UART_SPEED)
    def connect_cfu_to_lram(self):
        """Wire four extra CFU read ports straight to the arena LRAM banks."""
        # create cfu <-> lram bus
        cfu_lram_bus_layout = [
            ("lram0", [("addr", 14), ("din", 32)]),
            ("lram1", [("addr", 14), ("din", 32)]),
            ("lram2", [("addr", 14), ("din", 32)]),
            ("lram3", [("addr", 14), ("din", 32)])]
        cfu_lram_bus = Record(cfu_lram_bus_layout)
        # add extra ports to the cfu pinout
        self.cpu.cfu_params.update(
            o_port0_addr = cfu_lram_bus.lram0.addr,
            i_port0_din = cfu_lram_bus.lram0.din,
            o_port1_addr = cfu_lram_bus.lram1.addr,
            i_port1_din = cfu_lram_bus.lram1.din,
            o_port2_addr = cfu_lram_bus.lram2.addr,
            i_port2_din = cfu_lram_bus.lram2.din,
            o_port3_addr = cfu_lram_bus.lram3.addr,
            i_port3_din = cfu_lram_bus.lram3.din,
        )
        # connect them to the lram module
        self.comb += [
            self.arena.b_addrs[0].eq(cfu_lram_bus.lram0.addr),
            self.arena.b_addrs[1].eq(cfu_lram_bus.lram1.addr),
            self.arena.b_addrs[2].eq(cfu_lram_bus.lram2.addr),
            self.arena.b_addrs[3].eq(cfu_lram_bus.lram3.addr),
            cfu_lram_bus.lram0.din.eq(self.arena.b_douts[0]),
            cfu_lram_bus.lram1.din.eq(self.arena.b_douts[1]),
            cfu_lram_bus.lram2.din.eq(self.arena.b_douts[2]),
            cfu_lram_bus.lram3.din.eq(self.arena.b_douts[3]),
        ]
    # This method is defined on SoCCore and the builder assumes it exists.
    def initialize_rom(self, data):
        """Preload ROM contents unless an init image was already baked in."""
        if hasattr(self, 'rom') and not self.integrated_rom_initialized:
            self.rom.add_init(data)
    @property
    def mem_regions(self):
        # Compatibility shim: expose the bus regions under the name the
        # builder expects.
        return self.bus.regions
    def do_finalize(self):
        super().do_finalize()
        # Retro-compatibility for builder
        # TODO: just fix the builder
        for region in self.bus.regions.values():
            region.length = region.size
            region.type = "cached" if region.cached else "io"
            if region.linker:
                region.type += "+linker"
        self.csr_regions = self.csr.regions
def hps_soc_args(parser: argparse.ArgumentParser) -> None:
    """Register LiteX builder plus Radiant/Oxide toolchain CLI arguments."""
    builder_args(parser)
    radiant_build_args(parser)
    oxide_args(parser)
def create_builder(soc, args):
    """Create a LiteX Builder for *soc* from the parsed CLI arguments."""
    builder = Builder(soc, **builder_argdict(args))
    # The customizations below are currently disabled; they restricted the
    # software packages and substituted a project-local BIOS.
    # builder.output_dir = args.output_dir
    # required_packages = {"libcompiler_rt", "libbase"}
    # # Select required packages. Of most importance is to exclude the "bios"
    # # package, which is LiteX's BIOS, since we're using our own.
    # builder.software_packages = [
    #     (name, dir) for (name, dir) in builder.software_packages if name in required_packages]
    # # "bios" gets loaded automatically by the builder.
    # builder.add_software_package("bios", f"{SOC_DIR}/software/bios")
    return builder
def main():
    """Command-line entry point: parse arguments, assemble the HPS SoC and
    (optionally) run a full bitstream build."""
    patch_cpu_variant()
    parser = argparse.ArgumentParser(description="HPS SoC")
    hps_soc_args(parser)
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug mode")
    parser.add_argument("--slim_cpu", action="store_true",
                        help="DEPRECATED: use '--cpu-variant=slim+cfu' instead (Use slimmer VexRiscv (required for mnv2_first))")
    parser.add_argument("--build", action="store_true",
                        help="Whether to do a full build, including the bitstream")
    parser.add_argument("--toolchain", default="oxide",
                        help="Which toolchain to use: oxide (default) or radiant")
    parser.add_argument("--parallel-nextpnr", action="store_true",
                        help="Whether to use the parallel nextpnr script with the oxide toolchain")
    parser.add_argument("--extra-nextpnr-params", action="store_true", help="Enable extra nextpnr parameters")
    parser.add_argument("--synth_mode", default="radiant",
                        help="Which synthesis mode to use with Radiant toolchain: "
                             "radiant/synplify (default), lse, or yosys")
    parser.add_argument("--cpu-cfu", default=None, help="Specify file containing CFU Verilog module")
    parser.add_argument("--cpu-variant", default=None, help="Which CPU variant to use")
    parser.add_argument("--separate-arena", action="store_true", help="Create separate RAM for tensor arena")
    parser.add_argument("--cfu-mport", action="store_true", help="Create a direct connection between CFU and LRAM")
    parser.add_argument("--execute-from-lram", action="store_true",
                        help="Make the CPU execute from integrated ROM stored in LRAM instead of flash")
    parser.add_argument("--integrated-rom-init", metavar="FILE",
                        help="Use FILE as integrated ROM data instead of default BIOS")
    parser.add_argument("--build-bios", action="store_true",
                        help="Flag to specify that the BIOS is built as well")
    args = parser.parse_args()

    # Optional pre-built ROM image; empty list means "no initial contents".
    if args.integrated_rom_init:
        integrated_rom_init = get_mem_data(args.integrated_rom_init, "little")
    else:
        integrated_rom_init = []

    # CPU variant precedence: explicit --cpu-variant wins, then the presence
    # of a CFU (slim or full flavor), otherwise a plain full core. The
    # "+debug" suffix is appended whenever --debug is set.
    if args.cpu_variant:
        variant = args.cpu_variant
    elif args.cpu_cfu:
        if args.slim_cpu:
            variant = "slim+cfu+debug" if args.debug else "slim+cfu"
        else:
            variant = "full+cfu+debug" if args.debug else "full+cfu"
    else:
        variant = "full+debug" if args.debug else "full"
    copy_cpu_variant_if_needed(variant)

    soc = HpsSoC(Platform(args.toolchain, args.parallel_nextpnr, args.extra_nextpnr_params),
                 debug=args.debug,
                 variant=variant,
                 cpu_cfu=args.cpu_cfu,
                 execute_from_lram=args.execute_from_lram,
                 separate_arena=args.separate_arena,
                 integrated_rom_init=integrated_rom_init,
                 build_bios=args.build_bios)
    if args.cfu_mport:
        soc.connect_cfu_to_lram()
    if not args.build_bios:
        # To still allow building libraries needed
        # by the HPS software, without the necessity of
        # having the BIOS (and its gateware requirements such as the Timer)
        # this flag needs to be set to True
        soc.integrated_rom_initialized = True
    builder = create_builder(soc, args)

    # Toolchain-specific builder options are merged into the build call.
    builder_kwargs = {}
    if args.toolchain == "radiant":
        builder_kwargs.update(radiant_build_argdict(args))
    elif args.toolchain == "oxide":
        builder_kwargs.update(oxide_argdict(args))
    vns = builder.build(**builder_kwargs, run=args.build)
    soc.do_exit(vns)
    if not args.build:
        print("Use --build to build the bitstream, if needed")
# Script entry point.
if __name__ == "__main__":
    main()
| |
"""
Handles logic for parsing log requests into a readable format
"""
__author__ = "Ryan Faulkner"
__date__ = "November 9th, 2012"
__license__ = "GPL (version 2 or later)"
import sys
import urlparse
import re
import logging
import json
import gzip
import user_metrics.config.settings as projSet
# CONFIGURE THE LOGGER
# Module-level side effect: routes DEBUG-and-above records to stderr with a
# compact timestamped format for all loggers in this process.
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%b-%d %H:%M:%S')
class LineParseMethods():
    """
    Defines methods for processing lines of text primarily from log files. Each method in this class takes one
    argument:

        - **line** - String. Line text to process.

    The return value of the method is simply some function of the input defined by the transformation method.
    """

    @classmethod
    def parse(cls, log_file, parse_method, header=False, version=1):
        """
        Log processing wrapper method. This takes a log file as input and applies one of the parser methods to
        the contents, storing the list results in a list.
        """
        # Open the data file - gzipped logs are recognized by their extension
        # (the old regex matched '.gz' anywhere in the name).
        if log_file.endswith('.gz'):
            file_obj = gzip.open(projSet.__data_file_dir__ + log_file, 'rb')
        else:
            file_obj = open(projSet.__data_file_dir__ + log_file, 'r')
        try:
            # Process (discard) the header line if requested.
            if header:
                file_obj.readline()
            # Iterating the file yields lines until EOF; previously this was a
            # `while 1: readline()` loop followed by a no-op map().
            return [parse_method(line, version=version) for line in file_obj]
        finally:
            # The previous implementation leaked the handle; always close it.
            file_obj.close()

    @staticmethod
    def e3_lm_log_parse(line, version=1):
        """
        Data Format:

            https://meta.wikimedia.org/wiki/Research:Timestamp_position_modification/Clicktracking

        e.g. from /var/log/aft/click-tracking.log ::

            enwiki ext.lastModified@1-ctrl1-impression 20120622065341 0 aLIoSWm5H8W5C91MTT4ddkHXr42EmTxvL 0 0 0 0 0
        """
        elems = line.split('\t')
        l = elems[0].split()
        l.extend(elems[1:])
        # in most cases the additional data will be missing - append a field here
        if len(l) < 11:
            l.append("no data")
        return l

    @staticmethod
    def e3_pef_log_parse(line, version=1):
        """
        Data Format:

            https://meta.wikimedia.org/wiki/Research:Timestamp_position_modification/Clicktracking

        e.g. from /var/log/aft/click-tracking.log ::

            enwiki ext.postEditFeedback@1-assignment-control 20120731063615 1 FGiANxyrmVcI5InN0myNeHabMbPUKQMCo 0 0 0 0 0 15667009:501626433
        """
        elems = line.split('\t')

        # The trailing field may hold "page:rev" or "page:rev:user_hash";
        # leave the parts empty when it is absent or malformed.
        page_id = ''
        rev_id = ''
        user_hash = ''
        try:
            additional_data = elems[9]
            additional_data_fields = additional_data.split(':')
            if len(additional_data_fields) == 2:
                page_id = additional_data_fields[0]
                rev_id = additional_data_fields[1]
                user_hash = ''
            elif len(additional_data_fields) == 3:
                page_id = additional_data_fields[0]
                rev_id = additional_data_fields[1]
                user_hash = additional_data_fields[2]
        except IndexError:
            logging.info('No additional data for event %s at time %s.' % (elems[0], elems[1]))

        l = elems[0].split()
        l.extend(elems[1:9])
        l.append(user_hash)
        l.append(rev_id)
        l.append(page_id)
        # Append fields corresponding to `e3pef_time_to_milestone` and `e3pef_revision_measure`
        l.extend(['', ''])
        return l

    @staticmethod
    def e3_acux_log_parse_client_event(line, version=1):
        """Parse a client-side ACUX event line; return [] when the line does
        not match the expected shape/version."""
        line_bits = line.strip().split('\t')
        num_fields = len(line_bits)
        regex_str = r'ext.accountCreationUX.*@.*_%s' % version

        if num_fields == 10 and re.search(regex_str, line):
            # CLIENT EVENT - impression, assignment, and submit events
            fields = line_bits[0].split()
            project = fields[0]
            event_desc = fields[1].split('@')[1].split('-')
            bucket = event_desc[1]
            event = event_desc[2]

            fields = [project, bucket, event]
            fields.extend(line_bits[1:5])

            # At most three '|'-separated extras; missing ones stay 'None'.
            additional_fields = ['None', 'None', 'None']
            parsed_add_fields = line_bits[9].split('|')
            for i, value in enumerate(parsed_add_fields[:3]):
                additional_fields[i] = value
            fields.extend(additional_fields)
            return fields
        return []

    @staticmethod
    def e3_acux_log_parse_server_event(line, version=1):
        """Parse a server-side ACUX account-creation event; return [] when the
        line is not a matching server event."""
        line_bits = line.split('\t')
        num_fields = len(line_bits)
        server_event_regex = r'account_create.*userbuckets.*ACUX'

        # handle both events generated from the server and client side via ACUX. Discriminate the two cases based
        # on the number of fields in the log
        if num_fields == 1:
            # SERVER EVENT - account creation
            line_bits = line.split()
            try:
                if re.search(server_event_regex, line):
                    query_vars = urlparse.parse_qs(line_bits[1])
                    userbuckets = json.loads(query_vars['userbuckets'][0])
                    # Ensure that the user is self made, the event is account creation, and the version is correct
                    if query_vars['self_made'][0] and query_vars['?event_id'][0] == 'account_create' \
                            and str(version) in userbuckets['ACUX'][0]:
                        campaign = userbuckets['campaign'][0] if 'campaign' in userbuckets else ''
                        return [line_bits[0], query_vars['username'][0], query_vars['user_id'][0],
                                query_vars['timestamp'][0], query_vars['?event_id'][0], query_vars['self_made'][0],
                                query_vars['mw_user_token'][0], query_vars['version'][0], query_vars['by_email'][0],
                                query_vars['creator_user_id'][0], campaign]
                    else:
                        return []
            except (KeyError, IndexError):
                # Missing expected query keys or a malformed line.
                return []
        return []

    @staticmethod
    def e3_cta4_log_parse_client(line, version=1):
        """ Parse logs for AFT5-CTA4 log requests """
        line_bits = line.split('\t')
        num_fields = len(line_bits)

        regex_1 = r"ext.articleFeedbackv5@10-option6X-cta_signup_login-impression"
        regex_2 = r"ext.articleFeedbackv5@10-option6X-cta_signup_login-button_signup_click"

        if num_fields == 10 and (re.search(regex_1, line) or re.search(regex_2, line)):
            fields = line_bits[0].split()
            # Normalize the event kind from the matched pattern.
            if re.search(regex_1, line):
                fields.append('impression')
            else:
                fields.append('click')
            fields.append(line_bits[1])
            fields.append(line_bits[3])

            # The last field must carry exactly three '|'-separated values.
            last_field = line_bits[9].split('|')
            if len(last_field) == 3:
                fields.extend([i.strip() for i in last_field])
            else:
                return []
            return fields
        return []

    @staticmethod
    def e3_cta4_log_parse_server(line, version=1):
        """ Parse logs for AFT5-CTA4 log requests """
        line_bits = line.split('\t')
        num_fields = len(line_bits)

        if num_fields == 1:
            # SERVER EVENT - account creation
            line_bits = line.split()
            query_vars = urlparse.parse_qs(line_bits[1])
            try:
                # Ensure that the user is self made
                if query_vars['self_made'][0] and query_vars['?event_id'][0] == 'account_create' \
                        and re.search(r'userbuckets', line) and 'campaign' in json.loads(query_vars['userbuckets'][0]):
                    return [line_bits[0], query_vars['username'][0], query_vars['user_id'][0],
                            query_vars['timestamp'][0], query_vars['?event_id'][0], query_vars['self_made'][0],
                            query_vars['version'][0], query_vars['by_email'][0], query_vars['creator_user_id'][0]]
                else:
                    return []
            except (TypeError, KeyError, IndexError):
                # Malformed JSON payload or missing expected query keys.
                return []
        return []
| |
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import IntegrityError, models
from django.db.models import Q
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import (
AllowsNullGFK, Animal, Carrot, Comparison, ConcreteRelatedModel,
ForConcreteModelModel, ForProxyModelModel, Gecko, ManualPK, Mineral,
ProxyRelatedModel, Rock, TaggedItem, ValuableRock, ValuableTaggedItem,
Vegetable,
)
class GenericRelationsTests(TestCase):
    """Tests for GenericForeignKey / GenericRelation behavior: tagging through
    the generic related manager, content-type-aware queries, deletion
    cascades, formsets, and GFK cache invalidation."""

    def setUp(self):
        # Fixture: two tagged animals, one tagged vegetable (plus an untagged
        # one), and an untagged mineral.
        self.lion = Animal.objects.create(
            common_name="Lion", latin_name="Panthera leo")
        self.platypus = Animal.objects.create(
            common_name="Platypus", latin_name="Ornithorhynchus anatinus")
        Vegetable.objects.create(name="Eggplant", is_yucky=True)
        self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        self.quartz = Mineral.objects.create(name="Quartz", hardness=7)

        # Tagging stuff.
        self.bacon.tags.create(tag="fatty")
        self.bacon.tags.create(tag="salty")
        self.lion.tags.create(tag="yellow")
        self.lion.tags.create(tag="hairy")

        # Original list of tags:
        # comp_func normalizes a TaggedItem into a comparable tuple.
        self.comp_func = lambda obj: (
            obj.tag, obj.content_type.model_class(), obj.object_id
        )

    def test_generic_update_or_create_when_created(self):
        """
        Should be able to use update_or_create from the generic related manager
        to create a tag. Refs #23611.
        """
        count = self.bacon.tags.count()
        tag, created = self.bacon.tags.update_or_create(tag='stinky')
        self.assertTrue(created)
        self.assertEqual(count + 1, self.bacon.tags.count())

    def test_generic_update_or_create_when_updated(self):
        """
        Should be able to use update_or_create from the generic related manager
        to update a tag. Refs #23611.
        """
        count = self.bacon.tags.count()
        tag = self.bacon.tags.create(tag='stinky')
        self.assertEqual(count + 1, self.bacon.tags.count())
        tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id)
        self.assertFalse(created)
        self.assertEqual(count + 1, self.bacon.tags.count())
        self.assertEqual(tag.tag, 'juicy')

    def test_generic_get_or_create_when_created(self):
        """
        Should be able to use get_or_create from the generic related manager
        to create a tag. Refs #23611.
        """
        count = self.bacon.tags.count()
        tag, created = self.bacon.tags.get_or_create(tag='stinky')
        self.assertTrue(created)
        self.assertEqual(count + 1, self.bacon.tags.count())

    def test_generic_get_or_create_when_exists(self):
        """
        Should be able to use get_or_create from the generic related manager
        to get a tag. Refs #23611.
        """
        count = self.bacon.tags.count()
        tag = self.bacon.tags.create(tag="stinky")
        self.assertEqual(count + 1, self.bacon.tags.count())
        tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'})
        self.assertFalse(created)
        self.assertEqual(count + 1, self.bacon.tags.count())
        # shouldn't have changed the tag
        self.assertEqual(tag.tag, 'stinky')

    def test_generic_relations_m2m_mimic(self):
        """
        Objects with declared GenericRelations can be tagged directly -- the
        API mimics the many-to-many API.
        """
        self.assertQuerysetEqual(self.lion.tags.all(), [
            "<TaggedItem: hairy>",
            "<TaggedItem: yellow>"
        ])
        self.assertQuerysetEqual(self.bacon.tags.all(), [
            "<TaggedItem: fatty>",
            "<TaggedItem: salty>"
        ])

    def test_access_content_object(self):
        """
        Test accessing the content object like a foreign key.
        """
        tagged_item = TaggedItem.objects.get(tag="salty")
        self.assertEqual(tagged_item.content_object, self.bacon)

    def test_query_content_object(self):
        # Reverse generic relations can be used in filters/ordering.
        qs = TaggedItem.objects.filter(
            animal__isnull=False).order_by('animal__common_name', 'tag')
        self.assertQuerysetEqual(
            qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>"]
        )

        mpk = ManualPK.objects.create(id=1)
        mpk.tags.create(tag='mpk')
        qs = TaggedItem.objects.filter(
            Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag')
        self.assertQuerysetEqual(
            qs, ["hairy", "mpk", "yellow"], lambda x: x.tag)

    def test_exclude_generic_relations(self):
        """
        Test lookups over an object without GenericRelations.
        """
        # Recall that the Mineral class doesn't have an explicit GenericRelation
        # defined. That's OK, because you can create TaggedItems explicitly.
        # However, excluding GenericRelations means your lookups have to be a
        # bit more explicit.
        TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
        TaggedItem.objects.create(content_object=self.quartz, tag="clearish")

        ctype = ContentType.objects.get_for_model(self.quartz)
        q = TaggedItem.objects.filter(
            content_type__pk=ctype.id, object_id=self.quartz.id
        )
        self.assertQuerysetEqual(q, [
            "<TaggedItem: clearish>",
            "<TaggedItem: shiny>"
        ])

    def test_access_via_content_type(self):
        """
        Test lookups through content type.
        """
        self.lion.delete()
        self.platypus.tags.create(tag="fatty")

        ctype = ContentType.objects.get_for_model(self.platypus)

        self.assertQuerysetEqual(
            Animal.objects.filter(tags__content_type=ctype),
            ["<Animal: Platypus>"])

    def test_set_foreign_key(self):
        """
        You can set a generic foreign key in the way you'd expect.
        """
        tag1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
        tag1.content_object = self.platypus
        tag1.save()

        self.assertQuerysetEqual(
            self.platypus.tags.all(),
            ["<TaggedItem: shiny>"])

    def test_queries_across_generic_relations(self):
        """
        Queries across generic relations respect the content types. Even though
        there are two TaggedItems with a tag of "fatty", this query only pulls
        out the one with the content type related to Animals.
        """
        self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [
            "<Animal: Lion>",
            "<Animal: Platypus>"
        ])

    def test_queries_content_type_restriction(self):
        """
        Create another fatty tagged instance with different PK to ensure there
        is a content type restriction in the generated queries below.
        """
        mpk = ManualPK.objects.create(id=self.lion.pk)
        mpk.tags.create(tag="fatty")
        self.platypus.tags.create(tag="fatty")

        self.assertQuerysetEqual(
            Animal.objects.filter(tags__tag='fatty'), ["<Animal: Platypus>"])
        self.assertQuerysetEqual(
            Animal.objects.exclude(tags__tag='fatty'), ["<Animal: Lion>"])

    def test_object_deletion_with_generic_relation(self):
        """
        If you delete an object with an explicit Generic relation, the related
        objects are deleted when the source object is deleted.
        """
        self.assertQuerysetEqual(TaggedItem.objects.all(), [
            ('fatty', Vegetable, self.bacon.pk),
            ('hairy', Animal, self.lion.pk),
            ('salty', Vegetable, self.bacon.pk),
            ('yellow', Animal, self.lion.pk)
        ],
            self.comp_func
        )
        self.lion.delete()

        self.assertQuerysetEqual(TaggedItem.objects.all(), [
            ('fatty', Vegetable, self.bacon.pk),
            ('salty', Vegetable, self.bacon.pk),
        ],
            self.comp_func
        )

    def test_object_deletion_without_generic_relation(self):
        """
        If Generic Relation is not explicitly defined, any related objects
        remain after deletion of the source object.
        """
        TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
        quartz_pk = self.quartz.pk
        self.quartz.delete()
        self.assertQuerysetEqual(TaggedItem.objects.all(), [
            ('clearish', Mineral, quartz_pk),
            ('fatty', Vegetable, self.bacon.pk),
            ('hairy', Animal, self.lion.pk),
            ('salty', Vegetable, self.bacon.pk),
            ('yellow', Animal, self.lion.pk),
        ],
            self.comp_func
        )

    def test_tag_deletion_related_objects_unaffected(self):
        """
        If you delete a tag, the objects using the tag are unaffected (other
        than losing a tag).
        """
        ctype = ContentType.objects.get_for_model(self.lion)
        tag = TaggedItem.objects.get(
            content_type__pk=ctype.id, object_id=self.lion.id, tag="hairy")
        tag.delete()

        self.assertQuerysetEqual(self.lion.tags.all(), ["<TaggedItem: yellow>"])
        self.assertQuerysetEqual(TaggedItem.objects.all(), [
            ('fatty', Vegetable, self.bacon.pk),
            ('salty', Vegetable, self.bacon.pk),
            ('yellow', Animal, self.lion.pk)
        ],
            self.comp_func
        )

    def test_add_bulk(self):
        # add() with default bulk=True reassigns tags with a single UPDATE.
        bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
        t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
        # One update() query.
        with self.assertNumQueries(1):
            bacon.tags.add(t1, t2)
        self.assertEqual(t1.content_object, bacon)
        self.assertEqual(t2.content_object, bacon)

    def test_add_bulk_false(self):
        # add(bulk=False) saves each object individually instead of updating.
        bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
        t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
        # One save() for each object.
        with self.assertNumQueries(2):
            bacon.tags.add(t1, t2, bulk=False)
        self.assertEqual(t1.content_object, bacon)
        self.assertEqual(t2.content_object, bacon)

    def test_add_rejects_unsaved_objects(self):
        # Unsaved objects can't be bulk-added to a generic relation.
        t1 = TaggedItem(content_object=self.quartz, tag="shiny")
        msg = "<TaggedItem: shiny> instance isn't saved. Use bulk=False or save the object first."
        with self.assertRaisesMessage(ValueError, msg):
            self.bacon.tags.add(t1)

    def test_add_rejects_wrong_instances(self):
        # add() validates the type of the objects being added.
        msg = "'TaggedItem' instance expected, got <Animal: Lion>"
        with self.assertRaisesMessage(TypeError, msg):
            self.bacon.tags.add(self.lion)

    def test_set(self):
        # set() replaces the related set, with and without bulk/clear.
        bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        fatty = bacon.tags.create(tag="fatty")
        salty = bacon.tags.create(tag="salty")

        bacon.tags.set([fatty, salty])
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
            "<TaggedItem: salty>",
        ])

        bacon.tags.set([fatty])
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
        ])

        bacon.tags.set([])
        self.assertQuerysetEqual(bacon.tags.all(), [])

        bacon.tags.set([fatty, salty], bulk=False, clear=True)
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
            "<TaggedItem: salty>",
        ])

        bacon.tags.set([fatty], bulk=False, clear=True)
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
        ])

        bacon.tags.set([], clear=True)
        self.assertQuerysetEqual(bacon.tags.all(), [])

    def test_assign(self):
        # Direct assignment via set() mirrors the M2M assignment API.
        bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        fatty = bacon.tags.create(tag="fatty")
        salty = bacon.tags.create(tag="salty")

        bacon.tags.set([fatty, salty])
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
            "<TaggedItem: salty>",
        ])

        bacon.tags.set([fatty])
        self.assertQuerysetEqual(bacon.tags.all(), [
            "<TaggedItem: fatty>",
        ])

        bacon.tags.set([])
        self.assertQuerysetEqual(bacon.tags.all(), [])

    def test_assign_with_queryset(self):
        # Querysets used in reverse GFK assignments are pre-evaluated so their
        # value isn't affected by the clearing operation
        # in ManyRelatedManager.set() (#19816).
        bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        bacon.tags.create(tag="fatty")
        bacon.tags.create(tag="salty")
        self.assertEqual(2, bacon.tags.count())

        qs = bacon.tags.filter(tag="fatty")
        bacon.tags.set(qs)

        self.assertEqual(1, bacon.tags.count())
        self.assertEqual(1, qs.count())

    def test_generic_relation_related_name_default(self):
        # GenericRelation isn't usable from the reverse side by default.
        msg = (
            "Cannot resolve keyword 'vegetable' into field. Choices are: "
            "animal, content_object, content_type, content_type_id, id, "
            "manualpk, object_id, tag, valuabletaggeditem"
        )
        with self.assertRaisesMessage(FieldError, msg):
            TaggedItem.objects.filter(vegetable__isnull=True)

    def test_multiple_gfk(self):
        # Simple tests for multiple GenericForeignKeys
        # only uses one model, since the above tests should be sufficient.
        tiger = Animal.objects.create(common_name="tiger")
        cheetah = Animal.objects.create(common_name="cheetah")
        bear = Animal.objects.create(common_name="bear")

        # Create directly
        Comparison.objects.create(
            first_obj=cheetah, other_obj=tiger, comparative="faster"
        )
        Comparison.objects.create(
            first_obj=tiger, other_obj=cheetah, comparative="cooler"
        )

        # Create using GenericRelation
        tiger.comparisons.create(other_obj=bear, comparative="cooler")
        tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
        self.assertQuerysetEqual(cheetah.comparisons.all(), [
            "<Comparison: cheetah is faster than tiger>"
        ])

        # Filtering works
        self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
            "<Comparison: tiger is cooler than cheetah>",
            "<Comparison: tiger is cooler than bear>",
        ], ordered=False)

        # Filtering and deleting works
        subjective = ["cooler"]
        tiger.comparisons.filter(comparative__in=subjective).delete()
        self.assertQuerysetEqual(Comparison.objects.all(), [
            "<Comparison: cheetah is faster than tiger>",
            "<Comparison: tiger is stronger than cheetah>"
        ], ordered=False)

        # If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
        # deleted since Animal has an explicit GenericRelation to Comparison
        # through first_obj. Comparisons with cheetah as 'other_obj' will not
        # be deleted.
        cheetah.delete()
        self.assertQuerysetEqual(Comparison.objects.all(), [
            "<Comparison: tiger is stronger than None>"
        ])

    def test_gfk_subclasses(self):
        # GenericForeignKey should work with subclasses (see #8309)
        quartz = Mineral.objects.create(name="Quartz", hardness=7)
        valuedtag = ValuableTaggedItem.objects.create(
            content_object=quartz, tag="shiny", value=10
        )
        self.assertEqual(valuedtag.content_object, quartz)

    def test_generic_relation_to_inherited_child(self):
        # GenericRelations to models that use multi-table inheritance work.
        granite = ValuableRock.objects.create(name='granite', hardness=5)
        ValuableTaggedItem.objects.create(content_object=granite, tag="countertop", value=1)
        self.assertEqual(ValuableRock.objects.filter(tags__value=1).count(), 1)
        # We're generating a slightly inefficient query for tags__tag - we
        # first join ValuableRock -> TaggedItem -> ValuableTaggedItem, and then
        # we fetch tag by joining TaggedItem from ValuableTaggedItem. The last
        # join isn't necessary, as TaggedItem <-> ValuableTaggedItem is a
        # one-to-one join.
        self.assertEqual(ValuableRock.objects.filter(tags__tag="countertop").count(), 1)
        granite.delete()  # deleting the rock should delete the related tag.
        self.assertEqual(ValuableTaggedItem.objects.count(), 0)

    def test_generic_inline_formsets(self):
        # Rendering of generic inline formsets: empty, bound to an unsaved
        # instance, bound to an instance with existing tags, and with a prefix.
        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        formset = GenericFormSet()
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
        )

        formset = GenericFormSet(instance=Animal())
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
        )

        platypus = Animal.objects.create(
            common_name="Platypus", latin_name="Ornithorhynchus anatinus"
        )
        platypus.tags.create(tag="shiny")
        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        formset = GenericFormSet(instance=platypus)
        tagged_item_id = TaggedItem.objects.get(
            tag='shiny', object_id=platypus.id
        ).id
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id
        )

        lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
        formset = GenericFormSet(instance=lion, prefix='x')
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" />
<input type="hidden" name="x-0-id" id="id_x-0-id" /></p>"""
        )

    def test_gfk_manager(self):
        # GenericForeignKey should not use the default manager (which may filter objects) #16048
        tailless = Gecko.objects.create(has_tail=False)
        tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
        self.assertEqual(tag.content_object, tailless)

    def test_subclasses_with_gen_rel(self):
        """
        Concrete model subclasses with generic relations work
        correctly (ticket 11263).
        """
        granite = Rock.objects.create(name='granite', hardness=5)
        TaggedItem.objects.create(content_object=granite, tag="countertop")
        self.assertEqual(Rock.objects.get(tags__tag="countertop"), granite)

    def test_subclasses_with_parent_gen_rel(self):
        """
        Generic relations on a base class (Vegetable) work correctly in
        subclasses (Carrot).
        """
        bear = Carrot.objects.create(name='carrot')
        TaggedItem.objects.create(content_object=bear, tag='orange')
        self.assertEqual(Carrot.objects.get(tags__tag='orange'), bear)

    def test_generic_inline_formsets_initial(self):
        """
        Test for #17927 Initial values support for BaseGenericInlineFormSet.
        """
        quartz = Mineral.objects.create(name="Quartz", hardness=7)

        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        ctype = ContentType.objects.get_for_model(quartz)
        initial_data = [{
            'tag': 'lizard',
            'content_type': ctype.pk,
            'object_id': quartz.pk,
        }]
        formset = GenericFormSet(initial=initial_data)
        self.assertEqual(formset.forms[0].initial, initial_data[0])

    def test_get_or_create(self):
        # get_or_create should work with virtual fields (content_object)
        quartz = Mineral.objects.create(name="Quartz", hardness=7)
        tag, created = TaggedItem.objects.get_or_create(tag="shiny", defaults={'content_object': quartz})
        self.assertTrue(created)
        self.assertEqual(tag.tag, "shiny")
        self.assertEqual(tag.content_object.id, quartz.id)

    def test_update_or_create_defaults(self):
        # update_or_create should work with virtual fields (content_object)
        quartz = Mineral.objects.create(name="Quartz", hardness=7)
        diamond = Mineral.objects.create(name="Diamond", hardness=7)
        tag, created = TaggedItem.objects.update_or_create(tag="shiny", defaults={'content_object': quartz})
        self.assertTrue(created)
        self.assertEqual(tag.content_object.id, quartz.id)

        tag, created = TaggedItem.objects.update_or_create(tag="shiny", defaults={'content_object': diamond})
        self.assertFalse(created)
        self.assertEqual(tag.content_object.id, diamond.id)

    def test_query_content_type(self):
        # Filtering directly on the GFK virtual field is rejected.
        msg = "Field 'content_object' does not generate an automatic reverse relation"
        with self.assertRaisesMessage(FieldError, msg):
            TaggedItem.objects.get(content_object='')

    def test_unsaved_instance_on_generic_foreign_key(self):
        """
        Assigning an unsaved object to GenericForeignKey should raise an
        exception on model.save().
        """
        quartz = Mineral(name="Quartz", hardness=7)
        with self.assertRaises(IntegrityError):
            TaggedItem.objects.create(tag="shiny", content_object=quartz)

    def test_cache_invalidation_for_content_type_id(self):
        # Changing content_type must invalidate the cached content_object.
        # Create a Vegetable and Mineral with the same id.
        new_id = max(Vegetable.objects.order_by('-id')[0].id,
                     Mineral.objects.order_by('-id')[0].id) + 1
        broccoli = Vegetable.objects.create(id=new_id, name="Broccoli")
        diamond = Mineral.objects.create(id=new_id, name="Diamond", hardness=7)
        tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy")
        tag.content_type = ContentType.objects.get_for_model(diamond)
        self.assertEqual(tag.content_object, diamond)

    def test_cache_invalidation_for_object_id(self):
        # Changing object_id must invalidate the cached content_object.
        broccoli = Vegetable.objects.create(name="Broccoli")
        cauliflower = Vegetable.objects.create(name="Cauliflower")
        tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy")
        tag.object_id = cauliflower.id
        self.assertEqual(tag.content_object, cauliflower)

    def test_assign_content_object_in_init(self):
        # The GFK accepts an (unsaved) object passed to the constructor.
        spinach = Vegetable(name="spinach")
        tag = TaggedItem(content_object=spinach)
        self.assertEqual(tag.content_object, spinach)
class CustomWidget(forms.TextInput):
    """Marker widget used to verify Meta.widgets is honored by the formset."""
    pass
class TaggedItemForm(forms.ModelForm):
    """ModelForm for TaggedItem that overrides the 'tag' field's widget."""
    class Meta:
        model = TaggedItem
        fields = '__all__'
        widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
    """Tests for ``generic_inlineformset_factory``: widget handling, fk_name
    validation, custom form.save(), and proxy vs. concrete model targets."""

    def test_generic_inlineformset_factory(self):
        """
        Regression for #14572: Using base forms with widgets
        defined in Meta should not raise errors.
        """
        Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
        form = Formset().forms[0]
        self.assertIsInstance(form['tag'].field.widget, CustomWidget)

    @isolate_apps('generic_relations')
    def test_incorrect_content_type(self):
        # A content_type field that isn't a FK to ContentType is rejected.
        class BadModel(models.Model):
            content_type = models.PositiveIntegerField()

        msg = "fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType"
        with self.assertRaisesMessage(Exception, msg):
            generic_inlineformset_factory(BadModel, TaggedItemForm)

    def test_save_new_uses_form_save(self):
        """
        Regression for #16260: save_new should call form.save()
        """
        class SaveTestForm(forms.ModelForm):
            def save(self, *args, **kwargs):
                # Side effect proves the custom save() was invoked.
                self.instance.saved_by = "custom method"
                return super().save(*args, **kwargs)

        Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj = formset.save()[0]
        self.assertEqual(new_obj.saved_by, "custom method")

    def test_save_new_for_proxy(self):
        # With for_concrete_model=False the GFK points at the proxy itself.
        Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertEqual(new_obj.obj, instance)

    def test_save_new_for_concrete(self):
        # With for_concrete_model=True the GFK resolves to the concrete model.
        Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
    """GenericForeignKey / GenericRelation behavior with proxy models."""

    def test_default_behavior(self):
        """
        The default for for_concrete_model should be True
        """
        base = ForConcreteModelModel()
        base.obj = rel = ProxyRelatedModel.objects.create()
        base.save()
        base = ForConcreteModelModel.objects.get(pk=base.pk)
        # The stored relation resolves to the concrete model, not the proxy.
        rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
        self.assertEqual(base.obj, rel)

    def test_works_normally(self):
        """
        When for_concrete_model is False, we should still be able to get
        an instance of the concrete class.
        """
        base = ForProxyModelModel()
        base.obj = rel = ConcreteRelatedModel.objects.create()
        base.save()
        base = ForProxyModelModel.objects.get(pk=base.pk)
        self.assertEqual(base.obj, rel)

    def test_proxy_is_returned(self):
        """
        Instances of the proxy should be returned when
        for_concrete_model is False.
        """
        base = ForProxyModelModel()
        base.obj = ProxyRelatedModel.objects.create()
        base.save()
        base = ForProxyModelModel.objects.get(pk=base.pk)
        self.assertIsInstance(base.obj, ProxyRelatedModel)

    def test_query(self):
        # Reverse lookup through the generic relation on the concrete model.
        base = ForProxyModelModel()
        base.obj = rel = ConcreteRelatedModel.objects.create()
        base.save()
        self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))

    def test_query_proxy(self):
        # Reverse lookup through the generic relation on the proxy model.
        base = ForProxyModelModel()
        base.obj = rel = ProxyRelatedModel.objects.create()
        base.save()
        self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))

    def test_generic_relation(self):
        # The GenericRelation accessor on the proxy finds the referrer.
        base = ForProxyModelModel()
        base.obj = ProxyRelatedModel.objects.create()
        base.save()
        base = ForProxyModelModel.objects.get(pk=base.pk)
        rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
        self.assertEqual(base, rel.bases.get())

    def test_generic_relation_set(self):
        # .set() on the generic relation replaces the relation's target.
        base = ForProxyModelModel()
        base.obj = ConcreteRelatedModel.objects.create()
        base.save()
        newrel = ConcreteRelatedModel.objects.create()
        newrel.bases.set([base])
        newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
        self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(SimpleTestCase):
    """Constructing models with content_object=None must be accepted."""

    def test_none_allowed(self):
        # AllowsNullGFK doesn't require a content_type, so None argument should
        # also be allowed.
        AllowsNullGFK(content_object=None)
        # TaggedItem requires a content_type but initializing with None should
        # be allowed.
        TaggedItem(content_object=None)
| |
"""
## Some Python Tricks
Some of my fav short Python tricks. No real theme, just added
if brief and cool.
Code comes with test samples (see the _@test_ entries, below).
## Code for Demonstrations and Testing
### Demo
Trapping a set of demos.
If called with no arguments, it runs all the trapped
demos.
If called as _demo('-h')_, then it
prints a list of the demos.
If called as a decorator, it traps the decorated
function. e.g.
@demo
def demoed(show=1):
"Sample demo."
print show/2
"""
def demo(f=None,demos=[]):
def demoDoc(d):
return '# '+d.__doc__+"\n" if d.__doc__ else ""
if f == '-h':
for d in demos:
print d.func_name+'()', demoDoc(d)
if f: demos.append(f); return f
s='|'+'='*40 +'\n'
for d in demos:
print '\n==|',d.func_name,s,demoDoc(d),d()
"""
### Test
Run a set of tests, each of which returns a pair of
_want,got_. Counts the number of time a test
is "passed" (i.e. _want == got_) or "failed"
(i.e. _want != got_).
If called with no arguments, it runs all the tests.
If called as a decorator, it traps the test.
"""
def test(f=None,cache=[]):
    """Register (decorator) or run the trapped tests (Python 2).

    Each registered test returns a flat list of want1, got1, want2, got2, ...
    pairs; matching pairs count as CORRECT, the rest as WRONG.
    The mutable default `cache` is intentional: it is the shared registry.
    """
    if f:
        # Decorator usage: remember the test and hand it back unchanged.
        cache += [f]
        return f
    ok = no = 0
    for t in cache:
        print '#',t.func_name ,t.__doc__ or ''
        # `t() or []` tolerates tests that return None (nothing to score).
        prefix, n, found = None, 0, t() or []
        while found:
            # NOTE(review): an odd-length result list would crash the second
            # pop(0) here -- tests are expected to return pairs.
            this, that = found.pop(0), found.pop(0)
            if this == that:
                ok, n, prefix = ok+1, n+1,'# CORRECT :'
            else:
                no, n, prefix = no+1, n+1,'# WRONG :'
            print prefix,t.func_name,'test',n
    if ok+no:
        # Integer percentage (Python 2 floor division).
        print '\n# Final score: %s/%s = %s%% CORRECT' \
            % (ok,(ok+no),int(100*ok/(ok+no)))
"""
E.g.
"""
@test
def tested0():
    "Demo of a failing test"
    # want=False, got=True -- deliberately mismatched so test() prints WRONG.
    return [False,True]
@test
def tested1():
    "Demo of basic testing."
    # Results are consumed as (want, got) pairs by test().
    return [True,True, # should pass
            1, 2/2] # should pass
"""
If the _test()_ is called after the above then we will see
tested1 Demo of basic testing.
CORRECT: tested1 test 1
WRONG : tested1 test 2
CORRECT: tested1 test 3
# tested2 Yet another demo of basic testing.
WRONG : tested2 test 1
# Final score: 2/4 = 50% CORRECT
## Type Coercion
### Atom
Converts a string to an int or a float, when possible.
"""
def atom(x):
    """Coerce x to int if possible, else float, else return it unchanged."""
    for cast in (int, float):
        try:
            return cast(x)
        except ValueError:
            pass
    return x
@test
def atomed():
    # atom("1") should coerce the string to the int 1.
    return [1,atom("1")]
"""
## Maths Stuff
"""
def median(lst, ordered=False):
    """Return the median of lst.

    Sorts a copy unless ordered=True. Even-length input yields the mean
    of the two middle values (as a float). Raises IndexError on [].
    """
    data = lst if ordered else sorted(lst)
    size = len(data)
    mid = size // 2
    if size % 2 == 1:
        return data[mid]
    # Even length: average the two central elements (upper index clamped
    # exactly as the original did).
    upper = max(0, min(mid, size))
    lower = mid - 1
    return (data[lower] + data[upper]) * 0.5
@test
def _median():
    # NOTE(review): returns None, so test() records no pass/fail here;
    # this "test" only prints the two medians (3 and 2.5) for inspection.
    print median([1,2,3,4,5])
    print median([1,2,3,4])
"""
## Random Stuff
Standard headers for random stuff
"""
import random,re
# WARNING: this alias shadows the builtin any() for the rest of the module.
any = random.uniform
seed = random.seed
"""
### Sometimes
Returns True at probability 'p'.
"""
def sometimes(p):
    """Return True with probability p (p<=0: never; p>=1: always)."""
    # `any` is the module's alias for random.uniform; call it directly.
    return random.uniform(0, 1) < p
"""
### Some
Returns a random subset of a list, keeping each item with probability 'p'.
"""
def some(lst, p=0.5):
    """Return a random subset of lst; each item survives with probability p."""
    # Inlines sometimes(p): one uniform draw per item, same random stream.
    return [kept for kept in lst if random.uniform(0, 1) < p]
"""
### One
Returns one item in a list, selected at random.
"""
def one(lst):
    """Return one item of lst, selected at random.

    NOTE(review): int(uniform(0, len-1)) selects the LAST element with
    probability ~0, so the choice is biased. Kept as-is to preserve the
    existing seeded random stream; random.choice would be fairer.
    """
    # `any` is the module alias for random.uniform.
    idx = int(random.uniform(0, len(lst) - 1))
    return lst[idx]
"""
Random tests:
"""
@test
def randomed():
    # A fixed seed makes one() and some() reproducible for this check.
    seed(1)
    lst = list("mkbcdefgh")
    return ["k",one(lst)
            ,['b', 'c', 'd', 'g', 'h'], some(lst)
            ]
"""
## Iterators
### Item
Return all non-list items in a nested list.
"""
def item(x):
    """Yield every non-list atom inside an arbitrarily nested list/tuple."""
    if not isinstance(x, (tuple, list)):
        # Leaf: anything that is not a tuple/list is emitted as-is.
        yield x
        return
    for child in x:
        for leaf in item(child):
            yield leaf
@test
def itemed():
    # Flattening [1,[[3,4],5],[6]] and summing gives 1+3+4+5+6 == 19.
    flattened = item([1, [[3, 4], 5], [6]])
    return [19, sum(flattened)]
"""
### Cycle
Returns an infinite number of items from a list,
in a random order. Warning: never terminates!
"""
def cycle(lst, max=10**32):
    """Yield up to `max` items from lst in random order, reshuffling the
    list (in place -- the caller's list is mutated) between passes.

    Bug fix: the original decremented the counter after each yield but only
    stopped once it went negative, so cycle(lst, n) produced n+1 items.
    This also made the module's own `cycled` expectation of 20 items from
    cycle(lst, 20) unsatisfiable. Now exactly `max` items are yielded.
    """
    while max > 0:
        random.shuffle(lst)
        for i in lst:
            yield i
            max -= 1
            if max <= 0:
                return
@test
def cycled():
    seed(1)
    # NOTE(review): cycle(lst, 20) as written actually yields 21 items (it
    # stops only after its counter goes negative), so this 20-item expected
    # sequence cannot match -- verify against the generator's off-by-one.
    return [[2,5,3,4,1,2,1,4,5,3,1,5,4,3,2,2,4,5,1,3]
            ,[x for x in cycle([1,2,3,4,5],20)]]
"""
### Pairs
Returns first and second item,
then second and third,
then third and fourth...
e.g. to track changes in slopes
for now,next in pairs([1,2,3,4,5]):
print "Delta", (now-next)/now
Code:
"""
def pairs(lst):
    """Yield consecutive overlapping pairs: (lst[0], lst[1]),
    (lst[1], lst[2]), ... Useful for tracking deltas between neighbors.

    Bug fix: the original indexed lst[0] unconditionally and crashed with
    IndexError on an empty sequence; an empty (or single-item) input now
    simply yields nothing.
    """
    if not lst:
        return
    prev = lst[0]
    for cur in lst[1:]:
        yield prev, cur
        prev = cur
@test
def paired():
    # Relative deltas across [10,20,20,20,10,30] sum to
    # 1 + 0 + 0 - 0.5 + 2 == 2.5.
    deltas = [(b - a) * 1.0 / a for a, b in pairs([10, 20, 20, 20, 10, 30])]
    return [2.5, sum(deltas)]
"""
### Rows
Iterates over a file. Skips blank lines. Splits
other lines into one list per line (dividing on
commas). Removes all whitespace. Filters
all cells via 'wrapper'.
E.g. to read a csv file
that may contain numbers
for n,cells in rows("mediate.csv",atom):
print cells[0]+ cells[-1]
Code:
"""
def noop(x):
    """Identity: hand x back untouched (default cell wrapper for rows)."""
    return x
def rows(file, n=0, bad=r'["\' \t\r\n]', sep=',', wrapper=None):
    """Iterate over a csv-ish file, yielding (line_number, cells) tuples.

    Strips whitespace and quotes (the `bad` character class), splits on
    `sep`, skips blank lines, and maps each cell through `wrapper`
    (identity when omitted).

    Bug fixes:
    - the file handle was never closed; it is now managed by `with`.
    - blank lines were never skipped as documented: ''.split(',') is
      [''], which is truthy, so the old `if line:` always passed. Blank
      lines (after stripping) are now really skipped; `n` still counts
      every physical line.
    """
    if wrapper is None:
        wrapper = lambda x: x  # identity default, no external dependency
    with open(file, 'r') as stream:
        for line in stream:
            n += 1
            cells = re.sub(bad, "", line).split(sep)
            if cells != ['']:
                yield n, [wrapper(c) for c in cells]
"""
### Often
Generate often seen things most often
while generating rarer things more rarely.
Given a dictionary d{k1=n1, k2=n2, ...},
return enough keys ki at probability
pi = ni/n where n = n1+n2+..
e.g.
for key in some({'box':30,'circle':20,'line':10},20)
print key
will return around twice as many boxes as anything else,
circles 1/3rd of the time and lines 1/6th of the time.
"""
def often(d,enough=10**32):
n, lst = 0, []
for x in d:
n += d[x]
lst += [(d[x],x)]
lst = sorted(lst, reverse=True)
while enough > 0:
r = random.random()
for freq,thing in lst:
r -= freq*1.0/n
if r <= 0:
yield thing
enough -= 1
break
@test
def oftend():
    # Fixed seed makes the weighted draws reproducible; 'box' (p=1/2)
    # should dominate the sample of 10.
    seed(1)
    return [['box','line', 'circle','box','box',
             'box','circle','circle','box','box'],
            [x for x in
             often({'box':30,'circle':20,'line':10},
                   10)]]
"""
### And Finally...
Lets see how all that runs:
"""
# Run every test registered via the @test decorator when the module loads.
test()
| |
import hashlib
import itertools
import logging
import time
from datetime import datetime, timedelta
from urlparse import urlparse
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.core.urlresolvers import resolve
from django.db import models, IntegrityError
from django.db.models import Q
from django.http import Http404
from django.utils.encoding import smart_str
import waffle
from pyquery import PyQuery
from tidings.models import NotificationsMixin
from tower import ugettext_lazy as _lazy, ugettext as _
from kitsune.gallery.models import Image
from kitsune.products.models import Product, Topic
from kitsune.search.es_utils import UnindexMeBro, es_analyzer_for_locale
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
from kitsune.sumo import ProgrammingError
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse, split_path
from kitsune.tags.models import BigVocabTaggableMixin
from kitsune.wiki.config import (
CATEGORIES, SIGNIFICANCES, TYPO_SIGNIFICANCE, MEDIUM_SIGNIFICANCE,
MAJOR_SIGNIFICANCE, REDIRECT_HTML, REDIRECT_CONTENT, REDIRECT_TITLE,
REDIRECT_SLUG, CANNED_RESPONSES_CATEGORY, ADMINISTRATION_CATEGORY,
TEMPLATES_CATEGORY, DOC_HTML_CACHE_KEY, TEMPLATE_TITLE_PREFIX)
from kitsune.wiki.permissions import DocumentPermissionMixin
# Module-level logger for the wiki app.
log = logging.getLogger('k.wiki')
# Raised by Document._raise_if_collides() during save().
class TitleCollision(Exception):
    """An attempt to create two pages of the same title in one locale"""
# Raised by Document._raise_if_collides() during save().
class SlugCollision(Exception):
    """An attempt to create two pages of the same slug in one locale"""
class _NotDocumentView(Exception):
    """A URL not pointing to the document view was passed to from_url()."""
class Document(NotificationsMixin, ModelBase, BigVocabTaggableMixin,
               SearchMixin, DocumentPermissionMixin):
    """A localized knowledgebase document, not revision-specific."""

    title = models.CharField(max_length=255, db_index=True)
    slug = models.CharField(max_length=255, db_index=True)

    # Is this document a template or not?
    is_template = models.BooleanField(default=False, editable=False,
                                      db_index=True)

    # Is this document localizable or not?
    is_localizable = models.BooleanField(default=True, db_index=True)

    # TODO: validate (against settings.SUMO_LANGUAGES?)
    locale = LocaleField(default=settings.WIKI_DEFAULT_LANGUAGE, db_index=True)

    # Latest approved revision. L10n dashboard depends on this being so (rather
    # than being able to set it to earlier approved revisions). (Remove "+" to
    # enable reverse link.)
    current_revision = models.ForeignKey('Revision', null=True,
                                         related_name='current_for+')

    # Latest revision which both is_approved and is_ready_for_localization,
    # This may remain non-NULL even if is_localizable is changed to false.
    latest_localizable_revision = models.ForeignKey(
        'Revision', null=True, related_name='localizable_for+')

    # The Document I was translated from. NULL iff this doc is in the default
    # locale or it is nonlocalizable. TODO: validate against
    # settings.WIKI_DEFAULT_LANGUAGE.
    parent = models.ForeignKey('self', related_name='translations',
                               null=True, blank=True)

    # Cached HTML rendering of approved revision's wiki markup:
    html = models.TextField(editable=False)

    # A document's category must always be that of its parent. If it has no
    # parent, it can do what it wants. This invariant is enforced in save().
    category = models.IntegerField(choices=CATEGORIES, db_index=True)

    # A document's is_archived flag must match that of its parent. If it has no
    # parent, it can do what it wants. This invariant is enforced in save().
    is_archived = models.BooleanField(
        default=False, db_index=True, verbose_name='is obsolete',
        help_text=_lazy(
            u'If checked, this wiki page will be hidden from basic searches '
            u'and dashboards. When viewed, the page will warn that it is no '
            u'longer maintained.'))

    # Enable discussion (kbforum) on this document.
    allow_discussion = models.BooleanField(
        default=True, help_text=_lazy(
            u'If checked, this document allows discussion in an associated '
            u'forum. Uncheck to hide/disable the forum.'))

    # List of users that have contributed to this document.
    contributors = models.ManyToManyField(User)

    # List of products this document applies to.
    products = models.ManyToManyField(Product)

    # List of product-specific topics this document applies to.
    topics = models.ManyToManyField(Topic)

    # Needs change fields.
    needs_change = models.BooleanField(default=False, help_text=_lazy(
        u'If checked, this document needs updates.'), db_index=True)
    needs_change_comment = models.CharField(max_length=500, blank=True)

    # A 24 character length gives years before having to alter max_length.
    share_link = models.CharField(max_length=24, default='')

    # Dictates the order in which articles are displayed.
    display_order = models.IntegerField(default=1, db_index=True)

    # List of related documents
    related_documents = models.ManyToManyField('self')

    # firefox_versions,
    # operating_systems:
    #     defined in the respective classes below. Use them as in
    #     test_firefox_versions.

    # TODO: Rethink indexes once controller code is near complete. Depending on
    # how MySQL uses indexes, we probably don't need individual indexes on
    # title and locale as well as a combined (title, locale) one.
    class Meta(object):
        ordering = ['display_order', 'id']
        unique_together = (('parent', 'locale'), ('title', 'locale'),
                           ('slug', 'locale'))
        permissions = [('archive_document', 'Can archive document'),
                       ('edit_needs_change', 'Can edit needs_change')]

    def _collides(self, attr, value):
        """Return whether there exists a doc in this locale whose `attr` attr
        is equal to mine."""
        return Document.objects.filter(
            locale=self.locale, **{attr: value}).exclude(id=self.id).exists()

    def _raise_if_collides(self, attr, exception):
        """Raise an exception if a page of this title/slug already exists."""
        if self.id is None or hasattr(self, 'old_' + attr):
            # If I am new or my title/slug changed...
            if self._collides(attr, getattr(self, attr)):
                raise exception

    def clean(self):
        """Translations can't be localizable."""
        self._clean_is_localizable()
        self._clean_category()
        self._ensure_inherited_attr('is_archived')

    def _clean_is_localizable(self):
        """is_localizable == allowed to have translations. Make sure that isn't
        violated.

        For default language (en-US), is_localizable means it can have
        translations. Enforce:

        * is_localizable=True if it has translations
        * if has translations, unable to make is_localizable=False

        For non-default languages, is_localizable must be False.
        """
        if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
            self.is_localizable = False

        # Can't save this translation if parent not localizable
        if self.parent and not self.parent.is_localizable:
            raise ValidationError('"%s": parent "%s" is not localizable.' % (
                unicode(self), unicode(self.parent)))

        # Can't make not localizable if it has translations
        # This only applies to documents that already exist, hence self.pk
        if self.pk and not self.is_localizable and self.translations.exists():
            raise ValidationError(
                u'"{0}": document has {1} translations but is not localizable.'
                .format(unicode(self), self.translations.count()))

    def _ensure_inherited_attr(self, attr):
        """Make sure my `attr` attr is the same as my parent's if I have one.

        Otherwise, if I have children, make sure their `attr` attr is the same
        as mine.
        """
        if self.parent:
            # We always set the child according to the parent rather than vice
            # versa, because we do not expose an Archived checkbox in the
            # translation UI.
            setattr(self, attr, getattr(self.parent, attr))
        else:  # An article cannot have both a parent and children.
            # Make my children the same as me:
            if self.id:
                self.translations.all().update(**{attr: getattr(self, attr)})

    def _clean_category(self):
        """Make sure a doc's category is the same as its parent's."""
        if (not self.parent and
                self.category not in (id for id, name in CATEGORIES)):
            # All we really need to do here is make sure category != '' (which
            # is what it is when it's missing from the DocumentForm). The extra
            # validation is just a nicety.
            raise ValidationError(_('Please choose a category.'))
        self._ensure_inherited_attr('category')

    def _attr_for_redirect(self, attr, template):
        """Return the slug or title for a new redirect.

        `template` is a Python string template with "old" and "number" tokens
        used to create the variant.
        """
        def unique_attr():
            """Return a variant of getattr(self, attr) such that there is no
            Document of my locale with string attribute `attr` equal to it.

            Never returns the original attr value.
            """
            # "My God, it's full of race conditions!"
            i = 1
            while True:
                new_value = template % dict(old=getattr(self, attr), number=i)
                if not self._collides(attr, new_value):
                    return new_value
                i += 1

        old_attr = 'old_' + attr
        if hasattr(self, old_attr):
            # My slug (or title) is changing; we can reuse it for the redirect.
            return getattr(self, old_attr)
        else:
            # Come up with a unique slug (or title):
            return unique_attr()

    def save(self, *args, **kwargs):
        """Enforce invariants, persist, and leave a redirect document behind
        when an already-approved document's title or slug changed."""
        self.is_template = self.title.startswith(TEMPLATE_TITLE_PREFIX)

        self._raise_if_collides('slug', SlugCollision)
        self._raise_if_collides('title', TitleCollision)

        # These are too important to leave to a (possibly omitted) is_valid
        # call:
        self._clean_is_localizable()
        self._ensure_inherited_attr('is_archived')
        # Everything is validated before save() is called, so the only thing
        # that could cause save() to exit prematurely would be an exception,
        # which would cause a rollback, which would negate any category changes
        # we make here, so don't worry:
        self._clean_category()

        # __setattr__ records old_slug/old_title when those fields change
        # on an already-saved document.
        slug_changed = hasattr(self, 'old_slug')
        title_changed = hasattr(self, 'old_title')

        # If the slug changed, we clear out the share link so it gets regenerated.
        self.share_link = ''

        super(Document, self).save(*args, **kwargs)

        # Make redirects if there's an approved revision and title or slug
        # changed. Allowing redirects for unapproved docs would (1) be of
        # limited use and (2) require making Revision.creator nullable.
        if self.current_revision and (slug_changed or title_changed):
            try:
                doc = Document.objects.create(locale=self.locale,
                                              title=self._attr_for_redirect(
                                                  'title', REDIRECT_TITLE),
                                              slug=self._attr_for_redirect(
                                                  'slug', REDIRECT_SLUG),
                                              category=self.category,
                                              is_localizable=False)
                Revision.objects.create(document=doc,
                                        content=REDIRECT_CONTENT % self.title,
                                        is_approved=True,
                                        reviewer=self.current_revision.creator,
                                        creator=self.current_revision.creator)
            except TitleCollision:
                # Another redirect with that title already exists; skip.
                pass

        # Reset the rename tracking so a subsequent save() doesn't try to
        # create another redirect.
        if slug_changed:
            del self.old_slug
        if title_changed:
            del self.old_title

        self.parse_and_calculate_links()
        self.clear_cached_html()

    def __setattr__(self, name, value):
        """Trap setting slug and title, recording initial value."""
        # Public API: delete the old_title or old_slug attrs after changing
        # title or slug (respectively) to suppress redirect generation.
        if getattr(self, 'id', None):
            # I have been saved and so am worthy of a redirect.
            if name in ('slug', 'title') and hasattr(self, name):
                old_name = 'old_' + name
                if not hasattr(self, old_name):
                    # Case insensitive comparison:
                    if getattr(self, name).lower() != value.lower():
                        # Save original value:
                        setattr(self, old_name, getattr(self, name))
                elif value == getattr(self, old_name):
                    # They changed the attr back to its original value.
                    delattr(self, old_name)
        super(Document, self).__setattr__(name, value)

    @property
    def content_parsed(self):
        """Parsed content of the current approved revision ('' if none)."""
        if not self.current_revision:
            return ''
        return self.current_revision.content_parsed

    @property
    def summary(self):
        """Summary of the current approved revision ('' if none)."""
        if not self.current_revision:
            return ''
        return self.current_revision.summary

    @property
    def language(self):
        """Human-readable language name for this document's locale."""
        return settings.LANGUAGES_DICT[self.locale.lower()]

    @property
    def related_products(self):
        """Products attached to this document or any related document."""
        related_pks = [d.pk for d in self.related_documents.all()]
        related_pks.append(self.pk)
        return Product.objects.filter(document__in=related_pks).distinct()

    @property
    def is_hidden_from_search_engines(self):
        """True for pages that should not be crawlable: templates, archived
        docs, and administration/canned-response categories."""
        return (self.is_template or self.is_archived or
                self.category in (ADMINISTRATION_CATEGORY,
                                  CANNED_RESPONSES_CATEGORY))

    def get_absolute_url(self):
        """Canonical URL for this document."""
        return reverse('wiki.document', locale=self.locale, args=[self.slug])

    @classmethod
    def from_url(cls, url, required_locale=None, id_only=False,
                 check_host=True):
        """Return the approved Document the URL represents, None if there isn't
        one.

        Return None if the URL is a 404, the URL doesn't point to the right
        view, or the indicated document doesn't exist.

        To limit the universe of discourse to a certain locale, pass in a
        `required_locale`. To fetch only the ID of the returned Document, set
        `id_only` to True.

        If the URL has a host component, we assume it does not point to this
        host and thus does not point to a Document, because that would be a
        needlessly verbose way to specify an internal link. However, if you
        pass check_host=False, we assume the URL's host is the one serving
        Documents, which comes in handy for analytics whose metrics return
        host-having URLs.
        """
        try:
            components = _doc_components_from_url(
                url, required_locale=required_locale, check_host=check_host)
        except _NotDocumentView:
            return None
        if not components:
            return None
        locale, path, slug = components

        doc = cls.objects
        if id_only:
            doc = doc.only('id')
        try:
            doc = doc.get(locale=locale, slug=slug)
        except cls.DoesNotExist:
            try:
                # Fall back to the default-language document with that slug,
                # preferring its translation to the requested locale.
                doc = doc.get(locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug)
                translation = doc.translated_to(locale)
                if translation:
                    return translation
                return doc
            except cls.DoesNotExist:
                return None
        return doc

    def redirect_url(self, source_locale=settings.LANGUAGE_CODE):
        """If I am a redirect, return the URL to which I redirect.

        Otherwise, return None.
        """
        # If a document starts with REDIRECT_HTML and contains any <a> tags
        # with hrefs, return the href of the first one. This trick saves us
        # from having to parse the HTML every time.
        if self.html.startswith(REDIRECT_HTML):
            anchors = PyQuery(self.html)('a[href]')
            if anchors:
                # Articles with a redirect have a link that has the locale
                # hardcoded into it, and so by simply redirecting to the given
                # link, we end up possibly losing the locale. So, instead,
                # we strip out the locale and replace it with the original
                # source locale only in the case where an article is going
                # from one locale and redirecting it to a different one.
                # This only applies when it's a non-default locale because we
                # don't want to override the redirects that are forcibly
                # changing to (or staying within) a specific locale.
                full_url = anchors[0].get('href')
                (dest_locale, url) = split_path(full_url)
                if (source_locale != dest_locale
                        and dest_locale == settings.LANGUAGE_CODE):
                    return '/' + source_locale + '/' + url
                return full_url

    def redirect_document(self):
        """If I am a redirect to a Document, return that Document.

        Otherwise, return None.
        """
        url = self.redirect_url()
        if url:
            return self.from_url(url)

    def __unicode__(self):
        """Display as '[locale] title'."""
        return '[%s] %s' % (self.locale, self.title)

    def allows_vote(self, request):
        """Return whether we should render the vote form for the document."""
        # If the user isn't authenticated, we show the form even if they
        # may have voted. This is because the page can be cached and we don't
        # want to cache the page without the vote form. Users that already
        # voted will see a "You already voted on this Article." message
        # if they try voting again.
        authed_and_voted = (
            request.user.is_authenticated() and
            self.current_revision and
            self.current_revision.has_voted(request))

        return (not self.is_archived and
                self.current_revision and
                not authed_and_voted and
                not self.redirect_document() and
                self.category != TEMPLATES_CATEGORY and
                not waffle.switch_is_active('hide-voting'))

    def translated_to(self, locale):
        """Return the translation of me to the given locale.

        If there is no such Document, return None.
        """
        if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
            raise NotImplementedError('translated_to() is implemented only on'
                                      'Documents in the default language so'
                                      'far.')
        try:
            return Document.objects.get(locale=locale, parent=self)
        except Document.DoesNotExist:
            return None

    @property
    def original(self):
        """Return the document I was translated from or, if none, myself."""
        return self.parent or self

    def localizable_or_latest_revision(self, include_rejected=False):
        """Return latest ready-to-localize revision if there is one,
        else the latest approved revision if there is one,
        else the latest unrejected (unreviewed) revision if there is one,
        else None.

        include_rejected -- If true, fall back to the latest rejected
            revision if all else fails.
        """
        def latest(queryset):
            """Return the latest item from a queryset (by ID).

            Return None if the queryset is empty.
            """
            try:
                return queryset.order_by('-id')[0:1].get()
            except ObjectDoesNotExist:  # Catching IndexError seems overbroad.
                return None

        rev = self.latest_localizable_revision
        if not rev or not self.is_localizable:
            rejected = Q(is_approved=False, reviewed__isnull=False)

            # Try latest approved revision:
            rev = (latest(self.revisions.filter(is_approved=True)) or
                   # No approved revs. Try unrejected:
                   latest(self.revisions.exclude(rejected)) or
                   # No unrejected revs. Maybe fall back to rejected:
                   (latest(self.revisions) if include_rejected else None))
        return rev

    def is_outdated(self, level=MEDIUM_SIGNIFICANCE):
        """Return whether an update of a given magnitude has occured
        to the parent document since this translation had an approved
        update and such revision is ready for l10n.

        If this is not a translation or has never been approved, return
        False.

        level: The significance of an edit that is "enough". Defaults to
            MEDIUM_SIGNIFICANCE.
        """
        if not (self.parent and self.current_revision):
            return False

        based_on_id = self.current_revision.based_on_id
        more_filters = {'id__gt': based_on_id} if based_on_id else {}

        return self.parent.revisions.filter(
            is_approved=True, is_ready_for_localization=True,
            significance__gte=level, **more_filters).exists()

    def is_majorly_outdated(self):
        """Return whether a MAJOR_SIGNIFICANCE-level update has occurred to the
        parent document since this translation had an approved update and such
        revision is ready for l10n.

        If this is not a translation or has never been approved, return False.
        """
        return self.is_outdated(level=MAJOR_SIGNIFICANCE)

    def is_watched_by(self, user):
        """Return whether `user` is notified of edits to me."""
        from kitsune.wiki.events import EditDocumentEvent
        return EditDocumentEvent.is_notifying(user, self)

    def get_topics(self):
        """Return the list of new topics that apply to this document.

        If the document has a parent, it inherits the parent's topics.
        """
        if self.parent:
            return self.parent.get_topics()
        return Topic.objects.filter(document=self)

    def get_products(self):
        """Return the list of products that apply to this document.

        If the document has a parent, it inherits the parent's products.
        """
        if self.parent:
            return self.parent.get_products()
        return Product.objects.filter(document=self)

    @property
    def recent_helpful_votes(self):
        """Return the number of helpful votes in the last 30 days."""
        start = datetime.now() - timedelta(days=30)
        return HelpfulVote.objects.filter(
            revision__document=self, created__gt=start, helpful=True).count()

    @classmethod
    def get_mapping_type(cls):
        """Return the ES mapping type used to index Documents."""
        return DocumentMappingType

    def parse_and_calculate_links(self):
        """Calculate What Links Here data for links going out from this.

        Also returns a parsed version of the current html, because that
        is a byproduct of the process, and is useful.
        """
        if not self.current_revision:
            return ''

        # Remove "what links here" reverse links, because they might be
        # stale and re-rendering will re-add them. This cannot be done
        # reliably in the parser's parse() function, because that is
        # often called multiple times per document.
        self.links_from().delete()
        # Also delete the DocumentImage instances for this document.
        DocumentImage.objects.filter(document=self).delete()

        from kitsune.wiki.parser import wiki_to_html, WhatLinksHereParser
        return wiki_to_html(self.current_revision.content,
                            locale=self.locale,
                            doc_id=self.id,
                            parser_cls=WhatLinksHereParser)

    def links_from(self):
        """Get a query set of links that are from this document to another."""
        return DocumentLink.objects.filter(linked_from=self)

    def links_to(self):
        """Get a query set of links that are from another document to this."""
        return DocumentLink.objects.filter(linked_to=self)

    def add_link_to(self, linked_to, kind):
        """Create a DocumentLink to another Document."""
        DocumentLink.objects.get_or_create(linked_from=self,
                                           linked_to=linked_to,
                                           kind=kind)

    @property
    def images(self):
        """Images referenced by this document (via DocumentImage)."""
        return Image.objects.filter(documentimage__document=self)

    def add_image(self, image):
        """Create a DocumentImage to connect self to an Image instance."""
        try:
            DocumentImage(document=self, image=image).save()
        except IntegrityError:
            # This DocumentImage already exists, ok.
            pass

    def clear_cached_html(self):
        """Invalidate every cached rendered-HTML variant of this document.

        NOTE(review): relies on a doc_html_cache_key() helper defined
        elsewhere in this module (the DOC_HTML_CACHE_KEY template is
        imported above -- presumably the key format; verify).
        """
        # Clear out both mobile and desktop templates.
        for mobile, minimal in itertools.product([True, False], repeat=2):
            cache.delete(doc_html_cache_key(self.locale, self.slug, mobile, minimal))
@register_mapping_type
class DocumentMappingType(SearchMappingType):
    """Elasticsearch mapping type for wiki Documents."""

    # Fields whose values are indexed as lists.
    list_keys = [
        'topic',
        'product'
    ]
    @classmethod
    def get_model(cls):
        """Return the Django model this mapping type indexes."""
        return Document
    @classmethod
    def get_query_fields(cls):
        """Fields searched by full-text queries."""
        return ['document_title',
                'document_content',
                'document_summary',
                'document_keywords']
    @classmethod
    def get_localized_fields(cls):
        # This is the same list as `get_query_fields`, but it doesn't
        # have to be, which is why it is typed twice.
        return ['document_title',
                'document_content',
                'document_summary',
                'document_keywords']
    @classmethod
    def get_mapping(cls):
        """Return the ES field mapping for indexed Documents."""
        return {
            'properties': {
                # General fields
                'id': {'type': 'long'},
                'model': {'type': 'string', 'index': 'not_analyzed'},
                'url': {'type': 'string', 'index': 'not_analyzed'},
                'indexed_on': {'type': 'integer'},
                'updated': {'type': 'integer'},
                'product': {'type': 'string', 'index': 'not_analyzed'},
                'topic': {'type': 'string', 'index': 'not_analyzed'},

                # Document specific fields (locale aware)
                'document_title': {'type': 'string'},
                'document_keywords': {'type': 'string'},
                'document_content': {'type': 'string', 'store': 'yes',
                                     'term_vector': 'with_positions_offsets'},
                'document_summary': {'type': 'string', 'store': 'yes',
                                     'term_vector': 'with_positions_offsets'},

                # Document specific fields (locale naive)
                'document_locale': {'type': 'string', 'index': 'not_analyzed'},
                'document_current_id': {'type': 'integer'},
                'document_parent_id': {'type': 'integer'},
                'document_category': {'type': 'integer'},
                'document_slug': {'type': 'string', 'index': 'not_analyzed'},
                'document_is_archived': {'type': 'boolean'},
                'document_recent_helpful_votes': {'type': 'integer'},
                'document_display_order': {'type': 'integer'}
            }
        }
@classmethod
def extract_document(cls, obj_id, obj=None):
if obj is None:
model = cls.get_model()
obj = model.objects.select_related(
'current_revision', 'parent').get(pk=obj_id)
if obj.html.startswith(REDIRECT_HTML):
# It's possible this document is indexed and was turned
# into a redirect, so now we want to explicitly unindex
# it. The way we do that is by throwing an exception
# which gets handled by the indexing machinery.
raise UnindexMeBro()
d = {}
d['id'] = obj.id
d['model'] = cls.get_mapping_type_name()
d['url'] = obj.get_absolute_url()
d['indexed_on'] = int(time.time())
d['topic'] = [t.slug for t in obj.get_topics()]
d['product'] = [p.slug for p in obj.get_products()]
d['document_title'] = obj.title
d['document_locale'] = obj.locale
d['document_parent_id'] = obj.parent.id if obj.parent else None
d['document_content'] = obj.html
d['document_category'] = obj.category
d['document_slug'] = obj.slug
d['document_is_archived'] = obj.is_archived
d['document_display_order'] = obj.original.display_order
d['document_summary'] = obj.summary
if obj.current_revision is not None:
d['document_keywords'] = obj.current_revision.keywords
d['updated'] = int(time.mktime(
obj.current_revision.created.timetuple()))
d['document_current_id'] = obj.current_revision.id
d['document_recent_helpful_votes'] = obj.recent_helpful_votes
else:
d['document_summary'] = None
d['document_keywords'] = None
d['updated'] = None
d['document_current_id'] = None
d['document_recent_helpful_votes'] = 0
# Don't query for helpful votes if the document doesn't have a current
# revision, or is a template, or is a redirect, or is in Navigation
# category (50).
if (obj.current_revision and
not obj.is_template and
not obj.html.startswith(REDIRECT_HTML) and
not obj.category == 50):
d['document_recent_helpful_votes'] = obj.recent_helpful_votes
else:
d['document_recent_helpful_votes'] = 0
# Select a locale-appropriate default analyzer for all strings.
d['_analyzer'] = es_analyzer_for_locale(obj.locale)
return d
@classmethod
def get_indexable(cls):
# This function returns all the indexable things, but we
# really need to handle the case where something was indexable
# and isn't anymore. Given that, this returns everything that
# has a revision.
indexable = super(cls, cls).get_indexable()
indexable = indexable.filter(current_revision__isnull=False)
return indexable
@classmethod
def index(cls, document, **kwargs):
# If there are no revisions or the current revision is a
# redirect, we want to remove it from the index.
if (document['document_current_id'] is None or
document['document_content'].startswith(REDIRECT_HTML)):
cls.unindex(document['id'], es=kwargs.get('es', None))
return
super(cls, cls).index(document, **kwargs)
# Keep the search index in sync with Document changes, including changes to
# the topic and product many-to-many through tables.
register_for_indexing('wiki', Document)
register_for_indexing(
    'wiki',
    Document.topics.through,
    m2m=True)
register_for_indexing(
    'wiki',
    Document.products.through,
    m2m=True)
# Maximum length of the Revision.comment field (enforced by the CharField).
MAX_REVISION_COMMENT_LENGTH = 255
class Revision(ModelBase, SearchMixin):
    """A revision of a localized knowledgebase document"""
    document = models.ForeignKey(Document, related_name='revisions')
    summary = models.TextField() # wiki markup
    content = models.TextField() # wiki markup
    # Keywords are used mostly to affect search rankings. Moderators may not
    # have the language expertise to translate keywords, so we put them in the
    # Revision so the translators can handle them:
    keywords = models.CharField(max_length=255, blank=True)
    created = models.DateTimeField(default=datetime.now)
    # When this revision was reviewed; NULL while review is pending.
    reviewed = models.DateTimeField(null=True)
    expires = models.DateTimeField(null=True, blank=True)
    # The significance of the initial revision of a document is NULL.
    significance = models.IntegerField(choices=SIGNIFICANCES, null=True)
    comment = models.CharField(max_length=MAX_REVISION_COMMENT_LENGTH)
    # reviewer is NULL until someone reviews the revision.
    reviewer = models.ForeignKey(User, related_name='reviewed_revisions',
                                 null=True)
    creator = models.ForeignKey(User, related_name='created_revisions')
    is_approved = models.BooleanField(default=False, db_index=True)
    # The default locale's rev that was the latest ready-for-l10n one when the
    # Edit button was hit to begin creating this revision. If there was none,
    # this is simply the latest of the default locale's revs as of that time.
    # Used to determine whether localizations are out of date.
    based_on = models.ForeignKey('self', null=True, blank=True)
    # TODO: limit_choices_to={'document__locale':
    # settings.WIKI_DEFAULT_LANGUAGE} is a start but not sufficient.
    # Is both approved and marked as ready for translation (which will result
    # in the translation UI considering it when looking for the latest
    # translatable version). If is_approved=False or this revision belongs to a
    # non-default-language Document, this must be False.
    is_ready_for_localization = models.BooleanField(default=False)
    readied_for_localization = models.DateTimeField(null=True)
    readied_for_localization_by = models.ForeignKey(
        User, related_name='readied_for_l10n_revisions', null=True)
    class Meta(object):
        permissions = [('review_revision', 'Can review a revision'),
                       ('mark_ready_for_l10n',
                        'Can mark revision as ready for localization'),
                       ('edit_keywords', 'Can edit keywords')]
    def _based_on_is_clean(self):
        """Return a tuple: (the correct value of based_on, whether the old
        value was correct).
        based_on must be a revision of the English version of the document. If
        based_on is not already set when this is called, the return value
        defaults to something reasonable.
        """
        original = self.document.original
        if self.based_on and self.based_on.document != original:
            # based_on is set and points to the wrong doc. The following is
            # then the most likely helpful value:
            return original.localizable_or_latest_revision(), False
        # Even None is permissible, for example in the case of a brand new doc.
        return self.based_on, True
    def clean(self):
        """Ensure based_on is valid & police is_ready/is_approved invariant."""
        # All of the cleaning herein should be unnecessary unless the user
        # messes with hidden form data.
        try:
            self.document and self.document.original
        except Document.DoesNotExist:
            # For clean()ing forms that don't have a document instance behind
            # them yet
            self.based_on = None
        else:
            based_on, is_clean = self._based_on_is_clean()
            if not is_clean:
                old = self.based_on
                self.based_on = based_on # Be nice and guess a correct value.
                # TODO(erik): This error message ignores non-translations.
                raise ValidationError(
                    _('A revision must be based on the English article. '
                      'Revision ID %(id)s does not fit this criterion.') %
                    dict(id=old.id))
            if not self.can_be_readied_for_localization():
                self.is_ready_for_localization = False
    def save(self, *args, **kwargs):
        """Save the revision and keep denormalized Document state in sync.

        Raises ProgrammingError when based_on points at a revision of the
        wrong document (clean() should have caught it first).
        """
        _, is_clean = self._based_on_is_clean()
        if not is_clean: # No more Mister Nice Guy
            # TODO(erik): This error message ignores non-translations.
            raise ProgrammingError('Revision.based_on must be None or refer '
                                   'to a revision of the default-'
                                   'language document.')
        super(Revision, self).save(*args, **kwargs)
        # When a revision is approved, re-cache the document's html content
        # and update document contributors
        if self.is_approved and (
                not self.document.current_revision or
                self.document.current_revision.id < self.id):
            # Determine if there are new contributors and add them to the list
            contributors = self.document.contributors.all()
            # Exclude all explicitly rejected revisions
            new_revs = self.document.revisions.exclude(
                reviewed__isnull=False, is_approved=False)
            if self.document.current_revision:
                new_revs = new_revs.filter(
                    id__gt=self.document.current_revision.id)
            new_contributors = set(
                [r.creator for r in new_revs.select_related('creator')])
            for user in new_contributors:
                if user not in contributors:
                    self.document.contributors.add(user)
            # Update document denormalized fields
            if self.is_ready_for_localization:
                self.document.latest_localizable_revision = self
            self.document.html = self.content_parsed
            self.document.current_revision = self
            self.document.save()
        elif (self.is_ready_for_localization and
              (not self.document.latest_localizable_revision or
               self.id > self.document.latest_localizable_revision.id)):
            # We are marking a newer revision as ready for l10n.
            # Update the denormalized field on the document.
            self.document.latest_localizable_revision = self
            self.document.save()
    def delete(self, *args, **kwargs):
        """Dodge cascading delete of documents and other revisions."""
        def latest_revision(excluded_rev, constraint):
            """Return the largest-ID'd revision meeting the given constraint
            and excluding the given revision, or None if there is none."""
            revs = document.revisions.filter(constraint).exclude(
                pk=excluded_rev.pk).order_by('-id')[:1]
            try:
                # Academic TODO: There's probably a way to keep the QuerySet
                # lazy all the way through the update() call.
                return revs[0]
            except IndexError:
                return None
        # Detach any revisions based on this one so they aren't cascaded.
        Revision.objects.filter(based_on=self).update(based_on=None)
        document = self.document
        # If the current_revision is being deleted, try to update it to the
        # previous approved revision:
        if document.current_revision == self:
            new_current = latest_revision(self, Q(is_approved=True))
            document.update(
                current_revision=new_current,
                html=new_current.content_parsed if new_current else '')
        # Likewise, step the latest_localizable_revision field backward if
        # we're deleting that revision:
        if document.latest_localizable_revision == self:
            document.update(latest_localizable_revision=latest_revision(
                self, Q(is_approved=True, is_ready_for_localization=True)))
        super(Revision, self).delete(*args, **kwargs)
    def has_voted(self, request):
        """Did the user already vote for this revision?"""
        if request.user.is_authenticated():
            qs = HelpfulVote.objects.filter(revision=self,
                                            creator=request.user)
        elif request.anonymous.has_id:
            anon_id = request.anonymous.anonymous_id
            qs = HelpfulVote.objects.filter(revision=self,
                                            anonymous_id=anon_id)
        else:
            # Anonymous user without an anonymous id cannot have voted.
            return False
        return qs.exists()
    def __unicode__(self):
        # e.g. u'[en-US] Some Title #123: first fifty chars of content'
        return u'[%s] %s #%s: %s' % (self.document.locale,
                                     self.document.title,
                                     self.id, self.content[:50])
    @property
    def content_parsed(self):
        """The revision content rendered from wiki markup to HTML."""
        from kitsune.wiki.parser import wiki_to_html
        return wiki_to_html(self.content, locale=self.document.locale,
                            doc_id=self.document.id)
    def can_be_readied_for_localization(self):
        """Return whether this revision has the prerequisites necessary for the
        user to mark it as ready for localization."""
        # If not is_approved, can't be is_ready. TODO: think about using a
        # single field with more states.
        # Also, if significance is trivial, it shouldn't be translated.
        return (self.is_approved and
                self.significance > TYPO_SIGNIFICANCE and
                self.document.locale == settings.WIKI_DEFAULT_LANGUAGE)
    def get_absolute_url(self):
        """Return the URL of this revision's detail page."""
        return reverse('wiki.revision', locale=self.document.locale,
                       args=[self.document.slug, self.id])
    @property
    def previous(self):
        """Get the revision that came before this in the document's history."""
        older_revs = Revision.objects.filter(document=self.document,
                                             id__lt=self.id,
                                             is_approved=True)
        older_revs = older_revs.order_by('-created')
        try:
            return older_revs[0]
        except IndexError:
            return None
    @classmethod
    def get_mapping_type(cls):
        """Return the mapping type used to index revision metrics."""
        return RevisionMetricsMappingType
@register_mapping_type
class RevisionMetricsMappingType(SearchMappingType):
    """Elasticsearch mapping type for revision-based contributor metrics."""
    @classmethod
    def get_model(cls):
        """Return the Django model indexed by this mapping type."""
        return Revision
    @classmethod
    def get_index_group(cls):
        # Metrics data lives in its own index group, separate from search.
        return 'metrics'
    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch field mapping for this doctype."""
        return {
            'properties': {
                'id': {'type': 'long'},
                'model': {'type': 'string', 'index': 'not_analyzed'},
                'url': {'type': 'string', 'index': 'not_analyzed'},
                'indexed_on': {'type': 'integer'},
                'created': {'type': 'date'},
                'reviewed': {'type': 'date'},
                'locale': {'type': 'string', 'index': 'not_analyzed'},
                'product': {'type': 'string', 'index': 'not_analyzed'},
                'is_approved': {'type': 'boolean'},
                'creator_id': {'type': 'long'},
                'reviewer_id': {'type': 'long'},
            }
        }
    @classmethod
    def extract_document(cls, obj_id, obj=None):
        """Extracts indexable attributes from a Revision."""
        fields = ['id', 'created', 'creator_id', 'reviewed', 'reviewer_id',
                  'is_approved', 'document_id']
        composed_fields = ['document__locale', 'document__slug']
        all_fields = fields + composed_fields
        if obj is None:
            # Fetch only the values we need instead of a full instance.
            model = cls.get_model()
            obj_dict = model.objects.values(*all_fields).get(pk=obj_id)
        else:
            obj_dict = dict([(field, getattr(obj, field))
                             for field in fields])
            obj_dict['document__locale'] = obj.document.locale
            obj_dict['document__slug'] = obj.document.slug
        d = {}
        d['id'] = obj_dict['id']
        d['model'] = cls.get_mapping_type_name()
        # We do this because get_absolute_url is an instance method
        # and we don't want to create an instance because it's a DB
        # hit and expensive. So we do it by hand. get_absolute_url
        # doesn't change much, so this is probably ok.
        d['url'] = reverse('wiki.revision', kwargs={
            'revision_id': obj_dict['id'],
            'document_slug': obj_dict['document__slug']})
        d['indexed_on'] = int(time.time())
        d['created'] = obj_dict['created']
        d['reviewed'] = obj_dict['reviewed']
        d['locale'] = obj_dict['document__locale']
        d['is_approved'] = obj_dict['is_approved']
        d['creator_id'] = obj_dict['creator_id']
        d['reviewer_id'] = obj_dict['reviewer_id']
        doc = Document.objects.get(id=obj_dict['document_id'])
        d['product'] = [p.slug for p in doc.get_products()]
        return d
# Keep the revision metrics index in sync with Revision changes.
register_for_indexing('revisions', Revision)
class HelpfulVote(ModelBase):
    """Helpful or Not Helpful vote on Revision."""
    revision = models.ForeignKey(Revision, related_name='poll_votes')
    # True = "helpful", False = "not helpful".
    helpful = models.BooleanField(default=False)
    created = models.DateTimeField(default=datetime.now, db_index=True)
    # creator is NULL for anonymous voters; anonymous_id identifies them then.
    creator = models.ForeignKey(User, related_name='poll_votes', null=True)
    anonymous_id = models.CharField(max_length=40, db_index=True)
    user_agent = models.CharField(max_length=1000)
    def add_metadata(self, key, value):
        """Attach a key/value HelpfulVoteMetadata record to this vote."""
        HelpfulVoteMetadata.objects.create(vote=self, key=key, value=value)
class HelpfulVoteMetadata(ModelBase):
    """Metadata for article votes."""
    vote = models.ForeignKey(HelpfulVote, related_name='metadata')
    # Free-form key/value pairs attached to a vote.
    key = models.CharField(max_length=40, db_index=True)
    value = models.CharField(max_length=1000)
class ImportantDate(ModelBase):
    """Important date that shows up globally on metrics graphs."""
    # Label displayed for this date on the graphs.
    text = models.CharField(max_length=100)
    date = models.DateField(db_index=True)
# Note: This model should probably be called LocaleTeam.
# It's a pain to change it now because of table names, FK column names,
# the M2M tables, etc.
class Locale(ModelBase):
    """A localization team."""
    locale = LocaleField(db_index=True)
    # Team membership roles.
    leaders = models.ManyToManyField(
        User, blank=True, related_name='locales_leader')
    reviewers = models.ManyToManyField(
        User, blank=True, related_name='locales_reviewer')
    editors = models.ManyToManyField(
        User, blank=True, related_name='locales_editor')
    class Meta:
        ordering = ['locale']
    def get_absolute_url(self):
        """Return the URL of this locale team's detail page."""
        return reverse('wiki.locale_details', args=[self.locale])
    def __unicode__(self):
        return self.locale
class DocumentLink(ModelBase):
    """Model a link between documents.
    If article A contains [[Link:B]], then `linked_to` is B,
    `linked_from` is A, and kind is 'link'.
    """
    linked_to = models.ForeignKey(Document,
                                  related_name='documentlink_from_set')
    linked_from = models.ForeignKey(Document,
                                    related_name='documentlink_to_set')
    # Distinguishes the way one document references another (e.g. 'link').
    kind = models.CharField(max_length=16)
    class Meta:
        # At most one link record per (source, target, kind) triple.
        unique_together = ('linked_from', 'linked_to', 'kind')
    def __unicode__(self):
        return (u'<DocumentLink: %s from %r to %r>' %
                (self.kind, self.linked_from, self.linked_to))
class DocumentImage(ModelBase):
    """Model to keep track of what documents include what images."""
    document = models.ForeignKey(Document)
    image = models.ForeignKey(Image)
    class Meta:
        # Each (document, image) inclusion is recorded at most once.
        unique_together = ('document', 'image')
    def __unicode__(self):
        return u'<DocumentImage: {doc} includes {img}>'.format(
            doc=self.document, img=self.image)
def _doc_components_from_url(url, required_locale=None, check_host=True):
    """Return (locale, path, slug) if URL is a Document, False otherwise.
    If URL doesn't even point to the document view, raise _NotDocumentView.
    """
    parsed = urlparse(url)  # Never has errors AFAICT
    # Reject offsite/absolute URLs unless host checking was disabled.
    if check_host and parsed.netloc:
        return False
    locale, remainder = split_path(parsed.path)
    if required_locale and locale != required_locale:
        return False
    doc_path = '/' + remainder
    try:
        view, view_args, view_kwargs = resolve(doc_path)
    except Http404:
        return False
    import kitsune.wiki.views  # Views import models; models import views.
    if view != kitsune.wiki.views.document:
        raise _NotDocumentView
    return locale, doc_path, view_kwargs['document_slug']
def points_to_document_view(url, required_locale=None):
    """Return whether a URL reverses to the document view.
    To limit the universe of discourse to a certain locale, pass in a
    `required_locale`.
    """
    try:
        # bool() instead of the obscure `not not` double negation; the helper
        # returns either False or a truthy (locale, path, slug) tuple.
        return bool(_doc_components_from_url(
            url, required_locale=required_locale))
    except _NotDocumentView:
        return False
def user_num_documents(user):
    """Count the number of documents a user has contributed to. """
    contributed = Document.objects.filter(revisions__creator=user)
    # Redirect stubs don't count as real contributions.
    return contributed.exclude(
        html__startswith='<p>REDIRECT <a').distinct().count()
def user_documents(user):
    """Return the documents a user has contributed to."""
    contributed = Document.objects.filter(revisions__creator=user)
    # Redirect stubs are excluded; see user_redirects for those.
    return contributed.exclude(html__startswith='<p>REDIRECT <a').distinct()
def user_redirects(user):
    """Return the redirects a user has contributed to."""
    contributed = Document.objects.filter(revisions__creator=user)
    return contributed.filter(html__startswith='<p>REDIRECT <a').distinct()
def doc_html_cache_key(locale, slug, mobile, minimal):
    """Returns the cache key for the document html."""
    # Hash the formatted key so slugs of any length/charset fit the backend.
    raw_key = DOC_HTML_CACHE_KEY.format(locale=locale, slug=slug,
                                        mobile=str(mobile),
                                        minimal=str(minimal))
    return hashlib.sha1(smart_str(raw_key)).hexdigest()
| |
import sys
import time
import shutil
import os
import subprocess
import schedule
import pyshark
import pdb
from datetime import datetime
class HoneyCopy(object):
def __init__(self):
# Path where the Virtual Box Vms managed by vagrant are stored
self.vboxpath = os.environ["HOME"]+"/VirtualBox VMs/"
# Path where the script is executed -> Home dir
self.honeypath = os.getcwd() + "/"
# Path for the archived PCAP files
self.archivepath = os.getcwd() + "/nw/archive/"
print "Env Variables are: %s , %s" % (self.vboxpath, self.honeypath)
return
def createHoneypot(self, osstr):
if not os.path.exists("vm"):
os.makedirs("vm")
if not os.path.exists("nw/archive"):
os.makedirs("nw/archive")
# adds the ubuntu or windows box to the vagrant environement, initializes the Honeypot VM
os.chdir("vm")
self.executeCommand(["vagrant", "box", "add", "--force", osstr, self.honeypath + osstr + ".box"])
self.executeCommand(["vagrant", "init", osstr])
self.executeCommand(["vagrant", "up"])
self.executeCommand(["vagrant", "halt"])
shutil.copyfile("../Vagrantfile_" + osstr, "Vagrantfile")
return
def clone(self):
os.chdir("vm")
if not os.path.exists("clone1"):
os.makedirs("clone1")
if not os.path.exists("clone2"):
os.makedirs("clone2")
# Windows needs a separate Vagrantfile per clone or the clones will cause a port collision
if os.path.exists("windows.box"):
osstr = "windows"
self.executeCommand(["vagrant", "package", "--output", "clone1/" + osstr +"_clone1.box", "--vagrantfile", self.honeypath + "Vagrantfile_" + osstr + "_clone1"])
time.sleep(5)
self.executeCommand(["vagrant", "package", "--output", "clone2/" + osstr + "_clone2.box", "--vagrantfile", self.honeypath + "Vagrantfile_" + osstr + "_clone2"])
os.chdir("clone1")
self.executeCommand(["vagrant", "init", osstr + "_clone1.box"])
self.executeCommand(["vagrant", "up"])
self.executeCommand(["vagrant", "halt"])
os.chdir("..")
os.chdir("clone2")
self.executeCommand(["vagrant", "init", osstr + "_clone2.box"])
self.executeCommand(["vagrant", "up"])
self.executeCommand(["vagrant", "halt"])
os.chdir("..")
else:
osstr = "ubuntu"
# using the vagrant package command to export a new box identical to the Honeypot
self.executeCommand(["vagrant", "package", "--output", osstr + "_clone", "--vagrantfile", self.honeypath + "Vagrantfile_" + osstr + "_clone"])
self.executeCommand(["vagrant", "box", "add", "--force", osstr + "_clone", osstr + "_clone"])
os.chdir("clone1")
self.executeCommand(["vagrant", "init", osstr + "_clone"])
self.executeCommand(["vagrant", "up"])
self.executeCommand(["vagrant", "halt"])
os.chdir("..")
os.chdir("clone2")
self.executeCommand(["vagrant", "init", osstr + "_clone"])
self.executeCommand(["vagrant", "up"])
self.executeCommand(["vagrant", "halt"])
os.chdir("..")
if not os.path.exists(self.honeypath + "nw"):
os.makedirs(self.honeypath + "nw")
time.sleep(5)
# enabling the network tracing of all virtual Systems with VBoxManage
# the honeyapot uses the networkadapter 2 (bridged) while the clones use the adapter 1 (NAT)
# the recorded traffic is stored in a PCAP File and is later analysed
for subdir, dirs, files in os.walk(self.vboxpath):
for dir in dirs:
if dir.startswith("vm_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace2", "on", "--nictracefile2", self.honeypath + "nw/honeypot.pcap"])
if dir.startswith("clone1_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace1", "on", "--nictracefile1", self.honeypath + "nw/clone1.pcap"])
if dir.startswith("clone2_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace1", "on", "--nictracefile1", self.honeypath + "nw/clone2.pcap"])
return
# this function starts the systems, initializes a first compare (baseline) and starts the scheduler that calls the compare function periodicaly
def start(self):
os.chdir("vm")
self.executeCommand(["vagrant", "up"])
os.chdir("clone1")
self.executeCommand(["vagrant", "up"])
os.chdir("../clone2")
self.executeCommand(["vagrant", "up"])
os.chdir("..")
print "VMs up, start recording"
print "abort by pressing CTRL+C"
# this parameter represents the time between two comparisons
para_t = 60
schedule.every(para_t).minutes.do(self.compare)
self.compare()
# the process will run until you manually abort (or an unexpected runtime error occurs)
while 1:
try:
schedule.run_pending()
time.sleep(1)
except KeyboardInterrupt:
print "Manually aborted recording, VMs still running"
sys.exit()
return
def compare(self):
print "start comparing"
self.createSnapshot()
self.suspend()
if not os.path.exists(self.honeypath + "fs"):
os.makedirs(self.honeypath + "fs")
if not os.path.exists(self.honeypath + "fs/honeypot"):
os.makedirs(self.honeypath + "fs/honeypot")
if not os.path.exists(self.honeypath + "fs/copy1"):
os.makedirs(self.honeypath + "fs/copy1")
if not os.path.exists(self.honeypath + "fs/copy2"):
os.makedirs(self.honeypath + "fs/copy2")
if not os.path.exists(self.honeypath + "fs/diff"):
os.makedirs(self.honeypath + "fs/diff")
shutil.move(self.honeypath + "nw/honeypot.pcap", self.archivepath + "honeypot.pcap")
shutil.move(self.honeypath + "nw/clone1.pcap", self.archivepath + "clone1.pcap")
shutil.move(self.honeypath + "nw/clone2.pcap", self.archivepath + "clone2.pcap")
for subdir, dirs, files in os.walk(self.vboxpath):
for dir in dirs:
# this loop searches for the directories where the VMs are managed by Virtualbox
if dir.startswith("vm_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace2", "on", "--nictracefile2", self.honeypath + "nw/honeypot.pcap"])
path1 = self.vboxpath + dir + "/"
snapid = self.getSaveId(path1 + "Snapshots/")
if os.path.exists(self.honeypath + "fs/honeypot.vmdk"):
os.remove(self.honeypath + "fs/honeypot.vmdk")
time.sleep(5)
print snapid, path1
if snapid == "empty":
shutil.copyfile(path1 + "box-disk1.vmdk", self.honeypath + "fs/honeypot.vmdk")
else:
os.chdir(path1 + "Snapshots/")
self.executeCommand(["VBoxManage", "clonehd", snapid, self.honeypath + "fs/honeypot.vmdk"])
os.chdir(self.honeypath)
if dir.startswith("clone1_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace1", "on", "--nictracefile1", self.honeypath + "nw/clone1.pcap"])
path2 = self.vboxpath + dir + "/"
snapid = self.getSaveId(path2 + "Snapshots/")
if os.path.exists(self.honeypath + "fs/copy1.vmdk"):
os.remove(self.honeypath + "fs/copy1.vmdk")
time.sleep(5)
print snapid, path2
if snapid == "empty":
shutil.copyfile(path2 + "box-disk1.vmdk", self.honeypath + "fs/copy1.vmdk")
else:
os.chdir(path2 + "Snapshots/")
self.executeCommand(["VBoxManage", "clonehd", snapid, self.honeypath + "fs/copy1.vmdk"])
os.chdir(self.honeypath)
if dir.startswith("clone2_"):
self.executeCommand(["VBoxManage", "modifyvm", dir, "--nictrace1", "on", "--nictracefile1", self.honeypath + "nw/clone2.pcap"])
path3 = self.vboxpath + dir + "/"
snapid = self.getSaveId(path3 + "Snapshots/")
if os.path.exists(self.honeypath + "fs/copy2.vmdk"):
os.remove(self.honeypath + "fs/copy2.vmdk")
time.sleep(5)
print snapid, path3
if snapid == "empty":
shutil.copyfile(path3 + "box-disk1.vmdk", self.honeypath + "fs/copy2.vmdk")
else:
os.chdir(path3 + "Snapshots/")
self.executeCommand(["VBoxManage", "clonehd", snapid, self.honeypath + "fs/copy2.vmdk"])
os.chdir(self.honeypath)
self.resume()
self.diffFs()
self.diffNw()
print "compare complete"
return
def getSaveId(self, path):
f = []
for (dirpath, dirnames, filenames) in os.walk(path):
f.extend(filenames)
break
f.sort(key=lambda x: os.stat(os.path.join(path, x)).st_mtime)
f.reverse()
for file in f:
filename, file_extension = os.path.splitext(file)
if file_extension == ".vmdk":
return file[1:-6]
return "empty"
def suspend(self):
os.chdir(self.honeypath + "vm")
self.executeCommand(["vagrant", "suspend"])
os.chdir(self.honeypath + "vm/clone1")
self.executeCommand(["vagrant", "suspend"])
os.chdir(self.honeypath + "vm/clone2")
self.executeCommand(["vagrant", "suspend"])
os.chdir(self.honeypath)
return
def resume(self):
os.chdir(self.honeypath + "vm")
self.executeCommand(["vagrant", "resume"])
os.chdir(self.honeypath + "vm/clone1")
self.executeCommand(["vagrant", "resume"])
os.chdir(self.honeypath + "vm/clone2")
self.executeCommand(["vagrant", "resume"])
os.chdir(self.honeypath)
return
def createSnapshot(self):
snaptime = datetime.now()
os.chdir(self.honeypath + "vm")
self.executeCommand(["vagrant", "snapshot", "save", str(snaptime.year) + str(snaptime.month) + str(snaptime.day) + "_" + str(snaptime.hour) + str(snaptime.minute)])
os.chdir("clone1")
self.executeCommand(["vagrant", "snapshot", "save", str(snaptime.year) + str(snaptime.month) + str(snaptime.day) + "_" + str(snaptime.hour) + str(snaptime.minute)])
os.chdir("../clone2")
self.executeCommand(["vagrant", "snapshot", "save", str(snaptime.year) + str(snaptime.month) + str(snaptime.day) + "_" + str(snaptime.hour) + str(snaptime.minute)])
os.chdir(self.honeypath)
print "snapshots created"
return
def diffFs(self):
os.chdir(self.honeypath + "fs")
if os.path.exists(self.honeypath + "vm/windows.box"):
self.executeCommand(["vmware-mount", "honeypot.vmdk", "2", "honeypot"])
self.executeCommand(["vmware-mount", "copy1.vmdk", "2", "copy1"])
self.executeCommand(["vmware-mount", "copy2.vmdk", "2", "copy2"])
else:
self.executeCommand(["vmware-mount", "honeypot.vmdk", "honeypot"])
self.executeCommand(["vmware-mount", "copy1.vmdk", "copy1"])
self.executeCommand(["vmware-mount", "copy2.vmdk", "copy2"])
print "filesystems mounted"
snaptime = datetime.now()
try:
shutil.move("diff/diff1.1", "diff/diff1_" + str(snaptime.year) + str(snaptime.month) + str(snaptime.day) + "_" + str(snaptime.hour) + str(snaptime.minute))
shutil.move("diff/diff2.1", "diff/diff2_" + str(snaptime.year) + str(snaptime.month) + str(snaptime.day) + "_" + str(snaptime.hour) + str(snaptime.minute))
except IOError as e:
pass
try:
shutil.move("diff/diff1.2", "diff/diff1.1")
shutil.move("diff/diff2.2", "diff/diff2.1")
except IOError as e:
print "moving files aborted, not enough files present"
try:
shutil.move("diff/diff1.3", "diff/diff1.2")
shutil.move("diff/diff2.3", "diff/diff2.2")
except IOError as e:
print "moving files aborted, not enough files present"
try:
subprocess.check_output("rsync -rvl --size-only --dry-run --devices honeypot/ copy1 > diff/diff1.3 2>/dev/null", shell=True)
except subprocess.CalledProcessError as e:
print "ignoring exitcode from rsync"
try:
subprocess.check_output("rsync -rvl --size-only --dry-run --devices honeypot/ copy2 > diff/diff2.3 2>/dev/null", shell=True)
except subprocess.CalledProcessError as e:
print "ignoring exitcode from rsync"
try:
list1 = self.fileToList("diff/diff1.1")
list2 = self.fileToList("diff/diff1.2")
list3 = self.fileToList("diff/diff1.3")
list4 = self.fileToList("diff/diff2.1")
list5 = self.fileToList("diff/diff2.2")
list6 = self.fileToList("diff/diff2.3")
for line in list2:
if line in list1:
continue
else:
if line in list3:
with open('notify.log', 'a+') as notify:
notify.write(line)
for line in list5:
if line in list4:
continue
else:
if line in list6:
with open('notify.log', 'a+') as notify:
notify.write(line)
print "FS-Compare done"
except IOError as e:
print "not enough files present for comparison"
time.sleep(5)
try:
self.executeCommand(["vmware-mount", "-x"])
except subprocess.CalledProcessError as e:
self.executeCommand(["vmware- mount", "-x"])
print "ignoring vmware-mount error"
return
def diffNw(self):
cap1 = self.pcapToList(self.archivepath + "honeypot.pcap")
cap2 = self.pcapToList(self.archivepath + "clone1.pcap")
cap3 = self.pcapToList(self.archivepath + "clone2.pcap")
print "pcap-files read"
duplicated = []
for pkg1 in cap1:
time1 = float(pkg1.sniff_timestamp)
param = float(cap1[-1].sniff_timestamp) - 60 * 60 * 2
less = float(time1) - 60 * 60
more = float(time1) + 60 * 60
if pkg1.ip.dst in duplicated:
continue
else:
duplicated.append(pkg1.ip.dst)
if time1 > param:
counter = 0
for pkg2 in cap2:
time2 = float(pkg2.sniff_timestamp)
if pkg1.ip.dst == pkg2.ip.dst and less > time2 and more <= time2:
counter += 1
break
for pkg3 in cap3:
time3 = float(pkg3.sniff_timestamp)
if pkg1.ip.dst == pkg3.ip.dst and less > time3 and more <= time3:
counter += 1
break
if counter < 1:
with open(self.honeypath + 'fs/notify.log', 'a+') as notify:
notify.write(pkg1.ip.dst + "\n")
time.sleep(2)
nowtime = datetime.now()
shutil.move(self.archivepath + "honeypot.pcap", self.archivepath + "honeypot.pcap_" + str(nowtime.year) + str(nowtime.month) + str(nowtime.day) + str(nowtime.hour) + str(nowtime.minute))
shutil.move(self.archivepath + "clone1.pcap", self.archivepath + "clone1.pcap_" + str(nowtime.year) + str(nowtime.month) + str(nowtime.day) + str(nowtime.hour) + str(nowtime.minute))
shutil.move(self.archivepath + "clone2.pcap", self.archivepath + "clone2.pcap_" + str(nowtime.year) + str(nowtime.month) + str(nowtime.day) + str(nowtime.hour) + str(nowtime.minute))
print "Network-Compare done"
return
def fileToList(self, filename):
with open(filename) as f:
content = f.readlines()
return content
def pcapToList(self, filepath):
cap = pyshark.FileCapture(filepath, display_filter="tcp and tcp.seq < 2")
li = []
for pkg in cap:
li.append(pkg)
return li
def executeCommand(self, command, **communicate):
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
retcode = None
while retcode is None:
# pdb.set_trace()
retcode = p.poll()
line = p.stdout.readline()
if ("optional" in communicate and "[y/N]" in line):
p.stdin.write("y")
print line
if retcode is not 0:
raise subprocess.CalledProcessError
return
return
def cleanup(self):
    """Destroy all vagrant VMs and boxes used by the honeypot setup and
    remove the working directory tree.
    """
    os.chdir("vm")
    # optional=True lets executeCommand auto-confirm any "[y/N]" prompt.
    self.executeCommand(["vagrant", "destroy", "-f"], optional=True)
    os.chdir("clone1")  # relative to "vm"
    self.executeCommand(["vagrant", "destroy", "-f"], optional=True)
    os.chdir("../clone2")
    self.executeCommand(["vagrant", "destroy", "-f"], optional=True)
    self.executeCommand(["vagrant", "box", "remove", "ubuntu", "-f"])
    self.executeCommand(["vagrant", "box", "remove", "ubuntu_clone", "-f"])
    # Pause before deleting the tree — presumably to let vagrant release
    # its file handles; TODO confirm the delay is actually needed.
    time.sleep(5)
    os.chdir(self.honeypath)
    shutil.rmtree(self.honeypath + "vm")
    return
| |
# needs access to libtcc and math.h
# TODO: *get tcc errors (currently something like 'Unknown error 3217941984',
# this makes debugging painful)
# *currently the compiled function accepts too many arguments silently
# *implement multi-dimensional functions for frange
# *list comprehension syntax for frange?
# *configuration of path to libtcc.so
# *add gcc support again (easier to set up than tcc)
# *fix compiler warnings
# heavily inspired by http://www.cs.tut.fi/~ask/cinpy/
"""
Experimental module for compiling functions to machine code.
Can also be used to generate C code from SymPy expressions.
Depends on libtcc.
This code is experimental. It may have severe bugs. Due to the use of C,
it's able to crash your Python interpreter/debugger with obscure error
messages.
64 bit floats (double) are used.
Overview
========
clambdify: compile a function to machine code (only useful for big
functions)
frange: evaluate a function on a range of numbers using machine code
cexpr: translate a Python expression to a C expression
genfcode: generate C code from a lambda string
evanonarray: evaluate a function on an array using machine code
Performance
===========
Python functions using the math module are *quite* fast. For simple
functions they are faster than functions compiled to machine code. So you
should test to see whether lambdify is fast enough for you.
Iterating is slow in Python (it's probably the biggest bottle neck).
frange allows you to iterate using machine code. This can result in huge
speedups. You might want to use NumPy: http://numpy.org/
For simple functions it's faster, but for big ones frange can be several
times more efficient.
You should experiment to see which solution is best for your application.
You can run the included benchmarks to see the real performance on your
machine.
Configuration
=============
You will probably need to compile libtcc on your own. Get the sources of
tcc:
http://bellard.org/tcc/
Currently it only works for a recent development version. So you might want
to run the following commands (you have to use your own paths of course):
$ cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/tinycc co tinycc
$ cd tinycc
$ ./configure
$ make
$ gcc -shared -Wl,-soname,libtcc.so -o libtcc.so libtcc.o
$ cd sympy/utilities/
$ ln -s tinycc/libtcc.so # or change libtccpath in compilef.py
You might try to run libtcc_test. If something went wrong there will be bad
low level Python errors probably crashing the interpreter. The error output
will be printed to stdout or stderr, which might be different to your Python
shell.
Make sure that this module knows the path to libtcc.
If everything went right, all the tests will pass. Run this file to do so
and to see the results of some benchmarks.
"""
from __future__ import print_function, division
import ctypes
from sympy import Symbol, cse, sympify
from sympy.utilities.lambdify import lambdastr as getlambdastr
from sympy.external import import_module
# Path to the tcc shared library; adjust if libtcc.so lives elsewhere.
libtccpath = './libtcc.so'
dps = 17  # decimal places of float precision used when evalf()-ing exprs
# load libtcc TODO: better Windows support
try:
    libtcc = ctypes.cdll.LoadLibrary(libtccpath)
except OSError:
    # Missing/unloadable library is normalized to an ImportError below.
    libtcc = None
if not libtcc:
    raise ImportError('Could not load libtcc')
def __getClosePos(expr, braces, stopchar):
    """
    Returns the closing position of the expression which is either the first occurrence of a character
    in stopchar that is not in braces, the first unmatched closing brace or the end of the expression.

    Examples
    ========

    >>> from sympy.utilities.compilef import __getClosePos
    >>> __getClosePos('3*x', '()', '+-')
    2
    >>> __getClosePos('3 + x) + 2', '()', '+-')
    2
    >>> __getClosePos('(3 + x)*y) + 4', '()', '+-')
    9
    """
    depth = 0
    for pos, ch in enumerate(expr):
        if ch == braces[0]:
            depth += 1
        elif ch == braces[1]:
            if depth == 0:
                # unmatched closing brace: the operator sits inside braces
                return pos
            depth -= 1
        elif depth == 0 and ch in stopchar:
            return pos
    # no terminator found: the expression runs to its last character
    return pos
def __getLeftRight(expr, index, oplength=1, stopchar='+-'):
    """
    Gets the expressions to the left and right of an operator.

    >>> __getLeftRight('1/(g(x)*3.5)**(x - a**x)/(x**2 + a)', 12,
    ...                oplength=2, stopchar='+-*/')
    ('(g(x)*3.5)', '(x - a**x)')
    """
    # assumes syntactically correct input
    # scan the reversed prefix to find where the left operand starts
    span = __getClosePos(expr[:index][::-1], ")(", stopchar)
    left = expr[index - span:index]
    # scan forward from just past the operator for the right operand
    span = __getClosePos(expr[index + oplength:], "()", stopchar)
    right = expr[index + oplength:index + oplength + span]
    return (left, right)
def cexpr(pyexpr):
    """
    Python math expression string -> C expression string
    """
    # TODO: better spacing
    # Rewrite each 'a**b' into 'pow(a, b)' until no '**' remains.
    while '**' in pyexpr:
        pos = pyexpr.find('**')
        base, exponent = __getLeftRight(pyexpr, pos, 2, '+-*/')
        pyexpr = pyexpr.replace(base + '**' + exponent, ' pow(%s, %s) '
                                % (base.lstrip(), exponent.rstrip()))
    # TODO: convert 'x**n' to 'x*x*...*x'
    # TODO: avoid integer division
    return pyexpr
def _gentmpvars():
    """
    Generate symbols tmp1, tmp2, ... infinitely.
    """
    n = 1
    while True:
        yield Symbol('tmp%d' % n)
        n += 1
def genfcode(lambdastr, use_cse=False):
    """
    Python lambda string -> C function code

    Optionally cse() is used to eliminate common subexpressions.
    """
    # TODO: verify lambda string
    # interpret lambda string
    varstr, fstr = lambdastr.split(': ')
    # Remove the 'lambda' keyword as a *prefix*. The previous
    # varstr.lstrip('lambda ') stripped any leading characters from the set
    # {'l','a','m','b','d',' '} and therefore mangled argument lists that
    # begin with those letters: 'lambda a'.lstrip('lambda ') == ''.
    if varstr.startswith('lambda'):
        varstr = varstr[len('lambda'):].lstrip()
    # generate C variable string
    cvars = varstr.split(',')
    cvarstr = ''
    for v in cvars:
        cvarstr += 'double %s, ' % v
    cvarstr = cvarstr.rstrip(', ')
    # convert function string to C syntax
    if not use_cse:
        cfstr = ''
        finalexpr = cexpr(fstr)
    else:
        # eliminate common subexpressions
        subs, finalexpr = cse(sympify(fstr), _gentmpvars())
        if len(finalexpr) != 1:
            raise ValueError("Length should be 1")
        vardec = ''
        cfstr = ''
        for symbol, expr in subs:
            vardec += ' double %s;\n' % symbol.name
            cfstr += ' %s = %s;\n' % (
                symbol.name, cexpr(str(expr.evalf(dps))))
        cfstr = vardec + cfstr
        finalexpr = cexpr(str(finalexpr[0].evalf(dps)))
    # generate C code
    code = """
inline double f(%s)
{
%s
return %s;
}
""" % (cvarstr, cfstr, finalexpr)
    return code
def __run(cmd):
    """
    Checks the exit code of a ran command.
    """
    if cmd != 0:
        raise RuntimeError('could not run libtcc command')
def _compile(code, argcount=None, fname='f', fprototype=None):
    """
    C code with function -> compiled function

    Supports all standard C math functions, pi and e.
    Function is assumed to get and return 'double' only.
    Uses libtcc.

    :code: C source text containing the function to compile.
    :argcount: number of double arguments (required when fprototype is None).
    :fname: name of the C symbol to fetch after compilation.
    :fprototype: optional sequence [restype, argtype1, ...] of ctypes types.
    """
    # returned type and all arguments are double
    if fprototype:
        fprototype = ctypes.CFUNCTYPE(*fprototype)
    else:
        if not argcount:
            raise ValueError("need argcount if no prototype is specified")
        # one slot for the return type plus argcount double arguments
        fprototype = ctypes.CFUNCTYPE(*[ctypes.c_double]*(argcount + 1))
    # see libtcc.h for API documentation
    tccstate = libtcc.tcc_new()
    __run(libtcc.tcc_set_output_type(tccstate, 0))  # output to memory
    ##print libtcc.tcc_add_library_path(tccstate, mathh) # could be dropped
    __run(libtcc.tcc_add_library(tccstate, 'm'))  # use math.h FIXME: Windows
    # compile string
    __run(libtcc.tcc_compile_string(tccstate, code))
    __run(libtcc.tcc_relocate(tccstate))  # fails if link error
    # create C variable to receive the symbol's address
    symbol = ctypes.c_long()
    __run(libtcc.tcc_get_symbol(tccstate, ctypes.byref(symbol), fname))
    # return reference to C function
    return fprototype(symbol.value)
# expr needs to work with lambdastr
def clambdify(args, expr, **kwargs):
    """
    SymPy expression -> compiled function

    Supports all standard C math functions, pi and e.

    >>> from sympy import sqrt
    >>> from sympy.abc import x, y
    >>> cf = clambdify((x,y), sqrt(x*y))
    >>> cf(0.5, 4)
    1.4142135623730951
    """
    # Turn the expression into a lambda string with constants evaluated
    # to 21 digits, then wrap the generated C function with the math.h
    # prelude and hand it to the compiler.
    lambda_string = getlambdastr(args, expr.evalf(21))
    code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
""" % genfcode(lambda_string, **kwargs)
    return _compile(code, len(args))
def frange(*args, **kwargs):
    """
    frange(lambdastr, [start,] stop[, step]) -> ctypes double array

    Evaluates function on range using machine code.
    Currently only one-dimensional functions are supported.

    For simple functions it's somewhat slower than NumPy.
    For big functions it can be several times faster.

    lambdastr has the same restrictions as in clambdify.

    >>> frange('lambda x: sqrt(x)', 1, 4) # doctest: +ELLIPSIS
    <__main__.c_double_Array_3 object at ...>
    >>> for i in _:
    ...     print(i)
    ...
    1.0
    1.41421356237
    1.73205080757
    """
    if len(args) > 4:
        raise TypeError('expected at most 4 arguments, got %i' % len(args))
    if len(args) < 2:
        raise TypeError('expected at least 2 argument, got %i' % len(args))
    # interpret arguments (range-style: stop only, or start/stop[/step])
    lambdastr = args[0]
    start = 0
    step = 1
    if len(args) == 2:
        stop = args[1]
    elif len(args) >= 3:
        start = args[1]
        stop = args[2]
        if len(args) == 4:
            step = args[3]
    # a step too small to change start would loop forever in the C code
    if start + step == start:
        raise ValueError("step is too small and would cause an infinite loop")
    # determine length of resulting array
    # TODO: do this better
    length = stop - start
    if length % step == 0:
        length = length/step - 1  # exclude last one
    else:
        length = length//step
    if step > 0:
        if start < stop:
            length += 1  # include first one
    else:
        if start > stop:
            length += 1  # include first one
    if length < 0:
        length = 0
    if length != int(length):
        raise ValueError("length should be an integer")
    length = int(length)
    # create array (zero-initialized; filled in-place by the C loop)
    a = (ctypes.c_double * length)()
    # generate code: x starts at `start` and is advanced by `step`
    vardef = 'double* MAX; double x = %f;' % start
    loopbody = '*result = f(x); x += %f;' % step
    code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
void evalonrange(double *result, int n)
{
%s
for (MAX = result + n; result < MAX; result++)
{
%s
}
}
""" % (genfcode(lambdastr, **kwargs), vardef, loopbody)
    # compile and run
    evalonrange = _compile(code, fname='evalonrange',
                           fprototype=[None, ctypes.c_void_p, ctypes.c_int])
    evalonrange(ctypes.byref(a), ctypes.c_int(length))
    # return ctypes array with results
    return a
def evalonarray(lambdastr, array, length=None, **kwargs):
    """
    Evaluates a function on an array using machine code.

    array can be a numpy array, a ctypes array or a pointer to an array.
    In the latter case, the correct length must be specified.

    array will be overwritten! Make a copy before to avoid this.
    """
    # interpret arguments: derive a raw pointer + element count
    if hasattr(array, 'ctypes'):  # numpy array
        pointer = array.ctypes.get_as_parameter()
        length = len(array)
    elif isinstance(array, ctypes.Array):  # ctypes array
        pointer = ctypes.byref(array)
        length = len(array)
    elif isinstance(array, ctypes.c_void_p):  # ctypes pointer FIXME
        # caller-supplied length is the only size information we have
        assert isinstance(length, int) and not length < 0
        pointer = array
    else:
        raise ValueError('array type not recognized')
    # generate code: overwrite each element with f(element)
    code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
void evalonarray(double *array, int length)
{
double* MAX;
for (MAX = array + length; array < MAX; array++)
{
*array = f(*array);
}
}
""" % genfcode(lambdastr, **kwargs)
    # compile and run on the array
    run = _compile(code, fname='evalonarray',
                   fprototype=[None, ctypes.c_void_p, ctypes.c_int])
    run(pointer, length)
#########
# TESTS #
#########
from sympy import sqrt, pi, lambdify
# Python math functions aliased with a leading underscore to avoid
# clashing with the sympy names imported above.
from math import exp as _exp, cos as _cos, sin as _sin
# TODO: This should be removed for the release of 0.7.7, see issue #7853
# NOTE(review): binds default_array=True for every lambdify call below.
from functools import partial
lambdify = partial(lambdify, default_array=True)
def test_cexpr():
    source = '1/(g(x)*3.5)**(x - a**x)/(x**2 + a)'
    expected = '1/pow((g(x)*3.5),(x-pow(a,x)))/(pow(x,2)+a)'
    # compare ignoring whitespace inserted by the translator
    assert cexpr(source).replace(' ', '') == expected
def test_clambdify():
    x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
    # compiled and pure-Python versions of sqrt(x*y) must agree exactly
    f1 = sqrt(x*y)
    pf1 = lambdify((x, y), f1, 'math')
    cf1 = clambdify((x, y), f1)
    for i in xrange(10):
        if cf1(i, 10 - i) != pf1(i, 10 - i):
            raise ValueError("Values should be equal")
    f2 = (x - y) / z * pi
    pf2 = lambdify((x, y, z), f2, 'math')
    cf2 = clambdify((x, y, z), f2)
    if round(pf2(1, 2, 3), 14) != round(cf2(1, 2, 3), 14):
        raise ValueError("Values should be equal")
    # FIXME: slight difference in precision
def test_frange():
    # The lambda string is compiled to C, so it must use the C math function
    # names exp/cos; the underscore names (_exp/_cos) only exist on the
    # Python side and would be undefined symbols in the generated C code.
    fstr = 'lambda x: exp(x)*cos(x)**x'
    # Build the Python reference with the module's math aliases bound in.
    f = eval(fstr, {'exp': _exp, 'cos': _cos})
    a = frange(fstr, 30, 168, 3)
    args = range(30, 168, 3)
    if len(a) != len(args):
        raise ValueError("Lengths should be equal")
    for i in xrange(len(a)):
        if a[i] != f(args[i]):
            raise ValueError("Values should be equal")
    # empty ranges
    if len(frange('lambda x: x', 0, -10000)) != 0:
        raise ValueError("Length should be 0")
    if len(frange('lambda x: x', -1, -1, 0.0001)) != 0:
        raise ValueError("Length should be 0")
    # fractional step, ascending
    a = frange('lambda x: x', -5, 5, 0.1)
    b = range(-50, 50)
    if len(a) != len(b):
        raise ValueError("Lengths should be equal")
    for i in xrange(len(a)):
        if int(round(a[i]*10)) != b[i]:
            raise ValueError("Values should be equal")
    # negative integer step
    a = frange('lambda x: x', 17, -9, -3)
    b = range(17, -9, -3)
    if len(a) != len(b):
        raise ValueError("Lengths should be equal")
    for i in xrange(len(a)):
        if a[i] != b[i]:
            raise ValueError("a and b should be equal")
    # negative fractional step
    a = frange('lambda x: x', 2.7, -3.1, -1.01)
    b = range(270, -310, -101)
    if len(a) != len(b):
        raise ValueError("Lengths should be equal")
    for i in xrange(len(a)):
        if int(round(a[i]*100)) != b[i]:
            raise ValueError("Values should be equal")
    assert frange('lambda x: x', 0.2, 0.1, -0.1)[0] == 0.2
    if len(frange('lambda x: x', 0)) != 0:
        raise ValueError("Length should be 0")
    if len(frange('lambda x: x', 1000, -1)) != 0:
        raise ValueError("Length should be 0")
    if len(frange('lambda x: x', -1.23, 3.21, -0.0000001)) != 0:
        raise ValueError("Length should be 0")
    # wrong argument counts must raise TypeError
    try:
        frange()
        assert False
    except TypeError:
        pass
    try:
        frange(1, 2, 3, 4, 5)
        assert False
    except TypeError:
        pass
def test_evalonarray_ctypes():
    a = frange('lambda x: x', 10)
    # The lambda string is compiled to C, so use the C name 'sin'; the
    # underscore alias _sin exists only on the Python side and would be an
    # undefined symbol in the generated C code.
    evalonarray('lambda x: sin(x)', a)
    for i, j in enumerate(a):
        if _sin(i) != j:
            raise ValueError("Values should be equal")
    # TODO: test for ctypes pointers
    ## evalonarray('lambda x: asin(x)', ctypes.byref(a), len(a))
    ## for i, j in enumerate(a):
    ##     print(j)
def test_evalonarray_numpy():
    numpy = import_module('numpy')
    values = numpy.arange(10, dtype=float)
    # evaluates in place: each element becomes element + 1
    evalonarray('lambda x: x + 1', values)
    for idx, val in enumerate(values):
        if float(idx + 1) != val:
            raise ValueError("Values should be equal")
def test_use_cse():
    # the cse and non-cse code paths must produce identical results
    args = ('lambda x: sqrt(x + 1)**sqrt(x + 1)', 1, 10)
    plain = frange(*args)
    with_cse = frange(*args, **{'use_cse': True})
    if len(plain) != len(with_cse):
        raise ValueError("Lengths should be equal")
    for i in xrange(len(plain)):
        if plain[i] != with_cse[i]:
            raise ValueError("a and b should be equal")
def benchmark():
    """
    Run some benchmarks for clambdify and frange.

    NumPy and Psyco are used as reference if available.
    """
    from time import time
    from timeit import Timer

    def fbenchmark(f, var=[Symbol('x')]):
        """
        Do some benchmarks with f using clambdify, lambdify and psyco.
        """
        global cf, pf, psyf
        start = time()
        cf = clambdify(var, f)
        print('compile time (including sympy overhead): %f s' % (
            time() - start))
        pf = lambdify(var, f, 'math')
        psyf = None
        psyco = import_module('psyco')
        if psyco:
            psyf = lambdify(var, f, 'math')
            psyco.bind(psyf)
        code = '''for x in (i/1000. for i in range(1000)):
    f(%s)''' % ('x,'*len(var)).rstrip(',')
        t1 = Timer(code, 'from __main__ import cf as f')
        t2 = Timer(code, 'from __main__ import pf as f')
        if psyf:
            t3 = Timer(code, 'from __main__ import psyf as f')
        else:
            t3 = None
        print('for x = (0, 1, 2, ..., 999)/1000')
        print('20 times in 3 runs')
        print('compiled: %.4f %.4f %.4f' % tuple(t1.repeat(3, 20)))
        print('Python lambda: %.4f %.4f %.4f' % tuple(t2.repeat(3, 20)))
        if t3:
            print('Psyco lambda: %.4f %.4f %.4f' % tuple(t3.repeat(3, 20)))

    print('big function:')
    # Import the *symbolic* exp/sin/cos: sympy does not export the
    # underscore-prefixed names (_exp etc. are this module's math aliases),
    # so the old import raised ImportError; the symbolic names are also what
    # the C/numpy code below needs.
    from sympy import exp, sin, cos, pi, lambdify
    x = Symbol('x')
##    f1 = diff(exp(x)**2 - sin(x)**pi, x) \
##        * x**12-2*x**3+2*exp(x**2)-3*x**7+4*exp(123+x-x**5+2*x**4) \
##        * ((x + pi)**5).expand()
    f1 = 2*exp(x**2) + x**12*(-pi*sin(x)**((-1) + pi)*cos(x) + 2*exp(2*x)) \
        + 4*(10*pi**3*x**2 + 10*pi**2*x**3 + 5*pi*x**4 + 5*x*pi**4 + pi**5
             + x**5)*exp(123 + x + 2*x**4 - x**5) - 2*x**3 - 3*x**7
    fbenchmark(f1)
    print()
    print('simple function:')
    y = Symbol('y')
    f2 = sqrt(x*y) + x*5
    fbenchmark(f2, [x, y])
    times = 100000
    # C math names (exp/sin/cos/sqrt) so the string works both for the
    # generated C code and for the numpy namespace below.
    fstr = 'exp(sin(exp(-x**2)) + sqrt(pi)*cos(x**5/(x**3-x**2+pi*x)))'
    print()  # was a bare 'print': a no-op under print_function
    print('frange with f(x) =')
    print(fstr)
    print('for x=1, ..., %i' % times)
    print('in 3 runs including full compile time')
    t4 = Timer("frange('lambda x: %s', 0, %i)" % (fstr, times),
               'from __main__ import frange')
    numpy = import_module('numpy')
    print('frange: %.4f %.4f %.4f' % tuple(t4.repeat(3, 1)))
    if numpy:
        t5 = Timer('x = arange(%i); result = %s' % (times, fstr),
                   'from numpy import arange, sqrt, exp, sin, cos, exp, pi')
        print('numpy: %.4f %.4f %.4f' % tuple(t5.repeat(3, 1)))
    # TODO: integration into fbenchmark
# TODO: integration into fbenchmark
if __name__ == '__main__':
    if __debug__:
        print('Running tests...')
        numpy = import_module('numpy')
        test_cexpr()
        test_clambdify()
        test_frange()
        test_evalonarray_ctypes()
        if numpy:
            test_evalonarray_numpy()
        test_use_cse()
        import doctest
        doctest.testmod()
        print('OK')
    # bare 'print' was a no-op with print_function imported; emit the
    # intended blank line explicitly
    print()
    print('Running benchmark...')
    benchmark()
| |
#===============================================================================
# Copyright 2007 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
This module contains classes for writing to an index.
"""
from array import array
from collections import defaultdict
from tempfile import TemporaryFile
from whoosh import index, postpool, reading, structfile, tables
from whoosh.fields import UnknownFieldError
from whoosh.util import fib
# Exceptions
class IndexingError(Exception):
    """Raised for errors while writing to the index, e.g. misuse of the
    start_document()/end_document() protocol or non-unique update fields."""
    pass
# array typecode ('H' = unsigned short) used to store per-field doc lengths
DOCLENGTH_TYPE = "H"
# largest value storable in that type; field lengths are clamped to this
DOCLENGTH_LIMIT = 2**16-1
# Merge policies
# A merge policy is a callable that takes the Index object,
# the SegmentWriter object, and the current SegmentSet
# (not including the segment being written), and returns an
# updated SegmentSet (not including the segment being
# written).
def NO_MERGE(ix, writer, segments):
    """This policy does not merge any existing segments.
    """
    # Keep every existing segment untouched.
    return segments
def MERGE_SMALL(ix, writer, segments):
    """This policy merges small segments, where small is
    defined using a heuristic based on the fibonacci sequence.
    """
    kept = index.SegmentSet()
    by_size = sorted((s.doc_count_all(), s) for s in segments)
    running_total = 0
    for rank, (doc_count, seg) in enumerate(by_size):
        # empty segments are dropped entirely (neither merged nor kept)
        if doc_count > 0:
            running_total += doc_count
            if running_total < fib(rank + 5):
                # still "small": fold this segment into the new one
                writer.add_segment(ix, seg)
            else:
                # too large to merge cheaply: keep it as-is
                kept.append(seg)
    return kept
def OPTIMIZE(ix, writer, segments):
    """This policy merges all existing segments.
    """
    # Every segment is folded into the writer; none survive on their own.
    for existing in segments:
        writer.add_segment(ix, existing)
    return index.SegmentSet()
# Writing classes
class IndexWriter(index.DeletionMixin):
    """High-level object for writing to an index. This object takes care of
    instantiating a SegmentWriter to create a new segment as you add documents,
    as well as merging existing segments (if necessary) when you finish.

    You can use this object as a context manager. If an exception is thrown
    from within the context it calls cancel(), otherwise it calls commit()
    when the context ends.
    """

    # This class is mostly a shell for SegmentWriter. It exists to handle
    # multiple SegmentWriters during merging/optimizing.

    def __init__(self, ix, postlimit = 4 * 1024 * 1024,
                 term_blocksize = 1 * 1024, doc_blocksize = 8 * 1024,
                 vector_blocksize = 8 * 1024):
        """
        :ix: the Index object you want to write to.
        :postlimit: maximum size of a run in the posting pool.
        :term_blocksize: block size for the term table.
        :doc_blocksize: block size for the stored-documents table.
        :vector_blocksize: block size for the term-vector table.
        """
        # Obtain a lock
        self.locked = ix.lock()

        self.index = ix
        # Work on a copy of the current segment set; committed back at the end.
        self.segments = ix.segments.copy()
        self.postlimit = postlimit
        self.term_blocksize = term_blocksize
        self.doc_blocksize = doc_blocksize
        self.vector_blocksize = vector_blocksize
        # SegmentWriter and Searcher are created lazily on first use.
        self._segment_writer = None
        self._searcher = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit on clean exit, roll back if the context raised.
        if exc_type:
            self.cancel()
        else:
            self.commit()

    def _finish(self):
        # Common teardown used by both commit() and cancel().
        self._close_searcher()
        self._segment_writer = None
        # Release the lock
        if self.locked:
            self.index.unlock()

    def segment_writer(self):
        """Returns the underlying SegmentWriter object."""
        if not self._segment_writer:
            self._segment_writer = SegmentWriter(self.index, self.postlimit,
                                                 self.term_blocksize,
                                                 self.doc_blocksize, self.vector_blocksize)
        return self._segment_writer

    def searcher(self):
        """Returns a searcher for the existing index."""
        if not self._searcher:
            self._searcher = self.index.searcher()
        return self._searcher

    def _close_searcher(self):
        # Close and forget the cached searcher, if one was opened.
        if self._searcher:
            self._searcher.close()
            self._searcher = None

    def start_document(self):
        """Starts recording information for a new document. This should be followed by
        add_field() calls, and must be followed by an end_document() call.
        Alternatively you can use add_document() to add all fields at once.
        """
        self.segment_writer().start_document()

    def add_field(self, fieldname, text, stored_value = None):
        """Adds a the value of a field to the document opened with start_document().

        :fieldname: The name of the field in which to index/store the text.
        :text: The unicode text to index.
        :stored_value: optional alternate value to store instead of *text*.
        """
        self.segment_writer().add_field(fieldname, text, stored_value = stored_value)

    def end_document(self):
        """
        Closes a document opened with start_document().
        """
        self.segment_writer().end_document()

    def add_document(self, **fields):
        """Adds all the fields of a document at once. This is an alternative to calling
        start_document(), add_field() [...], end_document().

        The keyword arguments map field names to the values to index/store.

        For fields that are both indexed and stored, you can specify an alternate
        value to store using a keyword argument in the form "_stored_<fieldname>".
        For example, if you have a field named "title" and you want to index the
        text "a b c" but store the text "e f g", use keyword arguments like this::

            add_document(title=u"a b c", _stored_title=u"e f g")
        """
        self.segment_writer().add_document(fields)

    def update_document(self, **fields):
        """Adds or replaces a document. At least one of the fields for which you
        supply values must be marked as 'unique' in the index's schema.

        The keyword arguments map field names to the values to index/store.

        For fields that are both indexed and stored, you can specify an alternate
        value to store using a keyword argument in the form "_stored_<fieldname>".
        For example, if you have a field named "title" and you want to index the
        text "a b c" but store the text "e f g", use keyword arguments like this::

            update_document(title=u"a b c", _stored_title=u"e f g")
        """
        # Check which of the supplied fields are unique
        unique_fields = [name for name, field
                         in self.index.schema.fields()
                         if name in fields and field.unique]
        if not unique_fields:
            raise IndexingError("None of the fields in %r are unique" % fields.keys())

        # Delete documents in which the supplied unique fields match
        # NOTE(review): 's' is unused, but searcher() caches the searcher that
        # delete_by_term (presumably from DeletionMixin) relies on — confirm.
        s = self.searcher()
        for name in unique_fields:
            self.delete_by_term(name, fields[name])

        # Add the given fields
        self.add_document(**fields)

    def commit(self, mergetype = MERGE_SMALL):
        """Finishes writing and unlocks the index.

        :mergetype: How to merge existing segments. One of
            writing.NO_MERGE, writing.MERGE_SMALL, or writing.OPTIMIZE.
        """
        self._close_searcher()
        # Merge if anything was written, or if the caller explicitly asked
        # for OPTIMIZE (which merges even when no new documents were added).
        if self._segment_writer or mergetype is OPTIMIZE:
            self._merge_segments(mergetype)
        self.index.commit(self.segments)
        self._finish()

    def cancel(self):
        """Cancels any documents/deletions added by this object
        and unlocks the index.
        """
        # Drops the uncommitted SegmentWriter without calling index.commit().
        self._finish()

    def _merge_segments(self, mergetype):
        sw = self.segment_writer()
        # The merge policy folds some existing segments into the new segment
        # (via sw.add_segment) and returns the segments that survive as-is.
        new_segments = mergetype(self.index, sw, self.segments)
        sw.close()
        # The freshly written segment joins the surviving ones.
        new_segments.append(sw.segment())
        self.segments = new_segments
class SegmentWriter(object):
"""
Do not instantiate this object directly; it is created by the IndexWriter object.
Handles the actual writing of new documents to the index: writes stored fields,
handles the posting pool, and writes out the term index.
"""
class DocumentState(object):
def __init__(self, scorable_fields):
self._scorable_fields = scorable_fields
self._fieldnum_to_pos = dict((fnum, i) for i, fnum in enumerate(scorable_fields))
self.reset()
def add_to_length(self, fieldnum, n):
pos = self._fieldnum_to_pos[fieldnum]
current = self.field_lengths[pos]
if current >= DOCLENGTH_LIMIT: return
self.field_lengths[pos] = min(current + n, DOCLENGTH_LIMIT)
def reset(self):
#: Whether a document is currently in progress
self.active = False
#: Maps field names to stored field contents for this document
self.stored_fields = {}
#: Keeps track of the last field that was added
self.prev_fieldnum = None
#: Keeps track of field lengths in this document
self.field_lengths = array(DOCLENGTH_TYPE, [0] * len(self._scorable_fields))
def __init__(self, ix, postlimit,
term_blocksize, doc_blocksize, vector_blocksize,
name = None):
"""
:ix: the Index object in which to write the new segment.
:postlimit: the maximum size for a run in the posting pool.
:name: the name of the segment.
:blocksize: the block size to use for tables created by this writer.
"""
self.index = ix
self.schema = ix.schema
self.storage = ix.storage
self.name = name or ix._next_segment_name()
self.max_doc = 0
self.max_weight = 0
self.pool = postpool.PostingPool(limit = postlimit)
self._scorable_fields = self.schema.scorable_fields()
# Create a temporary segment object just so we can access
# its *_filename attributes (so if we want to change the
# naming convention, we only have to do it in one place).
tempseg = index.Segment(self.name, 0, 0, None)
# Open files for writing
self.term_table = self.storage.create_table(tempseg.term_filename, postings = True,
blocksize = term_blocksize)
self.doclength_table = self.storage.create_records(tempseg.doclen_filename,
DOCLENGTH_TYPE,
len(self._scorable_fields))
self.docs_table = self.storage.create_table(tempseg.docs_filename,
blocksize = doc_blocksize, compressed = 9)
self.vector_table = None
if self.schema.has_vectored_fields():
self.vector_table = self.storage.create_table(tempseg.vector_filename,
postings = True,
stringids = True,
blocksize = vector_blocksize)
# Keep track of the total number of tokens (across all docs)
# in each field
self.field_length_totals = defaultdict(int)
# Records the state of the writer's current document
self._doc_state = SegmentWriter.DocumentState(self._scorable_fields)
def segment(self):
"""Returns an index.Segment object for the segment being written."""
return index.Segment(self.name, self.max_doc, self.max_weight,
dict(self.field_length_totals))
def close(self):
"""Finishes writing the segment (flushes the posting pool out to disk) and
closes all open files.
"""
if self._doc_state.active:
raise IndexingError("Called SegmentWriter.close() with a document still opened")
self._flush_pool()
self.doclength_table.close()
self.docs_table.close()
self.term_table.close()
if self.vector_table:
self.vector_table.close()
def add_index(self, other_ix):
"""Adds the contents of another Index object to this segment.
This currently does NO checking of whether the schemas match up.
"""
for seg in other_ix.segments:
self.add_segment(other_ix, seg)
def add_segment(self, ix, segment):
"""Adds the contents of another segment to this one. This is used
to merge existing segments into the new one before deleting them.
:ix: The index.Index object containing the segment to merge.
:segment: The index.Segment object to merge into this one.
"""
start_doc = self.max_doc
has_deletions = segment.has_deletions()
if has_deletions:
doc_map = {}
# Merge document info
docnum = 0
schema = ix.schema
doc_reader = reading.DocReader(ix.storage, segment, schema)
try:
vectored_fieldnums = ix.schema.vectored_fields()
if vectored_fieldnums:
doc_reader._open_vectors()
inv = doc_reader.vector_table
outv = self.vector_table
ds = SegmentWriter.DocumentState(self._scorable_fields)
for docnum in xrange(0, segment.max_doc):
if not segment.is_deleted(docnum):
ds.stored_fields = doc_reader[docnum]
ds.field_lengths = doc_reader.doc_field_lengths(docnum)
if has_deletions:
doc_map[docnum] = self.max_doc
for fieldnum in vectored_fieldnums:
if (docnum, fieldnum) in inv:
tables.copy_data(inv, (docnum, fieldnum),
outv, (self.max_doc, fieldnum),
postings = True)
self._write_doc_entry(ds)
self.max_doc += 1
docnum += 1
# Add field length totals
for fieldnum, total in segment.field_length_totals.iteritems():
self.field_length_totals[fieldnum] += total
finally:
doc_reader.close()
# Merge terms
term_reader = reading.TermReader(ix.storage, segment, ix.schema)
try:
for fieldnum, text, _, _ in term_reader:
for docnum, data in term_reader.postings(fieldnum, text):
if has_deletions:
newdoc = doc_map[docnum]
else:
newdoc = start_doc + docnum
self.pool.add_posting(fieldnum, text, newdoc, data)
finally:
term_reader.close()
def start_document(self):
ds = self._doc_state
if ds.active:
raise IndexingError("Called start_document() when a document was already opened")
ds.active = True
def end_document(self):
ds = self._doc_state
if not ds.active:
raise IndexingError("Called end_document() when a document was not opened")
self._write_doc_entry(ds)
ds.reset()
self.max_doc += 1
def add_document(self, fields):
self.start_document()
fieldnames = [name for name in fields.keys() if not name.startswith("_")]
schema = self.schema
for name in fieldnames:
if name not in schema:
raise UnknownFieldError("There is no field named %r" % name)
fieldnames.sort(key = schema.name_to_number)
for name in fieldnames:
value = fields.get(name)
if value:
self.add_field(name, value, stored_value = fields.get("_stored_%s" % name))
self.end_document()
def add_field(self, fieldname, value, stored_value = None,
start_pos = 0, start_char = 0, **kwargs):
if value is None:
return
# Get the field information
schema = self.schema
if fieldname not in schema:
raise UnknownFieldError("There is no field named %r" % fieldname)
fieldnum = schema.name_to_number(fieldname)
field = schema.field_by_name(fieldname)
format = field.format
# Check that the user added the fields in schema order
docstate = self._doc_state
if fieldnum < docstate.prev_fieldnum:
raise IndexingError("Added field %r out of order (add fields in schema order)" % fieldname)
docstate.prev_fieldnum = fieldnum
# If the field is indexed, add the words in the value to the index
if format.analyzer:
if not isinstance(value, unicode):
raise ValueError("%r in field %s is not unicode" % (value, fieldname))
# Count of all terms in the value
count = 0
# Count of UNIQUE terms in the value
unique = 0
for w, freq, data in format.word_datas(value,
start_pos = start_pos, start_char = start_char,
**kwargs):
assert w != ""
self.pool.add_posting(fieldnum, w, self.max_doc, data)
count += freq
unique += 1
# Add the term count to the total for this field
self.field_length_totals[fieldnum] += count
# Add the term count to the per-document field length
if field.scorable:
docstate.add_to_length(fieldnum, count)
# If the field is vectored, add the words in the value to
# the vector table
vector = field.vector
if vector:
vtable = self.vector_table
vdata = dict((w, data) for w, freq, data
in vector.word_datas(value,
start_pos = start_pos, start_char = start_char,
**kwargs))
write_postvalue = vector.write_postvalue
for word in sorted(vdata.keys()):
vtable.write_posting(word, vdata[word], writefn = write_postvalue)
vtable.add_row((self.max_doc, fieldnum), None)
# If the field is stored, add the value to the doc state
if field.stored:
if stored_value is None: stored_value = value
docstate.stored_fields[fieldname] = stored_value
    def _write_doc_entry(self, ds):
        """Persist the per-document bookkeeping for the current document.

        :param ds: the document-state object accumulated by add_field()
            (carries field_lengths and stored_fields).
        """
        docnum = self.max_doc
        self.doclength_table.append(ds.field_lengths)
        self.docs_table.add_row(docnum, ds.stored_fields)
    def _flush_pool(self):
        # This method pulls postings out of the posting pool (built up
        # as documents are added) and writes them to the posting file.
        # Each time it encounters a posting for a new term, it writes
        # the previous term to the term index (by waiting to write the
        # term entry, we can easily count the document frequency and
        # sum the terms by looking at the postings).
        term_table = self.term_table
        write_posting_method = None
        current_fieldnum = None # Field number of the current term
        current_text = None # Text of the current term
        first = True
        current_weight = 0
        # Loop through the postings in the pool.
        # Postings always come out of the pool in field number/alphabetic order.
        for fieldnum, text, docnum, data in self.pool:
            # If we're starting a new term, reset everything
            if write_posting_method is None or fieldnum > current_fieldnum or text > current_text:
                if fieldnum != current_fieldnum:
                    # New field: look up how this field's postings are encoded.
                    write_posting_method = self.schema.field_by_number(fieldnum).format.write_postvalue
                # If we've already written at least one posting, write the
                # previous term to the index.
                if not first:
                    term_table.add_row((current_fieldnum, current_text), current_weight)
                    # Track the largest per-term weight seen across the index.
                    if current_weight > self.max_weight:
                        self.max_weight = current_weight
                # Reset term variables
                current_fieldnum = fieldnum
                current_text = text
                current_weight = 0
                first = False
            elif fieldnum < current_fieldnum or (fieldnum == current_fieldnum and text < current_text):
                # This should never happen!
                raise Exception("Postings are out of order: %s:%s .. %s:%s" %
                                (current_fieldnum, current_text, fieldnum, text))
            # Write the posting itself and accumulate its weight for this term.
            current_weight += term_table.write_posting(docnum, data, write_posting_method)
        # Finish up the last term
        if not first:
            term_table.add_row((current_fieldnum, current_text), current_weight)
            if current_weight > self.max_weight:
                self.max_weight = current_weight
if __name__ == '__main__':
    # This module has no command-line behaviour; it is import-only.
    pass
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.forms import ValidationError
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
class CreateVolumeType(forms.SelfHandlingForm):
    """Admin form for creating a new cinder volume type."""
    name = forms.CharField(max_length=255, label=_("Name"))
    vol_type_description = forms.CharField(
        max_length=255,
        widget=forms.Textarea(attrs={'rows': 4}),
        label=_("Description"),
        required=False)
    is_public = forms.BooleanField(
        label=_("Public"),
        initial=True,
        required=False,
        help_text=_("By default, volume type is created as public. To "
                    "create a private volume type, uncheck this field."))

    def clean_name(self):
        # Reject names that consist only of whitespace; Django's
        # ``required`` check already rejects the truly empty string.
        cleaned_name = self.cleaned_data['name']
        if cleaned_name.isspace():
            raise ValidationError(_('Volume type name can not be empty.'))
        return cleaned_name

    def handle(self, request, data):
        try:
            # Create the volume type through the cinder API.
            volume_type = cinder.volume_type_create(
                request,
                data['name'],
                data['vol_type_description'],
                data['is_public'])
            messages.success(request, _('Successfully created volume type: %s')
                             % data['name'])
            return volume_type
        except Exception as e:
            # A 409 from cinder means the name already exists; report it as
            # a field error instead of a generic failure.
            if getattr(e, 'code', None) == 409:
                msg = _('Volume type name "%s" already '
                        'exists.') % data['name']
                self._errors['name'] = self.error_class([msg])
            else:
                redirect = reverse("horizon:admin:volume_types:index")
                exceptions.handle(request,
                                  _('Unable to create volume type.'),
                                  redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
    """Admin form for creating a cinder QoS spec."""
    name = forms.CharField(max_length=255, label=_("Name"))
    consumer = forms.ThemableChoiceField(label=_("Consumer"),
                                         choices=cinder.CONSUMER_CHOICES)

    def handle(self, request, data):
        try:
            qos_spec = cinder.qos_spec_create(request,
                                              data['name'],
                                              {'consumer': data['consumer']})
            messages.success(request,
                             _('Successfully created QoS Spec: %s')
                             % data['name'])
            return qos_spec
        except Exception as ex:
            # A 409 from cinder indicates a duplicate name; surface it on
            # the name field rather than redirecting with a generic error.
            if getattr(ex, 'code', None) == 409:
                msg = _('QoS Spec name "%s" already '
                        'exists.') % data['name']
                self._errors['name'] = self.error_class([msg])
            else:
                redirect = reverse("horizon:admin:volume_types:index")
                exceptions.handle(request,
                                  _('Unable to create QoS Spec.'),
                                  redirect=redirect)
class CreateVolumeTypeEncryption(forms.SelfHandlingForm):
    """Admin form for adding encryption settings to a volume type."""
    # Read-only display of the volume type being configured.
    name = forms.CharField(label=_("Name"), required=False,
                           widget=forms.TextInput(attrs={'readonly':
                                                         'readonly'}))
    provider = forms.CharField(max_length=255, label=_("Provider"))
    control_location = forms.ThemableChoiceField(label=_("Control Location"),
                                                 choices=(('front-end',
                                                           _('front-end')),
                                                          ('back-end',
                                                           _('back-end')))
                                                 )
    cipher = forms.CharField(label=_("Cipher"), required=False)
    key_size = forms.IntegerField(label=_("Key Size (bits)"),
                                  required=False,
                                  min_value=1)
    volume_type_id = forms.CharField(widget=forms.HiddenInput())

    def handle(self, request, data):
        try:
            # Set Cipher to None if empty
            if data['cipher'] == '':
                data['cipher'] = None
            # ``volume_type_id`` and ``name`` are form bookkeeping, not
            # encryption-type attributes, so pop them before the API call.
            volume_type_id = data.pop('volume_type_id')
            volume_type_name = data.pop('name')
            # Create encryption for the volume type
            volume_type = cinder.\
                volume_encryption_type_create(request,
                                              volume_type_id,
                                              data)
            messages.success(request, _('Successfully created encryption for '
                                        'volume type: %s') % volume_type_name)
            return volume_type
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(
                request, _('Unable to create encrypted volume type.'),
                redirect=redirect)
class UpdateVolumeTypeEncryption(CreateVolumeTypeEncryption):
    """Variant of the create form that updates existing encryption settings.

    Inherits all fields from CreateVolumeTypeEncryption; only the cinder
    call differs.
    """
    def handle(self, request, data):
        try:
            # Set Cipher to None if empty
            if data['cipher'] == '':
                data['cipher'] = None
            # Strip the form-only keys before handing ``data`` to cinder.
            volume_type_id = data.pop('volume_type_id')
            volume_type_name = data.pop('name')
            # Update encryption for the volume type
            volume_type = cinder.\
                volume_encryption_type_update(request,
                                              volume_type_id,
                                              data)
            messages.success(request, _('Successfully updated encryption for '
                                        'volume type: %s') % volume_type_name)
            return volume_type
        except NotImplementedError:
            # The underlying API may not support updating encryption types.
            messages.error(request, _('Updating encryption is not '
                                      'implemented. Unable to update '
                                      ' encrypted volume type.'))
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(request,
                              _('Unable to update encrypted volume type.'),
                              redirect=redirect)
        return False
class ManageQosSpecAssociation(forms.SelfHandlingForm):
    """Associate a volume type with at most one QoS spec."""
    qos_spec_choice = forms.ThemableChoiceField(
        label=_("QoS Spec to be associated"),
        help_text=_("Choose associated QoS Spec."))

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        # Choices depend on per-request initial data, so they are filled
        # in here rather than in the field declaration above.
        qos_spec_field = self.fields['qos_spec_choice']
        qos_spec_field.choices = \
            self.populate_qos_spec_choices()

    def populate_qos_spec_choices(self):
        # populate qos spec list box
        qos_specs = self.initial["qos_specs"]
        current_qos_spec = self.initial["cur_qos_spec_id"]
        # Offer every spec except the one currently associated.
        qos_spec_list = [(qos_spec.id, qos_spec.name)
                         for qos_spec in qos_specs
                         if qos_spec.id != current_qos_spec]
        if current_qos_spec:
            # used to remove the current spec
            qos_spec_list.insert(0, ("-1", _("None (removes spec)")))
        if qos_spec_list:
            qos_spec_list.insert(0, ("", _("Select a new QoS spec")))
        else:
            qos_spec_list.insert(0, ("", _("No new QoS spec available")))
        return qos_spec_list

    def handle(self, request, data):
        vol_type_id = self.initial['type_id']
        new_qos_spec_id = data['qos_spec_choice']
        # Update QOS Spec association information
        try:
            # NOTE - volume types can only be associated with
            # ONE QOS Spec at a time
            # first we need to un-associate the current QOS Spec, if it exists
            cur_qos_spec_id = self.initial['cur_qos_spec_id']
            if cur_qos_spec_id:
                qos_spec = cinder.qos_spec_get(request,
                                               cur_qos_spec_id)
                cinder.qos_spec_disassociate(request,
                                             qos_spec,
                                             vol_type_id)
            # now associate with new QOS Spec, if user wants one associated
            if new_qos_spec_id != '-1':
                qos_spec = cinder.qos_spec_get(request,
                                               new_qos_spec_id)
                cinder.qos_spec_associate(request,
                                          qos_spec,
                                          vol_type_id)
            messages.success(request,
                             _('Successfully updated QoS Spec association.'))
            return True
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(request,
                              _('Error updating QoS Spec association.'),
                              redirect=redirect)
class EditQosSpecConsumer(forms.SelfHandlingForm):
    """Change which service consumes a QoS spec."""
    current_consumer = forms.CharField(label=_("Current consumer"),
                                       widget=forms.TextInput(
                                           attrs={'readonly': 'readonly'}),
                                       required=False)
    consumer_choice = forms.ThemableChoiceField(
        label=_("New QoS Spec Consumer"),
        choices=cinder.CONSUMER_CHOICES,
        help_text=_("Choose consumer for this QoS Spec."))

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        consumer_field = self.fields['consumer_choice']
        qos_spec = self.initial["qos_spec"]
        self.fields['current_consumer'].initial = qos_spec.consumer
        # Offer every consumer option except the one already in effect.
        choices = [choice for choice in cinder.CONSUMER_CHOICES
                   if choice[0] != qos_spec.consumer]
        choices.insert(0, ("", _("Select a new consumer")))
        consumer_field.choices = choices

    def handle(self, request, data):
        qos_spec_id = self.initial['qos_spec_id']
        new_consumer = data['consumer_choice']
        # Update QOS Spec consumer information
        try:
            cinder.qos_spec_set_keys(request,
                                     qos_spec_id,
                                     {'consumer': new_consumer})
            messages.success(request,
                             _('Successfully modified QoS Spec consumer.'))
            return True
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(request, _('Error editing QoS Spec consumer.'),
                              redirect=redirect)
class EditVolumeType(forms.SelfHandlingForm):
    """Admin form for renaming/updating an existing cinder volume type."""
    name = forms.CharField(max_length=255,
                           label=_("Name"))
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    is_public = forms.BooleanField(label=_("Public"), required=False,
                                   help_text=_(
                                       "To make volume type private, uncheck "
                                       "this field."))

    def clean_name(self):
        # Reject names that consist only of whitespace; Django's
        # ``required`` check already rejects the truly empty string.
        cleaned_name = self.cleaned_data['name']
        if cleaned_name.isspace():
            msg = _('New name cannot be empty.')
            self._errors['name'] = self.error_class([msg])
        return cleaned_name

    def handle(self, request, data):
        volume_type_id = self.initial['id']
        try:
            cinder.volume_type_update(request,
                                      volume_type_id,
                                      data['name'],
                                      data['description'],
                                      data['is_public'])
            message = _('Successfully updated volume type.')
            messages.success(request, message)
            return True
        except Exception as ex:
            redirect = reverse("horizon:admin:volume_types:index")
            # BUG FIX: not every exception carries a ``code`` attribute, so a
            # bare ``ex.code`` could raise AttributeError inside this handler.
            # Use getattr, as the other forms in this module already do.
            if getattr(ex, 'code', None) == 409:
                error_message = _('New name conflicts with another '
                                  'volume type.')
            else:
                error_message = _('Unable to update volume type.')
            exceptions.handle(request, error_message,
                              redirect=redirect)
class EditTypeAccessForm(forms.SelfHandlingForm):
    """Manage per-project access to a private volume type.

    The ``member`` checkbox field lists all projects; checked boxes are
    granted access, unchecked boxes that previously had access are revoked.
    """

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        err_msg = _('Unable to retrieve volume type access list.')
        self.fields["member"] = forms.MultipleChoiceField(
            required=False,
            widget=forms.ThemableCheckboxSelectMultiple())
        # Get list of available projects.
        # BUG FIX: pre-initialize so a keystone failure (which
        # exceptions.handle may swallow) cannot leave ``all_projects``
        # undefined and raise NameError below.
        all_projects = []
        try:
            all_projects, has_more = keystone.tenant_list(request)
        except Exception:
            exceptions.handle(request, err_msg)
        projects_list = [(project.id, project.name)
                         for project in all_projects]
        self.fields["member"].choices = projects_list
        volume_type_id = self.initial.get('volume_type_id')
        volume_type_access = []
        try:
            if volume_type_id:
                volume_type = cinder.volume_type_get(request,
                                                     volume_type_id)
                # Public volume types have no per-project access list.
                if not volume_type.is_public:
                    volume_type_access = [
                        project.project_id for project in
                        cinder.volume_type_access_list(request,
                                                       volume_type_id)]
        except Exception:
            exceptions.handle(request, err_msg)
        self.fields["member"].initial = volume_type_access

    def handle(self, request, data):
        type_id = self.initial['volume_type_id']
        current_projects = self.fields["member"].initial
        # BUG FIX: work on a copy. The original code aliased the field's
        # ``initial`` list and mutated it with ``remove()`` below, corrupting
        # the form state and the membership checks against it.
        removed_projects = list(current_projects)
        for p in data['member']:
            if p not in current_projects:
                # Newly added project access
                try:
                    cinder.volume_type_add_project_access(request, type_id, p)
                except Exception:
                    exceptions.handle(request,
                                      _('Failed to add project %(project)s to '
                                        'volume type access.') %
                                      {'project': p})
            else:
                # Still selected, so this project keeps its access.
                removed_projects.remove(p)
        # Whatever is left was deselected: revoke its access.
        for p in removed_projects:
            try:
                cinder.volume_type_remove_project_access(request, type_id, p)
            except Exception:
                exceptions.handle(request, _('Failed to remove project '
                                             '%(project)s from volume type '
                                             'access.') % {'project': p})
        messages.success(request,
                         _('Modified volume type access: %s') % type_id)
        return True
| |
import ResModel_pyramid
import tensorflow as tf
import numpy as np
import re
from yolo.net.net import Net
class YoloTinyNet(Net):
    """Tiny-YOLO object detection network.

    NOTE(review): written against a pre-1.0 TensorFlow API (``tf.pack``,
    axis-first ``tf.concat``, ``keep_dims``) -- confirm the installed TF
    version supports these calls before running.
    """
    def __init__(self, common_params, net_params, test=False):
        """
        Args:
          common_params: a params dict (image_size, num_classes, batch_size)
          net_params: a params dict (cell_size, boxes_per_cell, weight_decay,
            and the four loss scales when training)
          test: when True, the loss-scale parameters are not read.
        """
        super(YoloTinyNet, self).__init__(common_params, net_params)
        #process params
        self.image_size = int(common_params['image_size'])
        self.num_classes = int(common_params['num_classes'])
        self.cell_size = int(net_params['cell_size'])
        self.boxes_per_cell = int(net_params['boxes_per_cell'])
        self.batch_size = int(common_params['batch_size'])
        self.weight_decay = float(net_params['weight_decay'])
        if not test:
            # Loss weighting factors (training only).
            self.object_scale = float(net_params['object_scale'])
            self.noobject_scale = float(net_params['noobject_scale'])
            self.class_scale = float(net_params['class_scale'])
            self.coord_scale = float(net_params['coord_scale'])

    def inference(self, images):
        """Build the yolo model
        Args:
          images:  4-D tensor [batch_size, image_height, image_width, channels]
        Returns:
          predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
        """
        #ChangeStart
        # Backbone: alternating 3x3 convolutions and 2x2/stride-2 max-pools,
        # growing channels 16 -> 1024 while shrinking the feature map.
        conv_num = 1
        temp_conv = self.conv2d('conv' + str(conv_num), images, [3, 3, 3, 16], stride=1)
        conv_num += 1
        temp_pool = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 16, 32], stride=1)
        conv_num += 1
        temp_pool = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 32, 64], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 64, 128], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
        conv_num += 1
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
        conv_num += 1
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
        conv_num += 1
        # temp_conv = ResModel_pyramid.resnet(images,20)
        #ChangeENd
        # NHWC -> NCHW before flattening for the fully-connected layers.
        temp_conv = tf.transpose(temp_conv, (0, 3, 1, 2))
        #Fully connected layer
        '''
        print 'delete Fully'
        local2 = self.local('local2', temp_conv,self.cell_size * self.cell_size * 1024, 4096)
        '''
        local1 = self.local('local1', temp_conv, self.cell_size * self.cell_size * 1024, 256)
        local2 = self.local('local2', local1, 256, 4096)
        local3 = self.local('local3', local2, 4096, self.cell_size * self.cell_size * (self.num_classes + self.boxes_per_cell * 5), leaky=False, pretrain=False, train=True)
        # Split the flat prediction into class probs / confidences / boxes.
        n1 = self.cell_size * self.cell_size * self.num_classes
        n2 = n1 + self.cell_size * self.cell_size * self.boxes_per_cell
        class_probs = tf.reshape(local3[:, 0:n1], (-1, self.cell_size, self.cell_size, self.num_classes))
        scales = tf.reshape(local3[:, n1:n2], (-1, self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = tf.reshape(local3[:, n2:], (-1, self.cell_size, self.cell_size, self.boxes_per_cell * 4))
        # Legacy tf.concat signature: axis comes first.
        local3 = tf.concat(3,[class_probs, scales, boxes])
        predicts = local3
        return predicts

    def iou(self, boxes1, boxes2):
        """calculate ious
        Args:
          boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4]  ====> (x_center, y_center, w, h)
          boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
        Return:
          iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        """
        # Convert (cx, cy, w, h) to corner form (x1, y1, x2, y2).
        boxes1 = tf.pack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                          boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
        boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
        boxes2 = tf.pack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                          boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])
        #calculate the left up point
        lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
        rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])
        #intersection
        intersection = rd - lu
        inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]
        # Mask out pairs with no overlap (negative width or height).
        mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)
        inter_square = mask * inter_square
        #calculate the boxs1 square and boxs2 square
        square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
        square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])
        # Epsilon guards against division by zero for degenerate boxes.
        return inter_square/(square1 + square2 - inter_square + 1e-6)

    def cond1(self, num, object_num, loss, predict, label, nilboy):
        """Loop condition for tf.while_loop:
        if num < object_num
        """
        return num < object_num

    def body1(self, num, object_num, loss, predict, labels, nilboy):
        """Loop body for tf.while_loop:
        calculate loss
        Args:
          predict: 3-D tensor [cell_size, cell_size, 5 * boxes_per_cell]
          labels : [max_objects, 5]  (x_center, y_center, w, h, class)
        """
        label = labels[num:num+1, :]
        label = tf.reshape(label, [-1])
        #calculate objects  tensor [CELL_SIZE, CELL_SIZE]
        # Cell-grid extent of the ground-truth box.
        min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)
        max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)
        min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)
        max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)
        min_x = tf.floor(min_x)
        min_y = tf.floor(min_y)
        max_x = tf.ceil(max_x)
        max_y = tf.ceil(max_y)
        # Ones over the covered cells, zero-padded to the full grid.
        temp = tf.cast(tf.pack([max_y - min_y, max_x - min_x]), dtype=tf.int32)
        objects = tf.ones(temp, tf.float32)
        temp = tf.cast(tf.pack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)
        temp = tf.reshape(temp, (2, 2))
        objects = tf.pad(objects, temp, "CONSTANT")
        #calculate objects  tensor [CELL_SIZE, CELL_SIZE]
        #calculate responsible tensor [CELL_SIZE, CELL_SIZE]
        # The single cell containing the box center is "responsible".
        center_x = label[0] / (self.image_size / self.cell_size)
        center_x = tf.floor(center_x)
        center_y = label[1] / (self.image_size / self.cell_size)
        center_y = tf.floor(center_y)
        response = tf.ones([1, 1], tf.float32)
        temp = tf.cast(tf.pack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size -center_x - 1]), tf.int32)
        temp = tf.reshape(temp, (2, 2))
        response = tf.pad(response, temp, "CONSTANT")
        #objects = response
        #calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]
        predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])
        # Predictions are cell-relative; scale back to pixel coordinates.
        predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]
        base_boxes = np.zeros([self.cell_size, self.cell_size, 4])
        for y in range(self.cell_size):
            for x in range(self.cell_size):
                #nilboy
                base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]
        base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])
        predict_boxes = base_boxes + predict_boxes
        iou_predict_truth = self.iou(predict_boxes, label[0:4])
        #calculate C [cell_size, cell_size, boxes_per_cell]
        C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])
        #calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))
        # Keep only the best-IOU box in the responsible cell (legacy
        # ``keep_dims`` kwarg).
        max_I = tf.reduce_max(I, 2, keep_dims=True)
        I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))
        #calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        no_I = tf.ones_like(I, dtype=tf.float32) - I
        p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]
        #calculate truth x,y,sqrt_w,sqrt_h 0-D
        x = label[0]
        y = label[1]
        sqrt_w = tf.sqrt(tf.abs(label[2]))
        sqrt_h = tf.sqrt(tf.abs(label[3]))
        #sqrt_w = tf.abs(label[2])
        #sqrt_h = tf.abs(label[3])
        #calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        p_x = predict_boxes[:, :, :, 0]
        p_y = predict_boxes[:, :, :, 1]
        #p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)
        #p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)
        #p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))
        #p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))
        #p_sqrt_w = predict_boxes[:, :, :, 2]
        #p_sqrt_h = predict_boxes[:, :, :, 3]
        # Clamp predicted w/h to [0, image_size] before the sqrt.
        p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))
        p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))
        #calculate truth p 1-D tensor [NUM_CLASSES]
        P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)
        #calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]
        p_P = predict[:, :, 0:self.num_classes]
        #class_loss
        class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
        #class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
        #object_loss
        object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale
        #object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale
        #noobject_loss
        #noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale
        noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale
        #coord_loss
        coord_loss = (tf.nn.l2_loss(I * (p_x - x)/(self.image_size/self.cell_size)) +
                      tf.nn.l2_loss(I * (p_y - y)/(self.image_size/self.cell_size)) +
                      tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w))/ self.image_size +
                      tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))/self.image_size) * self.coord_scale
        nilboy = I
        # Accumulate the four loss components and advance to the next object.
        return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy

    def loss(self, predicts, labels, objects_num):
        """Add Loss to all the trainable variables
        Args:
          predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
          ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
          labels  : 3-D tensor of [batch_size, max_objects, 5]
          objects_num: 1-D tensor [batch_size]
        """
        class_loss = tf.constant(0, tf.float32)
        object_loss = tf.constant(0, tf.float32)
        noobject_loss = tf.constant(0, tf.float32)
        coord_loss = tf.constant(0, tf.float32)
        loss = [0, 0, 0, 0]
        # Per-image loop; each image loops over its objects via tf.while_loop.
        for i in range(self.batch_size):
            predict = predicts[i, :, :, :]
            label = labels[i, :, :]
            object_num = objects_num[i]
            nilboy = tf.ones([7,7,2])
            tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
            for j in range(4):
                loss[j] = loss[j] + tuple_results[2][j]
            nilboy = tuple_results[5]
        tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)
        # Summaries of each batch-averaged loss component.
        tf.summary.scalar('class_loss', loss[0]/self.batch_size)
        tf.summary.scalar('object_loss', loss[1]/self.batch_size)
        tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
        tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
        tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )
        return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
| |
import enum
import errno
import hashlib
import json
import logging
import os
import re
import sys
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from itertools import count
from pathlib import Path
from subprocess import STDOUT, CalledProcessError, check_output, check_call
import tarfile
from tarfile import TarFile, TarInfo
from tempfile import mkdtemp, mkstemp
import typing
import shutil
import io
from io import BytesIO
from zipfile import ZipFile, BadZipfile
from collections import OrderedDict, namedtuple
from operator import itemgetter
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files import File
from django.core.validators import RegexValidator, MinValueValidator
from django.db import models, transaction
from django.db.models.functions import Now
from django.dispatch import receiver
from django.template.defaultfilters import filesizeformat
from django.urls import reverse
from django.utils import timezone
from constants import maxlengths
from file_access_utils import compute_md5, use_field_file
from metadata.models import AccessControl, empty_removal_plan, remove_helper
from stopwatch.models import Stopwatch
import container.deffile as deffile
logger = logging.getLogger(__name__)

# Executable used to inspect/run container images.
SINGULARITY_COMMAND = 'singularity'
# MANAGE_PY = "manage.py"
# MANAGE_PY_FULLPATH = os.path.join(settings.KIVE_HOME, MANAGE_PY)
# Absolute path to manage.py, resolved relative to this module's location.
MANAGE_PY_FULLPATH = os.path.abspath(os.path.join(__file__, '../../manage.py'))

# Retry policy for slurm commands (see multi_check_output below).
NUM_RETRY = settings.SLURM_COMMAND_RETRY_NUM
SLEEP_SECS = settings.SLURM_COMMAND_RETRY_SLEEP_SECS
def multi_check_output(cmd_lst, stderr=None, env=None, num_retry=NUM_RETRY):
    """Run ``check_output`` with retries, returning its (unicode) output.

    Slurm commands can time out under heavy load, so command failures
    (``CalledProcessError``) are retried up to ``num_retry`` times with a
    ``SLEEP_SECS`` pause between attempts.  Calls to other commands should
    use ``check_output`` directly.

    ``universal_newlines=True`` guarantees a str result: under Python 3,
    ``check_output`` would otherwise return bytes.

    NOTE: this routine was taken from the now defunct slurmlib module.
    """
    for attempt in count(1):
        try:
            return check_output(cmd_lst,
                                stderr=stderr,
                                env=env,
                                universal_newlines=True)
        except OSError as err:
            # The executable could not run at all (e.g. not installed):
            # annotate the error with the command and let it propagate.
            err.strerror += ': ' + ' '.join(cmd_lst)
            raise
        except CalledProcessError as err:
            # The command ran but returned an error -- assume a slurm
            # timeout and retry until the attempt budget is spent.
            logger.warning("timeout #%d/%d on command %s (retcode %s)",
                           attempt,
                           num_retry,
                           cmd_lst[0],
                           err.returncode)
            if attempt >= num_retry:
                raise
            time.sleep(SLEEP_SECS)
class ContainerFamily(AccessControl):
    """A named group of related containers (e.g. revisions of one image)."""
    name = models.CharField(max_length=maxlengths.MAX_NAME_LENGTH)
    description = models.CharField(max_length=maxlengths.MAX_DESCRIPTION_LENGTH,
                                   blank=True)
    git = models.CharField(
        'Git URL',
        help_text='URL of Git repository that containers were built from',
        max_length=2000,
        blank=True)
    containers = None  # Filled in later from child table.

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # URL of the edit page for this family.
        return reverse('container_family_update', kwargs=dict(pk=self.pk))

    @transaction.atomic
    def build_removal_plan(self, removal_accumulator=None):
        """ Make a manifest of objects to remove when removing this. """
        removal_plan = removal_accumulator or empty_removal_plan()
        assert self not in removal_plan["ContainerFamilies"]
        removal_plan["ContainerFamilies"].add(self)
        # Removing a family removes every container that belongs to it.
        for container in self.containers.all():
            container.build_removal_plan(removal_plan)
        return removal_plan

    @transaction.atomic
    def remove(self):
        """Delete this family and everything that depends on it."""
        removal_plan = self.build_removal_plan()
        remove_helper(removal_plan)
class ContainerNotChild(Exception):
    """Presumably raised when a container expected to be a child (archive)
    container is not one -- name-based; confirm at the raise sites."""
    pass
class ChildNotConfigured(Exception):
    """Presumably raised when a child container lacks required
    configuration -- name-based; confirm at the raise sites."""
    pass
class PipelineCompletionStatus:
    """Summarize whether a pipeline description (a JSON-style dict with
    "inputs", "steps" and "outputs" lists) is complete.

    A pipeline is complete when it has at least one input, step and output,
    every step input has a source, and every pipeline output has a source
    step.
    """
    def __init__(self, pipeline):
        # Structural requirements: at least one of each section.
        self.has_inputs = False
        self.has_steps = False
        self.has_outputs = False
        # (step_num, dataset_name) pairs for step inputs with no source.
        self.inputs_not_connected = []
        # Dataset names of pipeline outputs with no producing step.
        self.dangling_outputs = []
        self.assess_pipeline_completion(pipeline)

    def add_unfed_input(self, step_num, dataset_name):
        """Record a step input that nothing feeds (steps numbered from 1)."""
        self.inputs_not_connected.append((step_num, dataset_name))

    def add_dangling_output(self, dataset_name):
        """Record a pipeline output with no source step."""
        self.dangling_outputs.append(dataset_name)

    def is_complete(self):
        """True when all sections are present and every connection is made."""
        return (self.has_inputs
                and self.has_steps
                and self.has_outputs
                and len(self.inputs_not_connected) == 0
                and len(self.dangling_outputs) == 0)

    def assess_pipeline_completion(self, pipeline):
        """
        Check that the specified pipeline is complete, recording anything
        that must still be satisfied on this object's flags and lists.
        :param pipeline: dict with "inputs", "steps" and "outputs" lists;
            step/output entries carry "dataset_name" and "source_step" keys.
        :return:
        """
        self.has_inputs = len(pipeline["inputs"]) > 0
        self.has_steps = len(pipeline["steps"]) > 0
        self.has_outputs = len(pipeline["outputs"]) > 0
        # (The original version also built a ``usable_inputs`` list here,
        # but it was never read, so the dead bookkeeping has been removed.)
        for i, step_dict in enumerate(pipeline["steps"], start=1):
            # Check for unfed inputs.
            for input_dict in step_dict["inputs"]:
                if input_dict["source_step"] is None:
                    self.add_unfed_input(i, input_dict["dataset_name"])
        # Check for dangling outputs.
        for output_dict in pipeline["outputs"]:
            if output_dict["source_step"] is None:
                self.add_dangling_output(output_dict["dataset_name"])
class ExistingRunsError(Exception):
    """Signals that a container cannot be modified in place because runs
    already reference it; the caller should save as a new container."""
    def __init__(self, message=None):
        default = 'Container has runs. Save changes as a new container.'
        # Only ``None`` falls back to the default; an explicit empty
        # string is passed through unchanged.
        super(ExistingRunsError, self).__init__(
            default if message is None else message)
def get_drivers(archive):
    """
    Return the list of archive members that can serve as drivers.
    :param archive: an archive as returned by open_content.
    """
    return [info
            for info in archive.infolist()
            if is_driver(archive, info)]
def is_driver(archive, info):
    """
    Return True when the archive member named by ``info`` is an admissible
    driver, i.e. its content starts with a shebang ("#!").
    :param archive:
    :param info:
    :return:
    """
    content = archive.read(info)
    return content.startswith(b"#!")
class Container(AccessControl):
UPLOAD_DIR = "Containers"
SIMG = "SIMG"
ZIP = "ZIP"
TAR = "TAR"
SUPPORTED_FILE_TYPES = (
(SIMG, "Singularity"),
(ZIP, "Zip"),
(TAR, "Tar")
)
ACCEPTED_FILE_EXTENSIONS = OrderedDict(
[
(".simg", SIMG),
(".zip", ZIP),
(".tar", TAR)
]
)
DEFAULT_APP_CONFIG = dict(memory=5000, threads=1)
EMPTY = "empty"
INCOMPLETE = "incomplete"
VALID = "valid"
accepted_extensions = list(ACCEPTED_FILE_EXTENSIONS.keys())
accepted_extension_str = ", ".join(accepted_extensions[:-1])
accepted_extension_str += ", or {}".format(accepted_extensions[-1])
DEFAULT_ERROR_MESSAGES = {
'invalid_singularity_container': "Upload a valid Singularity container file.",
'invalid_singularity_deffile': "Upload a valid Singularity container file (problem with deffile).",
'invalid_archive': "Upload a valid archive file.",
'singularity_cannot_have_parent': "Singularity containers cannot have parents",
'archive_must_have_parent': "Archive containers must have a valid Singularity container parent",
'parent_container_not_singularity': "Parent container must be a Singularity container",
'bad_extension': "File must have one of the following: {}".format(accepted_extension_str),
'archive_has_no_drivers': "Archive containers must contain at least one driver file",
'driver_not_in_archive': 'Step drivers must all be in the archive',
'inadmissible_driver': 'Step drivers must start with "#!"'
}
family = models.ForeignKey(ContainerFamily,
related_name="containers",
on_delete=models.CASCADE)
file = models.FileField(
"Container file",
upload_to=UPLOAD_DIR,
help_text="Singularity or archive container file")
file_type = models.CharField(
choices=SUPPORTED_FILE_TYPES,
default=SIMG,
max_length=20)
parent = models.ForeignKey(
"Container",
related_name="children",
null=True,
blank=True,
help_text='Singularity container that an archive container runs in',
on_delete=models.CASCADE)
tag = models.CharField('Tag',
help_text='Git tag or revision name',
max_length=128)
description = models.CharField('Description',
blank=True,
max_length=maxlengths.MAX_DESCRIPTION_LENGTH)
md5 = models.CharField(
max_length=64,
validators=[RegexValidator(
regex=re.compile("(^[0-9A-Fa-f]{32}$)|(^$)"),
message="MD5 checksum is not either 32 hex characters or blank")],
blank=True,
help_text="Validates file integrity")
created = models.DateTimeField(
auto_now_add=True,
help_text="When this was added to Kive.")
file_size = models.BigIntegerField(
blank=True,
null=True,
help_text="Size of the container file in bytes. If null, this has "
"not been computed yet.")
# Related models get set later.
methods = None
apps = None
children = None
@property
def display_name(self):
    """Human-readable identifier in the form '<family name>:<tag>'."""
    return '%s:%s' % (self.family.name, self.tag)
@property
def file_path(self):
    """Absolute filesystem path of the container file under MEDIA_ROOT."""
    media_root = settings.MEDIA_ROOT
    return os.path.join(media_root, self.file.name)
class Meta:
    # Group containers by family name, newest first within each family.
    ordering = ['family__name', '-created']
@classmethod
def validate_singularity_container(cls, file_path):
    """
    Raise ValidationError unless ``file_path`` is a valid Singularity image.

    Validation is delegated to ``singularity inspect``: a non-zero exit
    status means the file could not be inspected as a container.
    :param file_path: path of the file to inspect
    :raises ValidationError: when inspection fails
    """
    command = [SINGULARITY_COMMAND, 'inspect', file_path]
    try:
        check_output(command, stderr=STDOUT)
    except CalledProcessError as ex:
        logger.warning('Invalid container file:\n%s', ex.output)
        raise ValidationError(
            cls.DEFAULT_ERROR_MESSAGES['invalid_singularity_container'],
            code='invalid_singularity_container')
def save(self, *args, **kwargs):
    """Save the container, computing the MD5 first if it is missing."""
    # Fill in the checksum lazily so uploads without one still get an MD5
    # before the row reaches the database.
    if not self.md5:
        self.set_md5()
    super(Container, self).save(*args, **kwargs)
def clean(self):
    """
    Confirm that the file is of the correct type.

    Singularity images must have no parent and must pass
    ``singularity inspect``; archive containers must have a Singularity
    parent, be readable as a zip/tar archive, and every pipeline step
    driver must exist in the archive and start with "#!".
    :raises ValidationError: when any check fails.
    """
    if not self.file:
        raise ValidationError(
            self.DEFAULT_ERROR_MESSAGES["invalid_archive"],
            code="invalid_archive",
        )
    if self.file_type == Container.SIMG:
        if self.parent is not None:
            raise ValidationError(self.DEFAULT_ERROR_MESSAGES["singularity_cannot_have_parent"],
                                  code="singularity_cannot_have_parent")
        # Because it's potentially more efficient to validate a Singularity container before
        # this step, we check for an "already validated" flag.
        if not getattr(self, "singularity_validated", False):
            fd, file_path = mkstemp()
            try:
                # Copy the uploaded file to a real path for `singularity inspect`.
                with use_field_file(self.file), io.open(fd, mode="w+b") as f:
                    for chunk in self.file.chunks():
                        f.write(chunk)
                Container.validate_singularity_container(file_path)
            finally:
                # Always remove the temporary copy. Previously a failed
                # validation raised before os.remove() and leaked the file.
                os.remove(file_path)
    else:
        if self.parent is None:
            raise ValidationError(self.DEFAULT_ERROR_MESSAGES["archive_must_have_parent"],
                                  code="archive_must_have_parent")
        elif not self.parent.is_singularity():
            raise ValidationError(self.DEFAULT_ERROR_MESSAGES["parent_container_not_singularity"],
                                  code="parent_container_not_singularity")
        try:
            with self.open_content() as a:
                drivers = get_drivers(a)
            if len(drivers) == 0:
                raise ValidationError(self.DEFAULT_ERROR_MESSAGES["archive_has_no_drivers"],
                                      code="archive_has_no_drivers")
            # Check that all of the step drivers are admissible drivers.
            archive_content = self.get_archive_content(False)
            if archive_content is None:
                return
            with self.open_content() as archive:
                all_members = archive.infolist()
                members = [member
                           for member in all_members
                           if not member.name.startswith('kive/pipeline')]
                members_by_name = {}
                for member in members:
                    members_by_name[member.name] = member
                pipeline = archive_content["pipeline"]
                if pipeline is None:
                    return
                for step_dict in pipeline["steps"]:
                    driver = step_dict["driver"]
                    if driver not in members_by_name:
                        raise ValidationError(self.DEFAULT_ERROR_MESSAGES["driver_not_in_archive"],
                                              code="driver_not_in_archive")
                    if not is_driver(archive, members_by_name[driver]):
                        raise ValidationError(self.DEFAULT_ERROR_MESSAGES["inadmissible_driver"],
                                              code="inadmissible_driver")
        except (BadZipfile, tarfile.ReadError):
            raise ValidationError(self.DEFAULT_ERROR_MESSAGES["invalid_archive"],
                                  code="invalid_archive")
def set_md5(self):
    """
    Set this instance's md5 attribute. Note that this does not save the instance.
    This leaves self.file open and seek'd to the 0 position.
    :return:
    """
    # A container with no file keeps whatever md5 value it already had.
    if not self.file:
        return
    # use_field_file ensures the FieldFile is open while hashing.
    with use_field_file(self.file):
        self.md5 = compute_md5(self.file)
def validate_md5(self):
    """
    Recompute the file's MD5 and compare it against the stored checksum.

    :raises ValueError: if the checksum no longer matches.
    """
    with self.file:
        current_md5 = compute_md5(self.file)
    if current_md5 == self.md5:
        return
    message = "Container {} file MD5 has changed (original {}, current {})".format(
        self, self.md5, current_md5)
    raise ValueError(message)
def __str__(self):
    """Readable form: delegate to display_name ('family:tag')."""
    name = self.display_name
    return name
def __repr__(self):
    """Unambiguous developer representation keyed by primary key."""
    return 'Container(id=%s)' % (self.pk,)
def is_singularity(self):
    """True when this container is a Singularity image rather than an archive."""
    return self.SIMG == self.file_type
def extract_archive(self, extraction_path):
    """ Extract this child container to the specified extraction path.
    Raises ContainerNotChild if this is not a child container.
    :param extraction_path: where to extract the contents
    """
    if self.is_singularity():
        raise ContainerNotChild()
    with self.open_content() as archive:
        all_members = archive.infolist()
        # Skip stored pipeline revisions (kive/pipelineN.json)...
        members = [member
                   for member in all_members
                   if not member.name.startswith('kive/pipeline')]
        # ...except the last member, which (when it is a pipeline file) is
        # the newest revision and is kept so it can be renamed below.
        last_member = all_members[-1]
        if last_member.name.startswith('kive/pipeline'):
            members.append(last_member)
        else:
            last_member = None
        archive.extractall(extraction_path, members)
    if last_member is not None:
        # Rename the newest revision to the canonical kive/pipeline.json.
        old_name = os.path.join(extraction_path, last_member.name)
        new_name = os.path.join(extraction_path, 'kive', 'pipeline.json')
        os.rename(old_name, new_name)
@contextmanager
def open_content(self, mode='r'):
    """
    Context manager yielding a ZipHandler/TarHandler over this archive.

    :param mode: 'r' to read, 'a' to append
    :raises ValueError: for an unsupported mode or file type
    """
    if mode == 'r':
        file_mode = 'rb'
    elif mode == 'a':
        file_mode = 'rb+'
    else:
        raise ValueError('Unsupported mode for archive content: {!r}.'.format(mode))
    with use_field_file(self.file, file_mode):
        if self.file_type == Container.ZIP:
            archive = ZipHandler(self.file, mode)
        elif self.file_type == Container.TAR:
            archive = TarHandler(self.file, mode)
        else:
            raise ValueError(
                'Cannot open content for a {} container.'.format(
                    self.file_type))
        # Close the archive even when the caller's with-body raises;
        # previously an exception at the yield skipped archive.close().
        try:
            yield archive
        finally:
            archive.close()
def get_content(self, add_default=True):
    """Read the pipeline definitions, aka content, from an archive file (tar or zip)
    or a singularity image file.
    """
    if not self.is_singularity():
        return self.get_archive_content(add_default)
    return self.get_singularity_content()
def get_singularity_content(self):
    """Determine pipeline definitions from a singularity file.
    We need to extract and parse a deffile from the image for this to work.
    If its not a singularity file: raise a ValidationError
    If there is no deffile: do not complain (there are no apps defined)
    If the deffile cannot be parsed: raise a ValidationError
    """
    file_path = self.file_path
    try:
        # `singularity inspect -d -j` dumps the deffile as JSON.
        json_data = check_output([SINGULARITY_COMMAND, 'inspect',
                                  '-d', '-j', file_path], stderr=STDOUT)
    except CalledProcessError:
        logger.warning('Invalid container file', exc_info=True)
        raise ValidationError(self.DEFAULT_ERROR_MESSAGES['invalid_singularity_container'],
                              code='invalid_singularity_container')
    sing_data = json.loads(json_data.decode('utf-8'))
    def_file_str = sing_data['data']['attributes']['deffile']
    # if the container was not made using a deffile, this will be None.
    # In this case, return an empty applist.
    if def_file_str is None:
        appinfo_lst = []
    else:
        appinfo_lst = deffile.parse_string(def_file_str)
    return dict(applist=appinfo_lst)
def get_archive_content(self, add_default):
    """Determine the pipeline content from an archive container."""
    with self.open_content() as archive:
        # The newest pipeline revision is always the last archive member.
        last_entry = archive.infolist()[-1]
        if re.match(r'kive/pipeline\d+\.json', last_entry.name):
            pipeline_json = archive.read(last_entry)
            pipeline = json.loads(pipeline_json.decode('utf-8'))
        elif add_default:
            # No saved pipeline yet: fall back to an empty skeleton.
            pipeline = dict(default_config=self.DEFAULT_APP_CONFIG,
                            inputs=[],
                            steps=[],
                            outputs=[])
        else:
            pipeline = None
        # Pair each non-internal file with a flag saying whether it looks
        # like a driver (starts with "#!").
        file_and_driver_status = [
            (entry.name, is_driver(archive, entry))
            for entry in archive.infolist()
            if not entry.name.startswith('kive/')
        ]
    file_and_driver_status = sorted(file_and_driver_status, key=itemgetter(0))
    content = dict(files=file_and_driver_status,
                   pipeline=pipeline,
                   id=self.pk)
    return content
def write_archive_content(self, content):
    """Write the contents of an archive (i.e. non singularity) container.
    This method is typically called with a content dict taken from an ajax request.
    Singularity containers are not made this way.
    """
    # Refuse to rewrite a container that already has runs based on it.
    related_runs = ContainerRun.objects.filter(app__in=self.apps.all())
    if related_runs.exists():
        raise ExistingRunsError()
    pipeline = content['pipeline']
    pipeline_json = json.dumps(pipeline)
    with self.open_content('a') as archive:
        file_names = set(entry.name
                         for entry in archive.infolist()
                         if entry.name.startswith('kive/pipeline'))
        # Append the pipeline under the next unused revision number; old
        # revisions are kept so the last member stays the newest one.
        for i in count(1):
            file_name = 'kive/pipeline{}.json'.format(i)
            if file_name not in file_names:
                archive.write(file_name, pipeline_json)
                break
    # The file on disk changed: refresh the checksum and the app definition.
    self.set_md5()
    self.create_app_from_content(content)
def get_pipeline_state(self):
    """Classify this container's pipeline as EMPTY, VALID, or INCOMPLETE."""
    content = self.get_content(add_default=False)
    pipeline = content['pipeline'] if content is not None else None
    if pipeline is None:
        return self.EMPTY
    return self.VALID if self.pipeline_valid(pipeline) else self.INCOMPLETE
def create_app_from_content(self, content=None):
    """Create apps based on the content configuration.
    This method handles archive as well as singularity images.
    In the case of singularity images:
    if applist is None: no changes are made to current apps of a container.
    if applist is []: all apps are deleted.

    :param content: a content dict (see get_content()); read from the
        container itself when omitted.
    :return: a list of error message strings, or None when no content
        could be obtained.
    """
    content = content or self.get_content()
    error_messages = []
    if content is None:
        logger.warning("failed to obtain content from container")
        return
    if self.is_singularity():
        app_lst = content.get('applist', None)
        if not app_lst:
            error_messages.append(
                'No definition file found in singularity file.')
        else:
            default_config = self.DEFAULT_APP_CONFIG
            # Replace every existing app with the ones defined in the image.
            self.apps.all().delete()
            for app_dct in app_lst:
                appname = app_dct[deffile.AppInfo.KW_APP_NAME]
                app_errors = app_dct[deffile.AppInfo.KW_ERROR_MESSAGES]
                if app_errors:
                    # Report and skip apps whose definitions had problems.
                    summary = 'The {} app was not created: {}'.format(
                        repr(appname) if appname else 'default',
                        ', '.join(app_errors))
                    error_messages.append(summary)
                    continue
                # Fall back to the default resource settings when the
                # deffile does not specify them.
                num_threads = app_dct[deffile.AppInfo.KW_NUM_THREADS] or default_config['threads']
                memory = app_dct[deffile.AppInfo.KW_MEMORY] or default_config['memory']
                inpargs, outargs = app_dct[deffile.AppInfo.KW_IO_ARGS]
                inpargs = inpargs or "input_txt"
                outargs = outargs or "output_txt"
                help_str = app_dct[deffile.AppInfo.KW_HELP_STRING] or ""
                # attach the help string of the default app to the container's description
                if appname == "" and help_str != "":
                    self.description = (
                        help_str
                        if self.description == ""
                        else self.description + "\n" + help_str
                    )
                    self.save()
                newdb_app = self.apps.create(name=appname,
                                             description=help_str,
                                             threads=num_threads,
                                             memory=memory)
                newdb_app.write_inputs(inpargs)
                newdb_app.write_outputs(outargs)
    else:
        # archive container
        pipeline = content['pipeline']
        if self.pipeline_valid(pipeline):
            default_config = pipeline.get('default_config',
                                          self.DEFAULT_APP_CONFIG)
            # A valid archive pipeline defines exactly one (default) app.
            self.apps.all().delete()
            app = self.apps.create(memory=default_config['memory'],
                                   threads=default_config['threads'])
            # noinspection PyTypeChecker
            input_names = ' '.join(entry['dataset_name']
                                   for entry in pipeline['inputs'])
            # noinspection PyTypeChecker
            output_names = ' '.join(entry['dataset_name']
                                    for entry in pipeline['outputs'])
            app.write_inputs(input_names)
            app.write_outputs(output_names)
    return error_messages
@staticmethod
def pipeline_valid(pipeline):
    """
    True if the specified pipeline is valid; False otherwise.
    :param pipeline: a pipeline dict (see get_content())
    :return:
    """
    # noinspection PyBroadException
    try:
        return PipelineCompletionStatus(pipeline).is_complete()
    except Exception:
        # Any malformed pipeline structure simply counts as invalid.
        return False
def get_absolute_url(self):
    """URL of the update page for this container."""
    return reverse('container_update', kwargs={'pk': self.pk})
def get_absolute_path(self):
    """Absolute filesystem path of the container file.

    Delegates to the file_path property so the path is computed in a
    single place instead of duplicating os.path.join(MEDIA_ROOT, ...).
    """
    return self.file_path
@transaction.atomic
def build_removal_plan(self, removal_accumulator=None):
    """ Make a manifest of objects to remove when removing this. """
    removal_plan = removal_accumulator or empty_removal_plan()
    # Guard against visiting the same container twice while building.
    assert self not in removal_plan["Containers"]
    removal_plan["Containers"].add(self)
    # Removing a container also removes its apps and child containers.
    for app in self.apps.all():
        app.build_removal_plan(removal_plan)
    for child in self.children.all():
        child.build_removal_plan(removal_plan)
    return removal_plan
@transaction.atomic
def remove(self):
    """Delete this container along with everything that depends on it."""
    remove_helper(self.build_removal_plan())
@classmethod
def scan_file_names(cls):
    """ Yield all file names, relative to MEDIA_ROOT. """
    upload_dir = Container.UPLOAD_DIR
    scan_dir = os.path.join(settings.MEDIA_ROOT, upload_dir)
    if os.path.exists(scan_dir):
        for entry in os.listdir(scan_dir):
            yield os.path.join(upload_dir, entry)
class ZipHandler:
    """Adapter that gives zip archives a uniform read/write interface.

    Members are wrapped in MemberInfo(name, original) so callers can use
    one attribute set for both zip and tar archives.
    """
    MemberInfo = namedtuple('MemberInfo', 'name original')

    def __init__(self, fileobj=None, mode='r', archive=None):
        if archive is None:
            archive = ZipFile(fileobj, mode, allowZip64=True)
        self.archive = archive

    def close(self):
        """Close the underlying archive."""
        self.archive.close()

    def read(self, info):
        """Return the raw bytes of one member."""
        with self.archive.open(info.original) as member:
            return member.read()

    def write(self, file_name, content):
        """Store content under file_name in the archive."""
        self.archive.writestr(file_name, content)

    def extractall(self, path, members=None):
        """Extract members (or everything) below path."""
        originals = None if members is None else [m.original for m in members]
        self.archive.extractall(path, originals)

    def infolist(self):
        """List all members as MemberInfo tuples."""
        wrap = ZipHandler.MemberInfo
        return [wrap(entry.filename, entry) for entry in self.archive.infolist()]
class TarHandler(ZipHandler):
    """Adapter that gives tar archives the same interface as ZipHandler."""

    def __init__(self, fileobj=None, mode='r', archive=None):
        if archive is None:
            archive = TarFile(fileobj=fileobj, mode=mode)
        super(TarHandler, self).__init__(fileobj, mode, archive)

    def read(self, info):
        """Return the raw bytes of one member."""
        f = self.archive.extractfile(info.original)
        try:
            return f.read()
        finally:
            f.close()

    def write(self, file_name, content):
        """Store text content under file_name, UTF-8 encoded.

        :param content: a str; it is encoded before being stored.
        """
        payload = content.encode('utf8')
        tarinfo = TarInfo(file_name)
        # Size must be the encoded byte length. Using len(content) (the
        # character count) truncated members containing non-ASCII text.
        tarinfo.size = len(payload)
        self.archive.addfile(tarinfo, BytesIO(payload))

    def infolist(self):
        """List all members as MemberInfo tuples."""
        return [ZipHandler.MemberInfo(info.name, info)
                for info in self.archive.getmembers()]
class ContainerApp(models.Model):
    """One runnable entry point of a Container, with its resource settings."""
    container = models.ForeignKey(Container,
                                  related_name="apps",
                                  on_delete=models.CASCADE)
    name = models.CharField(max_length=maxlengths.MAX_NAME_LENGTH,
                            help_text="Leave blank for default",
                            blank=True)
    description = models.CharField('Description',
                                   blank=True,
                                   max_length=maxlengths.MAX_DESCRIPTION_LENGTH)
    threads = models.PositiveIntegerField(
        "Number of threads",
        help_text="How many threads does this app use during execution?",
        default=1,
        validators=[MinValueValidator(1)])
    memory = models.PositiveIntegerField(
        "Memory required (MB)",
        help_text="Megabytes of memory Slurm will allocate for this app "
                  "(0 allocates all memory)",
        default=6000)
    arguments = None  # Filled in later from child table.
    runs = None  # Filled in later from child table.
    objects = None  # Filled in later by Django.

    class Meta:
        ordering = ('-container_id', 'name',)

    @property
    def display_name(self):
        """Container display name, plus ' / <app name>' for non-default apps."""
        name = self.container.display_name
        if self.name:
            # noinspection PyTypeChecker
            name += ' / ' + self.name
        return name

    def __str__(self):
        return self.display_name

    def __repr__(self):
        return 'ContainerApp(id={})'.format(self.pk)

    @property
    def inputs(self):
        # Space-separated text form of all input arguments.
        return self._format_arguments(ContainerArgument.INPUT)

    @property
    def outputs(self):
        # Space-separated text form of all output arguments.
        return self._format_arguments(ContainerArgument.OUTPUT)

    def _format_arguments(self, argument_type):
        """Serialize this app's arguments of one type into a single string.

        Optional (--name) arguments come first, then a '--' separator when
        any optional input allows multiple values, then positional names.
        """
        arguments = self.arguments.filter(type=argument_type)
        optionals = [argument
                     for argument in arguments
                     if argument.position is None]
        positionals = [argument
                       for argument in arguments
                       if argument.position is not None]
        terms = [argument.formatted for argument in optionals]
        if (argument_type == ContainerArgument.INPUT and
                any(argument.allow_multiple for argument in optionals)):
            terms.append('--')
        terms.extend(argument.formatted for argument in positionals)
        return ' '.join(terms)

    def write_inputs(self, formatted):
        """Replace all input arguments from a formatted string."""
        self._write_arguments(ContainerArgument.INPUT, formatted)

    def write_outputs(self, formatted):
        """Replace all output arguments from a formatted string."""
        self._write_arguments(ContainerArgument.OUTPUT, formatted)

    def _write_arguments(self, argument_type, formatted):
        """Parse a formatted string and recreate the arguments of one type.

        Each space-separated term is 'name' (positional) or '--name'
        (optional), with an optional trailing '*' (multi-value input) or
        '/' (directory output). A bare '--' separator is skipped.
        :raises ValueError: for a malformed term or a wrong multi marker.
        """
        self.arguments.filter(type=argument_type).delete()
        expected_multiples = {ContainerArgument.INPUT: '*',
                              ContainerArgument.OUTPUT: '/'}
        for position, term in enumerate(formatted.split(), 1):
            if term == '--':
                continue
            match = re.match(r'(--)?(\w+)([*/])?$', term)
            if match is None:
                raise ValueError('Invalid argument name: {}'.format(term))
            if match.group(1):
                # Leading '--' marks an optional argument: no position.
                position = None
            if not match.group(3):
                allow_multiple = False
            elif match.group(3) == expected_multiples[argument_type]:
                allow_multiple = True
            else:
                raise ValueError('Invalid argument name: {}'.format(term))
            self.arguments.create(name=match.group(2),
                                  position=position,
                                  allow_multiple=allow_multiple,
                                  type=argument_type)

    def can_be_accessed(self, user):
        # Access is governed entirely by the owning container.
        return self.container.can_be_accessed(user)

    def get_absolute_url(self):
        return reverse('container_app_update', kwargs=dict(pk=self.pk))

    @transaction.atomic
    def build_removal_plan(self, removal_accumulator=None):
        """ Make a manifest of objects to remove when removing this. """
        removal_plan = removal_accumulator or empty_removal_plan()
        assert self not in removal_plan["ContainerApps"]
        removal_plan["ContainerApps"].add(self)
        for run in self.runs.all():
            if run not in removal_plan['ContainerRuns']:
                run.build_removal_plan(removal_plan)
        return removal_plan

    @transaction.atomic
    def remove(self):
        removal_plan = self.build_removal_plan()
        remove_helper(removal_plan)
@enum.unique
class ContainerArgumentType(enum.Enum):
    """Classification of an app argument (see ContainerArgument.argtype)."""
    FIXED_INPUT = enum.auto()  # positional input
    FIXED_OUTPUT = enum.auto()  # positional output
    OPTIONAL_INPUT = enum.auto()  # --name input, single value
    OPTIONAL_MULTIPLE_INPUT = enum.auto()  # --name input, multiple values
    FIXED_DIRECTORY_OUTPUT = enum.auto()  # positional output collecting a directory
class ContainerArgument(models.Model):
    """A single input or output argument of a ContainerApp."""
    INPUT = 'I'
    OUTPUT = 'O'
    TYPES = ((INPUT, 'Input'),
             (OUTPUT, 'Output'))
    # Argument types that are passed as --name options.
    KEYWORD_ARG_TYPES = set([
        ContainerArgumentType.OPTIONAL_INPUT,
        ContainerArgumentType.OPTIONAL_MULTIPLE_INPUT,
    ])
    # Argument types that are passed by position.
    FIXED_ARG_TYPES = set([
        ContainerArgumentType.FIXED_INPUT,
        ContainerArgumentType.FIXED_OUTPUT,
        ContainerArgumentType.FIXED_DIRECTORY_OUTPUT,
    ])
    app = models.ForeignKey(ContainerApp,
                            related_name="arguments",
                            on_delete=models.CASCADE)
    name = models.CharField(max_length=maxlengths.MAX_NAME_LENGTH)
    position = models.IntegerField(
        null=True,
        blank=True,
        help_text="Position in the arguments (gaps and duplicates are allowed). "
                  "Leave position blank to pass as an option with --name.")
    type = models.CharField(max_length=1, choices=TYPES)
    allow_multiple = models.BooleanField(
        default=False,
        help_text="True for optional inputs that accept multiple datasets and "
                  "outputs that just collect all files written to a directory")
    objects = None  # Filled in later by Django.

    class Meta:
        ordering = ('app_id', 'type', 'position', 'name')

    def __repr__(self):
        return 'ContainerArgument(name={!r})'.format(self.name)

    def can_be_accessed(self, user):
        # Access is governed by the container that owns the app.
        return self.app.container.can_be_accessed(user)

    @property
    def formatted(self):
        """Text form: 'name', '--name', plus '*' or '/' for multi-value."""
        text = self.name
        if self.position is None:
            # noinspection PyTypeChecker
            text = '--' + text
        if self.allow_multiple:
            text += '*' if self.type == ContainerArgument.INPUT else '/'
        return text

    @property
    def argtype(self) -> typing.Optional[ContainerArgumentType]:
        "Classify this argument, or return None if it's unclassifiable."
        if self.position is not None:
            if self.allow_multiple:
                if self.type == self.OUTPUT:
                    return ContainerArgumentType.FIXED_DIRECTORY_OUTPUT
            else:
                if self.type == self.INPUT:
                    return ContainerArgumentType.FIXED_INPUT
                elif self.type == self.OUTPUT:
                    return ContainerArgumentType.FIXED_OUTPUT
        else:
            if self.type == self.INPUT:
                if self.allow_multiple:
                    return ContainerArgumentType.OPTIONAL_MULTIPLE_INPUT
                else:
                    return ContainerArgumentType.OPTIONAL_INPUT
        # If the above fell through, the model is in a corrupted or partially
        # initialized state, and cannot be assigned a type.

    def clean(self):
        # An argument whose flag combination maps to no known type is invalid.
        if self.argtype is None:
            raise ValidationError("Could not assign a ContainerArgumentType to this argument")
@receiver(models.signals.post_delete, sender=Container)
def delete_container_file(instance, **_kwargs):
    """Remove the underlying file from disk when a Container row is deleted."""
    if instance.file:
        try:
            os.remove(instance.file.path)
        except OSError as ex:
            # An already-missing file is fine; anything else is a real error.
            if ex.errno != errno.ENOENT:
                raise
class Batch(AccessControl):
    """A named group of container runs submitted together."""
    name = models.CharField(
        "Batch Name",
        max_length=maxlengths.MAX_NAME_LENGTH,
        help_text='Name of this batch of container runs',
        blank=True)
    description = models.TextField(
        max_length=maxlengths.MAX_DESCRIPTION_LENGTH,
        blank=True)
    runs = None  # Filled in later by Django.

    class Meta:
        # Newest batches first.
        ordering = ('-id',)

    def __str__(self):
        return self.name or 'Batch {}'.format(self.pk)

    @property
    def absolute_url(self):
        return reverse('batch_update', kwargs=dict(pk=self.pk))

    @transaction.atomic
    def build_removal_plan(self, removal_accumulator=None):
        """ Make a manifest of objects to remove when removing this. """
        removal_plan = removal_accumulator or empty_removal_plan()
        assert self not in removal_plan["Batches"]
        removal_plan["Batches"].add(self)
        # Removing a batch removes every run inside it.
        for run in self.runs.all():
            run.build_removal_plan(removal_plan)
        return removal_plan

    @transaction.atomic
    def remove(self):
        removal_plan = self.build_removal_plan()
        remove_helper(removal_plan)
class SandboxMissingException(Exception):
    """Raised when a run's sandbox is unavailable (see callers for exact conditions)."""
class ContainerRun(Stopwatch, AccessControl):
    """A single execution of a ContainerApp, scheduled through Slurm."""
    # Life-cycle states.
    NEW = 'N'
    LOADING = 'L'
    RUNNING = 'R'
    SAVING = 'S'
    COMPLETE = 'C'
    FAILED = 'F'
    CANCELLED = 'X'
    STATES = ((NEW, 'New'),
              (LOADING, 'Loading'),
              (RUNNING, 'Running'),
              (SAVING, 'Saving'),
              (COMPLETE, 'Complete'),
              (FAILED, 'Failed'),
              (CANCELLED, 'Cancelled'))
    # States in which the run is still in progress.
    ACTIVE_STATES = [
        NEW,
        LOADING,
        RUNNING,
        SAVING
    ]
    SANDBOX_ROOT = os.path.join(settings.MEDIA_ROOT, 'ContainerRuns')
    app = models.ForeignKey(ContainerApp,
                            related_name="runs",
                            on_delete=models.CASCADE)
    batch = models.ForeignKey(Batch,
                              related_name="runs",
                              blank=True,
                              null=True,
                              on_delete=models.CASCADE)
    name = models.CharField(max_length=maxlengths.MAX_NAME_LENGTH, blank=True)
    description = models.CharField(max_length=maxlengths.MAX_DESCRIPTION_LENGTH,
                                   blank=True)
    state = models.CharField(max_length=1, choices=STATES, default=NEW)
    submit_time = models.DateTimeField(
        auto_now_add=True,
        help_text='When this job was put in the queue.')
    priority = models.IntegerField(default=0,
                                   help_text='Chooses which slurm queue to use.')
    sandbox_path = models.CharField(
        max_length=maxlengths.MAX_EXTERNAL_PATH_LENGTH,
        blank=True)  # type: str
    slurm_job_id = models.IntegerField(blank=True, null=True)
    return_code = models.IntegerField(blank=True, null=True)
    stopped_by = models.ForeignKey(User,
                                   help_text="User that stopped this run",
                                   null=True,
                                   blank=True,
                                   related_name="container_runs_stopped",
                                   on_delete=models.CASCADE)
    is_redacted = models.BooleanField(
        default=False,
        help_text="True if the outputs or logs were redacted for sensitive data")
    datasets = None  # Filled in later by Django.
    logs = None  # Filled in later by Django.
    sandbox_size = models.BigIntegerField(
        blank=True,
        null=True,
        help_text="Size of the sandbox in bytes. If null, this has not been computed yet."
    )
    original_run = models.ForeignKey(
        'ContainerRun',
        help_text="This run is a rerun of the original.",
        null=True,
        blank=True,
        related_name="reruns",
        on_delete=models.CASCADE)
    md5 = models.CharField(
        max_length=64,
        validators=[RegexValidator(
            regex=re.compile("(^[0-9A-Fa-f]{32}$)|(^$)"),
            message="MD5 checksum is not either 32 hex characters or blank")],
        blank=True,
        help_text="Summary of MD5's for inputs, outputs, and containers.")
    is_warned = models.BooleanField(
        default=False,
        help_text="True if a warning was logged because the Slurm job failed.")

    class Meta:
        # Newest runs first.
        ordering = ('-submit_time',)

    def __str__(self):
        return self.name or 'Container run {}'.format(self.pk)

    def __repr__(self):
        return 'ContainerRun(id={!r})'.format(self.pk)

    def get_absolute_url(self):
        return reverse('container_run_detail', kwargs=dict(pk=self.pk))

    def get_rerun_name(self):
        """ Create a name to use when rerunning this run.
        Appends a (rerun) suffix, if needed.
        """
        rerun_suffix = '(rerun)'
        name = self.name.rstrip()
        if name.endswith(rerun_suffix):
            return name
        if name:
            name += ' '
        name += rerun_suffix
        return name

    @property
    def has_changed(self):
        """None unless this is a completed rerun; then True when the MD5
        summary differs from the original run's."""
        if self.state != self.COMPLETE or self.original_run is None:
            return
        return self.md5 != self.original_run.md5

    def get_access_limits(self, access_limits=None):
        """Collect the objects whose permissions limit access to this run:
        the container plus every input dataset."""
        if access_limits is None:
            access_limits = []
        access_limits.append(self.app.container)
        input_entries = self.datasets.filter(
            argument__type=ContainerArgument.INPUT).prefetch_related('dataset')
        access_limits.extend(entry.dataset for entry in input_entries)
        return access_limits

    def save(self,
             force_insert=False,
             force_update=False,
             using=None,
             update_fields=None,
             schedule=True):
        """Save the run; a brand-new run (no sandbox, not a rerun) is
        scheduled with Slurm after the transaction commits, unless
        schedule=False."""
        super(ContainerRun, self).save(force_insert,
                                       force_update,
                                       using,
                                       update_fields)
        if (schedule and
                self.state == self.NEW and
                not self.sandbox_path and
                not self.original_run):
            transaction.on_commit(self.schedule)

    @property
    def full_sandbox_path(self):
        """Absolute sandbox path, or '' when no sandbox has been created."""
        if not self.sandbox_path:
            return ''
        return os.path.join(settings.MEDIA_ROOT, self.sandbox_path)

    def create_sandbox(self, prefix=None):
        """Create the sandbox directory (with a logs/ subfolder) and record
        its MEDIA_ROOT-relative path on this run."""
        sandbox_root = self.SANDBOX_ROOT
        try:
            os.mkdir(sandbox_root)
        except OSError as ex:
            # The root may already exist; any other error is real.
            if ex.errno != errno.EEXIST:
                raise
        if prefix is None:
            prefix = 'user{}_run{}_'.format(self.user.username, self.pk)
        full_sandbox_path = mkdtemp(prefix=prefix, dir=sandbox_root)
        os.mkdir(os.path.join(full_sandbox_path, 'logs'))
        self.sandbox_path = os.path.relpath(full_sandbox_path, settings.MEDIA_ROOT)

    def schedule(self, dependencies=None):
        """Submit this run to Slurm, first scheduling any dependency runs.

        :param dependencies: optional {source_run_id: sub_dependencies}
            mapping; each source run is scheduled first and this job is
            made to wait on it.
        """
        try:
            dependency_job_ids = []
            if dependencies:
                for source_run_id, source_dependencies in dependencies.items():
                    source_run = ContainerRun.objects.get(id=source_run_id)
                    source_run.schedule(source_dependencies)
                    dependency_job_ids.append(source_run.slurm_job_id)
            self.create_sandbox()
            self.save()
            # Build the child environment for sbatch.
            child_env = dict(os.environ)
            extra_path = settings.SLURM_PATH
            if extra_path is not None:
                old_system_path = child_env['PATH']
                system_path = extra_path + os.pathsep + old_system_path
                child_env['PATH'] = system_path
            child_env['PYTHONPATH'] = os.pathsep.join(sys.path)
            child_env.pop('KIVE_LOG', None)
            output = multi_check_output(self.build_slurm_command(settings.SLURM_QUEUES,
                                                                 dependency_job_ids),
                                        env=child_env)
            self.slurm_job_id = int(output)
            # It's just possible the slurm job has already started modifying the
            # run, so only update one field.
            self.save(update_fields=['slurm_job_id'])
        except Exception:
            self.state = self.FAILED
            self.save(update_fields=['state'])
            raise

    def build_slurm_command(self, slurm_queues=None, dependency_job_ids=None):
        """Build a list of strings representing a slurm command"""
        if not self.sandbox_path:
            raise RuntimeError(
                'Container run needs a sandbox before calling Slurm.')
        # %J/%N are expanded by Slurm into the job id and node name.
        slurm_prefix = os.path.join(settings.MEDIA_ROOT,
                                    self.sandbox_path,
                                    'logs',
                                    'job%J_node%N_')
        job_name = 'r{} {}'.format(self.pk,
                                   self.app.name or
                                   self.app.container.family.name)
        command = ['sbatch',
                   '-J', job_name,
                   '--parsable',
                   '--output', slurm_prefix + 'stdout.txt',
                   '--error', slurm_prefix + 'stderr.txt',
                   '-c', str(self.app.threads),
                   '--mem', str(self.app.memory)]
        if slurm_queues is not None:
            # The run's priority selects which Slurm partition to use.
            kive_name, slurm_name = slurm_queues[self.priority]
            command.extend(['-p', slurm_name])
        if dependency_job_ids:
            command.append('--dependency=afterok:' + ':'.join(
                str(job_id)
                for job_id in dependency_job_ids))
        command.extend([MANAGE_PY_FULLPATH, 'runcontainer', str(self.pk)])
        return command

    def create_inputs_from_original_run(self):
        """ Create input datasets by copying original run.
        Checks for reruns of the source runs.
        :return: a set of source runs that need to be rerun to recreate the
        inputs. Calling this again after those reruns will finish creating the
        inputs.
        """
        reruns_needed = set()
        if self.original_run:
            filled_argument_ids = self.datasets.values('argument_id')
            unfilled_input_arguments = self.app.arguments.filter(
                type=ContainerArgument.INPUT).exclude(id__in=filled_argument_ids)
            for container_dataset in self.original_run.datasets.filter(
                    argument__in=unfilled_input_arguments):
                rerun_dataset, source_run = container_dataset.find_rerun_dataset()
                if rerun_dataset is None:
                    # Data purged everywhere: the source run must be rerun.
                    reruns_needed.add(source_run)
                    continue
                container_dataset.id = None  # Make a copy.
                container_dataset.dataset = rerun_dataset
                container_dataset.run = self
                container_dataset.save()
        return reruns_needed

    def get_sandbox_prefix(self):
        """Directory-name prefix used when creating this run's sandbox."""
        return 'user{}_run{}_'.format(self.user.username, self.pk)

    def request_stop(self, user):
        """Cancel this run: directly if still NEW, otherwise via scancel."""
        end_time = timezone.now()
        # Atomic compare-and-set: only a NEW run can be cancelled in place.
        rows_updated = ContainerRun.objects.filter(
            pk=self.pk,
            state=ContainerRun.NEW).update(state=ContainerRun.CANCELLED,
                                           stopped_by=user,
                                           end_time=end_time)
        if rows_updated == 0:
            # Run has already started. Must call scancel.
            check_call(['scancel', '-f', str(self.slurm_job_id)])
            self.state = ContainerRun.CANCELLED
            self.stopped_by = user
            self.end_time = end_time
            self.save()

    @transaction.atomic
    def build_removal_plan(self, removal_accumulator=None):
        """ Make a manifest of objects to remove when removing this. """
        removal_plan = removal_accumulator or empty_removal_plan()
        assert self not in removal_plan["ContainerRuns"]
        # Only finished runs may be removed.
        if self.state not in (ContainerRun.COMPLETE,
                              ContainerRun.FAILED,
                              ContainerRun.CANCELLED):
            raise ValueError(
                'ContainerRun id {} is still active.'.format(self.pk))
        removal_plan["ContainerRuns"].add(self)
        for run_dataset in self.datasets.all():
            if run_dataset.argument.type == ContainerArgument.OUTPUT:
                if getattr(run_dataset.dataset, 'file_source', None) is not None:
                    # Dataset was converted from an old run. Don't remove it.
                    continue
                run_dataset.dataset.build_removal_plan(removal_plan)
        return removal_plan

    @transaction.atomic
    def remove(self):
        removal_plan = self.build_removal_plan()
        remove_helper(removal_plan)

    def load_log(self, file_path, log_type):
        """Store a stdout/stderr file as a ContainerLog: inline text when it
        fits in short_text, otherwise as an uploaded long_text file."""
        # noinspection PyUnresolvedReferences,PyProtectedMember
        short_size = ContainerLog._meta.get_field('short_text').max_length
        file_size = os.lstat(file_path).st_size
        with open(file_path) as f:
            if file_size <= short_size:
                long_text = None
                short_text = f.read(short_size)
            else:
                short_text = ''
                long_text = File(f)
            # We use update_or_create(), because it's possible that a log could
            # be successfully created, then an error occurs, and we need to
            # update it.
            log, _ = self.logs.update_or_create(
                type=log_type,
                defaults=dict(short_text=short_text))
            if long_text is not None:
                upload_name = 'run_{}_{}'.format(
                    self.pk,
                    os.path.basename(file_path))
                log.long_text.save(upload_name, long_text)

    def delete_sandbox(self):
        """Delete the sandbox folder from disk and clear sandbox_path.
        Does not save the run."""
        assert self.sandbox_path
        shutil.rmtree(self.full_sandbox_path)
        self.sandbox_path = ''

    @classmethod
    def find_unneeded(cls):
        """ A queryset of records that could be purged. """
        # A run is purgeable once its sandbox size has been computed and
        # the sandbox still exists.
        return cls.objects.filter(sandbox_size__isnull=False).exclude(
            sandbox_path='')

    @classmethod
    def scan_file_names(cls):
        """ Yield all file names, relative to MEDIA_ROOT. """
        relative_root = os.path.relpath(ContainerRun.SANDBOX_ROOT,
                                        settings.MEDIA_ROOT)
        if not os.path.exists(ContainerRun.SANDBOX_ROOT):
            return
        for file_name in os.listdir(ContainerRun.SANDBOX_ROOT):
            yield os.path.join(relative_root, file_name)

    @classmethod
    def check_slurm_state(cls, pk=None):
        """ Check active runs to make sure their Slurm jobs haven't died.
        :param pk: a run id to check, or None if all active runs should be
        checked.
        """
        runs = cls.objects.filter(state__in=cls.ACTIVE_STATES).only(
            'state',
            'end_time',
            'slurm_job_id')
        if pk is not None:
            runs = runs.filter(pk=pk)
        job_runs = {str(run.slurm_job_id): run
                    for run in runs
                    if run.slurm_job_id is not None}
        if not job_runs:
            # No jobs to check.
            return
        job_id_text = ','.join(job_runs)
        output = multi_check_output(['sacct',
                                     '-j', job_id_text,
                                     '-o', 'jobid,end',
                                     '--noheader',
                                     '--parsable2'])
        slurm_date_format = '%Y-%m-%dT%H:%M:%S'
        # Jobs that ended in the last minute get no action; jobs that ended
        # 1-15 minutes ago are warned; older ones are marked failed.
        warn_end_time = datetime.now() - timedelta(minutes=1)
        max_end_time = warn_end_time - timedelta(minutes=14)
        warn_end_time_text = warn_end_time.strftime(slurm_date_format)
        max_end_time_text = max_end_time.strftime(slurm_date_format)
        for line in output.splitlines():
            job_id, end_time = line.split('|')
            if end_time > warn_end_time_text:
                continue
            run = job_runs.get(job_id)
            if run is not None:
                if end_time > max_end_time_text:
                    if not run.is_warned:
                        logger.warning(
                            'Slurm reports that run id %d ended at %s without '
                            'updating Kive. Waiting 15 minutes to allow '
                            'rescheduling.',
                            run.id,
                            end_time)
                        run.is_warned = True
                        run.save(update_fields=['is_warned'])
                else:
                    logger.error(
                        'Slurm reports that run id %d ended at %s without '
                        'updating Kive. Marked as failed.',
                        run.id,
                        end_time)
                    run.state = cls.FAILED
                    run.end_time = Now()
                    run.save()
                    # Preserve whatever stderr the dead job left behind.
                    logs_path = Path(run.full_sandbox_path) / 'logs'
                    log_matches = list(logs_path.glob('job*_node*_stderr.txt'))
                    if log_matches:
                        run.load_log(log_matches[0], ContainerLog.STDERR)

    def set_md5(self):
        """ Set this run's md5. Note that this does not save the run. """
        encoding = 'utf8'
        md5gen = hashlib.md5()
        # Fold in the container (and parent container) checksums first.
        container = self.app.container
        container_md5 = container.md5.encode(encoding)
        md5gen.update(container_md5)
        parent_container = container.parent
        if parent_container is not None:
            parent_md5 = parent_container.md5.encode(encoding)
            md5gen.update(parent_md5)
        # Use explict sort order, so changes to default don't invalidate MD5's.
        for container_dataset in self.datasets.order_by('argument__type',
                                                        'argument__position',
                                                        'argument__name'):
            dataset = container_dataset.dataset
            dataset_md5 = dataset.MD5_checksum.encode(encoding)
            md5gen.update(dataset_md5)
        self.md5 = md5gen.hexdigest()
class ContainerDataset(models.Model):
    """Attachment of a dataset to a run, as one argument's input or output."""
    run = models.ForeignKey(ContainerRun,
                            related_name="datasets",
                            on_delete=models.CASCADE)
    argument = models.ForeignKey(ContainerArgument,
                                 related_name="datasets",
                                 on_delete=models.CASCADE)
    dataset = models.ForeignKey("librarian.Dataset",
                                related_name="containers",
                                on_delete=models.CASCADE)
    multi_position = models.PositiveIntegerField(
        help_text="Position in a multi-valued argument"
                  " (None for single-value arguments).",
        null=True,
        default=None,
    )
    name = models.CharField(
        max_length=maxlengths.MAX_NAME_LENGTH,
        help_text="Local file name, also used to sort multiple inputs for a "
                  "single argument.",
        blank=True)
    created = models.DateTimeField(
        auto_now_add=True,
        help_text="When this was added to Kive.")
    objects = None  # Filled in later by Django.

    class Meta:
        ordering = ('run',
                    'argument__type',
                    'argument__position',
                    'argument__name')

    def find_rerun_dataset(self):
        """ Find the dataset, or the matching dataset from a rerun.
        :return: (dataset, source_run) If all of the matching datasets have
        been purged, then dataset is None and source_run is a run that
        produced the dataset as an output. Otherwise, source_run is None.
        """
        if self.dataset.has_data():
            # Original data is still present; nothing more to do.
            return self.dataset, None
        # The data was purged: find the run that produced it as an output,
        # then search that run's reruns (recursively) for a surviving copy.
        output_container_dataset = self.dataset.containers.get(
            argument__type=ContainerArgument.OUTPUT)
        output_argument = output_container_dataset.argument
        for rerun in output_container_dataset.run.reruns.all():
            rerun_container_dataset = rerun.datasets.get(argument=output_argument)
            dataset, source_run = rerun_container_dataset.find_rerun_dataset()
            if dataset is not None:
                return dataset, None
        # Every copy is purged; report the producing run instead.
        return None, output_container_dataset.run

    def clean(self):
        # Check that a position has been supplied for multiple-input arguments
        if self.argument.argtype is ContainerArgumentType.OPTIONAL_MULTIPLE_INPUT:
            if self.multi_position is None:
                raise ValidationError("multi_position is required for a multi-valued input")
        elif self.multi_position is not None:
            raise ValidationError("multi_position should be None for single-valued argtype")
class ContainerLog(models.Model):
    """One captured output stream (stdout or stderr) of a container run.

    Short logs are stored inline in ``short_text``; longer ones go to a file
    referenced by ``long_text``. A purged log keeps its recorded ``log_size``
    while ``long_text`` is cleared.
    """
    UPLOAD_DIR = 'ContainerLogs'
    STDOUT = 'O'
    STDERR = 'E'
    TYPES = ((STDOUT, 'stdout'),
             (STDERR, 'stderr'))
    type = models.CharField(max_length=1, choices=TYPES)
    run = models.ForeignKey(ContainerRun,
                            related_name="logs",
                            on_delete=models.CASCADE)
    short_text = models.CharField(
        max_length=2000,
        blank=True,
        help_text="Holds the log text if it's shorter than the max length.")
    long_text = models.FileField(
        upload_to=UPLOAD_DIR,
        help_text="Holds the log text if it's longer than the max length.")
    log_size = models.BigIntegerField(
        blank=True,
        null=True,
        # Fixed: the original concatenated "...is short" + "and not stored",
        # which rendered as "shortand" in the admin help text.
        help_text="Size of the log file in bytes. If null, this has not been "
                  "computed yet, or the log is short and not stored in a file.")
    objects = None  # Filled in later by Django.

    def can_be_accessed(self, user):
        """Delegate access control to the owning run."""
        return self.run.can_be_accessed(user)

    def get_absolute_url(self):
        """URL of the detail page for this log."""
        return reverse('container_log_detail', kwargs=dict(pk=self.pk))

    @property
    def size(self):
        """ Check the size of the log, either short or long.
        :return: the size from whichever log is used, or None if the log was
        purged.
        """
        if self.long_text:
            return self.long_text.size
        if self.log_size:
            # A recorded size with no file means the log was purged.
            return None
        # noinspection PyTypeChecker
        return len(self.short_text)

    @property
    def size_display(self):
        """Human-readable size, or 'missing' when the log was purged."""
        log_size = self.size
        if log_size is None:
            return 'missing'
        return filesizeformat(log_size)

    @property
    def preview(self):
        """First 1000 characters of the log, with a hint when truncated."""
        display_limit = 1000
        log_size = self.size
        if log_size is None:
            return '[purged]'
        display = self.read(display_limit)
        if log_size > display_limit:
            display += '[...download to see the remaining {}.]'.format(
                filesizeformat(log_size - display_limit))
        return display

    def read(self, size=None):
        """Return up to *size* characters of log text (all text when None)."""
        if self.long_text:
            self.long_text.open('r')
            try:
                # NOTE(review): size=0 falls through to -1 (read everything)
                # here; confirm that is intended for callers passing 0.
                return self.long_text.read(size or -1)
            finally:
                self.long_text.close()
        return self.short_text[:size]

    @classmethod
    def find_unneeded(cls):
        """ A queryset of records that could be purged. """
        return cls.objects.exclude(
            long_text=None).exclude(  # short log
            long_text='').exclude(  # purged log
            log_size=None)  # new log

    @classmethod
    def scan_file_names(cls):
        """ Yield all file names, relative to MEDIA_ROOT. """
        relative_root = ContainerLog.UPLOAD_DIR
        absolute_root = os.path.join(settings.MEDIA_ROOT, relative_root)
        if not os.path.exists(absolute_root):
            return
        for file_name in os.listdir(absolute_root):
            yield os.path.join(relative_root, file_name)
| |
import datetime as dt
from functools import partial
import inspect
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_index_equal, assert_series_equal, assert_frame_equal
from numpy.testing import assert_equal
# Strict variants of the pandas comparison helpers: fail on any dtype,
# index-type, or container-type mismatch instead of coercing silently.
assert_series_equal_strict = partial(assert_series_equal, check_dtype=True, check_index_type=True,
                                     check_series_type=True, check_less_precise=False)
assert_frame_equal_strict = partial(assert_frame_equal, check_dtype=True, check_index_type=True,
                                    check_column_type=True, check_frame_type=True, check_less_precise=False,
                                    check_names=True)
from numpyson import dumps, loads, build_index_handler_for_type
def test_version():
    """The package must advertise a truthy __version__ attribute."""
    import numpyson
    version = numpyson.__version__
    assert version
@pytest.mark.parametrize('arr_before', [
    np.array([1, 2, 3]),
    np.array([1., 2., 3.]),
    np.array(['foo', 'bar', 'baz']),
    np.array([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58), dt.datetime(1970, 1, 1, 12, 59)]),
    np.array([dt.date(1970, 1, 1), dt.date(1970, 1, 2), dt.date(1970, 1, 3)]),
    np.array([True, False, True]),
    np.arange(10).T,
    np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]]),
    np.array([[[1., 10.], [4., 40.], [7., 70.]], [[2., 20.], [5., 50.], [8., 80.]], [[3., 30.], [6., 60.], [9., 90.]]]),
    np.reshape(np.arange(100), (10, 10)),
    np.reshape(np.arange(100).T, (10, 10)),
])
def test_numpy_array_handler(arr_before):
    """Arrays of assorted dtypes and shapes must survive a dumps/loads cycle."""
    arr_after = loads(dumps(arr_before))
    assert_equal(arr_before, arr_after)
def test_nested_array():
    """An array nested inside a plain dict must round-trip intact."""
    data_before = {"1": np.array([1, 2])}
    data_after = loads(dumps(data_before))
    assert_equal(data_before["1"], data_after["1"])
@pytest.mark.parametrize('ts_before', [
    pd.TimeSeries([1, 2, 3], index=[0, 1, 2]),
    pd.TimeSeries([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='S')),
    pd.TimeSeries([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='D')),
])
def test_pandas_timeseries_handler(ts_before):
    """Series with int/float values and assorted indexes must round-trip."""
    ts_after = loads(dumps(ts_before))
    assert_series_equal_strict(ts_before, ts_after)
@pytest.mark.parametrize('index_before', [
    pd.Index([0, 1, 2]),
    pd.Index([0., 1., 2.]),  # not sure why you would want to index by floating point numbers; here for completeness
    pd.Index(['a', 'b', 'c']),
])
def test_pandas_index_handler(index_before):
    """Plain pandas indexes must round-trip without changing type or values."""
    index_after = loads(dumps(index_before))
    assert_index_equal(index_before, index_after)
@pytest.mark.parametrize('index_before', [
    pd.date_range('1970-01-01', periods=3, freq='S'),
    pd.date_range('1970-01-01', periods=3, freq='D'),
])
def test_pandas_datetime_index_handler(index_before):
    """DatetimeIndex objects must round-trip for both S and D frequencies."""
    index_after = loads(dumps(index_before))
    assert_index_equal(index_before, index_after)
@pytest.mark.parametrize('data_before', [
    {"1": pd.date_range('1970-01-01', periods=3, freq='S')},
    {"1": pd.date_range('1970-01-01', periods=3, freq='D')},
])
def test_datetime_index_nested(data_before):
    """A DatetimeIndex stored inside a dict must round-trip intact."""
    data_after = loads(dumps(data_before))
    assert_index_equal(data_before["1"], data_after["1"])
# Round-trip fixtures: frames covering int, float, str, bool and object
# columns, integer and datetime indexes, and a wide all-ones numeric frame.
TEST_DATA_FRAMES = (
    pd.DataFrame({0: [1, 2, 3]}, index=[0, 1, 2]),
    pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=[0, 1, 2]),
    pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='S')),
    pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
    pd.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
    pd.DataFrame({
        'i': [1, 2, 3],
        'f': [1.1, 2.2, 3.3],
        's': ['ham', 'spam', 'eggs'],
        'b': [True, False, True],
        'o': [{'a': 1}, {'b': 2}, {'c': 3}],
    },
        index=pd.date_range('1970-01-01', periods=3, freq='S')),
    pd.DataFrame(np.ones(shape=(10,15)), index=pd.date_range('1970-01-01', periods=10))
)
@pytest.mark.parametrize('df_before', TEST_DATA_FRAMES)
def test_pandas_dataframe_handler(df_before):
    """Every fixture frame must deserialize strictly equal to the original."""
    df_after = loads(dumps(df_before))
    assert_frame_equal_strict(df_before, df_after)
def test_mixed_python_and_pandas_types():
    """A tuple of DataFrames must round-trip as a tuple of equal frames.

    Exercises serialization of a built-in container holding pandas objects.
    """
    data_before = TEST_DATA_FRAMES
    data_after = loads(dumps(data_before))
    assert isinstance(data_after, tuple)
    # One length check suffices; the original repeated it in two forms.
    assert len(data_after) == len(data_before)
    for df_before, df_after in zip(data_before, data_after):
        assert_frame_equal_strict(df_before, df_after)
def test_build_index_handler_for_type():
    # NOTE(review): this loop iterates an EMPTY tuple, so the positive-path
    # assertions below never execute -- dead code. Presumably it was meant to
    # list the index classes the factory supports; confirm and fill in.
    for index_class in ():
        handler_cls = build_index_handler_for_type(index_class)
        assert inspect.isclass(handler_cls)
        assert hasattr(handler_cls, 'flatten')
        assert hasattr(handler_cls, 'restore')
    # These types must be rejected with TypeError.
    with pytest.raises(TypeError):
        build_index_handler_for_type(pd.DatetimeIndex)
    with pytest.raises(TypeError):
        build_index_handler_for_type(pd.TimeSeries)
@pytest.mark.xfail(reason='failing preserve underlying array state when it is wrapped inside a Pandas object')
def test_preservation_of_specific_array_ordering():
    """C- and Fortran-ordered arrays inside frames should keep their order."""
    for order, expect_fortran in (('C', False), ('F', True)):
        df_before = pd.DataFrame(np.array([[1,2],[3,4], [5,6]], order=order))
        df_after = loads(dumps(df_before))
        assert_frame_equal_strict(df_before, df_after)
        assert_equal(df_before.values, df_after.values)
        assert df_before.values.flags.fortran == expect_fortran
        assert df_after.values.flags.fortran == expect_fortran
def test_preservation_of_specific_array_ordering_simple():
    """Round-tripping must preserve strides and flags for both memory orders."""
    arr_c = np.array([[1,2],[3,4], [5,6]], order='C')
    arr_f = np.array([[1,2],[3,4], [5,6]], order='F')
    # Same contents, different memory layout.
    assert_equal(arr_c, arr_f)
    assert arr_c.strides != arr_f.strides
    for original, is_fortran in ((arr_c, False), (arr_f, True)):
        restored = loads(dumps(original))
        assert original.strides == restored.strides
        assert original.flags.fortran == is_fortran
        assert restored.flags.fortran == is_fortran
        assert_equal(original, restored)
@pytest.mark.parametrize("val", [np.float64(4.2), np.int64(5)])
def test_number(val):
    """Scalar numpy numbers must keep both their value and their exact type."""
    restored = loads(dumps(val))
    assert restored == val
    assert type(restored) == type(val)
def test_datetime_identity():
    """One datetime object referenced twice must deserialize equal both times.

    Also nests a pandas TimeSeries to mix stdlib and pandas types in one
    payload.
    """
    import datetime
    date = datetime.datetime(2013, 11, 1, 0, 0)
    val = {
        'start': date,
        'end': date,
        'd': {"ttf": pd.TimeSeries([1.],
                                   pd.date_range("1970-1-1", periods=1, freq='S'))
              }
    }
    dumped = dumps(val)
    loaded = loads(dumped)
    assert loaded["start"] == val["start"], dumped
    # The original asserted 'end' twice verbatim; once is sufficient.
    assert loaded["end"] == val["end"]
| |
import codecs
import os
import sys
import re
import errno
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
from .expect import Expecter, searcher_string, searcher_re
# True when running under Python 3; selects byte/unicode defaults below.
PY3 = (sys.version_info[0] >= 3)
# The native text type: ``str`` on Python 3, ``unicode`` on Python 2.
text_type = str if PY3 else unicode
class _NullCoder(object):
    """Identity codec used in bytes mode: data passes through untouched."""

    @staticmethod
    def encode(data, final=False):
        """Return *data* unchanged."""
        return data

    @staticmethod
    def decode(data, final=False):
        """Return *data* unchanged."""
        return data
class SpawnBase(object):
    """A base class providing the backwards-compatible spawn API for Pexpect.

    This should not be instantiated directly: use :class:`pexpect.spawn` or
    :class:`pexpect.fdpexpect.fdspawn`.
    """
    # Class-level defaults; instances overwrite these in __init__.
    encoding = None
    pid = None
    flag_eof = False
    def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, encoding=None, codec_errors='strict'):
        """Initialize the spawn state shared by all backends.

        :param timeout: default seconds for expect() waits; None blocks.
        :param maxread: max bytes read into the buffer at one time.
        :param searchwindowsize: how far back in the buffer to search.
        :param logfile: file-like object receiving all reads and sends.
        :param encoding: None selects bytes mode; a codec name selects
            unicode mode with incremental encode/decode.
        :param codec_errors: error policy for the incremental codec.
        """
        self.stdin = sys.stdin
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        self.searcher = None
        self.ignorecase = False
        self.before = None
        self.after = None
        self.match = None
        self.match_index = None
        self.terminated = True
        self.exitstatus = None
        self.signalstatus = None
        # status returned by os.waitpid
        self.status = None
        # the child file descriptor is initially closed
        self.child_fd = -1
        self.timeout = timeout
        self.delimiter = EOF
        self.logfile = logfile
        # input from child (read_nonblocking)
        self.logfile_read = None
        # output to send (send, sendline)
        self.logfile_send = None
        # max bytes to read at one time into buffer
        self.maxread = maxread
        # This is the read buffer. See maxread.
        self.buffer = bytes() if (encoding is None) else text_type()
        # Data before searchwindowsize point is preserved, but not searched.
        self.searchwindowsize = searchwindowsize
        # Delay used before sending data to child. Time in seconds.
        # Set this to None to skip the time.sleep() call completely.
        self.delaybeforesend = 0.05
        # Used by close() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterclose = 0.1
        # Used by terminate() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterterminate = 0.1
        # Delay in seconds to sleep after each call to read_nonblocking().
        # Set this to None to skip the time.sleep() call completely: that
        # would restore the behavior from pexpect-2.0 (for performance
        # reasons or because you don't want to release Python's global
        # interpreter lock).
        self.delayafterread = 0.0001
        self.softspace = False
        self.name = '<' + repr(self) + '>'
        self.closed = True
        # Unicode interface
        self.encoding = encoding
        self.codec_errors = codec_errors
        if encoding is None:
            # bytes mode (accepts some unicode for backwards compatibility)
            self._encoder = self._decoder = _NullCoder()
            self.string_type = bytes
            self.crlf = b'\r\n'
            if PY3:
                self.allowed_string_types = (bytes, str)
                self.linesep = os.linesep.encode('ascii')
                def write_to_stdout(b):
                    try:
                        return sys.stdout.buffer.write(b)
                    except AttributeError:
                        # If stdout has been replaced, it may not have .buffer
                        return sys.stdout.write(b.decode('ascii', 'replace'))
                self.write_to_stdout = write_to_stdout
            else:
                self.allowed_string_types = (basestring,)  # analysis:ignore
                self.linesep = os.linesep
                self.write_to_stdout = sys.stdout.write
        else:
            # unicode mode
            self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
            self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
            self.string_type = text_type
            self.crlf = u'\r\n'
            self.allowed_string_types = (text_type, )
            if PY3:
                self.linesep = os.linesep
            else:
                self.linesep = os.linesep.decode('ascii')
            # This can handle unicode in both Python 2 and 3
            self.write_to_stdout = sys.stdout.write
        # storage for async transport
        self.async_pw_transport = None
def _log(self, s, direction):
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
second_log = self.logfile_send if (direction=='send') else self.logfile_read
if second_log is not None:
second_log.write(s)
second_log.flush()
# For backwards compatibility, in bytes mode (when encoding is None)
# unicode is accepted for send and expect. Unicode mode is strictly unicode
# only.
def _coerce_expect_string(self, s):
if self.encoding is None and not isinstance(s, bytes):
return s.encode('ascii')
return s
def _coerce_send_string(self, s):
if self.encoding is None and not isinstance(s, bytes):
return s.encode('utf-8')
return s
def read_nonblocking(self, size=1, timeout=None):
"""This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored.
"""
try:
s = os.read(self.child_fd, size)
except OSError as err:
if err.args[0] == errno.EIO:
# Linux-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
raise
if s == b'':
# BSD-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s
def _pattern_type_err(self, pattern):
raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
.format(badtype=type(pattern),
badobj=pattern,
goodtypes=', '.join([str(ast)\
for ast in self.allowed_string_types])
)
)
def compile_pattern_list(self, patterns):
'''This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoid calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
'''
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
# Allow dot to match \n
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for idx, p in enumerate(patterns):
if isinstance(p, self.allowed_string_types):
p = self._coerce_expect_string(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
self._pattern_type_err(p)
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw):
'''This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first
match in the stream is chosen. If more than one pattern matches at that
point, the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect(['bar', 'foo', 'foobar'])
# returns 1('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect(['foobar', 'foo'])
# returns 0('foobar') if all input is available at once,
# but returs 1('foo') if parts of the final 'bar' arrive late
When a match is found for the given pattern, the class instance
attribute *match* becomes an re.MatchObject result. Should an EOF
or TIMEOUT pattern match, then the match attribute will be an instance
of that exception class. The pairing before and after class
instance attributes are views of the data preceding and following
the matching pattern. On general exception, class attribute
*before* is all data received up to the exception, while *match* and
*after* attributes are value None.
When the keyword argument timeout is -1 (default), then TIMEOUT will
raise after the default value specified by the class timeout
attribute. When None, TIMEOUT will not be raised and may block
indefinitely until match.
When the keyword argument searchwindowsize is -1 (default), then the
value specified by the class maxread attribute is used.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect(['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect(pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
On Python 3.4, or Python 3.3 with asyncio installed, passing
``async_=True`` will make this return an :mod:`asyncio` coroutine,
which you can yield from to get the same result that this method would
normally give directly. So, inside a coroutine, you can replace this code::
index = p.expect(patterns)
With this non-blocking form::
index = yield from p.expect(patterns, async_=True)
'''
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize, async_)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
'''This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT(which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect().
Like :meth:`expect`, passing ``async_=True`` will make this return an
asyncio coroutine.
'''
if timeout == -1:
timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
return expect_async(exp, timeout)
else:
return exp.expect_loop(timeout)
    def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
                     async_=False, **kw):
        '''This is similar to expect(), but uses plain string matching instead
        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
        may be a string; a list or other sequence of strings; or TIMEOUT and
        EOF.

        This call might be faster than expect() for two reasons: string
        searching is faster than RE matching and it is possible to limit the
        search to just the end of the input buffer.

        This method is also useful when you don't want to have to worry about
        escaping regular expression characters that you want to match.

        Like :meth:`expect`, passing ``async_=True`` will make this return an
        asyncio coroutine.
        '''
        if timeout == -1:
            timeout = self.timeout
        # Accept the pre-4.0 'async' keyword as an alias of async_.
        if 'async' in kw:
            async_ = kw.pop('async')
        if kw:
            raise TypeError("Unknown keyword arguments: {}".format(kw))
        # A bare string (or bare TIMEOUT/EOF) is wrapped into a 1-item list.
        if (isinstance(pattern_list, self.allowed_string_types) or
                pattern_list in (TIMEOUT, EOF)):
            pattern_list = [pattern_list]

        def prepare_pattern(pattern):
            # EOF/TIMEOUT pass through; strings are coerced to the mode's
            # string type (bytes/unicode); anything else is a TypeError.
            if pattern in (TIMEOUT, EOF):
                return pattern
            if isinstance(pattern, self.allowed_string_types):
                return self._coerce_expect_string(pattern)
            self._pattern_type_err(pattern)

        try:
            pattern_list = iter(pattern_list)
        except TypeError:
            self._pattern_type_err(pattern_list)
        pattern_list = [prepare_pattern(p) for p in pattern_list]
        exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
        if async_:
            from ._async import expect_async
            return expect_async(exp, timeout)
        else:
            return exp.expect_loop(timeout)
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
exp = Expecter(self, searcher, searchwindowsize)
return exp.expect_loop(timeout)
def read(self, size=-1):
'''This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. '''
if size == 0:
return self.string_type()
if size < 0:
# delimiter default is EOF
self.expect(self.delimiter)
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
# delimiter default is EOF
index = self.expect([cre, self.delimiter])
if index == 0:
### FIXME self.before should be ''. Should I assert this?
return self.after
return self.before
def readline(self, size=-1):
'''This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. '''
if size == 0:
return self.string_type()
# delimiter default is EOF
index = self.expect([self.crlf, self.delimiter])
if index == 0:
return self.before + self.crlf
else:
return self.before
def __iter__(self):
'''This is to support iterators over a file-like object.
'''
return iter(self.readline, self.string_type())
def readlines(self, sizehint=-1):
'''This reads until EOF using readline() and returns a list containing
the lines thus read. The optional 'sizehint' argument is ignored.
Remember, because this reads until EOF that means the child
process should have closed its stdout. If you run this method on
a child that is still running with its stdout open then this
method will block until it timesout.'''
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def fileno(self):
'''Expose file descriptor for a file-like interface
'''
return self.child_fd
def flush(self):
'''This does nothing. It is here to support the interface for a
File-like object. '''
pass
def isatty(self):
"""Overridden in subclass using tty"""
return False
# For 'with spawn(...) as child:'
def __enter__(self):
return self
def __exit__(self, etype, evalue, tb):
# We rely on subclasses to implement close(). If they don't, it's not
# clear what a context manager should do.
self.close()
| |
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import json
import os
import pipes
import re
import subprocess
import sys
import bb_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
CHROMIUM_COVERAGE_BUCKET = 'chromium-code-coverage'

# A bot's full configuration: its id, host-side steps, and device tests.
_BotConfig = collections.namedtuple(
    'BotConfig', ['bot_id', 'host_obj', 'test_obj'])
# Host-side work: the step script, which steps to run, and GYP settings.
HostConfig = collections.namedtuple(
    'HostConfig',
    ['script', 'host_steps', 'extra_args', 'extra_gyp_defines', 'target_arch'])
# Device-side work: the test script, test names, and extra flags.
TestConfig = collections.namedtuple('Tests', ['script', 'tests', 'extra_args'])


def BotConfig(bot_id, host_object, test_object=None):
  """Build a _BotConfig; test_object defaults to None (no device tests)."""
  return _BotConfig(bot_id, host_object, test_object)
def DictDiff(d1, d2):
  """Return a diff-style summary of how dict d2 differs from d1.

  Keys are visited in sorted order; an entry changed or removed relative to
  d1 is emitted with a '-' prefix, and one changed or added in d2 with a
  '+' prefix. Values are shell-quoted for readability.
  """
  diff = []
  # set(d1) | set(d2) works on both Python 2 and 3; the original
  # d1.keys() + d2.keys() breaks on Python 3, where keys() returns a view.
  for key in sorted(set(d1) | set(d2)):
    if key in d1 and d1[key] != d2.get(key):
      diff.append('- %s=%s' % (key, pipes.quote(d1[key])))
    if key in d2 and d2[key] != d1.get(key):
      diff.append('+ %s=%s' % (key, pipes.quote(d2[key])))
  return '\n'.join(diff)
def GetEnvironment(host_obj, testing, extra_env_vars=None):
  """Build the environment dict for build steps via envsetup.sh.

  Runs build/android/envsetup.sh in a bash subprocess (skipped when testing)
  and captures the resulting environment as JSON, then layers on GYP defines
  for goma/fastbuild, the target architecture, and bot-specific extras.
  """
  init_env = dict(os.environ)
  init_env['GYP_GENERATORS'] = 'ninja'
  if extra_env_vars:
    init_env.update(extra_env_vars)
  envsetup_cmd = '. build/android/envsetup.sh'
  if testing:
    # Skip envsetup to avoid presubmit dependence on android deps.
    print 'Testing mode - skipping "%s"' % envsetup_cmd
    envsetup_cmd = ':'
  else:
    print 'Running %s' % envsetup_cmd
  # envsetup output goes to stderr; the wrapper dumps the resulting
  # environment as JSON on stdout.
  proc = subprocess.Popen(['bash', '-exc',
      envsetup_cmd + ' >&2; python build/android/buildbot/env_to_json.py'],
      stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      cwd=bb_utils.CHROME_SRC, env=init_env)
  json_env, envsetup_output = proc.communicate()
  if proc.returncode != 0:
    print >> sys.stderr, 'FATAL Failure in envsetup.'
    print >> sys.stderr, envsetup_output
    sys.exit(1)
  env = json.loads(json_env)
  env['GYP_DEFINES'] = env.get('GYP_DEFINES', '') + \
      ' fastbuild=1 use_goma=1 gomadir=%s' % bb_utils.GOMA_DIR
  if host_obj.target_arch:
    env['GYP_DEFINES'] += ' target_arch=%s' % host_obj.target_arch
  extra_gyp = host_obj.extra_gyp_defines
  if extra_gyp:
    env['GYP_DEFINES'] += ' %s' % extra_gyp
    # asan/clang builds drop any preset CXX_target from the bot environment.
    if re.search('(asan|clang)=1', extra_gyp):
      env.pop('CXX_target', None)
  # Bots checkout chrome in /b/build/slave/<name>/build/src
  build_internal_android = os.path.abspath(os.path.join(
      bb_utils.CHROME_SRC, '..', '..', '..', '..', '..', 'build_internal',
      'scripts', 'slave', 'android'))
  if os.path.exists(build_internal_android):
    env['PATH'] = os.pathsep.join([build_internal_android, env['PATH']])
  return env
def GetCommands(options, bot_config):
  """Build the command lines to run for a bot.

  Args:
    options: Options object, encoded into buildbot property arguments.
    bot_config: A BotConfig named tuple.

  Returns:
    A list of argv-style command lists: the host-step command, followed by
    the device-test command when the bot has a test configuration.
  """
  property_args = bb_utils.EncodeProperties(options)
  host = bot_config.host_obj
  host_cmd = [host.script, '--steps=%s' % ','.join(host.host_steps)]
  host_cmd += property_args + (host.extra_args or [])
  commands = [host_cmd]
  test_obj = bot_config.test_obj
  if test_obj:
    device_cmd = [test_obj.script] + property_args
    for test in test_obj.tests:
      device_cmd.extend(['-f', test])
    if test_obj.extra_args:
      device_cmd.extend(test_obj.extra_args)
    commands.append(device_cmd)
  return commands
def GetBotStepMap():
  """Build the bot_id -> BotConfig mapping for all known bots.

  Returns:
    dict mapping bot_id to BotConfig, including copied aliases (trybots
    mirroring their CI counterparts, minus flakiness-dashboard upload) and
    a generic 'builder' entry used as a substring-match fallback by
    GetBestMatch.
  """
  compile_step = ['compile']
  std_host_tests = ['check_webview_licenses', 'findbugs']
  std_build_steps = ['compile', 'zip_build']
  std_test_steps = ['extract_build']
  std_tests = ['ui', 'unit']
  flakiness_server = (
      '--flakiness-server=%s' % constants.UPSTREAM_FLAKINESS_SERVER)
  experimental = ['--experimental']

  # Short aliases to keep the config table below readable.
  B = BotConfig
  H = (lambda steps, extra_args=None, extra_gyp=None, target_arch=None :
       HostConfig('build/android/buildbot/bb_host_steps.py', steps, extra_args,
                  extra_gyp, target_arch))
  T = (lambda tests, extra_args=None :
       TestConfig('build/android/buildbot/bb_device_steps.py', tests,
                  extra_args))

  bot_configs = [
      # Main builders
      B('main-builder-dbg', H(std_build_steps + std_host_tests)),
      B('main-builder-rel', H(std_build_steps)),
      B('main-clang-builder',
        H(compile_step, extra_gyp='clang=1 component=shared_library')),
      B('main-clobber', H(compile_step)),
      B('main-tests', H(std_test_steps), T(std_tests, [flakiness_server])),

      # Other waterfalls
      B('asan-builder-tests', H(compile_step,
                                extra_gyp='asan=1 component=shared_library'),
        T(std_tests, ['--asan', '--asan-symbolize'])),
      B('blink-try-builder', H(compile_step)),
      B('chromedriver-fyi-tests-dbg', H(std_test_steps),
        T(['chromedriver'], ['--install=ChromeShell'])),
      B('fyi-x86-builder-dbg',
        H(compile_step + std_host_tests, experimental, target_arch='x86')),
      B('fyi-builder-dbg',
        H(std_build_steps + std_host_tests, experimental,
          extra_gyp='emma_coverage=1 android_lint=1')),
      B('x86-builder-dbg',
        H(compile_step + std_host_tests, target_arch='x86')),
      B('fyi-builder-rel', H(std_build_steps, experimental)),
      B('fyi-tests', H(std_test_steps),
        T(std_tests, ['--experimental', flakiness_server,
                      '--coverage-bucket', CHROMIUM_COVERAGE_BUCKET])),
      B('fyi-component-builder-tests-dbg',
        H(compile_step, extra_gyp='component=shared_library'),
        T(std_tests, ['--experimental', flakiness_server])),
      B('gpu-builder-tests-dbg', H(compile_step), T(['gpu'])),
      # Pass empty T([]) so that logcat monitor and device status check are run.
      B('perf-bisect-builder-tests-dbg', H(['bisect_perf_regression']), T([])),
      B('perf-tests-rel', H(std_test_steps),
        T([], ['--install=ChromeShell'])),
      B('webkit-latest-webkit-tests', H(std_test_steps),
        T(['webkit_layout', 'webkit'], ['--auto-reconnect'])),
      B('webkit-latest-contentshell', H(compile_step),
        T(['webkit_layout'], ['--auto-reconnect'])),
      B('builder-unit-tests', H(compile_step), T(['unit'])),
      B('webrtc-chromium-builder',
        H(std_build_steps,
          extra_args=['--build-targets=android_builder_chromium_webrtc'])),
      B('webrtc-native-builder',
        H(std_build_steps,
          extra_args=['--build-targets=android_builder_webrtc'],
          extra_gyp='include_tests=1 enable_tracing=1')),
      B('webrtc-chromium-tests', H(std_test_steps),
        T(['webrtc_chromium'],
          [flakiness_server, '--gtest-filter=WebRtc*'])),
      B('webrtc-native-tests', H(std_test_steps),
        T(['webrtc_native'], [flakiness_server])),

      # Generic builder config (for substring match).
      B('builder', H(std_build_steps)),
  ]

  bot_map = dict((config.bot_id, config) for config in bot_configs)

  # These bots have identical configuration to ones defined earlier.
  copy_map = [
      ('lkgr-clobber', 'main-clobber'),
      ('try-builder-dbg', 'main-builder-dbg'),
      ('try-builder-rel', 'main-builder-rel'),
      ('try-clang-builder', 'main-clang-builder'),
      ('try-fyi-builder-dbg', 'fyi-builder-dbg'),
      ('try-x86-builder-dbg', 'x86-builder-dbg'),
      ('try-tests', 'main-tests'),
      ('try-fyi-tests', 'fyi-tests'),
      ('webkit-latest-tests', 'main-tests'),
  ]
  for to_id, from_id in copy_map:
    assert to_id not in bot_map
    # Deep-copy so mutating the alias (below) cannot affect the original.
    # pylint: disable=W0212
    bot_map[to_id] = copy.deepcopy(bot_map[from_id])._replace(bot_id=to_id)

    # Trybots do not upload to flakiness dashboard. They should be otherwise
    # identical in configuration to their trunk building counterparts.
    test_obj = bot_map[to_id].test_obj
    if to_id.startswith('try') and test_obj:
      extra_args = test_obj.extra_args
      if extra_args and flakiness_server in extra_args:
        extra_args.remove(flakiness_server)
  return bot_map
# Return an object from the map, looking first for an exact id match.
# If this fails, look for an id which is a substring of the specified id.
# Choose the longest of all substring matches.
# pylint: disable=W0622
def GetBestMatch(id_map, id):
  config = id_map.get(id)
  if not config:
    # No exact match: fall back to the longest map key that is a substring
    # of the requested id (e.g. the generic 'builder' config).
    substring_matches = filter(lambda x: x in id, id_map.iterkeys())
    if substring_matches:
      max_id = max(substring_matches, key=len)
      print 'Using config from id="%s" (substring match).' % max_id
      config = id_map[max_id]
  # May be None when neither an exact nor a substring match exists.
  return config
def GetRunBotOptParser():
  """Create the option parser for this runner (--bot-id, --testing)."""
  opt_parser = bb_utils.GetParser()
  opt_parser.add_option('--bot-id', help='Specify bot id directly.')
  opt_parser.add_option(
      '--testing', action='store_true',
      help='For testing: print, but do not run commands')
  return opt_parser
def GetBotConfig(options, bot_step_map):
bot_id = options.bot_id or options.factory_properties.get('android_bot_id')
if not bot_id:
print (sys.stderr,
'A bot id must be specified through option or factory_props.')
return
bot_config = GetBestMatch(bot_step_map, bot_id)
if not bot_config:
print 'Error: config for id="%s" cannot be inferred.' % bot_id
return bot_config
def RunBotCommands(options, commands, env):
  """Run each command in order, stopping at the first failure.

  Args:
    options: parsed options; options.testing sets BUILDBOT_TESTING=1.
    commands: list of argv lists to execute.
    env: environment dict for the subprocesses.

  Returns:
    The first nonzero exit code, or None (implicitly) when every command
    succeeds — sys.exit(None) in main() then exits with status 0.
  """
  print 'Environment changes:'
  # Only show what differs from the current process environment.
  print DictDiff(dict(os.environ), env)

  for command in commands:
    print bb_utils.CommandToString(command)
    # Flush so our output is ordered correctly relative to the child's.
    sys.stdout.flush()
    if options.testing:
      env['BUILDBOT_TESTING'] = '1'
    return_code = subprocess.call(command, cwd=bb_utils.CHROME_SRC, env=env)
    if return_code != 0:
      return return_code
def main(argv):
  """Entry point: resolve the bot config, build its commands, run them."""
  parser = GetRunBotOptParser()
  options, args = parser.parse_args(argv[1:])
  if args:
    parser.error('Unused args: %s' % args)

  bot_config = GetBotConfig(options, GetBotStepMap())
  if not bot_config:
    sys.exit(1)

  print 'Using config:', bot_config

  commands = GetCommands(options, bot_config)
  for command in commands:
    print 'Will run: ', bb_utils.CommandToString(command)
  print

  env = GetEnvironment(bot_config.host_obj, options.testing)
  # Propagate the first failing command's exit code (None == success).
  return RunBotCommands(options, commands, env)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| |
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import datetime
import errno
import json
import os
import re
import sys
import gtest_json_test_utils
import gtest_test_utils
# Flags understood by the Google Test binaries under test.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.json'
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'

# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'

# Expected failure messages include a stack-trace suffix only when the
# platform supports stack traces (the runner passes the flag above when not).
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY = {
u'tests': 24,
u'failures': 4,
u'disabled': 2,
u'errors': 0,
u'timestamp': u'*',
u'time': u'*',
u'ad_hoc_property': u'42',
u'name': u'AllTests',
u'testsuites': [
{
u'name': u'SuccessfulTest',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Succeeds',
u'status': u'RUN',
u'time': u'*',
u'classname': u'SuccessfulTest'
}
]
},
{
u'name': u'FailedTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Fails',
u'status': u'RUN',
u'time': u'*',
u'classname': u'FailedTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'DisabledTest',
u'tests': 1,
u'failures': 0,
u'disabled': 1,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'DISABLED_test_not_run',
u'status': u'NOTRUN',
u'time': u'*',
u'classname': u'DisabledTest'
}
]
},
{
u'name': u'SkippedTest',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Skipped',
u'status': u'SKIPPED',
u'time': u'*',
u'classname': u'SkippedTest'
}
]
},
{
u'name': u'MixedResultTest',
u'tests': 3,
u'failures': 1,
u'disabled': 1,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Succeeds',
u'status': u'RUN',
u'time': u'*',
u'classname': u'MixedResultTest'
},
{
u'name': u'Fails',
u'status': u'RUN',
u'time': u'*',
u'classname': u'MixedResultTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
},
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 2\n 3' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
},
{
u'name': u'DISABLED_test',
u'status': u'NOTRUN',
u'time': u'*',
u'classname': u'MixedResultTest'
}
]
},
{
u'name': u'XmlQuotingTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'OutputsCData',
u'status': u'RUN',
u'time': u'*',
u'classname': u'XmlQuotingTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nXML output: <?xml encoding="utf-8">'
u'<top><![CDATA[cdata text]]></top>' +
STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'InvalidCharactersTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'InvalidCharactersInMessage',
u'status': u'RUN',
u'time': u'*',
u'classname': u'InvalidCharactersTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nInvalid characters in brackets'
u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'PropertyRecordingTest',
u'tests': 4,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'SetUpTestSuite': u'yes',
u'TearDownTestSuite': u'aye',
u'testsuite': [
{
u'name': u'OneProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1'
},
{
u'name': u'IntValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_int': u'1'
},
{
u'name': u'ThreeProperties',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1',
u'key_2': u'2',
u'key_3': u'3'
},
{
u'name': u'TwoValuesForOneKeyUsesLastValue',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'2'
}
]
},
{
u'name': u'NoFixtureTest',
u'tests': 3,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'RecordProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key': u'1'
},
{
u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_int': u'1'
},
{
u'name':
u'ExternalUtilityThatCallsRecordStringValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_string': u'1'
}
]
},
{
u'name': u'TypedTest/0',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'time': u'*',
u'classname': u'TypedTest/0'
}
]
},
{
u'name': u'TypedTest/1',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'time': u'*',
u'classname': u'TypedTest/1'
}
]
},
{
u'name': u'Single/TypeParameterizedTestSuite/0',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/0'
}
]
},
{
u'name': u'Single/TypeParameterizedTestSuite/1',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/1'
}
]
},
{
u'name': u'Single/ValueParamTest',
u'tests': 4,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'HasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'AnotherTestThatHasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'AnotherTestThatHasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
}
]
}
]
}
EXPECTED_FILTERED = {
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'timestamp': u'*',
u'name': u'AllTests',
u'ad_hoc_property': u'42',
u'testsuites': [{
u'name': u'SuccessfulTest',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [{
u'name': u'Succeeds',
u'status': u'RUN',
u'time': u'*',
u'classname': u'SuccessfulTest',
}]
}],
}
EXPECTED_EMPTY = {
u'tests': 0,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'timestamp': u'*',
u'name': u'AllTests',
u'testsuites': [],
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Probe for typed-test support by listing the binary's tests.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
  """Unit test for Google Test's JSON output functionality.
  """

  # This test currently breaks on platforms that do not support typed and
  # type-parameterized tests, so we don't run it under them.
  if SUPPORTS_TYPED_TESTS:
    def testNonEmptyJsonOutput(self):
      """Verifies JSON output for a Google Test binary with non-empty output.

      Runs a test program that generates a non-empty JSON output, and
      tests that the JSON output is expected.
      """
      self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY, 1)

  def testEmptyJsonOutput(self):
    """Verifies JSON output for a Google Test binary without actual tests.

    Runs a test program that generates an empty JSON output, and
    tests that the JSON output is expected.
    """
    self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_EMPTY, 0)

  def testTimestampValue(self):
    """Checks whether the timestamp attribute in the JSON output is valid.

    Runs a test program that generates an empty JSON output, and checks if
    the timestamp attribute in the testsuites tag is valid.
    """
    actual = self._GetJsonOutput('gtest_no_test_unittest', [], 0)
    date_time_str = actual['timestamp']
    # datetime.strptime() is only available in Python 2.5+ so we have to
    # parse the expected datetime manually.
    match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    # Bug fix: this used to assert on the function object `re.match` (always
    # truthy), so a malformed timestamp was never reported and the test would
    # instead crash below on `match.group` with match being None.
    self.assertTrue(
        match,
        'JSON datetime string %s has incorrect format' % date_time_str)
    date_time_from_json = datetime.datetime(
        year=int(match.group(1)), month=int(match.group(2)),
        day=int(match.group(3)), hour=int(match.group(4)),
        minute=int(match.group(5)), second=int(match.group(6)))

    time_delta = abs(datetime.datetime.now() - date_time_from_json)
    # timestamp value should be near the current local time
    self.assertTrue(time_delta < datetime.timedelta(seconds=600),
                    'time_delta is %s' % time_delta)

  def testDefaultOutputFile(self):
    """Verifies the default output file name.

    Confirms that Google Test produces an JSON output file with the expected
    default name if no name is explicitly specified.
    """
    output_file = os.path.join(gtest_test_utils.GetTempDir(),
                               GTEST_DEFAULT_OUTPUT_FILE)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_no_test_unittest')
    try:
      os.remove(output_file)
    except OSError:
      # sys.exc_info() instead of 'except ... as e' keeps very old Pythons
      # working; only a missing file is tolerated here.
      e = sys.exc_info()[1]
      if e.errno != errno.ENOENT:
        raise

    p = gtest_test_utils.Subprocess(
        [gtest_prog_path, '%s=json' % GTEST_OUTPUT_FLAG],
        working_dir=gtest_test_utils.GetTempDir())
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)
    self.assertTrue(os.path.isfile(output_file))

  def testSuppressedJsonOutput(self):
    """Verifies that no JSON output is generated.

    Tests that no JSON file is generated if the default JSON listener is
    shut down before RUN_ALL_TESTS is invoked.
    """
    json_path = os.path.join(gtest_test_utils.GetTempDir(),
                             GTEST_PROGRAM_NAME + 'out.json')
    if os.path.isfile(json_path):
      os.remove(json_path)

    command = [GTEST_PROGRAM_PATH,
               '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path),
               '--shut_down_xml']
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      # p.signal is available only if p.terminated_by_signal is True.
      self.assertFalse(
          p.terminated_by_signal,
          '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
    else:
      self.assertTrue(p.exited)
      self.assertEqual(1, p.exit_code,
                       "'%s' exited with code %s, which doesn't match "
                       'the expected exit code %s.'
                       % (command, p.exit_code, 1))

    self.assertTrue(not os.path.isfile(json_path))

  def testFilteredTestJsonOutput(self):
    """Verifies JSON output when a filter is applied.

    Runs a test program that executes only some tests and verifies that
    non-selected tests do not show up in the JSON output.
    """
    self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED, 0,
                         extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])

  def _GetJsonOutput(self, gtest_prog_name, extra_args, expected_exit_code):
    """Returns the JSON output generated by running the program gtest_prog_name.

    Furthermore, the program's exit code must be expected_exit_code.

    Args:
      gtest_prog_name: Google Test binary name.
      extra_args: extra arguments to binary invocation.
      expected_exit_code: program's exit code.
    """
    json_path = os.path.join(gtest_test_utils.GetTempDir(),
                             gtest_prog_name + 'out.json')
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)

    command = (
        [gtest_prog_path, '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path)] +
        extra_args
    )
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      self.fail('%s was killed by signal %d' % (gtest_prog_name, p.signal))
    else:
      self.assertTrue(p.exited)
      self.assertEqual(expected_exit_code, p.exit_code,
                       "'%s' exited with code %s, which doesn't match "
                       'the expected exit code %s.'
                       % (command, p.exit_code, expected_exit_code))
    with open(json_path) as f:
      actual = json.load(f)
    return actual

  def _TestJsonOutput(self, gtest_prog_name, expected,
                      expected_exit_code, extra_args=None):
    """Checks the JSON output generated by the Google Test binary.

    Asserts that the JSON document generated by running the program
    gtest_prog_name matches expected_json, a string containing another
    JSON document.  Furthermore, the program's exit code must be
    expected_exit_code.

    Args:
      gtest_prog_name: Google Test binary name.
      expected: expected output.
      expected_exit_code: program's exit code.
      extra_args: extra arguments to binary invocation.
    """
    actual = self._GetJsonOutput(gtest_prog_name, extra_args or [],
                                 expected_exit_code)
    self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects.groups \
import tables as groups_tables
from openstack_dashboard.dashboards.identity.projects.users \
import tables as users_tables
class OverviewTab(tabs.Tab):
    """Overview of the project."""
    name = _("Overview")
    slug = "overview"
    template_name = 'identity/projects/_detail_overview.html'

    def get_context_data(self, request):
        project = self.tab_group.kwargs['project']
        ctx = {"project": project}
        if api.keystone.VERSIONS.active >= 3:
            # Keystone v3 only: expose extra project attributes configured
            # via PROJECT_TABLE_EXTRA_INFO ({attr_name: display_name}).
            extra_fields = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
            ctx['extras'] = {
                display_name: getattr(project, attr_name, '')
                for attr_name, display_name in extra_fields.items()}
        return ctx
class UsersTab(tabs.TableTab):
    """Display users member of the project. (directly or through a group)."""
    table_classes = (users_tables.UsersTable,)
    name = _("Users")
    slug = "users"
    template_name = "horizon/common/_detail_table.html"
    # Loaded lazily when the tab is selected.
    preload = False

    def _update_user_roles_names_from_roles_id(self, user, users_roles,
                                               roles_list):
        """Add roles names to user.roles, based on users_roles.

        :param user: user to update (its ``roles`` attribute is mutated)
        :param users_roles: list of roles ID
        :param roles_list: list of roles obtained with keystone
        """
        user_roles_names = [role.name for role in roles_list
                            if role.id in users_roles]
        # Union with any names already present so repeated calls accumulate
        # roles without duplicating them.
        current_user_roles_names = set(getattr(user, "roles", []))
        user.roles = list(current_user_roles_names.union(user_roles_names))

    def _get_users_from_project(self, project_id, roles, project_users):
        """Update with users which have role on project NOT through a group.

        :param project_id: ID of the project
        :param roles: list of roles from keystone
        :param project_users: dict (user_id -> user) updated in place
        """
        # For keystone.user_list project_id is not passed as argument because
        # it is ignored when using admin credentials

        # Get all users (to be able to find user name)
        users = api.keystone.user_list(self.request)
        users = {user.id: user for user in users}

        # Get project_users_roles ({user_id: [role_id_1, role_id_2]})
        project_users_roles = api.keystone.get_project_users_roles(
            self.request,
            project=project_id)

        for user_id in project_users_roles:
            if user_id not in project_users:
                # Add user to the project_users.
                # NOTE(review): assumes every user id with a role assignment
                # is present in user_list(); a stale assignment for a deleted
                # user would raise KeyError here — confirm.
                project_users[user_id] = users[user_id]
                project_users[user_id].roles = []
                project_users[user_id].roles_from_groups = []

            # Update the project_user role in order to get:
            # project_users[user_id].roles = [role_name1, role_name2]
            self._update_user_roles_names_from_roles_id(
                user=project_users[user_id],
                users_roles=project_users_roles[user_id],
                roles_list=roles
            )

    def _get_users_from_groups(self, project_id, roles, project_users):
        """Update with users which have role on project through a group.

        :param project_id: ID of the project
        :param roles: list of roles from keystone
        :param project_users: dict (user_id -> user) updated in place
        """
        # For keystone.group_list project_id is not passed as argument because
        # it is ignored when using admin credentials

        # Get all groups (to be able to find group name)
        groups = api.keystone.group_list(self.request)
        group_names = {group.id: group.name for group in groups}

        # Get a dictionary {group_id: [role_id_1, role_id_2]}
        project_groups_roles = api.keystone.get_project_groups_roles(
            self.request,
            project=project_id)

        for group_id in project_groups_roles:
            group_users = api.keystone.user_list(self.request,
                                                 group=group_id)
            group_roles_names = [
                role.name for role in roles
                if role.id in project_groups_roles[group_id]]
            # Pair each role name with the group it came from, e.g.
            # ('member', 'admins'), for display purposes.
            roles_from_group = [(role_name, group_names[group_id])
                                for role_name in group_roles_names]

            for user in group_users:
                if user.id not in project_users:
                    # New user: Add the user to the list
                    project_users[user.id] = user
                    project_users[user.id].roles = []
                    project_users[user.id].roles_from_groups = []

                # Add roles from group
                project_users[user.id].roles_from_groups.extend(
                    roles_from_group)

    def get_userstable_data(self):
        """Get users with roles on the project.

        Roles can be applied directly on the project or through a group.
        Returns the collected users (dict view); partial results may be
        returned when a keystone call fails, after reporting the error.
        """
        project_users = {}
        project = self.tab_group.kwargs['project']
        try:
            # Get all global roles once to avoid multiple requests.
            roles = api.keystone.role_list(self.request)

            # Update project_users with users which have role directly on
            # the project, (NOT through a group)
            self._get_users_from_project(project_id=project.id,
                                         roles=roles,
                                         project_users=project_users)

            # Update project_users with users which have role indirectly on
            # the project, (through a group)
            self._get_users_from_groups(project_id=project.id,
                                        roles=roles,
                                        project_users=project_users)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to display the users of this project.")
                              )
        return project_users.values()
class GroupsTab(tabs.TableTab):
    """Display groups member of the project. """
    table_classes = (groups_tables.GroupsTable,)
    name = _("Groups")
    slug = "groups"
    template_name = "horizon/common/_detail_table.html"
    # Loaded lazily when the tab is selected.
    preload = False

    def get_groupstable_data(self):
        """Return the project's groups, each annotated with its role names.

        Returns an empty list when the keystone lookups fail (the error is
        reported through ``exceptions.handle``).
        """
        groups_in_project = []
        project = self.tab_group.kwargs['project']
        try:
            # Get project_groups_roles: {group_id: [role_id_1, role_id_2]}
            project_groups_roles = api.keystone.get_project_groups_roles(
                self.request,
                project=project.id)

            # Get global roles and groups
            roles = api.keystone.role_list(self.request)
            # For keystone.group_list, we do not give the project_id because it
            # is ignored when called with admin creds.
            groups = api.keystone.group_list(self.request)
            groups = {group.id: group for group in groups}
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to display the groups of this"
                                " project."))
        else:
            # Construct Groups list, adding the role attribute
            for group_id in project_groups_roles:
                # NOTE(review): assumes every group id with a role assignment
                # exists in group_list(); a stale assignment would raise
                # KeyError here — confirm.
                group = groups[group_id]
                group.roles = [role.name for role in roles
                               if role.id in project_groups_roles[group_id]]
                groups_in_project.append(group)

        return groups_in_project
class ProjectDetailTabs(tabs.DetailTabsGroup):
    # Tab group shown on the project detail page: overview, users, groups.
    slug = "project_details"
    tabs = (OverviewTab, UsersTab, GroupsTab,)
| |
"""Define tests for the Brother Printer config flow."""
import json
from brother import SnmpError, UnsupportedModel
from homeassistant import data_entry_flow
from homeassistant.components.brother.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_TYPE
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
# Shared happy-path user input for the config-flow tests below.
CONFIG = {CONF_HOST: "localhost", CONF_TYPE: "laser"}
async def test_show_form(hass):
    """Test that the form is served with no input."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )

    # With no user data the flow must show the initial 'user' form.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == SOURCE_USER
async def test_create_entry_with_hostname(hass):
    """Test that the user step works with printer hostname."""
    # Patch the SNMP fetch so no real printer is contacted.
    with patch(
        "brother.Brother._get_data",
        return_value=json.loads(load_fixture("brother_printer_data.json")),
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
        )

        # Entry title is "<model> <serial>" taken from the fixture data.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "HL-L2340DW 0123456789"
        assert result["data"][CONF_HOST] == CONFIG[CONF_HOST]
        assert result["data"][CONF_TYPE] == CONFIG[CONF_TYPE]
async def test_create_entry_with_ip_address(hass):
    """Test that the user step works with printer IP address."""
    # Same as the hostname test, but CONF_HOST is a raw IPv4 address.
    with patch(
        "brother.Brother._get_data",
        return_value=json.loads(load_fixture("brother_printer_data.json")),
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
            data={CONF_HOST: "127.0.0.1", CONF_TYPE: "laser"},
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "HL-L2340DW 0123456789"
        assert result["data"][CONF_HOST] == "127.0.0.1"
        assert result["data"][CONF_TYPE] == "laser"
async def test_invalid_hostname(hass):
    """Test invalid hostname in user_input."""
    # 'invalid/hostname' fails host validation before any network access.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
        data={CONF_HOST: "invalid/hostname", CONF_TYPE: "laser"},
    )

    assert result["errors"] == {CONF_HOST: "wrong_host"}
async def test_connection_error(hass):
    """Test connection to host error."""
    # A ConnectionError from the library maps to the 'cannot_connect' error.
    with patch("brother.Brother._get_data", side_effect=ConnectionError()):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
        )

        assert result["errors"] == {"base": "cannot_connect"}
async def test_snmp_error(hass):
    """Test SNMP error."""
    # An SnmpError from the library maps to the 'snmp_error' form error.
    with patch("brother.Brother._get_data", side_effect=SnmpError("error")):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
        )

        assert result["errors"] == {"base": "snmp_error"}
async def test_unsupported_model_error(hass):
    """Test unsupported printer model error."""
    # UnsupportedModel aborts the flow instead of showing a form error.
    with patch("brother.Brother._get_data", side_effect=UnsupportedModel("error")):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "unsupported_model"
async def test_device_exists_abort(hass):
    """Test we abort config flow if Brother printer already configured."""
    with patch(
        "brother.Brother._get_data",
        return_value=json.loads(load_fixture("brother_printer_data.json")),
    ):
        # Pre-register an entry with the same unique id (the serial number).
        MockConfigEntry(domain=DOMAIN, unique_id="0123456789", data=CONFIG).add_to_hass(
            hass
        )
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
async def test_zeroconf_no_data(hass):
    """Test we abort if zeroconf provides no data."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "cannot_connect"
async def test_zeroconf_not_brother_printer_error(hass):
    """Test we abort zeroconf flow if printer isn't Brother."""
    with patch(
        "brother.Brother._get_data",
        return_value=json.loads(load_fixture("brother_printer_data.json")),
    ):
        # A discovered name not starting with 'Brother' is rejected.
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data={"hostname": "example.local.", "name": "Another Printer"},
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "not_brother_printer"
async def test_zeroconf_snmp_error(hass):
    """Test we abort zeroconf flow on SNMP error."""
    # Unlike the user flow, an SnmpError during zeroconf aborts the flow.
    with patch("brother.Brother._get_data", side_effect=SnmpError("error")):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data={"hostname": "example.local.", "name": "Brother Printer"},
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_zeroconf_device_exists_abort(hass):
    """Test we abort zeroconf flow if Brother printer already configured."""
    printer_data = json.loads(load_fixture("brother_printer_data.json"))
    discovery_info = {"hostname": "example.local.", "name": "Brother Printer"}
    with patch("brother.Brother._get_data", return_value=printer_data):
        entry = MockConfigEntry(domain=DOMAIN, unique_id="0123456789", data=CONFIG)
        entry.add_to_hass(hass)

        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
async def test_zeroconf_confirm_create_entry(hass):
    """Test zeroconf confirmation and create config entry."""
    printer_data = json.loads(load_fixture("brother_printer_data.json"))
    discovery_info = {"hostname": "example.local.", "name": "Brother Printer"}
    with patch("brother.Brother._get_data", return_value=printer_data):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "zeroconf_confirm"
        placeholders = result["description_placeholders"]
        assert placeholders["model"] == "HL-L2340DW"
        assert placeholders["serial_number"] == "0123456789"

        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_TYPE: "laser"}
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "HL-L2340DW 0123456789"
        assert result["data"][CONF_HOST] == "example.local"
        assert result["data"][CONF_TYPE] == "laser"
| |
from pydev_imports import xmlrpclib
import sys
import traceback
from pydevd_constants import USE_LIB_COPY
try:
if USE_LIB_COPY:
import _pydev_Queue as _queue
else:
import Queue as _queue
except:
import queue as _queue
try:
from pydevd_exec import Exec
except:
from pydevd_exec2 import Exec
try:
if USE_LIB_COPY:
import _pydev_thread as thread
else:
import thread
except:
import _thread as thread
import pydevd_xml
import pydevd_vars
from pydevd_utils import *
#=======================================================================================================================
# Null
#=======================================================================================================================
class Null:
    """
    A do-nothing sink object: every operation silently succeeds and, where a
    result is needed, returns the Null instance itself.

    Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
    """

    def __init__(self, *args, **kwargs):
        return None

    def __call__(self, *args, **kwargs):
        return self

    def __getattr__(self, mname):
        return self

    def __setattr__(self, name, value):
        return self

    def __delattr__(self, name):
        return self

    def __repr__(self):
        return "<Null>"

    def __str__(self):
        return "Null"

    def __len__(self):
        return 0

    def __getitem__(self, *args, **kwargs):
        # Bug fix: the subscript argument was missing from the signature, so
        # null[key] raised TypeError instead of returning the sink object.
        return self

    def __setitem__(self, *args, **kwargs):
        pass

    def write(self, *args, **kwargs):
        pass

    def __nonzero__(self):
        # Python 2 truth value; under Python 3 __len__ == 0 keeps it falsy.
        return 0
#=======================================================================================================================
# BaseStdIn
#=======================================================================================================================
class BaseStdIn:
    """Minimal file-like stdin replacement used while console code runs.

    Reads always yield a bare newline; writes and flushes are ignored so the
    object can safely stand in wherever a stream interface is expected.
    """

    def __init__(self, *args, **kwargs):
        try:
            self.encoding = sys.stdin.encoding
        except:
            # The real stdin may not expose an encoding on every Python
            # version/platform; in that case simply leave it unset.
            pass

    def readline(self, *args, **kwargs):
        # Returning '\n' (instead of '') avoids EOFError from input() while
        # still not blocking the console evaluation.
        return '\n'

    def read(self, *args, **kwargs):
        # In the interactive interpreter a read behaves like a readline.
        return self.readline()

    def isatty(self):
        # Not a real terminal.
        return False

    def write(self, *args, **kwargs):
        # Not writable, but the stream interface expects the method.
        pass

    def flush(self, *args, **kwargs):
        # Nothing buffered; present only for stream-interface completeness.
        pass
#=======================================================================================================================
# StdIn
#=======================================================================================================================
class StdIn(BaseStdIn):
    '''
    Stdin replacement that asks the client (over XML-RPC) for the next input
    line, emulating a non-blocking stdin while the next line arrives.
    '''

    def __init__(self, interpreter, host, client_port):
        BaseStdIn.__init__(self)
        self.interpreter = interpreter
        self.host = host
        self.client_port = client_port

    def readline(self, *args, **kwargs):
        # Call back into the client to obtain the new input line.
        try:
            proxy = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
            requested_input = proxy.RequestInput()
        except:
            # Any communication failure degrades to an empty line.
            return '\n'
        if not requested_input:
            # A readline must return something, otherwise input() raises
            # EOFError on the caller's side.
            return '\n'
        return requested_input
#=======================================================================================================================
# BaseInterpreterInterface
#=======================================================================================================================
class BaseInterpreterInterface:
    """Common behavior for console interpreter front-ends.

    Lines typed by the user are queued on ``exec_queue`` and executed via
    ``doAddExec()`` (supplied by subclasses); statement completeness is
    checked with the interpreter's compile machinery and variables are
    rendered as XML for the IDE.  ``self.host``/``self.client_port`` are
    expected to be set by the subclass before execution starts (TODO
    confirm -- they are not assigned here).
    """

    def __init__(self, mainThread):
        self.mainThread = mainThread
        self.interruptable = False
        self.exec_queue = _queue.Queue(0)
        self.buffer = []
        # Bug fix: this flag is tested in addExec() but was never
        # initialized, so the first failure to patch pydoc.help raised
        # AttributeError instead of printing the intended warning.
        self._input_error_printed = False

    def needMore(self, buffer, line):
        """Return True if `line` appended to `buffer` is an incomplete statement."""
        if not buffer:
            buffer = []
        buffer.append(line)
        source = "\n".join(buffer)
        if hasattr(self.interpreter, 'is_complete'):
            return not self.interpreter.is_complete(source)
        try:
            code = self.interpreter.compile(source, "<input>", "single")
        except (OverflowError, SyntaxError, ValueError):
            # Case 1: invalid source -- nothing more to wait for.
            return False
        if code is None:
            # Case 2: valid but incomplete -- more input required.
            return True
        # Case 3: a complete, compilable statement.
        return False

    def addExec(self, line):
        """Execute `line`, temporarily redirecting stdin to the client.

        Returns a (more, need_input) tuple; need_input is always False here.
        """
        original_in = sys.stdin
        try:
            help = None
            if 'pydoc' in sys.modules:
                pydoc = sys.modules['pydoc']  # Don't import it if it still is not there.
                if hasattr(pydoc, 'help'):
                    # You never know how the API will be changed, so, let's code defensively here.
                    help = pydoc.help
                    if not hasattr(help, 'input'):
                        help = None
        except:
            # Just ignore any error here.
            pass
        more = False
        try:
            sys.stdin = StdIn(self, self.host, self.client_port)
            try:
                if help is not None:
                    # This will enable the help() function to work.
                    try:
                        try:
                            help.input = sys.stdin
                        except AttributeError:
                            help._input = sys.stdin
                    except:
                        help = None
                        if not self._input_error_printed:
                            self._input_error_printed = True
                            sys.stderr.write('\nError when trying to update pydoc.help.input\n')
                            sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
                            traceback.print_exc()
                try:
                    self.startExec()
                    more = self.doAddExec(line)
                    self.finishExec()
                finally:
                    # Always restore pydoc's input, even if execution failed.
                    if help is not None:
                        try:
                            try:
                                help.input = original_in
                            except AttributeError:
                                help._input = original_in
                        except:
                            pass
            finally:
                sys.stdin = original_in
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        # It's always false at this point.
        need_input = False
        return more, need_input

    def doAddExec(self, line):
        '''
        Subclasses should override.

        @return: more (True if more input is needed to complete the statement and False if the statement is complete).
        '''
        raise NotImplementedError()

    def getNamespace(self):
        '''
        Subclasses should override.

        @return: dict with namespace.
        '''
        raise NotImplementedError()

    def getDescription(self, text):
        """Return a best-effort textual description (doc/repr) for `text`."""
        try:
            obj = None
            if '.' not in text:
                try:
                    obj = self.getNamespace()[text]
                except KeyError:
                    return ''
            else:
                # Resolve dotted paths attribute by attribute.
                try:
                    splitted = text.split('.')
                    obj = self.getNamespace()[splitted[0]]
                    for t in splitted[1:]:
                        obj = getattr(obj, t)
                except:
                    return ''
            if obj is not None:
                try:
                    if sys.platform.startswith("java"):
                        # Jython
                        doc = obj.__doc__
                        if doc is not None:
                            return doc
                        import jyimportsTipper
                        is_method, infos = jyimportsTipper.ismethod(obj)
                        ret = ''
                        if is_method:
                            for info in infos:
                                ret += info.getAsDoc()
                            return ret
                    else:
                        # Python and Iron Python
                        import inspect  # @UnresolvedImport
                        doc = inspect.getdoc(obj)
                        if doc is not None:
                            return doc
                except:
                    pass
            try:
                # If no attempt succeeded, try to return repr()...
                return repr(obj)
            except:
                try:
                    # Otherwise the class.
                    return str(obj.__class__)
                except:
                    # If all fails, go to an empty string.
                    return ''
        except:
            traceback.print_exc()
            return ''

    def execLine(self, line):
        """Queue `line` for execution; return whether more input is needed."""
        try:
            self.exec_queue.put(line)
            return self.needMore(self.buffer, line)
        except:
            traceback.print_exc()
            return False

    def interrupt(self):
        """Try to interrupt the main thread; return True on success."""
        try:
            if self.interruptable:
                if hasattr(thread, 'interrupt_main'):  # Jython doesn't have it
                    thread.interrupt_main()
                else:
                    self.mainThread._thread.interrupt()  # Jython
            return True
        except:
            traceback.print_exc()
            return False

    def close(self):
        sys.exit(0)

    def startExec(self):
        self.interruptable = True

    def get_server(self):
        """Return an XML-RPC proxy to the client, or None if no host is set."""
        if self.host is not None:
            return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
        else:
            return None

    def finishExec(self):
        self.interruptable = False
        server = self.get_server()
        if server is not None:
            return server.NotifyFinished()
        else:
            return True

    def getFrame(self):
        """Return the whole namespace rendered as an XML document."""
        xml = "<xml>"
        xml += pydevd_xml.frameVarsToXML(self.getNamespace())
        xml += "</xml>"
        return xml

    def getVariable(self, attributes):
        """Resolve `attributes` in the namespace and render children as XML."""
        xml = "<xml>"
        valDict = pydevd_vars.resolveVar(self.getNamespace(), attributes)
        if valDict is None:
            valDict = {}
        keys = valDict.keys()
        for k in keys:
            xml += pydevd_vars.varToXML(valDict[k], to_string(k))
        xml += "</xml>"
        return xml

    def changeVariable(self, attr, value):
        # NOTE(review): `value` is executed as Python source in the user
        # namespace -- inherent to a debugger console, but worth noting.
        Exec('%s=%s' % (attr, value), self.getNamespace(), self.getNamespace())
| |
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from mistralclient.api.v2 import workflows
from mistralclient.commands.v2 import base as cmd_base
from mistralclient.commands.v2 import workflows as workflow_cmd
from mistralclient.tests.unit import base
WORKFLOW_DICT = {
'id': '1-2-3-4',
'name': 'a',
'namespace': '',
'project_id': '12345',
'tags': ['a', 'b'],
'input': 'param',
'created_at': '1',
'updated_at': '1'
}
WF_DEF = """
version: '2.0'
flow:
tasks:
task1:
action: nova.servers_get server="1"
"""
WF_WITH_DEF_DICT = WORKFLOW_DICT.copy()
WF_WITH_DEF_DICT.update({'definition': WF_DEF})
WORKFLOW = workflows.Workflow(mock, WORKFLOW_DICT)
WORKFLOW_WITH_DEF = workflows.Workflow(mock, WF_WITH_DEF_DICT)
class TestCLIWorkflowsV2(base.BaseCommandTest):
    """Unit tests for the v2 workflow CLI commands."""

    # Row the CLI formatter produces for WORKFLOW.
    _WF_ROW = ('1-2-3-4', 'a', '', '12345', 'a, b', 'param', '1', '1')

    @mock.patch('argparse.open', create=True)
    def test_create(self, mock_open):
        self.client.workflows.create.return_value = [WORKFLOW]

        result = self.call(workflow_cmd.Create, app_args=['1.txt'])

        self.assertEqual([self._WF_ROW], result[1])

    @mock.patch('argparse.open', create=True)
    def test_create_public(self, mock_open):
        self.client.workflows.create.return_value = [WORKFLOW]

        result = self.call(workflow_cmd.Create,
                           app_args=['1.txt', '--public'])

        self.assertEqual([self._WF_ROW], result[1])
        self.assertEqual(
            'public',
            self.client.workflows.create.call_args[1]['scope'])

    @mock.patch('argparse.open', create=True)
    def test_create_long_input(self, mock_open):
        long_input_dict = WORKFLOW_DICT.copy()
        long_input = ', '.join('var%s' % i for i in six.moves.xrange(10))
        long_input_dict['input'] = long_input
        wf_long_input = workflows.Workflow(mock, long_input_dict)
        self.client.workflows.create.return_value = [wf_long_input]

        result = self.call(workflow_cmd.Create, app_args=['1.txt'])

        # Long input columns are truncated by the formatter.
        expected_row = self._WF_ROW[:5] + (cmd_base.cut(long_input), '1', '1')
        self.assertEqual([expected_row], result[1])

    @mock.patch('argparse.open', create=True)
    def test_update(self, mock_open):
        self.client.workflows.update.return_value = [WORKFLOW]

        result = self.call(workflow_cmd.Update, app_args=['1.txt'])

        self.assertEqual([self._WF_ROW], result[1])

    @mock.patch('argparse.open', create=True)
    def test_update_public(self, mock_open):
        self.client.workflows.update.return_value = [WORKFLOW]

        result = self.call(workflow_cmd.Update,
                           app_args=['1.txt', '--public'])

        self.assertEqual([self._WF_ROW], result[1])
        self.assertEqual(
            'public',
            self.client.workflows.update.call_args[1]['scope'])

    @mock.patch('argparse.open', create=True)
    def test_update_with_id(self, mock_open):
        self.client.workflows.update.return_value = WORKFLOW

        result = self.call(workflow_cmd.Update,
                           app_args=['1.txt', '--id', '1-2-3-4'])

        self.assertEqual([self._WF_ROW], result[1])

    def test_list(self):
        self.client.workflows.list.return_value = [WORKFLOW]

        result = self.call(workflow_cmd.List)

        self.assertEqual([self._WF_ROW], result[1])

    def test_get(self):
        self.client.workflows.get.return_value = WORKFLOW

        result = self.call(workflow_cmd.Get, app_args=['name'])

        self.assertEqual(self._WF_ROW, result[1])

    def test_delete(self):
        self.call(workflow_cmd.Delete, app_args=['name'])

        self.client.workflows.delete.assert_called_once_with('name', None)

    def test_delete_with_multi_names(self):
        self.call(workflow_cmd.Delete, app_args=['name1', 'name2'])

        self.assertEqual(2, self.client.workflows.delete.call_count)
        self.assertEqual(
            [mock.call('name1', None), mock.call('name2', None)],
            self.client.workflows.delete.call_args_list)

    def test_get_definition(self):
        self.client.workflows.get.return_value = WORKFLOW_WITH_DEF

        self.call(workflow_cmd.GetDefinition, app_args=['name'])

        self.app.stdout.write.assert_called_with(WF_DEF)

    @mock.patch('argparse.open', create=True)
    def test_validate(self, mock_open):
        self.client.workflows.validate.return_value = {'valid': True}

        result = self.call(workflow_cmd.Validate, app_args=['wf.yaml'])

        self.assertEqual((True, None), result[1])

    @mock.patch('argparse.open', create=True)
    def test_validate_failed(self, mock_open):
        self.client.workflows.validate.return_value = {
            'valid': False,
            'error': 'Invalid DSL...'
        }

        result = self.call(workflow_cmd.Validate, app_args=['wf.yaml'])

        self.assertEqual((False, 'Invalid DSL...'), result[1])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Ken Pepple
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for instance types code
"""
import time
from nova.compute import instance_types
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common.db.sqlalchemy import session as sql_session
from nova.openstack.common import log as logging
from nova import test
LOG = logging.getLogger(__name__)
class InstanceTypeTestCase(test.TestCase):
    """Test cases for instance type code."""
    def _generate_name(self):
        """Return a flavor name not present in the DB."""
        nonexistent_flavor = str(int(time.time()))
        flavors = instance_types.get_all_types()
        # NOTE: while/else -- the else branch runs once the loop condition
        # becomes false, i.e. when the candidate name is unique.
        while nonexistent_flavor in flavors:
            nonexistent_flavor += "z"
        else:
            return nonexistent_flavor
    def _generate_flavorid(self):
        """Return a flavorid not present in the DB."""
        nonexistent_flavor = 2700
        # NOTE: iteritems()/keys()[0] below rely on Python 2 dict semantics.
        flavor_ids = [value["id"] for key, value in
                      instance_types.get_all_types().iteritems()]
        while nonexistent_flavor in flavor_ids:
            nonexistent_flavor += 1
        else:
            return nonexistent_flavor
    def _existing_flavor(self):
        """Return the name of the first instance type in the DB."""
        return instance_types.get_all_types().keys()[0]
    def test_instance_type_create(self):
        # Ensure instance types can be created.
        name = 'Instance create test'
        flavor_id = '512'
        original_list = instance_types.get_all_types()
        # create new type and make sure values stick
        inst_type = instance_types.create(name, 256, 1, 120,
                                          flavorid=flavor_id)
        self.assertEqual(inst_type['flavorid'], flavor_id)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 0)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 1.0)
        # make sure new type shows up in list
        new_list = instance_types.get_all_types()
        self.assertNotEqual(len(original_list), len(new_list),
                            'instance type was not created')
    def test_instance_type_create_then_delete(self):
        # Ensure instance types can be created.
        name = 'Small Flavor'
        flavorid = 'flavor1'
        original_list = instance_types.get_all_types()
        # create new type and make sure values stick
        inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)
        inst_type_id = inst_type['id']
        self.assertEqual(inst_type['flavorid'], flavorid)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 100)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 1.0)
        # make sure new type shows up in list
        new_list = instance_types.get_all_types()
        self.assertNotEqual(len(original_list), len(new_list),
                            'instance type was not created')
        instance_types.destroy(name)
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type, inst_type_id)
        # deleted instance should not be in list anymore
        new_list = instance_types.get_all_types()
        self.assertEqual(original_list, new_list)
    def test_instance_type_create_without_flavorid(self):
        # A flavorid should be generated automatically when omitted.
        name = 'Small Flavor'
        inst_type = instance_types.create(name, 256, 1, 120, 100)
        self.assertNotEqual(inst_type['flavorid'], None)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 100)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 1.0)
    def test_instance_type_create_with_empty_flavorid(self):
        # Ensure that auto-generated uuid is assigned.
        name = 'Empty String ID Flavor'
        flavorid = ''
        inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)
        # 36 characters == canonical string form of a uuid4.
        self.assertEqual(len(inst_type['flavorid']), 36)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 100)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 1.0)
    def test_instance_type_create_with_custom_rxtx_factor(self):
        name = 'Custom RXTX Factor'
        inst_type = instance_types.create(name, 256, 1, 120, 100,
                                          rxtx_factor=9.9)
        self.assertNotEqual(inst_type['flavorid'], None)
        self.assertEqual(inst_type['name'], name)
        self.assertEqual(inst_type['memory_mb'], 256)
        self.assertEqual(inst_type['vcpus'], 1)
        self.assertEqual(inst_type['root_gb'], 120)
        self.assertEqual(inst_type['ephemeral_gb'], 100)
        self.assertEqual(inst_type['swap'], 0)
        self.assertEqual(inst_type['rxtx_factor'], 9.9)
    def test_instance_type_create_with_special_characters(self):
        # Ensure instance types raises InvalidInput for invalid characters.
        name = "foo.bar!@#$%^-test_name"
        flavorid = "flavor1"
        self.assertRaises(exception.InvalidInput, instance_types.create,
                          name, 256, 1, 120, 100, flavorid)
    def test_instance_type_create_with_long_flavor_name(self):
        # Flavor name with 255 characters or less is valid.
        name = 'a' * 255
        inst_type = instance_types.create(name, 64, 1, 120, flavorid=11)
        self.assertEqual(inst_type['name'], name)
        # Flavor name which is more than 255 characters will cause error.
        name = 'a' * 256
        self.assertRaises(exception.InvalidInput, instance_types.create,
                          name, 64, 1, 120, flavorid=11)
    def test_add_instance_type_access(self):
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
                                         flavorid=flavor_id)
        access_ref = instance_types.add_instance_type_access(flavor_id,
                                                             project_id,
                                                             ctxt=ctxt)
        self.assertEqual(access_ref["project_id"], project_id)
        self.assertEqual(access_ref["instance_type_id"], type_ref["id"])
    def test_add_instance_type_access_already_exists(self):
        # Granting the same access twice must raise FlavorAccessExists.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
                                         flavorid=flavor_id)
        access_ref = instance_types.add_instance_type_access(flavor_id,
                                                             project_id,
                                                             ctxt=ctxt)
        self.assertRaises(exception.FlavorAccessExists,
                          instance_types.add_instance_type_access,
                          flavor_id, project_id, ctxt)
    def test_add_instance_type_access_invalid_flavor(self):
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'no_such_flavor'
        self.assertRaises(exception.FlavorNotFound,
                          instance_types.add_instance_type_access,
                          flavor_id, project_id, ctxt)
    def test_remove_instance_type_access(self):
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        it = instance_types
        type_ref = it.create('some flavor', 256, 1, 120, 100,
                             flavorid=flavor_id)
        access_ref = it.add_instance_type_access(flavor_id, project_id, ctxt)
        it.remove_instance_type_access(flavor_id, project_id, ctxt)
        projects = it.get_instance_type_access_by_flavor_id(flavor_id, ctxt)
        self.assertEqual([], projects)
    def test_remove_instance_type_access_doesnt_exists(self):
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
                                         flavorid=flavor_id)
        self.assertRaises(exception.FlavorAccessNotFound,
                          instance_types.remove_instance_type_access,
                          flavor_id, project_id, ctxt=ctxt)
    def test_get_all_instance_types(self):
        # Ensures that all instance types can be retrieved.
        session = sql_session.get_session()
        total_instance_types = session.query(models.InstanceTypes).count()
        inst_types = instance_types.get_all_types()
        self.assertEqual(total_instance_types, len(inst_types))
    def test_invalid_create_args_should_fail(self):
        # Ensures that instance type creation fails with invalid args.
        # Each entry is a (positional args, keyword args) pair for create().
        invalid_sigs = [
            (('Zero memory', 0, 1, 10, 20, 'flavor1'), {}),
            (('Negative memory', -256, 1, 10, 20, 'flavor1'), {}),
            (('Non-integer memory', 'asdf', 1, 10, 20, 'flavor1'), {}),
            (('Zero vcpus', 256, 0, 10, 20, 'flavor1'), {}),
            (('Negative vcpus', 256, -1, 10, 20, 'flavor1'), {}),
            (('Non-integer vcpus', 256, 'a', 10, 20, 'flavor1'), {}),
            (('Negative storage', 256, 1, -1, 20, 'flavor1'), {}),
            (('Non-integer storage', 256, 1, 'a', 20, 'flavor1'), {}),
            (('Negative swap', 256, 1, 10, 20, 'flavor1'), {'swap': -1}),
            (('Non-integer swap', 256, 1, 10, 20, 'flavor1'), {'swap': -1}),
            (('Negative rxtx_factor', 256, 1, 10, 20, 'f1'),
             {'rxtx_factor': -1}),
            (('Non-integer rxtx_factor', 256, 1, 10, 20, 'f1'),
             {'rxtx_factor': "d"}),
        ]
        for (args, kwargs) in invalid_sigs:
            self.assertRaises(exception.InvalidInput,
                              instance_types.create, *args, **kwargs)
    def test_non_existent_inst_type_shouldnt_delete(self):
        # Ensures that instance type creation fails with invalid args.
        self.assertRaises(exception.InstanceTypeNotFoundByName,
                          instance_types.destroy,
                          'unknown_flavor')
    def test_duplicate_names_fail(self):
        # Ensures that name duplicates raise InstanceTypeCreateFailed.
        name = 'some_name'
        instance_types.create(name, 256, 1, 120, 200, 'flavor1')
        self.assertRaises(exception.InstanceTypeExists,
                          instance_types.create,
                          name, 256, 1, 120, 200, 'flavor2')
    def test_duplicate_flavorids_fail(self):
        # Ensures that flavorid duplicates raise InstanceTypeCreateFailed.
        flavorid = 'flavor1'
        instance_types.create('name one', 256, 1, 120, 200, flavorid)
        self.assertRaises(exception.InstanceTypeIdExists,
                          instance_types.create,
                          'name two', 256, 1, 120, 200, flavorid)
    def test_will_not_destroy_with_no_name(self):
        # Ensure destroy said path of no name raises error.
        self.assertRaises(exception.InstanceTypeNotFoundByName,
                          instance_types.destroy, None)
    def test_will_not_get_bad_default_instance_type(self):
        # ensures error raised on bad default instance type.
        self.flags(default_instance_type='unknown_flavor')
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_default_instance_type)
    def test_will_get_instance_type_by_id(self):
        default_instance_type = instance_types.get_default_instance_type()
        instance_type_id = default_instance_type['id']
        fetched = instance_types.get_instance_type(instance_type_id)
        self.assertEqual(default_instance_type, fetched)
    def test_will_not_get_instance_type_by_unknown_id(self):
        # Ensure get by name returns default flavor with no name.
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type, 10000)
    def test_will_not_get_instance_type_with_bad_id(self):
        # Ensure get by name returns default flavor with bad name.
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type, 'asdf')
    def test_instance_type_get_by_None_name_returns_default(self):
        # Ensure get by name returns default flavor with no name.
        default = instance_types.get_default_instance_type()
        actual = instance_types.get_instance_type_by_name(None)
        self.assertEqual(default, actual)
    def test_will_not_get_instance_type_with_bad_name(self):
        # Ensure get by name returns default flavor with bad name.
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type_by_name, 10000)
    def test_will_not_get_instance_by_unknown_flavor_id(self):
        # Ensure get by flavor raises error with wrong flavorid.
        self.assertRaises(exception.FlavorNotFound,
                          instance_types.get_instance_type_by_flavor_id,
                          'unknown_flavor')
    def test_will_get_instance_by_flavor_id(self):
        default_instance_type = instance_types.get_default_instance_type()
        flavorid = default_instance_type['flavorid']
        fetched = instance_types.get_instance_type_by_flavor_id(flavorid)
        self.assertEqual(default_instance_type, fetched)
    def test_can_read_deleted_types_using_flavor_id(self):
        # Ensure deleted instance types can be read when querying flavor_id.
        inst_type_name = "test"
        inst_type_flavor_id = "test1"
        inst_type = instance_types.create(inst_type_name, 256, 1, 120, 100,
                                          inst_type_flavor_id)
        self.assertEqual(inst_type_name, inst_type["name"])
        # NOTE(jk0): The deleted flavor will show up here because the context
        # in get_instance_type_by_flavor_id() is set to use read_deleted by
        # default.
        instance_types.destroy(inst_type["name"])
        deleted_inst_type = instance_types.get_instance_type_by_flavor_id(
            inst_type_flavor_id)
        self.assertEqual(inst_type_name, deleted_inst_type["name"])
    def test_read_deleted_false_converting_flavorid(self):
        """
        Ensure deleted instance types are not returned when not needed (for
        example when creating a server and attempting to translate from
        flavorid to instance_type_id.
        """
        instance_types.create("instance_type1", 256, 1, 120, 100, "test1")
        instance_types.destroy("instance_type1")
        instance_types.create("instance_type1_redo", 256, 1, 120, 100, "test1")
        instance_type = instance_types.get_instance_type_by_flavor_id(
            "test1", read_deleted="no")
        self.assertEqual("instance_type1_redo", instance_type["name"])
    def test_will_list_deleted_type_for_active_instance(self):
        # Ensure deleted instance types with active instances can be read.
        ctxt = context.get_admin_context()
        inst_type = instance_types.create("test", 256, 1, 120, 100, "test1")
        instance_params = {"instance_type_id": inst_type["id"]}
        instance = db.instance_create(ctxt, instance_params)
        # NOTE(jk0): Delete the instance type and reload the instance from the
        # DB. The instance_type object will still be available to the active
        # instance, otherwise being None.
        instance_types.destroy(inst_type["name"])
        instance = db.instance_get_by_uuid(ctxt, instance["uuid"])
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type, inst_type["name"])
        self.assertTrue(instance["instance_type"])
class InstanceTypeToolsTest(test.TestCase):
    """Tests for the instance-type <-> system-metadata helper functions."""

    def _dict_to_metadata(self, data):
        # Convert a plain dict into the list-of-row form used by the DB layer.
        metadata = []
        for key, value in data.items():
            metadata.append({'key': key, 'value': value})
        return metadata

    def _test_extract_instance_type(self, prefix):
        inst_type = instance_types.get_default_instance_type()
        metadata = {}
        instance_types.save_instance_type_info(metadata, inst_type, prefix)
        instance = {'system_metadata': self._dict_to_metadata(metadata)}

        extracted = instance_types.extract_instance_type(instance, prefix)

        # Only the properties tracked in system metadata round-trip; trim the
        # original down to those before comparing.
        props = instance_types.system_metadata_instance_type_props.keys()
        for key in list(inst_type.keys()):
            if key not in props:
                del inst_type[key]
        self.assertEqual(inst_type, extracted)

    def test_extract_instance_type(self):
        self._test_extract_instance_type('')

    def test_extract_instance_type_prefix(self):
        self._test_extract_instance_type('foo_')

    def test_save_instance_type_info(self):
        inst_type = instance_types.get_default_instance_type()
        expected = {}
        expected_prefixed = {}
        for key in instance_types.system_metadata_instance_type_props.keys():
            expected['instance_type_%s' % key] = inst_type[key]
            expected_prefixed['fooinstance_type_%s' % key] = inst_type[key]

        metadata = {}
        instance_types.save_instance_type_info(metadata, inst_type)
        self.assertEqual(expected, metadata)

        metadata = {}
        instance_types.save_instance_type_info(metadata, inst_type, 'foo')
        self.assertEqual(expected_prefixed, metadata)

    def test_delete_instance_type_info(self):
        inst_type = instance_types.get_default_instance_type()
        metadata = {}
        instance_types.save_instance_type_info(metadata, inst_type)
        instance_types.save_instance_type_info(metadata, inst_type, '_')
        instance_types.delete_instance_type_info(metadata, '', '_')
        self.assertEqual(metadata, {})
class InstanceTypeFilteringTest(test.TestCase):
    """Test cases for the filter option available for instance_type_get_all."""

    def setUp(self):
        super(InstanceTypeFilteringTest, self).setUp()
        self.context = context.get_admin_context()

    def assertFilterResults(self, filters, expected):
        # Apply the filters and compare the resulting flavor names.
        inst_types = db.instance_type_get_all(self.context, filters=filters)
        names = [inst_type['name'] for inst_type in inst_types]
        self.assertEqual(names, expected)

    def test_no_filters(self):
        expected = ['m1.large', 'm1.medium', 'm1.small', 'm1.tiny',
                    'm1.xlarge']
        self.assertFilterResults(None, expected)

    def test_min_memory_mb_filter(self):
        # Exclude tiny instance which is 512 MB.
        self.assertFilterResults(
            dict(min_memory_mb=513),
            ['m1.large', 'm1.medium', 'm1.small', 'm1.xlarge'])

    def test_min_root_gb_filter(self):
        # Exclude everything but large and xlarge which have >= 80 GB.
        self.assertFilterResults(
            dict(min_root_gb=80),
            ['m1.large', 'm1.xlarge'])

    def test_min_memory_mb_AND_root_gb_filter(self):
        # Combined filters leave only xlarge.
        self.assertFilterResults(
            dict(min_memory_mb=16384, min_root_gb=80),
            ['m1.xlarge'])
| |
"""A set of helper functions to work with the astropy module."""
import collections
import functools
import random
import string
import subprocess
import tempfile
from itertools import cycle, islice, chain, combinations, zip_longest

import numpy as np
import scipy
# `import scipy` alone does not guarantee the submodules are loaded; this
# module uses scipy.stats and scipy.special explicitly below.
import scipy.special
import scipy.stats
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table, join
#from astroquery.vizier import Vizier
###############################################################################
# Astropy Utilities #
###############################################################################
def change_column_dtype(table, colname, newdtype):
    '''Changes the dtype of a column in a table.

    Use this function to change the dtype of a particular column in a table.
    The column keeps its position in the table.
    '''
    position = table.colnames.index(colname)
    olddata = table[colname]
    del table[colname]
    table.add_column(np.asanyarray(olddata, dtype=newdtype), index=position)
def astropy_table_index(table, column, value):
    '''Returns the row index of the table which has the value in column.

    There are often times when you want to know the index of the row
    where a certain column has a value. This function will return a
    list of row indices that match the value in the column.'''
    # Delegate to the multi-value variant with a one-element list.
    return astropy_table_indices(table, column, [value])
def astropy_table_indices(table, column, values):
    '''Returns the row indices of the table which have the values in column.

    If you need to get the indices of values located in the column of a table,
    this function will determine that for you.
    '''
    selected = mark_selections_in_columns(table[column], values)
    return np.where(selected)
def mark_selections_in_columns(col, values):
    '''Return a boolean index indicating which entries of col are in values.

    Parameters
    ----------
    col : sequence
        Column (or array) whose entries are tested for membership.
    values : sequence
        Values to look for in col. May be a masked array; masked entries
        are ignored.

    Returns
    -------
    numpy.ndarray of bool
        Same length as col; True where the entry occurs in values.
    '''
    if len(col) > len(values) ** 2:
        # Few values, long column: vectorized comparisons are cheaper.
        return multi_logical_or(*[col == v for v in values])
    # Many values: build a set for O(1) membership tests.
    try:
        valset = set(values)
    except TypeError:
        # Masked arrays are unhashable; drop masked entries first.
        unmasked_values = values[values.mask == False]
        valset = set(unmasked_values)
    index = []
    for entry in col:
        try:
            index.append(entry in valset)
        except TypeError:
            # Unhashable entries cannot be members of the set.
            index.append(False)
    # np.bool was removed from NumPy (>= 1.24); the builtin bool is the
    # boolean dtype.
    return np.array(index, dtype=bool)
def multi_logical_or(*arrs):
    '''Performs a logical or for an arbitrary number of boolean arrays.'''
    result = False
    for arr in arrs:
        result = np.logical_or(result, arr)
    return result
def multi_logical_and(*arrs):
    '''Performs a logical and for an arbitrary number of boolean arrays.

    (The docstring previously said "or"; the implementation has always
    been an AND fold.)
    '''
    return functools.reduce(np.logical_and, arrs, True)
def astropy_table_row(table, column, value):
    '''Returns the row(s) of the table which have the value in column.

    If you want to know the row in an astropy table where a value in a
    column corresponds to a given value, this function will return that
    row. If there are multiple rows which match the value in the
    column, you will get all of them. If no rows match, an empty
    selection is returned. (The old docstring promised a ValueError in
    that case, but none was ever raised.)'''
    return table[astropy_table_index(table, column, value)]
def extract_subtable_from_column(table, column, selections):
    '''Returns a table which only contains values in selections.

    This function will create a Table whose values in column are only
    those found in selections.
    '''
    keep = astropy_table_indices(table, column, selections)
    return table[keep]
def filter_column_from_subtable(table, column, selections):
    '''Returns a table where none of the values in column are selections.

    This function will create a Table whose values are those in column which
    are not found in selections.
    '''
    selected = astropy_table_indices(table, column, selections)
    remaining = get_complement_indices(selected, len(table))
    return table[remaining]
def join_by_id(table1, table2, columnid1, columnid2, join_type="inner",
               conflict_suffixes=("_A", "_B"), idproc=None,
               additional_keys=None):
    '''Joins two tables based on columns with different names.

    Table1 and table2 are the tables to be joined together. The column names
    that should be joined are the two columnids. Columnid1 will be the column
    name for the returned table. In case of conflicts, the conflict suffixes
    will be appended to the keys with conflicts. To merge conflicts instead
    of keeping them separate, add the column name to additional_keys.

    If the entries in the columns to be merged should be processed a certain
    way, the function that does the processing should be given in idproc. For
    no processing, "None" should be passed instead.
    '''
    # additional_keys previously defaulted to a shared mutable []; a None
    # sentinel is backward compatible and safe.
    if additional_keys is None:
        additional_keys = []
    if idproc is not None:
        # Keep the raw data inside the tables (so it survives the join)
        # under throwaway names, and join on processed copies.
        origcol1 = table1[columnid1]
        origcol2 = table2[columnid2]
        randomcol1 = generate_random_string(10)
        randomcol2 = generate_random_string(10)
        table1.rename_column(columnid1, randomcol1)
        table2.rename_column(columnid2, randomcol2)
        table1[columnid1] = idproc(origcol1)
        table2[columnid2] = idproc(origcol2)
    # If columnid1 == columnid2 we can join directly; otherwise copy
    # columnid2 into columnid1 on table2. A pre-existing columnid1 there
    # would be silently clobbered, so raise instead.
    if columnid1 != columnid2:
        if columnid1 in table2.colnames:
            raise ValueError(
                "Column {0} already exists in second table.".format(columnid1))
        table2[columnid1] = table2[columnid2]
    newtable = None
    try:
        newtable = join(
            table1, table2, keys=[columnid1]+additional_keys,
            join_type=join_type, table_names=list(conflict_suffixes),
            uniq_col_name="{col_name}{table_name}")
    finally:
        # Clean up the input tables even when join() raised. newtable is
        # only touched on success; referencing it unconditionally used to
        # raise NameError here and mask the join's own exception.
        if columnid1 != columnid2:
            del table2[columnid1]
        if idproc is not None:
            del table1[columnid1]
            del table2[columnid2]
            if newtable is not None:
                del newtable[randomcol1]
                del newtable[randomcol2]
            table1.rename_column(randomcol1, columnid1)
            table2.rename_column(randomcol2, columnid2)
    return newtable
def join_by_ra_dec(
        table1, table2, ra1="RA", dec1="DEC", ra2="RA", dec2="DEC",
        ra1_unit=u.degree, dec1_unit=u.degree, ra2_unit=u.degree,
        dec2_unit=u.degree, match_threshold=5*u.arcsec, join_type="inner",
        conflict_suffixes=("_A", "_B")):
    '''Join two tables by RA and DEC.

    This function will essentially perform a join between tables using
    coordinates. The column names for the coordinates should be given in ra1,
    ra2, dec1, dec2. In case of conflicts, the conflict_suffixes will be used
    for columns in table1 and table2, respectively.
    '''
    def _with_unit(column, unit):
        # Convert the column to `unit`, or attach the unit when the
        # column carries none (which raises UnitConversionError).
        try:
            return column.to(unit)
        except u.UnitConversionError:
            return column * unit

    coords1 = SkyCoord(ra=_with_unit(table1[ra1], ra1_unit),
                       dec=_with_unit(table1[dec1], dec1_unit))
    coords2 = SkyCoord(ra=_with_unit(table2[ra2], ra2_unit),
                       dec=_with_unit(table2[dec2], dec2_unit))
    # Cross-match: nearest neighbor in coords2 for every entry of coords1.
    idx, d2d, d3d = coords1.match_to_catalog_sky(coords2)
    # Only matches within the threshold count.
    matches = d2d < match_threshold
    matched_tbl1 = table1[matches]
    # A throwaway column maps matched rows of table1 onto rows of table2 so
    # that an ordinary key join can be used.
    match_column = generate_random_string(10)
    newtable = None
    try:
        table2[match_column] = np.arange(len(table2))
        matched_tbl1[match_column] = table2[idx[matches]][match_column]
        newtable = join(
            matched_tbl1, table2, keys=match_column,
            join_type=join_type, table_names=list(conflict_suffixes),
            uniq_col_name="{col_name}{table_name}")
    finally:
        # Guarded cleanup: newtable (and possibly the helper column) does
        # not exist when join() raised; unconditional deletion used to
        # raise NameError/KeyError here and mask the original exception.
        if match_column in table2.colnames:
            del table2[match_column]
        if newtable is not None:
            del newtable[match_column]
    # Inherit table1's coordinate naming: drop table2's coordinates.
    try:
        del newtable[ra2]
    except KeyError:
        # Occurs when ra1 == ra2: the join suffixed both copies.
        assert ra1 == ra2
        newtable.rename_column(ra1 + conflict_suffixes[0], ra1)
        del newtable[ra2 + conflict_suffixes[1]]
    try:
        del newtable[dec2]
    except KeyError:
        assert dec1 == dec2
        newtable.rename_column(dec1 + conflict_suffixes[0], dec1)
        del newtable[dec2 + conflict_suffixes[1]]
    return newtable
def generate_random_string(length):
    '''Generate a random string with the given length.'''
    letters = string.ascii_letters
    return "".join(random.choice(letters) for _ in range(length))
def get_complement_indices(initindices, tablelength):
    '''Returns the indices corresponding to rows NOT in initindices.

    Given row indices into a table of length tablelength, build the
    complementary index set (all rows not listed).
    '''
    # np.bool was removed from NumPy (>= 1.24); the builtin bool is the
    # boolean dtype.
    compmask = np.ones(tablelength, bool)
    compmask[initindices] = False
    return np.where(compmask)
def get_complement_table(partialtable, totaltable, compcolumn):
    '''Returns a subtable of totaltable without the rows in partialtable.

    This is kinda like an operation to create a table which when stacked with
    partialtable and sorted by compcolumn, will create totaltable.
    '''
    used = astropy_table_indices(totaltable, compcolumn,
                                 partialtable[compcolumn])
    remaining = get_complement_indices(used, len(totaltable))
    return totaltable[remaining]
def split_table_by_value(table, column, splitvalue):
    '''Bifurcates a table in two.

    Splits table on the values in column and returns a 2-tuple of tables.
    Values less than splitvalue land in the first element; values greater
    than or equal to splitvalue land in the second.
    '''
    below = table[np.where(table[column] < splitvalue)]
    at_or_above = table[np.where(table[column] >= splitvalue)]
    return below, at_or_above
def first_row_in_group(tablegroup):
    '''Iterates through groups and selects the first row from each group.

    This is good for tables where there are multiple entries for each
    grouping, but the first row in the table is the preferable one. Such a
    thing occurs with the Catalog of Active Binary Systems (III).
    '''
    first_rows = [group[0] for group in tablegroup.groups]
    return Table(rows=first_rows, names=tablegroup.colnames)
def byte_to_unicode_cast(bytearr):
    '''Cast a numpy byte array to unicode.

    A change in Astropy 3.0 led to some columns from FITS files being stored
    as numpy byte arrays instead of strings. This is an explicit cast of such
    a column to a string array.
    https://github.com/astropy/astropy/pull/6821
    The text in the bug report seems to indicate that conversion from bytes
    objects to unicode should be done transparently, but this doesn't seem to
    be the case.'''
    # np.unicode_ was removed in NumPy 2.0; np.str_ selects the same
    # fixed-width unicode dtype.
    strcol = np.asarray(bytearr, np.str_)
    return strcol
def set_numeric_fill_values(table, fill_value):
    '''Set fill_value on every numeric column of table.

    This is a convenience function to be able to conveniently get a filled
    table without having to manually fill a ton of columns.'''
    numeric_cols = (name for name in table.colnames
                    if np.issubdtype(table[name].dtype, np.number))
    for name in numeric_cols:
        table[name].fill_value = fill_value
def mask_numeric_fill_values(table, fill_value):
    '''Mask entries equal to fill_value in every numeric column of table.'''
    numeric_cols = (name for name in table.colnames
                    if np.issubdtype(table[name].dtype, np.number))
    for name in numeric_cols:
        table[name] = np.ma.masked_values(table[name], fill_value)
###############################################################################
# Astroquery Catalog #
###############################################################################
def Vizier_cached_table(tblpath, tablecode):
    '''Read a table from disk, querying Vizier if needed.

    For large tables which can be automatically queried from Vizier, but take
    a long time to download, this function will download the queried table
    into tblpath, and then read from it for all following times.
    The tablecode is the code (e.g. "J/A+A/512/A54/table8") uniquely
    identifying the desired table.'''
    try:
        tbl = Table.read(str(tblpath), format="ascii.ipac")
    except FileNotFoundError:
        # The module-level astroquery import is commented out (so the rest
        # of the module works without astroquery installed). Import lazily,
        # only when an actual Vizier query is required; previously this
        # path raised NameError on `Vizier`.
        from astroquery.vizier import Vizier
        Vizier.ROW_LIMIT = -1
        tbl = Vizier.get_catalogs(tablecode)[0]
        tbl.write(str(tblpath), format="ascii.ipac")
    return tbl
###############################################################################
# Spreadsheet help #
###############################################################################
def inspect_table_as_spreadsheet(table):
    '''Opens the table in Libreoffice.

    For cases where it would be much easier to look at data by analyzing it
    in a spreadsheet, this function will essentially take the table and load
    it into Libreoffice so that operations can be done on it.
    '''
    with tempfile.NamedTemporaryFile() as fp:
        table.write(fp.name, format="ascii.csv")
        try:
            subprocess.run(["oocalc", fp.name])
        except FileNotFoundError:
            # Some installs name the Calc binary "localc" instead.
            subprocess.run(["localc", fp.name])
def inspect_table_in_topcat(table,
                            topcat_path="/home/regulus/simonian/topcat/topcat"):
    '''Opens the table in TOPCAT.

    TOPCAT is a useful tool for inspecting tables that are suited to be
    written as FITS files. TOPCAT is actually much more extensible than we
    are using it for, but it's helpful for this purpose.

    Parameters
    ----------
    topcat_path : str
        Location of the TOPCAT executable. The default preserves the
        previously hard-coded, machine-specific path; pass your own path on
        other systems.
    '''
    with tempfile.NamedTemporaryFile() as fp:
        table.write(fp.name, format="fits", overwrite=True)
        subprocess.run([topcat_path, fp.name])
###############################################################################
# Caching large data files #
###############################################################################
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.

    If called later with the same arguments, the cached value is returned
    (not reevaluated). Calls with unhashable arguments (e.g. lists) fall
    through to the wrapped function uncached.
    '''
    def __init__(self, func):
        self.func = func
        # Maps args tuples to previously computed return values.
        self.cache = {}

    def __call__(self, *args):
        # EAFP: probing the cache raises TypeError for unhashable args.
        # The old check `isinstance(args, collections.Hashable)` was doubly
        # broken: collections.Hashable was removed in Python 3.10, and an
        # args *tuple* is always "hashable" even when its elements are not,
        # so unhashable arguments still blew up. (Debug prints dropped.)
        try:
            return self.cache[args]
        except KeyError:
            value = self.func(*args)
            self.cache[args] = value
            return value
        except TypeError:
            # Uncacheable arguments -- better to not cache than blow up.
            return self.func(*args)

    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
def shortcut_file(filename, format="fits", fill_value=-9999):
    ''' Return a decorator that both caches the result and saves it to a file.

    This decorator should be used for commonly used snippets and combinations
    of tables that are small enough to be read in quickly, and processed enough
    that generating them from scratch is time-intensive.

    NOTE(review): the cached value ignores the call arguments -- after the
    first call (or a successful cache-file read), the same table is returned
    for any *args. Presumably decorated functions take no arguments; confirm
    with callers.
    '''
    class Memorize(object):
        '''
        A function decorated with @memorize caches its return value every time
        it is called. If the function is called later with the same arguments,
        the cached value is returned (the function is not reevaluated). The
        cache is stored in the filename provided in shortcut_file for reuse in
        future executions. If the function corresponding to this decorated has
        been updated, make sure to change the object at the given filename.
        '''
        def __init__(self, func):
            self.func = func
            self.filename = filename
            # In-memory copy of the table; None until the first call.
            self.table = None

        def __call__(self, *args):
            if self.table is None:
                try:
                    self.read_cache()
                except FileNotFoundError:
                    # No cache file yet: compute the result and persist it.
                    value = self.func(*args)
                    self.table = value
                    self.save_cache()
            return self.table

        def read_cache(self):
            '''
            Read the table in from the given location. This will take the
            format given in the shortcut_file command.
            '''
            # NOTE(review): character_as_bytes is a FITS-reader keyword;
            # other formats may reject it -- confirm if formats besides
            # "fits" are ever used here.
            self.table = Table.read(self.filename, format=format,
                                    character_as_bytes=False)
            mask_numeric_fill_values(self.table, fill_value)
            # If the dtype is fits, then the Astropy FITS program doesn't
            # convert correctly between bytes and strings.
            # See https://github.com/astropy/astropy/issues/5280

        def save_cache(self):
            '''
            Save the table into the given filename using the given format.
            '''
            set_numeric_fill_values(self.table, fill_value)
            try:
                self.table.write(self.filename, format=format)
            except FileNotFoundError:
                # Parent directory missing: create it, then retry the write.
                # NOTE(review): assumes filename is a pathlib.Path (has
                # .parent); a plain str would break here -- confirm callers.
                self.filename.parent.mkdir(parents=True)
                self.table.write(self.filename, format=format)

        def __repr__(self):
            ''' Return the function's docstring. '''
            return self.func.__doc__

        def __get__(self, obj, objtype):
            ''' Support instance methods. '''
            return functools.partial(self.__call__, obj)
    return Memorize
###############################################################################
# Itertools help #
###############################################################################
def roundrobin(*iterables):
    '''roundrobin('ABC', 'D', 'EF') --> ADEBFC

    Yields one item from each iterable in turn, dropping iterables as they
    are exhausted.
    '''
    # Recipe credited to George Sakkis (typo "cedited" fixed).
    pending = len(iterables)
    # Bind each iterator's __next__; avoid shadowing the builtin next().
    nexts = cycle(it.__next__ for it in map(iter, iterables))
    while pending:
        try:
            for nxt in nexts:
                yield nxt()
        except StopIteration:
            # One iterable ran dry: rebuild the cycle without it.
            pending -= 1
            nexts = cycle(islice(nexts, pending))
def take(n, iterable):
    '''Return first n items of the iterable as a list.'''
    prefix = islice(iterable, n)
    return list(prefix)
def flatten(listOfLists):
    "Flatten one level of nesting"
    return (item for sublist in listOfLists for item in sublist)
def random_permutation(iterable, r=None):
    """Random selection of r elements from iterable, in random order.

    Equivalent to a single random draw from itertools.permutations(iterable,
    r); r defaults to the full length of the iterable. (The old docstring was
    copied from the random_product recipe and described itertools.product.)
    """
    pool = tuple(iterable)
    r = len(pool) if r is None else r
    return tuple(random.sample(pool, r))
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    pool = list(iterable)
    subsets = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(subsets)
def consume(iterator, n):
    "Advance the iterator n-steps ahead. If n is none, consume entirely."
    # Both branches burn through the iterator at C speed.
    if n is not None:
        # Advance to the empty slice starting at position n.
        next(islice(iterator, n, n), None)
    else:
        # Feed the whole iterator into a zero-length deque.
        collections.deque(iterator, maxlen=0)
def nth(iterable, n, default=None):
    "Returns the nth item or a default value"
    remainder = islice(iterable, n, None)
    return next(remainder, default)
def zip_equal(*iterables):
    '''Like zip, but raises ValueError if iterables have different lengths.'''
    sentinel = object()
    for combo in zip_longest(*iterables, fillvalue=sentinel):
        # Use identity, not membership: `sentinel in combo` calls __eq__ on
        # every element, which can raise or misbehave (e.g. numpy arrays,
        # which this module handles routinely).
        if any(item is sentinel for item in combo):
            raise ValueError("Iterables have different lengths")
        yield combo
###############################################################################
# Binary confidence intervals #
###############################################################################
def poisson_upper(n, sigma):
    '''Return the Poisson upper limit of the confidence interval.

    This is the upper limit for a given number of successes n, and the width
    of the confidence interval is given in sigmas (Gehrels-style
    approximation).'''
    m = n + 1
    root = 1 - 1/9/m + sigma/3/np.sqrt(m)
    return m * root**3
def scaled_poisson_upper(n, sigma, scale):
    '''Return the upper limit of a scaled Poisson variable.

    This is the upper limit for a given number of successes if the random
    variable was scaled by a scale factor.'''
    cl = scipy.stats.norm.cdf(sigma)
    # Shrink the upper tail by the scale factor before inverting chi^2.
    tail = (1 - cl) / scale
    return scipy.stats.chi2.ppf(1 - tail, 2*n + 2) / 2
def scaled_poisson_lower(n, sigma, scale):
    '''Return the lower limit of a scaled Poisson variable.

    This is the lower limit for a given number of successes if the random
    variable was scaled by a scale factor.'''
    cl = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(1 - cl/scale, 2*n) / 2
def poisson_upper_exact(n, sigma):
    '''Return the Poisson upper limit of the confidence interval.

    Upper limit for n successes with the interval width given in sigmas,
    computed by exact chi-square inversion rather than the Gehrels
    approximation used by poisson_upper.'''
    cl = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(cl, 2*n + 2) / 2
def poisson_lower_exact(n, sigma):
    '''Return the Poisson lower limit of the confidence interval.

    Lower limit for n successes with the interval width given in sigmas,
    computed by exact chi-square inversion rather than the Gehrels
    approximation used by poisson_lower.'''
    cl = scipy.stats.norm.cdf(sigma)
    return scipy.stats.chi2.ppf(1 - cl, 2*n) / 2
def poisson_lower(n, sigma):
    '''Return the Poisson lower limit of the confidence interval.

    Lower limit for n successes; the interval width is given in sigmas.
    Formula from Gehrels (1986) with tuned parameters. Only sigma values
    1.0, 2.0 and 3.0 are tabulated; any other value raises KeyError.'''
    beta = {1.0: 0.0, 2.0: 0.062, 3.0: 0.222}[sigma]
    gamma = {1.0: 0.0, 2.0: -2.19, 3.0: -1.85}[sigma]
    bracket = 1 - 1/9/n - sigma/3/np.sqrt(n) + beta * n**gamma
    return n * bracket**3
def binomial_upper(n1, n, sigma=1):
    '''The upper limit of the binomial probability at the given sigma level.

    Upper limit for n1 successes out of n trials; numerically exact via the
    inverse regularized incomplete beta function.'''
    if sigma <= 0:
        raise ValueError("The probability needs to be positive.")
    cl = -scipy.special.erf(-sigma)
    # n1 == n pins the upper limit at exactly 1.
    return np.where(n1 != n, scipy.special.betaincinv(n1+1, n-n1, cl), 1)
def binomial_lower(n1, n, sigma=1):
    '''The lower limit of the binomial probability at the given sigma level.

    Lower limit for n1 successes out of n trials, obtained by symmetry from
    the upper limit of the complementary count. Numerically exact.'''
    return 1 - binomial_upper(n - n1, n, sigma=sigma)
###############################################################################
#                                 Numpy help                                  #
###############################################################################
def slicer_vectorized(arr, strindices):
'''Extract the substring at strindices from an array.
Given a string array arr, extract the substring elementwise corresponding
to the indices in strindices.'''
arr = np.array(arr, dtype=np.unicode_)
indexarr = np.array(strindices, dtype=np.int_)
temparr = arr.view('U1').reshape(len(arr), -1)[:,strindices]
return np.fromstring(temparr.tostring(), dtype='U'+str(len(indexarr)))
def check_null(arr, nullvalue):
    '''Returns a boolean array indicating which values of arr are nullvalue.

    The currently recognized types of null values are ordinary scalars, NaN,
    and np.ma.masked. This function encapsulates using the appropriate test
    for each kind, because simply doing arr == nullvalue does not work all of
    the time, particularly for NaN and masked values.'''
    # Check masked first: it needs no NaN test at all.
    if nullvalue is np.ma.masked:
        return np.ma.getmaskarray(arr)
    # np.isnan raises TypeError for non-numeric null values (e.g. strings);
    # in that case fall back to plain equality below.
    try:
        nullvalue_is_nan = np.isnan(nullvalue)
    except TypeError:
        nullvalue_is_nan = False
    if nullvalue_is_nan:
        return np.isnan(arr)
    return arr == nullvalue
###############################################################################
# Matplotlib Boundaries #
###############################################################################
def round_bound(lowbounds, upbounds, round_interval):
    '''Return lower and upper bounds snapped to the rounding interval.

    Generally the bounds should be the value plus or minus the error;
    round_interval should be the width of the tick marks.'''
    lowest = np.min(lowbounds)
    highest = np.max(upbounds)
    # Snap down for the lower limit, up (next tick) for the upper limit.
    lowlim = np.floor_divide(lowest, round_interval) * round_interval
    highlim = (np.floor_divide(highest, round_interval) + 1) * round_interval
    return lowlim, highlim
def adjust_axes(ax, lowx, highx, lowy, highy, xdiff, ydiff):
    '''Expand the given axes so all the given data fits.

    Ensures that ax accommodates both the new x and y bounds provided here
    and whatever limits it already had. The tick intervals for x and y should
    be given in xdiff and ydiff.'''
    new_xlim = round_bound(lowx, highx, xdiff)
    new_ylim = round_bound(lowy, highy, ydiff)
    old_xlim = ax.get_xlim()
    old_ylim = ax.get_ylim()
    # Grow-only: take the union of the old and new ranges.
    ax.set_xlim(min(new_xlim[0], old_xlim[0]), max(new_xlim[1], old_xlim[1]))
    ax.set_ylim(min(new_ylim[0], old_ylim[0]), max(new_ylim[1], old_ylim[1]))
| |
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import timeutils
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_ha_router
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_local_router as dvr_local_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_processing_queue as queue
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
try:
from neutron_fwaas.services.firewall.agents.l3reference \
import firewall_l3_agent
except Exception:
# TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
# Module-level logger for the L3 agent.
LOG = logging.getLogger(__name__)
# TODO(Carl) Following constants retained to increase SNR during refactoring
NS_PREFIX = namespaces.NS_PREFIX
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
class L3PluginApi(object):
    """Agent side of the l3 agent RPC API.

    API version history:
        1.0 - Initial version.
        1.1 - Floating IP operational status updates
        1.2 - DVR support: new L3 plugin methods added.
              - get_ports_by_subnet
              - get_agent_gateway_port
              Needed by the agent when operating in DVR/DVR_SNAT mode
        1.3 - Get the list of activated services
        1.4 - Added L3 HA update_router_state. This method was reworked in
              to update_ha_routers_states
        1.5 - Added update_ha_routers_states
        1.6 - Added process_prefix_update
        1.7 - DVR support: new L3 plugin methods added.
              - delete_agent_gateway_port
    """

    def __init__(self, topic, host):
        self.host = host
        self.client = n_rpc.get_client(
            oslo_messaging.Target(topic=topic, version='1.0'))

    def get_routers(self, context, router_ids=None):
        """Make a remote process call to retrieve the sync data for routers."""
        return self.client.prepare().call(
            context, 'sync_routers', host=self.host, router_ids=router_ids)

    def get_external_network_id(self, context):
        """Make a remote process call to retrieve the external network id.

        @raise oslo_messaging.RemoteError: with TooManyExternalNetworks as
                                           exc_type if there are more than one
                                           external network
        """
        return self.client.prepare().call(
            context, 'get_external_network_id', host=self.host)

    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Call the plugin update floating IPs's operational status."""
        return self.client.prepare(version='1.1').call(
            context, 'update_floatingip_statuses',
            router_id=router_id, fip_statuses=fip_statuses)

    def get_ports_by_subnet(self, context, subnet_id):
        """Retrieve ports by subnet id."""
        return self.client.prepare(version='1.2').call(
            context, 'get_ports_by_subnet', host=self.host,
            subnet_id=subnet_id)

    def get_agent_gateway_port(self, context, fip_net):
        """Get or create an agent_gateway_port."""
        return self.client.prepare(version='1.2').call(
            context, 'get_agent_gateway_port',
            network_id=fip_net, host=self.host)

    def get_service_plugin_list(self, context):
        """Make a call to get the list of activated services."""
        return self.client.prepare(version='1.3').call(
            context, 'get_service_plugin_list')

    def update_ha_routers_states(self, context, states):
        """Update HA routers states."""
        return self.client.prepare(version='1.5').call(
            context, 'update_ha_routers_states',
            host=self.host, states=states)

    def process_prefix_update(self, context, prefix_update):
        """Process prefix update whenever prefixes get changed."""
        return self.client.prepare(version='1.6').call(
            context, 'process_prefix_update', subnets=prefix_update)

    def delete_agent_gateway_port(self, context, fip_net):
        """Delete Floatingip_agent_gateway_port."""
        return self.client.prepare(version='1.7').call(
            context, 'delete_agent_gateway_port',
            host=self.host, network_id=fip_net)
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
dvr.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace
after the external network is removed
Needed by the L3 service when dealing with DVR
"""
target = oslo_messaging.Target(version='1.3')
def __init__(self, host, conf=None):
    """Initialize the L3 NAT agent.

    :param host: hostname this agent runs on (used for RPC calls).
    :param conf: optional config object; falls back to the global cfg.CONF.
    """
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    # Maps router_id -> RouterInfo for every router hosted by this agent.
    self.router_info = {}
    self._check_config_params()
    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')
    self.driver = common_utils.load_interface_driver(self.conf)
    self.context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.fullsync = True
    # Get the list of service plugins from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case its not ready to respond.
    retry_count = 5
    while True:
        retry_count = retry_count - 1
        try:
            self.neutron_service_plugins = (
                self.plugin_rpc.get_service_plugin_list(self.context))
        except oslo_messaging.RemoteError as e:
            # Server does not implement this RPC: record "unknown" and
            # carry on with startup instead of re-raising.
            with excutils.save_and_reraise_exception() as ctx:
                ctx.reraise = False
                LOG.warning(_LW('l3-agent cannot check service plugins '
                                'enabled at the neutron server when '
                                'startup due to RPC error. It happens '
                                'when the server does not support this '
                                'RPC API. If the error is '
                                'UnsupportedVersion you can ignore this '
                                'warning. Detail message: %s'), e)
            self.neutron_service_plugins = None
        except oslo_messaging.MessagingTimeout as e:
            # Server not answering yet: retry up to 5 times, then let
            # the timeout propagate (ctx re-raises when retries are
            # exhausted).
            with excutils.save_and_reraise_exception() as ctx:
                if retry_count > 0:
                    ctx.reraise = False
                    LOG.warning(_LW('l3-agent cannot check service '
                                    'plugins enabled on the neutron '
                                    'server. Retrying. '
                                    'Detail message: %s'), e)
                    continue
        break
    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)
    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)
    self._queue = queue.RouterProcessingQueue()
    super(L3NATAgent, self).__init__(conf=self.conf)
    self.target_ex_net_id = None
    self.use_ipv6 = ipv6_utils.is_enabled()
    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)
def _check_config_params(self):
    """Check items in configuration files.

    Check for required and invalid configuration items.
    The actual values are not verified for correctness.
    """
    if not self.conf.interface_driver:
        msg = _LE('An interface driver must be specified')
        LOG.error(msg)
        raise SystemExit(1)

    if self.conf.ipv6_gateway:
        # ipv6_gateway configured. Check for valid v6 link-local address.
        # NOTE: the original assignment carried a trailing comma, which
        # made msg a 1-tuple and broke the %s interpolation in LOG.error.
        # It is also hoisted out of the try so both error paths can use it.
        msg = _LE("%s used in config as ipv6_gateway is not a valid "
                  "IPv6 link-local address.")
        try:
            ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
            if ip_addr.version != 6 or not ip_addr.is_link_local():
                LOG.error(msg, self.conf.ipv6_gateway)
                raise SystemExit(1)
        except netaddr.AddrFormatError:
            LOG.error(msg, self.conf.ipv6_gateway)
            raise SystemExit(1)
def _fetch_external_net_id(self, force=False):
    """Find UUID of single external network for this agent.

    :param force: when True, bypass the cached ``self.target_ex_net_id``
        and re-query the server over RPC.
    :returns: the external network UUID, or None when the agent can
        handle any external network.
    """
    # Explicit configuration wins over anything cached or fetched.
    if self.conf.gateway_external_network_id:
        return self.conf.gateway_external_network_id
    # L3 agent doesn't use external_network_bridge to handle external
    # networks, so bridge_mappings with provider networks will be used
    # and the L3 agent is able to handle any external networks.
    if not self.conf.external_network_bridge:
        return
    # Serve the cached value unless the caller forces a refresh.
    if not force and self.target_ex_net_id:
        return self.target_ex_net_id
    try:
        self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
            self.context)
        return self.target_ex_net_id
    except oslo_messaging.RemoteError as e:
        # Re-raise the RPC error by default; only the
        # TooManyExternalNetworks case is translated into a local
        # Exception with a configuration hint.
        with excutils.save_and_reraise_exception() as ctx:
            if e.exc_type == 'TooManyExternalNetworks':
                ctx.reraise = False
                msg = _(
                    "The 'gateway_external_network_id' option must be "
                    "configured for this agent as Neutron has more than "
                    "one external network.")
                raise Exception(msg)
def _create_router(self, router_id, router):
    """Instantiate the router class matching the router's flavor.

    Picks among legacy / HA / DVR-local / DVR-edge / DVR-edge-HA
    implementations based on the router dict and the agent mode.
    """
    ctor_kwargs = {
        'router_id': router_id,
        'router': router,
        'use_ipv6': self.use_ipv6,
        'agent_conf': self.conf,
        'interface_driver': self.driver,
    }
    is_distributed = router.get('distributed')
    is_ha = router.get('ha')
    dvr_snat_mode = (
        self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT)

    if is_distributed:
        # Distributed routers additionally need a back-reference to the
        # agent and the host name.
        ctor_kwargs['agent'] = self
        ctor_kwargs['host'] = self.host

    if is_distributed and is_ha and dvr_snat_mode:
        ctor_kwargs['state_change_callback'] = self.enqueue_state_change
        return dvr_edge_ha_router.DvrEdgeHaRouter(**ctor_kwargs)

    if is_distributed:
        if dvr_snat_mode:
            return dvr_router.DvrEdgeRouter(**ctor_kwargs)
        return dvr_local_router.DvrLocalRouter(**ctor_kwargs)

    if is_ha:
        ctor_kwargs['state_change_callback'] = self.enqueue_state_change
        return ha_router.HaRouter(**ctor_kwargs)

    return legacy_router.LegacyRouter(**ctor_kwargs)
def _router_added(self, router_id, router):
    """Create RouterInfo for a new router, register and initialize it."""
    router_info = self._create_router(router_id, router)
    registry.notify(
        resources.ROUTER, events.BEFORE_CREATE, self, router=router_info)
    self.router_info[router_id] = router_info
    router_info.initialize(self.process_monitor)
    # TODO(Carl) This is a hook in to fwaas. It should be cleaned up.
    self.process_router_add(router_info)
def _safe_router_removed(self, router_id):
    """Try to delete a router and return True if successful."""
    try:
        self._router_removed(router_id)
        return True
    except Exception:
        LOG.exception(_LE('Error while deleting router %s'), router_id)
        return False
def _router_removed(self, router_id):
    """Tear down a known router; fall back to namespace cleanup if unknown."""
    router_info = self.router_info.get(router_id)
    if router_info is None:
        # The agent never tracked this router; just make sure any stale
        # namespace is removed.
        LOG.warn(_LW("Info for router %s was not found. "
                     "Performing router cleanup"), router_id)
        self.namespaces_manager.ensure_router_cleanup(router_id)
        return
    registry.notify(
        resources.ROUTER, events.BEFORE_DELETE, self, router=router_info)
    router_info.delete(self)
    del self.router_info[router_id]
    registry.notify(
        resources.ROUTER, events.AFTER_DELETE, self, router=router_info)
def router_deleted(self, context, router_id):
    """Deal with router deletion RPC message."""
    LOG.debug('Got router deleted notification for %s', router_id)
    delete_update = queue.RouterUpdate(
        router_id, queue.PRIORITY_RPC, action=queue.DELETE_ROUTER)
    self._queue.add(delete_update)
def routers_updated(self, context, routers):
    """Deal with routers modification and creation RPC message.

    ``routers`` is either a list of router dicts or, for backward
    compatibility with older servers, a list of router id strings.
    Each router is queued for processing at RPC priority.
    """
    LOG.debug('Got routers updated notification :%s', routers)
    if routers:
        # This is needed for backward compatibility
        if isinstance(routers[0], dict):
            routers = [router['id'] for router in routers]
        # NOTE: loop variable renamed; the original shadowed the ``id``
        # builtin.
        for router_id in routers:
            update = queue.RouterUpdate(router_id, queue.PRIORITY_RPC)
            self._queue.add(update)
def router_removed_from_agent(self, context, payload):
    """Queue deletion of a router that was unscheduled from this agent."""
    LOG.debug('Got router removed from agent :%r', payload)
    removal = queue.RouterUpdate(
        payload['router_id'], queue.PRIORITY_RPC,
        action=queue.DELETE_ROUTER)
    self._queue.add(removal)
def router_added_to_agent(self, context, payload):
    """Handle the router-added-to-agent RPC notification.

    ``payload`` is forwarded unchanged to routers_updated(), which
    schedules the routers for processing.
    """
    LOG.debug('Got router added to agent :%r', payload)
    self.routers_updated(context, payload)
def _process_router_if_compatible(self, router):
    """Process *router* if this agent may handle it, else raise.

    Runs a series of compatibility checks and then dispatches to the
    added- or updated-router path.

    :raises n_exc.RouterNotCompatibleWithAgent: when the router does not
        match this agent's configuration.
    """
    # A configured external bridge must actually exist on this host.
    if (self.conf.external_network_bridge and
            not ip_lib.device_exists(self.conf.external_network_bridge)):
        LOG.error(_LE("The external network bridge '%s' does not exist"),
                  self.conf.external_network_bridge)
        return

    # When the agent is pinned to a single router id, reject all others.
    if self.conf.router_id and router['id'] != self.conf.router_id:
        raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])

    # Either ex_net_id or handle_internal_only_routers must be set
    ex_net_id = (router['external_gateway_info'] or {}).get('network_id')
    if not ex_net_id and not self.conf.handle_internal_only_routers:
        raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])

    # If target_ex_net_id and ex_net_id are set they must be equal
    target_ex_net_id = self._fetch_external_net_id()
    if (target_ex_net_id and ex_net_id and ex_net_id != target_ex_net_id):
        # Double check that our single external_net_id has not changed
        # by forcing a check by RPC.
        if ex_net_id != self._fetch_external_net_id(force=True):
            raise n_exc.RouterNotCompatibleWithAgent(
                router_id=router['id'])

    if router['id'] not in self.router_info:
        self._process_added_router(router)
    else:
        self._process_updated_router(router)
def _process_added_router(self, router):
    """Create, process and announce a router not yet known to the agent."""
    self._router_added(router['id'], router)
    router_info = self.router_info[router['id']]
    router_info.router = router
    router_info.process(self)
    registry.notify(
        resources.ROUTER, events.AFTER_CREATE, self, router=router_info)
def _process_updated_router(self, router):
    """Re-process an already-tracked router with its fresh server data."""
    router_info = self.router_info[router['id']]
    router_info.router = router
    registry.notify(
        resources.ROUTER, events.BEFORE_UPDATE, self, router=router_info)
    router_info.process(self)
    registry.notify(
        resources.ROUTER, events.AFTER_UPDATE, self, router=router_info)
def _process_router_update(self):
    """Drain the router-update queue, one router at a time.

    Each iteration handles one update: prefix-delegation updates,
    deletions, and regular add/update processing.  Failures set
    ``self.fullsync`` so the periodic task resynchronizes everything.
    """
    for rp, update in self._queue.each_update_to_next_router():
        LOG.debug("Starting router update for %s, action %s, priority %s",
                  update.id, update.action, update.priority)
        if update.action == queue.PD_UPDATE:
            # Prefix-delegation updates are not tied to one router.
            self.pd.process_prefix_update()
            LOG.debug("Finished a router update for %s", update.id)
            continue
        router = update.router
        if update.action != queue.DELETE_ROUTER and not router:
            # The update carried no router payload; fetch it over RPC.
            try:
                update.timestamp = timeutils.utcnow()
                routers = self.plugin_rpc.get_routers(self.context,
                                                      [update.id])
            except Exception:
                msg = _LE("Failed to fetch router information for '%s'")
                LOG.exception(msg, update.id)
                self.fullsync = True
                continue
            if routers:
                router = routers[0]
        if not router:
            # Still no router data: treat as a deletion.
            removed = self._safe_router_removed(update.id)
            if not removed:
                # TODO(Carl) Stop this fullsync non-sense. Just retry this
                # one router by sticking the update at the end of the queue
                # at a lower priority.
                self.fullsync = True
            else:
                # need to update timestamp of removed router in case
                # there are older events for the same router in the
                # processing queue (like events from fullsync) in order to
                # prevent deleted router re-creation
                rp.fetched_and_processed(update.timestamp)
            LOG.debug("Finished a router update for %s", update.id)
            continue
        try:
            self._process_router_if_compatible(router)
        except n_exc.RouterNotCompatibleWithAgent as e:
            LOG.exception(e.msg)
            # Was the router previously handled by this agent?
            if router['id'] in self.router_info:
                LOG.error(_LE("Removing incompatible router '%s'"),
                          router['id'])
                self._safe_router_removed(router['id'])
        except Exception:
            msg = _LE("Failed to process compatible router '%s'")
            LOG.exception(msg, update.id)
            self.fullsync = True
            continue
        LOG.debug("Finished a router update for %s", update.id)
        rp.fetched_and_processed(update.timestamp)
def _process_routers_loop(self):
    """Forever spawn green threads that drain the router-update queue."""
    LOG.debug("Starting _process_routers_loop")
    worker_pool = eventlet.GreenPool(size=8)
    while True:
        worker_pool.spawn_n(self._process_router_update)
# NOTE(kevinbenton): this is set to 1 second because the actual interval
# is controlled by a FixedIntervalLoopingCall in neutron/service.py that
# is responsible for task execution.
@periodic_task.periodic_task(spacing=1, run_immediately=True)
def periodic_sync_routers_task(self, context):
    """Periodically resynchronize all routers when fullsync is pending."""
    self.process_services_sync(context)
    if not self.fullsync:
        return
    LOG.debug("Starting fullsync periodic_sync_routers_task")
    # self.fullsync is True at this point. If an exception -- caught or
    # uncaught -- prevents setting it to False below then the next call
    # to periodic_sync_routers_task will re-enter this code and try again.
    # Context manager self.namespaces_manager captures a picture of
    # namespaces *before* fetch_and_sync_all_routers fetches the full list
    # of routers from the database. This is important to correctly
    # identify stale ones.
    try:
        with self.namespaces_manager as ns_manager:
            self.fetch_and_sync_all_routers(context, ns_manager)
    except n_exc.AbortSyncRouters:
        self.fullsync = True
def fetch_and_sync_all_routers(self, context, ns_manager):
    """Fetch every router from the server and queue it for processing.

    Routers that were known locally but no longer exist on the server
    are queued for deletion.

    :raises n_exc.AbortSyncRouters: when the RPC fetch fails.
    """
    prev_router_ids = set(self.router_info)
    # One timestamp for the whole sync so queued updates are ordered
    # consistently relative to RPC-driven updates.
    timestamp = timeutils.utcnow()
    try:
        if self.conf.router_id:
            routers = self.plugin_rpc.get_routers(context,
                                                  [self.conf.router_id])
        else:
            routers = self.plugin_rpc.get_routers(context)
    except oslo_messaging.MessagingException:
        LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
        raise n_exc.AbortSyncRouters()
    else:
        LOG.debug('Processing :%r', routers)
        for r in routers:
            # Mark the namespace as live so ns_manager does not reap it.
            ns_manager.keep_router(r['id'])
            if r.get('distributed'):
                # need to keep fip namespaces as well
                ext_net_id = (r['external_gateway_info'] or {}).get(
                    'network_id')
                if ext_net_id:
                    ns_manager.keep_ext_net(ext_net_id)
            update = queue.RouterUpdate(r['id'],
                                        queue.PRIORITY_SYNC_ROUTERS_TASK,
                                        router=r,
                                        timestamp=timestamp)
            self._queue.add(update)
        self.fullsync = False
        LOG.debug("periodic_sync_routers_task successfully completed")
        curr_router_ids = set([r['id'] for r in routers])
        # Delete routers that have disappeared since the last sync
        for router_id in prev_router_ids - curr_router_ids:
            ns_manager.keep_router(router_id)
            update = queue.RouterUpdate(router_id,
                                        queue.PRIORITY_SYNC_ROUTERS_TASK,
                                        timestamp=timestamp,
                                        action=queue.DELETE_ROUTER)
            self._queue.add(update)
def after_start(self):
    """Spawn the router-processing loop once the agent is up."""
    # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It
    # calls this method here. So Removing this after_start() would break
    # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent
    # can have L3NATAgentWithStateReport as its base class instead of
    # L3NATAgent.
    eventlet.spawn_n(self._process_routers_loop)
    LOG.info(_LI("L3 agent started"))
def create_pd_router_update(self):
    """Queue a prefix-delegation update (not bound to any one router)."""
    pd_update = queue.RouterUpdate(
        None,
        queue.PRIORITY_PD_UPDATE,
        timestamp=timeutils.utcnow(),
        action=queue.PD_UPDATE)
    self._queue.add(pd_update)
class L3NATAgentWithStateReport(L3NATAgent):
    """L3 agent that periodically reports its state to the Neutron server."""

    def __init__(self, host, conf=None):
        super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
        # Static part of the state report; per-report counters are filled
        # in by _report_state().
        self.agent_state = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'availability_zone': self.conf.AGENT.availability_zone,
            'topic': topics.L3_AGENT,
            'configurations': {
                'agent_mode': self.conf.agent_mode,
                'router_id': self.conf.router_id,
                'handle_internal_only_routers':
                    self.conf.handle_internal_only_routers,
                'external_network_bridge': self.conf.external_network_bridge,
                'gateway_external_network_id':
                    self.conf.gateway_external_network_id,
                'interface_driver': self.conf.interface_driver,
                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
            'start_flag': True,
            'agent_type': l3_constants.AGENT_TYPE_L3}
        report_interval = self.conf.AGENT.report_interval
        # A report_interval of 0/None disables the heartbeat entirely.
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Compute per-router counters and send one state report."""
        num_ex_gw_ports = 0
        num_interfaces = 0
        num_floating_ips = 0
        router_infos = self.router_info.values()
        num_routers = len(router_infos)
        for ri in router_infos:
            ex_gw_port = ri.get_ex_gw_port()
            if ex_gw_port:
                num_ex_gw_ports += 1
            num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
                                                []))
            num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
                                                  []))
        configurations = self.agent_state['configurations']
        configurations['routers'] = num_routers
        configurations['ex_gw_ports'] = num_ex_gw_ports
        configurations['interfaces'] = num_interfaces
        configurations['floating_ips'] = num_floating_ips
        try:
            agent_status = self.state_rpc.report_state(self.context,
                                                       self.agent_state,
                                                       True)
            if agent_status == l3_constants.AGENT_REVIVED:
                # The server considered us dead; resynchronize everything.
                LOG.info(_LI('Agent has just been revived. '
                             'Doing a full sync.'))
                self.fullsync = True
            # 'start_flag' is only meaningful on the first report.
            self.agent_state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_LW("Neutron server does not support state report."
                         " State report for this agent will be disabled."))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def after_start(self):
        """Start the processing loop and send the initial state report."""
        eventlet.spawn_n(self._process_routers_loop)
        LOG.info(_LI("L3 agent started"))
        # Do the report state before we do the first full sync.
        self._report_state()
        self.pd.after_start()

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.fullsync = True
        LOG.info(_LI("agent_updated by server side %s!"), payload)
| |
"""
Phoenix
LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@copyright: Copyright (c) 2011, w3agency.net
@author: Thomas Stachl <t.stachl@w3agency.net>
@since: Mar 17, 2011
"""
"""----------------------------------------------------------------------------
Imports
----------------------------------------------------------------------------"""
from Phoenix.Conf import Config, logging
from Phoenix.Library import Console, SysUser, File
from Phoenix.Models import Member, Key, Hook
from os import path, mkdir
"""----------------------------------------------------------------------------
Exception
----------------------------------------------------------------------------"""
class Exception(Exception):
    # NOTE(review): this class shadows the ``Exception`` builtin for the
    # rest of this module; the base class in the parentheses still
    # resolves to the builtin at class-creation time, so this is valid but
    # confusing.  Renaming it (e.g. to PhoenixError) would require
    # updating every ``raise Exception(...)`` call below.
    pass
class AdminException(Exception):
    """Raised for fatal errors in phoenix-admin commands."""
    pass
"""----------------------------------------------------------------------------
Class
----------------------------------------------------------------------------"""
class Admin(Console):
    """Console front-end for the ``phoenix-admin`` command (Python 2).

    Registers one argparse sub-command per action (init, adduser,
    addrepo, addkey, addhook, remove*) and implements each action as a
    method of the same name.  Must be run as root: init() creates a
    system user and later drops privileges to it.
    """

    def __init__(self):
        # Refuse to run unless the caller is root; several actions create
        # system users and switch uid/gid.
        if not __import__("os").getuid() == 0:
            raise AdminException("Only root can run this script")
        super(Admin, self).__init__()

    def defineParser(self, parser):
        """Register all phoenix-admin sub-commands and their options."""
        subparsers = parser.add_subparsers(dest='action', title="Phoenix Admin Commands",
            description="Valid commands to use with phoenix-admin")
        # -- init: one-time server setup ---------------------------------
        init = subparsers.add_parser("init", help="Initalize and configure Phoenix")
        init.add_argument("-e", "--admin-email", help="The email address of the administrator", required=True)
        init.add_argument("-n", "--admin-name", help="The name of the administrator")
        init.add_argument("-a", "--admin-username", help="The username of the administrator", default="admin")
        init.add_argument("-u", "--git-user", help="User which is running the git daemon", default="git")
        init.add_argument("-b", "--base-dir", help="base directory for the home directory of the user [/home]", default="/home")
        init.add_argument("--repository-dir", help="The path where git repositories are stored [$base-dir/repositories]", default="repositories")
        init.add_argument("--tarball-dir", help="The path where generated tarballs are stored [$base-dir/tarballs]", default="tarballs")
        init.add_argument("-s", "--sql-connect", help="The connection string for the database to use [sqlite:///$BASE_DIR/phoenix.db]")
        init.add_argument("-d", "--admin-repo", action="store_const", const=True, help="If this flag is set, we will create an admin repo to manage everything.")
        # -- add* commands ------------------------------------------------
        adduser = subparsers.add_parser("adduser", help="Add a new user to the database")
        adduser.add_argument("-u", "--username", help="The new users username", required=True)
        adduser.add_argument("-e", "--email", help="The new users email address", required=True)
        adduser.add_argument("-n", "--name", help="The new users name")
        addrepo = subparsers.add_parser("addrepo", help="Add a repository")
        addrepo.add_argument("-n", "--repository-name", help="The name of the new repository", required=True)
        addrepo.add_argument("-p", "--repository-path", help="Optional, by default the path is the sanitized name of the repository")
        addrepo.add_argument("-u", "--username", help="Username of the owner (use email if not known)")
        addrepo.add_argument("-e", "--email", help="Email of the owner (if username not known)")
        addkey = subparsers.add_parser("addkey", help="Add a new key to a user or a deploy key to a repository")
        addkey.add_argument("-e", "--email", help="The email of the user the key should be added to")
        addkey.add_argument("-u", "--username", help="The username of the user the key should be added to")
        addhook = subparsers.add_parser("addhook", help="Add a hook to a repository.")
        addhook.add_argument("-k", "--hook", help="The hook you want to add", required=True)
        addhook.add_argument("-c", "--command", help="The command to execute on this hook", required=True)
        addhook.add_argument("-n", "--repository-name", help="The name of the repository for the new hook")
        addhook.add_argument("-p", "--repository-path", help="Optional, sometimes the path is shorter than the name")
        addhook.add_argument("-e", "--email", help="The email of the owner of the repository")
        addhook.add_argument("-u", "--username", help="The username of the owner of the repository")
        # -- remove* commands ---------------------------------------------
        removeuser = subparsers.add_parser("removeuser", help="Remove a user (deletes all repositories and keys too)")
        removeuser.add_argument("-u", "--username", help="The username of the user who shall be removed")
        removeuser.add_argument("-e", "--email", help="The email address of the user who shall be removed")
        removerepo = subparsers.add_parser("removerepo", help="Remove a repository from the filesystem and database")
        removerepo.add_argument("-n", "--repository-name", help="The name of the repository which should be deleted")
        removerepo.add_argument("-p", "--repository-path", help="Optional, sometimes the path is shorter than the name")
        removerepo.add_argument("-e", "--email", help="The email of the owner of the repository which should be deleted")
        removerepo.add_argument("-u", "--username", help="The username of the owner of the repository which should be deleted")
        removekey = subparsers.add_parser("removekey", help="Removes a key from the key file and database")
        removekey.add_argument("-i", "--key-id", help="The id of the key to remove", required=True)
        removehook = subparsers.add_parser("removehook", help="Remove a hook from a repository")
        removehook.add_argument("-i", "--hook-id", help="The id of the hook to remove", required=True)

    def init(self):
        """One-time setup: create the git user, config, DB and directories."""
        if Config.get("phoenix", "initialized") == "True":
            raise AdminException("Already initialized.")
        logging.info("Defining variables for init ...")
        user = self.args.git_user
        base = path.join(self.args.base_dir, user)
        repo = path.join(base, self.args.repository_dir)
        tar = path.join(base, self.args.tarball_dir)
        ssh = path.join(base, ".ssh")
        auth_keys = path.join(ssh, "authorized_keys")
        admin_repo = self.args.admin_repo
        email = self.args.admin_email
        name = self.args.admin_name
        username = self.args.admin_username
        # NOTE(review): "sqlite://%s" has two slashes, not three — presumably
        # what sqlobject expects here; confirm against its URI scheme.
        sql = self.args.sql_connect or "sqlite://%s" % path.join(base, "phoenix.db")
        logging.info("Checking for permission to write the config file ...")
        if not File.writePermission(Config.get("CONF_FILE")):
            raise AdminException("You don't have permission to write the config file `%s' ..." % Config.get("CONF_FILE"))
        if not SysUser.exists(self.args.git_user):
            logging.info("Creating user `%s' ... " % user)
            SysUser.create(user, base)
            Config.set("phoenix", "user", user)
            Config.set("phoenix", "base", base)
        else:
            raise AdminException("The user `%s' already exists." % user)
        logging.info("Saving SQL connection string `%s' ..." % sql)
        Config.set("phoenix", "sql_connect", sql)
        Config.set("phoenix", "initialized", True)
        Config.set("phoenix", "authorized_keys", auth_keys)
        # Drop root privileges to the freshly created git user (gid before
        # uid, otherwise setgid would no longer be permitted).
        __import__("os").setgid(__import__("pwd").getpwnam(user).pw_gid)
        __import__("os").setuid(__import__("pwd").getpwnam(user).pw_uid)
        logging.info("Checking for permission to write the config file as `%s' ..." % user)
        if not File.writePermission(Config.get("CONF_FILE")):
            raise AdminException("You don't have permission to write the config file `%s' ..." % Config.get("CONF_FILE"))
        # Connect sqlobject and create the schema as the git user.
        from sqlobject import connectionForURI, sqlhub
        connection = connectionForURI(Config.get("phoenix", "sql_connect"))
        sqlhub.processConnection = connection
        self._sqlChanges()
        self._createDirectoryStructure(repo, tar, ssh)
        logging.info("Creating `%s' ..." % auth_keys)
        File.touch(auth_keys)
        logging.info("Saving admin user information `%s' and `%s' in database ..." % (name, email))
        admin = Member(username=username, email=email, name=name)
        if admin_repo:
            logging.info("Initializing development repository at `%s/phoenix.git' ..." % repo)
            admin.addRepository("Phoenix Server Management", "phoenix.git")
        print "Done."

    def adduser(self):
        """Create a new member; fails if username or email already exist."""
        logging.info("Defining username, name and email ...")
        username = self.args.username
        name = self.args.name
        email = self.args.email
        # Lookup only to reject duplicates (must=False raises on a hit).
        dummy = self._getMemberByUsernameOrEmail(username, email)
        logging.info("Creating and saving the new user ...")
        Member(username=username, email=email, name=name)
        print "Done."

    def addrepo(self):
        """Create a new repository for an existing member."""
        logging.info("Defining username, email and repository name ...")
        username = self.args.username
        email = self.args.email
        name = self.args.repository_name
        # NOTE(review): local ``path`` shadows the os.path import for the
        # rest of this method.
        path = self.args.repository_path
        member = self._getMemberByUsernameOrEmail(username, email, True)
        # Lookup only to reject duplicates (must=False raises on a hit).
        dummy = self._getRepositoryByNameOrPath(member, name, path)
        logging.info("Changing to the git user ...")
        __import__("os").setgid(__import__("pwd").getpwnam(Config.get("phoenix", "user")).pw_gid)
        __import__("os").setuid(__import__("pwd").getpwnam(Config.get("phoenix", "user")).pw_uid)
        logging.info("Creating and saving the new repository ...")
        member.addRepository(name, path)
        print "Done."

    def addkey(self):
        """Read an SSH key from stdin and attach it to an existing member."""
        logging.info("Read the key ...")
        key = __import__("sys").stdin.readline().strip()
        if key == "":
            raise Exception("Key can not be empty.")
        logging.info("Define username, email and repository ...")
        email = self.args.email
        username = self.args.username
        member = self._getMemberByUsernameOrEmail(username, email, True)
        logging.info("Save new key in database ...")
        member.addKey(key)
        print "Done."

    def addhook(self):
        """Attach a hook command to an existing repository."""
        logging.info("Defining username, email, repository name, hook and command ...")
        username = self.args.username
        email = self.args.email
        name = self.args.repository_name
        path = self.args.repository_path
        hook = self.args.hook
        command = self.args.command
        member = self._getMemberByUsernameOrEmail(username, email, True)
        repo = self._getRepositoryByNameOrPath(member, name, path, True)
        logging.info("Save new hook in database ...")
        repo.addHook(hook, command)
        print "Done."

    def removeuser(self):
        """Delete a member (cascading effects depend on the Member model)."""
        logging.info("Defining email and username ...")
        email = self.args.email
        username = self.args.username
        member = self._getMemberByUsernameOrEmail(username, email, True)
        logging.info("Removing the user from the database ...")
        member.destroySelf()
        print "Done."

    def removerepo(self):
        """Delete a repository belonging to an existing member."""
        logging.info("Defining repository name, email and username ...")
        email = self.args.email
        username = self.args.username
        name = self.args.repository_name
        path = self.args.repository_path
        member = self._getMemberByUsernameOrEmail(username, email, True)
        repo = self._getRepositoryByNameOrPath(member, name, path, True)
        logging.info("Removing the repository ...")
        repo.destroySelf()
        print "Done."

    def removekey(self):
        """Delete a key by its database id."""
        logging.info("Defining key id ...")
        id = self.args.key_id
        logging.info("Checking if the key exists ...")
        key = Key.get(id)
        if not key:
            raise Exception("The key with the id `%s' does not exits." % id)
        logging.info("Removing the key from the database ...")
        key.destroySelf()
        print "Done."

    def removehook(self):
        """Delete a hook by its database id."""
        logging.info("Defining hook id ...")
        id = self.args.hook_id
        logging.info("Checking if hook exists ...")
        hook = Hook.get(id)
        if not hook:
            raise Exception("The hook with the id `%s' does not exist." % id)
        logging.info("Removing the hook from the database ...")
        hook.destroySelf()
        print "Done."

    def _getMemberByUsernameOrEmail(self, username, email, must=False):
        """Look up a member by username or email.

        With must=True, raises when the member is missing; with
        must=False, raises when the member already exists (duplicate
        check).  Note email, when given, takes precedence over username.
        """
        logging.info("Trying to find the user by username or email ...")
        member = None
        try:
            if username:
                member = Member.selectBy(username=username)[0]
            if email:
                member = Member.selectBy(email=email)[0]
        except IndexError:
            # selectBy returned an empty result set: member stays None.
            if must and not member:
                raise AdminException("The user can not be found (username: `%s', email: `%s')" % (username, email))
        if not must and member:
            raise AdminException("The user `%s' with email `%s' already exists." % (member.username, member.email))
        return member

    def _getRepositoryByNameOrPath(self, member, name, path, must=False):
        """Look up *member*'s repository by name or path.

        Same must semantics as _getMemberByUsernameOrEmail; path, when
        given, takes precedence over name.
        """
        repo = None
        logging.info("Trying to find a repository by name or path ...")
        if name:
            repo = member.repositoryByName(name)
        if path:
            repo = member.repositoryByPath(path)
        if must and not repo:
            raise AdminException("Repository with name `%s' or path `%s' can not be found." % (name, path))
        elif not must and repo:
            raise AdminException("The repository `%s' already exists." % repo.name)
        return repo

    def _createDirectoryStructure(self, repo, tar, ssh):
        """Create repository/tarball/.ssh dirs and record them in Config."""
        if not path.exists(repo):
            logging.info("Creating repository dir at `%s' ..." % repo)
            mkdir(repo)
        else:
            logging.warning("The folder `%s' already exists." % repo)
        Config.set("phoenix", "repository_dir", repo)
        if not path.exists(tar):
            logging.info("Creating tarball dir at `%s' ..." % tar)
            mkdir(tar)
        else:
            logging.warning("The folder `%s' already exists." % tar)
        Config.set("phoenix", "tarball_dir", tar)
        if not path.exists(ssh):
            logging.info("Creating ssh dir at `%s' ..." % ssh)
            # 0700: the .ssh dir must be readable by its owner only.
            mkdir(ssh, 0700)
        else:
            logging.warning("The folder `%s' already exists." % ssh)
        Config.set("phoenix", "ssh_dir", ssh)

    def _sqlChanges(self):
        """Create all model tables if they do not exist yet."""
        from Phoenix.Models import Privilege, Repository, Role
        Member.createTable(ifNotExists=True)
        Role.createTable(ifNotExists=True)
        Repository.createTable(ifNotExists=True)
        Privilege.createTable(ifNotExists=True)
        Hook.createTable(ifNotExists=True)
        Key.createTable(ifNotExists=True)

    def debug(self):
        """Dump a couple of config values for troubleshooting."""
        print Config.get("ABS_PATH")
        print Config.get("phoenix", "hook_dir")
| |
#pylint: disable=R0904
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
#
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Tests the data adapter implementation.
"""
__version__ = "$Revision-Id:$"
from StringIO import StringIO
import unittest
from webdav.Connection import WebdavError
from datafinder.persistence.adapters.webdav_.data.adapter import DataWebdavAdapter
from datafinder.persistence.error import PersistenceError
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
_VALID_GETCHILDREN_WEBDAV_RESULT = {"/Path": (True, None)}
_VALID_GETCHILDREN_RESULT = ["/Path"]
_PROPERTY_NOT_FOUND_MESSAGE = "Property is missing:"
class DataWebdavAdapterTestCase(unittest.TestCase):
""" Tests the data adapter implementation. """
def setUp(self):
    """ Creates default adapter usable by test cases. """
    # Mock arguments mirror DataWebdavAdapter's constructor; presumably
    # (identifier, connection helper, path helper, webdav client factory)
    # — confirm against the adapter's signature.
    self._defaultAdapter = DataWebdavAdapter("/path/identifier", SimpleMock(), SimpleMock("identifier"), SimpleMock(SimpleMock()))
def testLinkTarget(self):
    """ Tests the link target property. """
    def makeAdapter(properties):
        # Helper: adapter whose mocked WebDAV layer yields *properties*.
        return DataWebdavAdapter(
            "identifier", SimpleMock(), SimpleMock(), SimpleMock(properties))

    linkAdapter = makeAdapter({"/": (False, "/thelinkTargetPath")})
    self.assertEquals(linkAdapter.linkTarget, "/thelinkTargetPath")
    self.assertTrue(linkAdapter.isLink)

    plainAdapter = makeAdapter({"/": (False, None)})
    self.assertEquals(plainAdapter.linkTarget, None)
    self.assertFalse(plainAdapter.isLink)
def testIsLink(self):
    """ Tests the normal behavior of the isLink method. """
    withTarget = DataWebdavAdapter(
        "identifier", SimpleMock(), SimpleMock(),
        SimpleMock({"/": (False, "/thelinkTargetPath")}))
    self.assertTrue(withTarget.isLink)

    withoutTarget = DataWebdavAdapter(
        "identifier", SimpleMock(), SimpleMock(),
        SimpleMock({"/": (False, None)}))
    self.assertFalse(withoutTarget.isLink)
def testIsLeaf(self):
    """ Tests the normal behavior of the isLeaf method. """
    def adapterFor(properties):
        return DataWebdavAdapter(
            "identifier", SimpleMock(), SimpleMock(), SimpleMock(properties))

    # Collections and links are not leaves; a plain resource is.
    self.assertFalse(adapterFor({"/": (True, None)}).isLeaf)
    self.assertFalse(adapterFor({"/": (False, "/thelinkTargetPath")}).isLeaf)
    self.assertTrue(adapterFor({"/": (False, None)}).isLeaf)
def testIsCollection(self):
    """ Tests the normal behavior of the isCollection method. """
    def adapterFor(properties):
        return DataWebdavAdapter(
            "identifier", SimpleMock(), SimpleMock(), SimpleMock(properties))

    collectionAdapter = adapterFor({"/": (True, None)})
    self.assertTrue(collectionAdapter.isCollection)
    self.assertTrue(collectionAdapter.canAddChildren)

    linkAdapter = adapterFor({"/": (False, "/thelinkTargetPath")})
    self.assertFalse(linkAdapter.isCollection)
    self.assertFalse(linkAdapter.canAddChildren)

    leafAdapter = adapterFor({"/": (False, None)})
    self.assertFalse(leafAdapter.isCollection)
    self.assertFalse(leafAdapter.canAddChildren)
def testCreateResource(self):
    """ Tests the normal behavior of the createResource method. """
    self._defaultAdapter.createResource()
    # An empty resource name must be rejected.
    failingAdapter = DataWebdavAdapter(
        "identifier", SimpleMock(), SimpleMock(""), SimpleMock(SimpleMock()))
    self.assertRaises(PersistenceError, failingAdapter.createResource)
def testCreateLink(self):
    """ Tests the normal behavior of the createLink method. """
    # Smoke test only: linking the adapter to itself must not raise.
    self._defaultAdapter.createLink(self._defaultAdapter)
def testCreateCollection(self):
    """ Tests the normal behavior of the createCollection method. """
    self._defaultAdapter.createCollection()
    # Recursive variant.
    self._defaultAdapter.createCollection(True)
    # An empty collection name must be rejected.
    failingAdapter = DataWebdavAdapter(
        "identifier", SimpleMock(), SimpleMock(""), SimpleMock(SimpleMock()))
    self.assertRaises(PersistenceError, failingAdapter.createCollection)
def testGetChildren(self):
""" Tests the normal behavior of the getChildren method. """
adapter = DataWebdavAdapter("/identifier", SimpleMock(), SimpleMock("/Path"),
SimpleMock(_VALID_GETCHILDREN_WEBDAV_RESULT))
self.assertEquals(adapter.getChildren(), _VALID_GETCHILDREN_RESULT)
    def testWriteData(self):
        """ Tests the normal behavior of the writeData method. """
        # Writing an empty data stream must succeed without raising.
        self._defaultAdapter.writeData(StringIO(""))
def testReadData(self):
""" Tests the normal behavior of the readData method. """
adapter = DataWebdavAdapter("identifier", SimpleMock(), SimpleMock(), SimpleMock(SimpleMock(StringIO(""))))
self.assertTrue(isinstance(adapter.readData(), StringIO))
    def testDelete(self):
        """ Tests the normal behavior of the delete method. """
        # Deleting the default resource must succeed without raising.
        self._defaultAdapter.delete()
def testMove(self):
""" Tests the normal behavior of the move method. """
destination = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(), SimpleMock(SimpleMock()))
self._defaultAdapter.move(destination)
def testCopy(self):
""" Tests the normal behavior of the copy method. """
destination = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(), SimpleMock(SimpleMock()))
self._defaultAdapter.copy(destination)
def testExists(self):
""" Tests the normal behavior of the exists method. """
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(), SimpleMock(SimpleMock()))
self.assertTrue(adapter.exists())
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(), SimpleMock(error=WebdavError("", 404)))
self.assertFalse(adapter.exists())
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(), SimpleMock(error=WebdavError("")))
self.assertRaises(PersistenceError, adapter.exists)
def testErrorHandlingOnLibraryInstanceCreation(self):
""" Tests the error handling when creating concrete library instances. """
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock("anotherIdentifier"),
SimpleMock(error=PersistenceError("")))
try:
self.assertFalse(adapter.isLink)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
try:
self.assertFalse(adapter.isLeaf)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
try:
self.assertFalse(adapter.isCollection)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
self.assertRaises(PersistenceError, adapter.createLink, self._defaultAdapter)
self.assertRaises(PersistenceError, adapter.createResource)
self.assertRaises(PersistenceError, adapter.createCollection)
self.assertRaises(PersistenceError, adapter.getChildren)
self.assertRaises(PersistenceError, adapter.writeData, StringIO(""))
self.assertRaises(PersistenceError, adapter.readData)
self.assertRaises(PersistenceError, adapter.delete)
self.assertRaises(PersistenceError, adapter.move, self._defaultAdapter)
self.assertRaises(PersistenceError, adapter.copy, self._defaultAdapter)
def testErrorHandlingUsingLibraryInstances(self):
""" Tests the error handling when using concrete library instances. """
connectionHelperMock = SimpleMock(methodNameResultMap={"determineResourceType": (None, WebdavError(""))})
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(""), connectionHelperMock)
try:
self.assertFalse(adapter.isLink)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
try:
self.assertFalse(adapter.isLeaf)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
try:
self.assertFalse(adapter.isCollection)
self.fail("PersistenceError not raised.")
except PersistenceError:
self.assertTrue(True)
self.assertRaises(PersistenceError, adapter.getChildren)
connectionHelperMock = SimpleMock(SimpleMock(error=WebdavError("")))
adapter = DataWebdavAdapter("/anotherIdentifier", SimpleMock(), SimpleMock(""), connectionHelperMock)
self.assertRaises(PersistenceError, adapter.createLink, self._defaultAdapter)
self.assertRaises(PersistenceError, adapter.createResource)
self.assertRaises(PersistenceError, adapter.createCollection)
self.assertRaises(PersistenceError, adapter.writeData, StringIO(""))
self.assertRaises(PersistenceError, adapter.readData)
self.assertRaises(PersistenceError, adapter.delete)
self.assertRaises(PersistenceError, adapter.move, self._defaultAdapter)
self.assertRaises(PersistenceError, adapter.copy, self._defaultAdapter)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2017, Center of Speech and Language of Tsinghua University.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import tarfile
from tensorflow.python.platform import gfile
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"  # padding token
_GO = b"_GO"  # decoder start-of-sequence token
_EOS = b"_EOS"  # end-of-sequence token
_UNK = b"_UNK"  # out-of-vocabulary token
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
# Reserved token ids; they match the positions in _START_VOCAB.
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_DIGIT_RE = re.compile(br"\d")  # matches one ASCII digit (bytes pattern)
def basic_tokenizer(sentence, lower=True):
  """Very basic tokenizer: split the sentence on whitespace.

  Args:
    sentence: str or bytes sentence; leading/trailing whitespace is ignored.
    lower: if True, lower-case every token.

  Returns:
    A list of non-empty tokens.
  """
  tokens = sentence.strip().split()
  if lower:
    return [token.lower() for token in tokens if token]
  return [token for token in tokens if token]
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=True):
  """Create vocabulary file (if it does not exist yet) from data file.

  Data file is assumed to contain one sentence per line. Each sentence is
  tokenized and digits are normalized (if normalize_digits is set).
  Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that later
  token in the first line gets id=0, second line gets id=1, and so on.

  Args:
    vocabulary_path: path where the vocabulary will be created.
    data_path: data file that will be used to create vocabulary.
    max_vocabulary_size: limit on the size of the created vocabulary.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  # No-op when the vocabulary file already exists (results are cached on disk).
  if not gfile.Exists(vocabulary_path):
    print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
    vocab = {}
    with gfile.GFile(data_path, mode="rb") as f:
      counter = 0
      for line in f:
        counter += 1
        if counter % 100000 == 0:
          print("  processing line %d" % counter)
        tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
        for w in tokens:
          # Map every digit to b"0" so numbers share vocabulary entries.
          word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
          if word in vocab:
            vocab[word] += 1
          else:
            vocab[word] = 1
      # Reserved symbols first, then tokens by descending frequency; truncate
      # to the requested maximum size.
      vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
      if len(vocab_list) > max_vocabulary_size:
        vocab_list = vocab_list[:max_vocabulary_size]
      with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
        for w in vocab_list:
          vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
  """Initialize vocabulary from file.

  Args:
    vocabulary_path: path to the file containing the vocabulary, one token
      per line; the line number of a token is its id.

  Returns:
    a pair: the vocabulary (a dictionary mapping string to integers), and
    the reversed vocabulary (a list, which reverses the vocabulary mapping).

  Raises:
    ValueError: if the provided vocabulary_path does not exist.
  """
  # Guard clause keeps the happy path unindented.
  if not gfile.Exists(vocabulary_path):
    # Fixed: the original passed vocabulary_path as an extra ValueError
    # argument instead of interpolating it into the message.
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
  rev_vocab = []
  with gfile.GFile(vocabulary_path, mode="rb") as f:
    rev_vocab.extend(f.readlines())
  rev_vocab = [line.strip() for line in rev_vocab]
  # Token -> id; the id is the token's line number in the file.
  vocab = dict((x, y) for (y, x) in enumerate(rev_vocab))
  return vocab, rev_vocab
def sentence_to_token_ids(sentence, vocabulary,
                          tokenizer=None, normalize_digits=True):
  """Convert a string to list of integers representing token-ids.

  For example, a sentence "I have a dog" may become tokenized into
  ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
  "a": 4, "dog": 7"} this function will return [1, 2, 4, 7].

  Args:
    sentence: the sentence in bytes format to convert to token-ids.
    vocabulary: a dictionary mapping tokens to integers.
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.

  Returns:
    a list of integers, the token-ids for the sentence.
  """
  words = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
  if normalize_digits:
    # Replace every digit by 0 before the vocabulary lookup, so the lookup
    # matches the normalization applied when the vocabulary was built.
    return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
  return [vocabulary.get(w, UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path,
                      tokenizer=None, normalize_digits=True):
  """Tokenize data file and turn into token-ids using given vocabulary file.

  This function loads data line-by-line from data_path, calls the above
  sentence_to_token_ids, and saves the result to target_path. See comment
  for sentence_to_token_ids on the details of token-ids format.

  Args:
    data_path: path to the data file in one-sentence-per-line format.
    target_path: path where the file with token-ids will be created.
    vocabulary_path: path to the vocabulary file.
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  # Results are cached on disk: do nothing when the target already exists.
  if gfile.Exists(target_path):
    return
  print("Tokenizing data in %s" % data_path)
  vocab, _ = initialize_vocabulary(vocabulary_path)
  with gfile.GFile(data_path, mode="rb") as data_file, \
       gfile.GFile(target_path, mode="w") as tokens_file:
    for counter, line in enumerate(data_file, 1):
      if counter % 100000 == 0:
        print("  tokenizing line %d" % counter)
      token_ids = sentence_to_token_ids(line, vocab, tokenizer,
                                        normalize_digits)
      tokens_file.write(" ".join(str(tok) for tok in token_ids) + "\n")
def prepare_data(data_dir, src_vocabulary_size, trg_vocabulary_size, tokenizer=None):
  """Prepare data from data_dir: build vocabularies and tokenize the corpora.

  Args:
    data_dir: directory in which the data sets are stored.
    src_vocabulary_size: size of the source vocabulary to create and use.
    trg_vocabulary_size: size of the target vocabulary to create and use.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.

  Returns:
    A tuple of 6 elements:
      (1) path to the token-ids for source training data-set,
      (2) path to the token-ids for target training data-set,
      (3) path to the token-ids for source development data-set,
      (4) path to the token-ids for target development data-set,
      (5) path to the source vocabulary file,
      (6) path to the target vocabulary file.
  """
  def _ids_path(prefix, side, size):
    # Naming convention for the token-id files, e.g. "train.ids30000.trg".
    return prefix + (".ids%d.%s" % (size, side))

  train_path = os.path.join(data_dir, "train")
  dev_path = os.path.join(data_dir, "dev")
  # Build one vocabulary per language side from the training corpus.
  trg_vocab_path = os.path.join(data_dir, "vocab%d.trg" % trg_vocabulary_size)
  src_vocab_path = os.path.join(data_dir, "vocab%d.src" % src_vocabulary_size)
  create_vocabulary(trg_vocab_path, train_path + ".trg", trg_vocabulary_size, tokenizer)
  create_vocabulary(src_vocab_path, train_path + ".src", src_vocabulary_size, tokenizer)
  # Convert training data to token ids (target side first, matching the
  # vocabulary creation order above).
  trg_train_ids_path = _ids_path(train_path, "trg", trg_vocabulary_size)
  src_train_ids_path = _ids_path(train_path, "src", src_vocabulary_size)
  data_to_token_ids(train_path + ".trg", trg_train_ids_path, trg_vocab_path, tokenizer)
  data_to_token_ids(train_path + ".src", src_train_ids_path, src_vocab_path, tokenizer)
  # Convert development data to token ids.
  trg_dev_ids_path = _ids_path(dev_path, "trg", trg_vocabulary_size)
  src_dev_ids_path = _ids_path(dev_path, "src", src_vocabulary_size)
  data_to_token_ids(dev_path + ".trg", trg_dev_ids_path, trg_vocab_path, tokenizer)
  data_to_token_ids(dev_path + ".src", src_dev_ids_path, src_vocab_path, tokenizer)
  return (src_train_ids_path, trg_train_ids_path,
          src_dev_ids_path, trg_dev_ids_path,
          src_vocab_path, trg_vocab_path)
if __name__ == '__main__':
    # Script entry point: prepare the default "data" directory with
    # 30k-entry source and target vocabularies.
    prepare_data("data", 30000, 30000)
| |
# -*- coding: utf-8 -*-
# File: config.py
import os
from ..compat import tfv1
from ..callbacks import (
JSONWriter, MergeAllSummaries, MovingAverageSummary, ProgressBar, RunUpdateOps, ScalarPrinter, TFEventWriter)
from ..dataflow.base import DataFlow
from ..input_source import InputSource
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.sessinit import SaverRestore, SessionInit
from ..utils import logger
from .model_desc import ModelDescBase
__all__ = ['TrainConfig', 'AutoResumeTrainConfig', 'DEFAULT_CALLBACKS', 'DEFAULT_MONITORS']
def DEFAULT_CALLBACKS():
    """
    Return the default callbacks,
    which will be used in :class:`TrainConfig` and :meth:`Trainer.train_with_defaults`.
    They are:
    1. MovingAverageSummary()
    2. ProgressBar()
    3. MergeAllSummaries()
    4. RunUpdateOps()
    """
    # Build the list incrementally; a fresh list (with fresh callback
    # instances) is returned on every call.
    defaults = [MovingAverageSummary(), ProgressBar()]
    defaults.append(MergeAllSummaries())
    defaults.append(RunUpdateOps())
    return defaults
def DEFAULT_MONITORS():
    """
    Return the default monitors,
    which will be used in :class:`TrainConfig` and :meth:`Trainer.train_with_defaults`.
    They are:
    1. TFEventWriter()
    2. JSONWriter()
    3. ScalarPrinter()
    """
    # A fresh list of fresh monitor instances on every call.
    defaults = [TFEventWriter()]
    defaults.append(JSONWriter())
    defaults.append(ScalarPrinter())
    return defaults
class TrainConfig(object):
    """
    A collection of options to be used for single-cost trainers.
    Note that you do not have to use :class:`TrainConfig`.
    You can use the API of :class:`Trainer` directly, to have more fine-grained control of the training.
    """

    def __init__(self,
                 dataflow=None, data=None,
                 model=None,
                 callbacks=None, extra_callbacks=None, monitors=None,
                 session_creator=None, session_config=None, session_init=None,
                 starting_epoch=1, steps_per_epoch=None, max_epoch=99999):
        """
        Args:
            dataflow (DataFlow):
            data (InputSource):
            model (ModelDesc):
            callbacks (list[Callback]): a list of :class:`Callback` to use during training.
            extra_callbacks (list[Callback]): This argument
                is only used to provide the defaults in addition to ``callbacks``.
                The list of callbacks that will be used in the end is simply ``callbacks + extra_callbacks``.
                It is usually left as None, and the default value for this argument is :func:`DEFAULT_CALLBACKS()`.
                You can override it when you don't like any of the default callbacks.
                For example, if you'd like to let the progress bar print tensors, you can use
                .. code-block:: none
                    extra_callbacks=[ProgressBar(names=['name']),
                                     MovingAverageSummary(),
                                     MergeAllSummaries(),
                                     RunUpdateOps()]
            monitors (list[MonitorBase]): Defaults to :func:`DEFAULT_MONITORS()`.
            session_creator (tf.train.SessionCreator): Defaults to :class:`sesscreate.NewSessionCreator()`
                with the config returned by :func:`tfutils.get_default_sess_config()`.
            session_config (tf.ConfigProto): when session_creator is None, use this to create the session.
            session_init (SessionInit): how to initialize variables of a session. Defaults to do nothing.
            starting_epoch (int): The index of the first epoch.
            steps_per_epoch (int): the number of steps (defined by :meth:`Trainer.run_step`) to run in each epoch.
                Defaults to the input data size. You may want to divide it by the #GPUs in multi-GPU training.
                Number of steps per epoch only affects the schedule of callbacks.
                It does not affect the sequence of input data seen by the model.
            max_epoch (int): maximum number of epoch to run training.
        """
        # TODO type checker decorator
        def assert_type(v, tp, name):
            # Readable isinstance check used for every optional argument below.
            assert isinstance(v, tp), \
                "{} has to be type '{}', but an object of type '{}' found.".format(
                    name, tp.__name__, v.__class__.__name__)

        # process data & model
        # dataflow and data are mutually exclusive input descriptions.
        assert data is None or dataflow is None, "dataflow and data cannot be both presented in TrainConfig!"
        if dataflow is not None:
            assert_type(dataflow, DataFlow, 'dataflow')
        if data is not None:
            assert_type(data, InputSource, 'data')
        self.dataflow = dataflow
        self.data = data

        # Each attribute below is always assigned (possibly None); the type
        # check only runs when the argument was actually provided.
        if model is not None:
            assert_type(model, ModelDescBase, 'model')
        self.model = model

        if callbacks is not None:
            assert_type(callbacks, list, 'callbacks')
        self.callbacks = callbacks
        if extra_callbacks is not None:
            assert_type(extra_callbacks, list, 'extra_callbacks')
        self.extra_callbacks = extra_callbacks
        if monitors is not None:
            assert_type(monitors, list, 'monitors')
        self.monitors = monitors
        if session_init is not None:
            assert_type(session_init, SessionInit, 'session_init')
        self.session_init = session_init

        # session_creator and session_config are mutually exclusive ways to
        # describe session construction.
        if session_creator is None:
            if session_config is not None:
                self.session_creator = NewSessionCreator(config=session_config)
            else:
                self.session_creator = NewSessionCreator(config=None)
        else:
            self.session_creator = session_creator
            assert session_config is None, "Cannot set both session_creator and session_config!"

        # Infer steps_per_epoch from the input size when not given explicitly;
        # fail loudly when the size is unavailable.
        if steps_per_epoch is None:
            try:
                if dataflow is not None:
                    steps_per_epoch = len(dataflow)
                elif data is not None:
                    steps_per_epoch = data.size()
                else:
                    raise NotImplementedError()
            except NotImplementedError:
                logger.error("You must set `TrainConfig(steps_per_epoch)` if the size of your input is not available.")
                raise
        else:
            steps_per_epoch = int(steps_per_epoch)
        self.steps_per_epoch = steps_per_epoch

        self.starting_epoch = int(starting_epoch)
        self.max_epoch = int(max_epoch)
class AutoResumeTrainConfig(TrainConfig):
    """
    Same as :class:`TrainConfig`, but does the following to automatically
    resume from training:
    1. If a checkpoint was found in :meth:`logger.get_logger_dir()`, set
       `session_init` option to load it.
    2. If a JSON history was found in :meth:`logger.get_logger_dir()`, try to
       load the epoch number from it and set the `starting_epoch` option to
       continue training.
    You can choose to let the above two option to either overwrite or
    not overwrite user-provided arguments, as explained below.
    Note that the functionality requires the logging directory to obtain
    necessary information from a previous run.
    If you have unconventional setup of logging directory, this class will not
    work for you, for example:
    1. If you save the checkpoint to a different directory rather than the
       logging directory.
    2. If in distributed training the directory is not
       available to every worker, or the directories are different for different workers.
    """

    def __init__(self, always_resume=True, **kwargs):
        """
        Args:
            always_resume (bool): If False, user-provided arguments
                `session_init` and `starting_epoch` will take priority.
                Otherwise, resume will take priority.
            kwargs: same as in :class:`TrainConfig`.
        Note:
            The main goal of this class is to let a training job resume
            without changing any line of code or command line arguments.
            So it's useful to let resume take priority over user-provided arguments sometimes.
            For example: if your training starts from a pre-trained model,
            you would want it to use user-provided model loader at the
            beginning, but a "resume" model loader when the job was
            interrupted and restarted.
        """
        # 1. Checkpoint: possibly override session_init with a resume loader.
        found_sessinit = False
        if always_resume or 'session_init' not in kwargs:
            sessinit = self.get_sessinit_resume()
            if sessinit is not None:
                found_sessinit = True
                path = sessinit.path
                if 'session_init' in kwargs:
                    logger.info("Found checkpoint at {}. "
                                "session_init arguments will be overwritten.".format(path))
                else:
                    logger.info("Will load checkpoint at {}.".format(path))
                kwargs['session_init'] = sessinit

        # 2. JSON history: possibly override starting_epoch to continue.
        found_last_epoch = False
        if always_resume or 'starting_epoch' not in kwargs:
            last_epoch = JSONWriter.load_existing_epoch_number()
            if last_epoch is not None:
                found_last_epoch = True
                now_epoch = last_epoch + 1
                logger.info("Found history statistics from JSON. "
                            "Setting starting_epoch to {}.".format(now_epoch))
                kwargs['starting_epoch'] = now_epoch

        # A checkpoint without a matching JSON history (or vice versa) would
        # resume inconsistently, so both must be found together.
        assert found_sessinit == found_last_epoch, \
            "Found SessionInit={}, Found Last Epoch={}".format(found_sessinit, found_last_epoch)

        super(AutoResumeTrainConfig, self).__init__(**kwargs)

    @staticmethod
    def get_sessinit_resume(dir=None):
        # Return a SaverRestore for the checkpoint in `dir` (defaults to the
        # logger directory), or None when no checkpoint exists.
        # NOTE: the parameter name `dir` shadows the builtin; kept as-is for
        # backward compatibility with keyword callers.
        if dir is None:
            dir = logger.get_logger_dir()
        if not dir:
            return None
        path = os.path.join(dir, 'checkpoint')
        if not tfv1.gfile.Exists(path):
            return None
        return SaverRestore(path)
| |
# Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient import exc as ironic_exc
import mock
import six
from webob import exc
from nova.api.openstack.compute import baremetal_nodes \
as b_nodes_v21
from nova.api.openstack.compute.legacy_v2.contrib import baremetal_nodes \
as b_nodes_v2
from nova.api.openstack import extensions
from nova import context
from nova import test
from nova.tests.unit.virt.ironic import utils as ironic_utils
class FakeRequest(object):
    # Minimal stand-in for a WSGI request: only carries the nova context in
    # the environ mapping, which is all the controller under test reads.
    def __init__(self, context):
        self.environ = {"nova.context": context}
def fake_node(**updates):
    """Return a baremetal node dict; keyword arguments override defaults."""
    node = {
        'id': 1,
        'service_host': "host",
        'cpus': 8,
        'memory_mb': 8192,
        'local_gb': 128,
        'pm_address': "10.1.2.3",
        'pm_user': "pm_user",
        'pm_password': "pm_pass",
        'terminal_port': 8000,
        'interfaces': [],
        'instance_uuid': 'fake-instance-uuid',
    }
    # dict.update with no overrides is a no-op, so no guard is needed.
    node.update(updates)
    return node
def fake_node_ext_status(**updates):
    """Return a baremetal node dict with extended-status fields filled in."""
    node = fake_node(uuid='fake-uuid',
                     task_state='fake-task-state',
                     updated_at='fake-updated-at',
                     pxe_config_path='fake-pxe-config-path')
    # Apply caller overrides last; an empty update is a no-op.
    node.update(updates)
    return node
# Shared fake ironic client instance, patched into both API versions below.
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
@mock.patch.object(b_nodes_v21, '_get_ironic_client',
                   lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV21(test.NoDBTestCase):
    """Tests for the v2.1 baremetal-nodes API backed by a fake ironic client."""

    # Controller module under test; the V2 subclass overrides this.
    mod = b_nodes_v21

    def setUp(self):
        super(BareMetalNodesTestV21, self).setUp()
        self._setup()
        self.context = context.get_admin_context()
        self.request = FakeRequest(self.context)

    def _setup(self):
        # Hook overridden by subclasses to construct a different controller.
        self.controller = b_nodes_v21.BareMetalNodeController()

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic(self, mock_list):
        """Index maps ironic node properties onto the API response fields."""
        properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': properties['memory_mb'],
                             'host': 'IRONIC MANAGED',
                             'disk_gb': properties['local_gb'],
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic_missing_properties(self, mock_list):
        """Missing node properties must default to 0 in the response."""
        properties = {'cpus': 2}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': 0,
                             'host': 'IRONIC MANAGED',
                             'disk_gb': 0,
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)

    def test_index_ironic_not_implemented(self):
        """Index raises HTTPNotImplemented without an ironic client."""
        with mock.patch.object(self.mod, 'ironic_client', None):
            self.assertRaises(exc.HTTPNotImplemented,
                              self.controller.index,
                              self.request)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic(self, mock_get, mock_list_ports):
        """Show returns node details with port addresses as interfaces."""
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': properties['memory_mb'],
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': properties['local_gb'],
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': properties['cpus']}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_properties(self, mock_get, mock_list_ports):
        """Show must default numeric fields to 0 when properties are empty."""
        properties = {}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': 0,
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': 0,
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': 0}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
        """Show returns an empty interface list for a node without ports."""
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        mock_get.return_value = node
        mock_list_ports.return_value = []
        res_dict = self.controller.show(self.request, node.uuid)
        self.assertEqual([], res_dict['node']['interfaces'])
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get',
                       side_effect=ironic_exc.NotFound())
    def test_show_ironic_node_not_found(self, mock_get):
        """An ironic NotFound maps to HTTPNotFound mentioning the uuid."""
        error = self.assertRaises(exc.HTTPNotFound, self.controller.show,
                                  self.request, 'fake-uuid')
        self.assertIn('fake-uuid', six.text_type(error))

    def test_show_ironic_not_implemented(self):
        """Show raises HTTPNotImplemented without an ironic client."""
        with mock.patch.object(self.mod, 'ironic_client', None):
            properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
            node = ironic_utils.get_test_node(properties=properties)
            self.assertRaises(exc.HTTPNotImplemented, self.controller.show,
                              self.request, node.uuid)

    def test_create_ironic_not_supported(self):
        """Node creation through the compute API is not supported."""
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.create,
                          self.request, {'node': object()})

    def test_delete_ironic_not_supported(self):
        """Node deletion through the compute API is not supported."""
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.delete,
                          self.request, 'fake-id')

    def test_add_interface_ironic_not_supported(self):
        """Adding interfaces through the compute API is not supported."""
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._add_interface,
                          self.request, 'fake-id', 'fake-body')

    def test_remove_interface_ironic_not_supported(self):
        """Removing interfaces through the compute API is not supported."""
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._remove_interface,
                          self.request, 'fake-id', 'fake-body')
@mock.patch.object(b_nodes_v2, '_get_ironic_client',
                   lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV2(BareMetalNodesTestV21):
    """Runs the V21 test suite against the legacy v2 controller."""
    mod = b_nodes_v2

    def _setup(self):
        # The v2 controller additionally requires an extension manager mock.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import docker
import pytest
from .. import mock
from .. import unittest
from compose.const import IS_WINDOWS_PLATFORM
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.service import build_volume_binding
from compose.service import ConfigError
from compose.service import ContainerNet
from compose.service import get_container_data_volumes
from compose.service import merge_volume_bindings
from compose.service import NeedsBuildError
from compose.service import Net
from compose.service import NoSuchImageError
from compose.service import parse_repository_tag
from compose.service import parse_volume_spec
from compose.service import Service
from compose.service import ServiceNet
from compose.service import VolumeFromSpec
class ServiceTest(unittest.TestCase):
    def setUp(self):
        # Autospec'd docker client so every call is signature-checked.
        self.mock_client = mock.create_autospec(docker.Client)
def test_project_validation(self):
self.assertRaises(ConfigError, lambda: Service(name='foo', project='>', image='foo'))
Service(name='foo', project='bar.bar__', image='foo')
def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo')
self.mock_client.containers.return_value = []
self.assertEqual(list(service.containers()), [])
def test_containers_with_containers(self):
self.mock_client.containers.return_value = [
dict(Name=str(i), Image='foo', Id=i) for i in range(3)
]
service = Service('db', self.mock_client, 'myproject', image='foo')
self.assertEqual([c.id for c in service.containers()], list(range(3)))
expected_labels = [
'{0}=myproject'.format(LABEL_PROJECT),
'{0}=db'.format(LABEL_SERVICE),
'{0}=False'.format(LABEL_ONE_OFF),
]
self.mock_client.containers.assert_called_once_with(
all=False,
filters={'label': expected_labels})
def test_container_without_name(self):
self.mock_client.containers.return_value = [
{'Image': 'foo', 'Id': '1', 'Name': '1'},
{'Image': 'foo', 'Id': '2', 'Name': None},
{'Image': 'foo', 'Id': '3'},
]
service = Service('db', self.mock_client, 'myproject', image='foo')
self.assertEqual([c.id for c in service.containers()], ['1'])
self.assertEqual(service._next_container_number(), 2)
self.assertEqual(service.get_container(1).id, '1')
def test_get_volumes_from_container(self):
container_id = 'aabbccddee'
service = Service(
'test',
image='foo',
volumes_from=[VolumeFromSpec(mock.Mock(id=container_id, spec=Container), 'rw')])
self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
def test_get_volumes_from_container_read_only(self):
container_id = 'aabbccddee'
service = Service(
'test',
image='foo',
volumes_from=[VolumeFromSpec(mock.Mock(id=container_id, spec=Container), 'ro')])
self.assertEqual(service._get_volumes_from(), [container_id + ':ro'])
def test_get_volumes_from_service_container_exists(self):
container_ids = ['aabbccddee', '12345']
from_service = mock.create_autospec(Service)
from_service.containers.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids
]
service = Service('test', volumes_from=[VolumeFromSpec(from_service, 'rw')], image='foo')
self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"])
def test_get_volumes_from_service_container_exists_with_flags(self):
for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
container_ids = ['aabbccddee:' + mode, '12345:' + mode]
from_service = mock.create_autospec(Service)
from_service.containers.return_value = [
mock.Mock(id=container_id.split(':')[0], spec=Container)
for container_id in container_ids
]
service = Service('test', volumes_from=[VolumeFromSpec(from_service, mode)], image='foo')
self.assertEqual(service._get_volumes_from(), [container_ids[0]])
def test_get_volumes_from_service_no_container(self):
    """When the source service has no container, one is created on demand."""
    container_id = 'abababab'
    from_service = mock.create_autospec(Service)
    from_service.containers.return_value = []
    from_service.create_container.return_value = mock.Mock(
        id=container_id,
        spec=Container)
    service = Service('test', image='foo', volumes_from=[VolumeFromSpec(from_service, 'rw')])
    self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
    from_service.create_container.assert_called_once_with()
def test_split_domainname_none(self):
    """A dotless hostname produces no domainname in create options."""
    service = Service('foo', image='foo', hostname='name', client=self.mock_client)
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertEqual(opts['hostname'], 'name', 'hostname')
    self.assertFalse('domainname' in opts, 'domainname')
def test_memory_swap_limit(self):
    """mem_limit and memswap_limit are forwarded to create_host_config."""
    self.mock_client.create_host_config.return_value = {}
    service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, mem_limit=1000000000, memswap_limit=2000000000)
    service._get_container_create_options({'some': 'overrides'}, 1)
    self.assertTrue(self.mock_client.create_host_config.called)
    self.assertEqual(
        self.mock_client.create_host_config.call_args[1]['mem_limit'],
        1000000000
    )
    self.assertEqual(
        self.mock_client.create_host_config.call_args[1]['memswap_limit'],
        2000000000
    )
def test_cgroup_parent(self):
    """cgroup_parent is forwarded to create_host_config."""
    self.mock_client.create_host_config.return_value = {}
    service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, cgroup_parent='test')
    service._get_container_create_options({'some': 'overrides'}, 1)
    self.assertTrue(self.mock_client.create_host_config.called)
    self.assertEqual(
        self.mock_client.create_host_config.call_args[1]['cgroup_parent'],
        'test'
    )
def test_log_opt(self):
    """log_driver + log_opt are combined into the docker log_config dict."""
    self.mock_client.create_host_config.return_value = {}
    log_opt = {'syslog-address': 'tcp://192.168.0.42:123'}
    service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, log_driver='syslog', log_opt=log_opt)
    service._get_container_create_options({'some': 'overrides'}, 1)
    self.assertTrue(self.mock_client.create_host_config.called)
    self.assertEqual(
        self.mock_client.create_host_config.call_args[1]['log_config'],
        {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
    )
def test_split_domainname_fqdn(self):
    """An FQDN hostname is split at the first dot into host + domain."""
    service = Service(
        'foo',
        hostname='name.domain.tld',
        image='foo',
        client=self.mock_client)
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertEqual(opts['hostname'], 'name', 'hostname')
    self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_split_domainname_both(self):
    """Explicit hostname and domainname pass through unchanged."""
    service = Service(
        'foo',
        hostname='name',
        image='foo',
        domainname='domain.tld',
        client=self.mock_client)
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertEqual(opts['hostname'], 'name', 'hostname')
    self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_split_domainname_weird(self):
    """A dotted hostname is NOT re-split when domainname is given explicitly."""
    service = Service(
        'foo',
        hostname='name.sub',
        domainname='domain.tld',
        image='foo',
        client=self.mock_client)
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
    self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_no_default_hostname_when_not_using_networking(self):
    """Without networking, no hostname is defaulted."""
    service = Service(
        'foo',
        image='foo',
        use_networking=False,
        client=self.mock_client,
    )
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertIsNone(opts.get('hostname'))
def test_hostname_defaults_to_service_name_when_using_networking(self):
    """With networking enabled, hostname defaults to the service name."""
    service = Service(
        'foo',
        image='foo',
        use_networking=True,
        client=self.mock_client,
    )
    opts = service._get_container_create_options({'image': 'foo'}, 1)
    self.assertEqual(opts['hostname'], 'foo')
def test_get_container_create_options_with_name_option(self):
    """An override 'name' beats the configured container_name for one-off runs."""
    service = Service(
        'foo',
        image='foo',
        client=self.mock_client,
        container_name='foo1')
    name = 'the_new_name'
    opts = service._get_container_create_options(
        {'name': name},
        1,
        one_off=True)
    self.assertEqual(opts['name'], name)
def test_get_container_create_options_does_not_mutate_options(self):
    """Building create options must not mutate the service's own option dicts."""
    labels = {'thing': 'real'}
    environment = {'also': 'real'}
    service = Service(
        'foo',
        image='foo',
        labels=dict(labels),
        client=self.mock_client,
        environment=dict(environment),
    )
    self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
    prev_container = mock.Mock(
        id='ababab',
        image_config={'ContainerConfig': {}})
    opts = service._get_container_create_options(
        {},
        1,
        previous_container=prev_container)
    # Source dicts are untouched...
    self.assertEqual(service.options['labels'], labels)
    self.assertEqual(service.options['environment'], environment)
    # ...while the generated options carry the config hash and the
    # affinity hint derived from the previous container.
    self.assertEqual(
        opts['labels'][LABEL_CONFIG_HASH],
        '3c85881a8903b9d73a06c41860c8be08acce1494ab4cf8408375966dccd714de')
    self.assertEqual(
        opts['environment'],
        {
            'affinity:container': '=ababab',
            'also': 'real',
        }
    )
def test_get_container_not_found(self):
    """get_container raises ValueError when no container exists."""
    self.mock_client.containers.return_value = []
    service = Service('foo', client=self.mock_client, image='foo')
    self.assertRaises(ValueError, service.get_container)
@mock.patch('compose.service.Container', autospec=True)
def test_get_container(self, mock_container_class):
    """get_container builds a Container via from_ps from the listing data."""
    container_dict = dict(Name='default_foo_2')
    self.mock_client.containers.return_value = [container_dict]
    service = Service('foo', image='foo', client=self.mock_client)
    container = service.get_container(number=2)
    self.assertEqual(container, mock_container_class.from_ps.return_value)
    mock_container_class.from_ps.assert_called_once_with(
        self.mock_client, container_dict)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image(self, mock_log):
    """pull() splits repo and tag and logs what it pulls."""
    service = Service('foo', client=self.mock_client, image='someimage:sometag')
    service.pull()
    self.mock_client.pull.assert_called_once_with(
        'someimage',
        tag='sometag',
        stream=True)
    mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
def test_pull_image_no_tag(self):
    """pull() defaults to the 'latest' tag when none is given."""
    service = Service('foo', client=self.mock_client, image='ababab')
    service.pull()
    self.mock_client.pull.assert_called_once_with(
        'ababab',
        tag='latest',
        stream=True)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image_digest(self, mock_log):
    """pull() treats an @sha256 digest as the tag."""
    service = Service('foo', client=self.mock_client, image='someimage@sha256:1234')
    service.pull()
    self.mock_client.pull.assert_called_once_with(
        'someimage',
        tag='sha256:1234',
        stream=True)
    mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...')
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container(self, _):
    """recreate stops, renames and removes the old container, starts the new."""
    mock_container = mock.create_autospec(Container)
    service = Service('foo', client=self.mock_client, image='someimage')
    service.image = lambda: {'Id': 'abc123'}
    new_container = service.recreate_container(mock_container)
    mock_container.stop.assert_called_once_with(timeout=10)
    self.mock_client.rename.assert_called_once_with(
        mock_container.id,
        '%s_%s' % (mock_container.short_id, mock_container.name))
    new_container.start.assert_called_once_with()
    mock_container.remove.assert_called_once_with()
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container_with_timeout(self, _):
    """A custom timeout is forwarded to the container stop call."""
    mock_container = mock.create_autospec(Container)
    self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
    service = Service('foo', client=self.mock_client, image='someimage')
    service.recreate_container(mock_container, timeout=1)
    mock_container.stop.assert_called_once_with(timeout=1)
def test_parse_repository_tag(self):
    """parse_repository_tag returns (repo, tag, separator) for ':' and '@' forms,
    keeping registry ports intact."""
    self.assertEqual(parse_repository_tag("root"), ("root", "", ":"))
    self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":"))
    self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":"))
    self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":"))
    self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":"))
    self.assertEqual(parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag", ":"))
    self.assertEqual(parse_repository_tag("root@sha256:digest"), ("root", "sha256:digest", "@"))
    self.assertEqual(parse_repository_tag("user/repo@sha256:digest"), ("user/repo", "sha256:digest", "@"))
    self.assertEqual(parse_repository_tag("url:5000/repo@sha256:digest"), ("url:5000/repo", "sha256:digest", "@"))
@mock.patch('compose.service.Container', autospec=True)
def test_create_container_latest_is_used_when_no_tag_specified(self, mock_container):
    """Creating from a missing image pulls it with the 'latest' tag."""
    service = Service('foo', client=self.mock_client, image='someimage')
    images = []

    def pull(repo, tag=None, **kwargs):
        self.assertEqual('someimage', repo)
        self.assertEqual('latest', tag)
        images.append({'Id': 'abc123'})
        return []

    service.image = lambda *args, **kwargs: mock_get_image(images)
    self.mock_client.pull = pull
    service.create_container()
    self.assertEqual(1, len(images))
def test_create_container_with_build(self):
    """With do_build=True a missing image triggers a build."""
    service = Service('foo', client=self.mock_client, build='.')
    images = []
    service.image = lambda *args, **kwargs: mock_get_image(images)
    service.build = lambda: images.append({'Id': 'abc123'})
    service.create_container(do_build=True)
    self.assertEqual(1, len(images))
def test_create_container_no_build(self):
    """With do_build=False and an existing image, no build happens."""
    service = Service('foo', client=self.mock_client, build='.')
    service.image = lambda: {'Id': 'abc123'}
    service.create_container(do_build=False)
    self.assertFalse(self.mock_client.build.called)
def test_create_container_no_build_but_needs_build(self):
    """A missing image with do_build=False raises NeedsBuildError."""
    service = Service('foo', client=self.mock_client, build='.')
    service.image = lambda *args, **kwargs: mock_get_image([])
    with self.assertRaises(NeedsBuildError):
        service.create_container(do_build=False)
def test_build_does_not_pull(self):
    """service.build() calls docker build once with pull disabled."""
    self.mock_client.build.return_value = [
        b'{"stream": "Successfully built 12345"}',
    ]
    service = Service('foo', client=self.mock_client, build='.')
    service.build()
    self.assertEqual(self.mock_client.build.call_count, 1)
    self.assertFalse(self.mock_client.build.call_args[1]['pull'])
def test_config_dict(self):
    """config_dict flattens net/links/volumes_from to their service names."""
    self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
    service = Service(
        'foo',
        image='example.com/foo',
        client=self.mock_client,
        net=ServiceNet(Service('other')),
        links=[(Service('one'), 'one')],
        volumes_from=[VolumeFromSpec(Service('two'), 'rw')])
    config_dict = service.config_dict()
    expected = {
        'image_id': 'abcd',
        'options': {'image': 'example.com/foo'},
        'links': [('one', 'one')],
        'net': 'other',
        'volumes_from': ['two'],
    }
    self.assertEqual(config_dict, expected)
def test_config_dict_with_net_from_container(self):
    """A container-based net is represented by the container id."""
    self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
    container = Container(
        self.mock_client,
        {'Id': 'aaabbb', 'Name': '/foo_1'})
    service = Service(
        'foo',
        image='example.com/foo',
        client=self.mock_client,
        net=container)
    config_dict = service.config_dict()
    expected = {
        'image_id': 'abcd',
        'options': {'image': 'example.com/foo'},
        'links': [],
        'net': 'aaabbb',
        'volumes_from': [],
    }
    self.assertEqual(config_dict, expected)
class NetTestCase(unittest.TestCase):
    """Tests for the Net / ContainerNet / ServiceNet network-mode helpers."""

    def test_net(self):
        """A plain string mode is used verbatim as both id and mode."""
        net = Net('host')
        self.assertEqual(net.id, 'host')
        self.assertEqual(net.mode, 'host')
        self.assertEqual(net.service_name, None)

    def test_net_container(self):
        """A container-based net exposes 'container:<id>' as its mode."""
        container_id = 'abcd'
        net = ContainerNet(Container(None, {'Id': container_id}))
        self.assertEqual(net.id, container_id)
        self.assertEqual(net.mode, 'container:' + container_id)
        self.assertEqual(net.service_name, None)

    def test_net_service(self):
        """A service-based net resolves its mode from the service's first container."""
        container_id = 'bbbb'
        service_name = 'web'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = [
            {'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
        ]
        service = Service(name=service_name, client=mock_client)
        net = ServiceNet(service)
        self.assertEqual(net.id, service_name)
        self.assertEqual(net.mode, 'container:' + container_id)
        self.assertEqual(net.service_name, service_name)

    def test_net_service_no_containers(self):
        """With no running containers, the service net has no mode."""
        service_name = 'web'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = []
        service = Service(name=service_name, client=mock_client)
        net = ServiceNet(service)
        self.assertEqual(net.id, service_name)
        self.assertEqual(net.mode, None)
        self.assertEqual(net.service_name, service_name)
def mock_get_image(images):
    """Stand-in for Service.image(): return the cached image dict or raise
    NoSuchImageError when the cache is empty."""
    if not images:
        raise NoSuchImageError()
    return images[0]
class ServiceVolumesTest(unittest.TestCase):
    """Tests for volume-spec parsing and bind-mount merging helpers."""

    def setUp(self):
        self.mock_client = mock.create_autospec(docker.Client)

    def test_parse_volume_spec_only_one_path(self):
        """A bare container path has no host path and defaults to 'rw'."""
        spec = parse_volume_spec('/the/volume')
        self.assertEqual(spec, (None, '/the/volume', 'rw'))

    def test_parse_volume_spec_internal_and_external(self):
        """The host:container form keeps both parts and defaults to 'rw'."""
        spec = parse_volume_spec('external:interval')
        self.assertEqual(spec, ('external', 'interval', 'rw'))

    def test_parse_volume_spec_with_mode(self):
        """An explicit third field (e.g. 'ro' or SELinux 'z') is the mode."""
        spec = parse_volume_spec('external:interval:ro')
        self.assertEqual(spec, ('external', 'interval', 'ro'))
        spec = parse_volume_spec('external:interval:z')
        self.assertEqual(spec, ('external', 'interval', 'z'))

    def test_parse_volume_spec_too_many_parts(self):
        """More than three colon-separated parts is a configuration error."""
        with self.assertRaises(ConfigError):
            parse_volume_spec('one:two:three:four')

    @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
    def test_parse_volume_windows_absolute_path(self):
        """Windows drive-letter paths are rewritten to the /c/... convention."""
        windows_absolute_path = "c:\\Users\\me\\Documents\\shiny\\config:\\opt\\shiny\\config:ro"
        spec = parse_volume_spec(windows_absolute_path)
        self.assertEqual(
            spec,
            (
                "/c/Users/me/Documents/shiny/config",
                "/opt/shiny/config",
                "ro"
            )
        )

    def test_build_volume_binding(self):
        """A binding maps the container path to 'host:container:mode'."""
        binding = build_volume_binding(parse_volume_spec('/outside:/inside'))
        self.assertEqual(binding, ('/inside', '/outside:/inside:rw'))

    def test_get_container_data_volumes(self):
        """Data volumes still present on the old container (requested again or
        declared by the image) are preserved; removed/host-bound ones are not."""
        options = [
            '/host/volume:/host/volume:ro',
            '/new/volume',
            '/existing/volume',
        ]
        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {
                'Volumes': {
                    '/mnt/image/data': {},
                }
            }
        }
        container = Container(self.mock_client, {
            'Image': 'ababab',
            'Volumes': {
                '/host/volume': '/host/volume',
                '/existing/volume': '/var/lib/docker/aaaaaaaa',
                '/removed/volume': '/var/lib/docker/bbbbbbbb',
                '/mnt/image/data': '/var/lib/docker/cccccccc',
            },
        }, has_been_inspected=True)
        expected = {
            '/existing/volume': '/var/lib/docker/aaaaaaaa:/existing/volume:rw',
            '/mnt/image/data': '/var/lib/docker/cccccccc:/mnt/image/data:rw',
        }
        binds = get_container_data_volumes(container, options)
        self.assertEqual(binds, expected)

    def test_merge_volume_bindings(self):
        """Explicit volume options win; carried-over data volumes are merged in."""
        options = [
            '/host/volume:/host/volume:ro',
            '/host/rw/volume:/host/rw/volume',
            '/new/volume',
            '/existing/volume',
        ]
        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {'Volumes': {}}
        }
        intermediate_container = Container(self.mock_client, {
            'Image': 'ababab',
            'Volumes': {'/existing/volume': '/var/lib/docker/aaaaaaaa'},
        }, has_been_inspected=True)
        expected = [
            '/host/volume:/host/volume:ro',
            '/host/rw/volume:/host/rw/volume:rw',
            '/var/lib/docker/aaaaaaaa:/existing/volume:rw',
        ]
        binds = merge_volume_bindings(options, intermediate_container)
        self.assertEqual(set(binds), set(expected))

    def test_mount_same_host_path_to_two_volumes(self):
        """The same host path may be bound to two container paths."""
        service = Service(
            'web',
            image='busybox',
            volumes=[
                '/host/path:/data1',
                '/host/path:/data2',
            ],
            client=self.mock_client,
        )
        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {}
            }
        }
        service._get_container_create_options(
            override_options={},
            number=1,
        )
        self.assertEqual(
            set(self.mock_client.create_host_config.call_args[1]['binds']),
            set([
                '/host/path:/data1:rw',
                '/host/path:/data2:rw',
            ]),
        )

    def test_different_host_path_in_container_json(self):
        """The actual host path reported by the previous container is reused
        even when it differs from the configured one."""
        service = Service(
            'web',
            image='busybox',
            volumes=['/host/path:/data'],
            client=self.mock_client,
        )
        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {
                    '/data': {},
                }
            }
        }
        self.mock_client.inspect_container.return_value = {
            'Id': '123123123',
            'Image': 'ababab',
            'Volumes': {
                '/data': '/mnt/sda1/host/path',
            },
        }
        service._get_container_create_options(
            override_options={},
            number=1,
            previous_container=Container(self.mock_client, {'Id': '123123123'}),
        )
        self.assertEqual(
            self.mock_client.create_host_config.call_args[1]['binds'],
            ['/mnt/sda1/host/path:/data:rw'],
        )

    def test_create_with_special_volume_mode(self):
        """Special modes such as SELinux 'z' survive container creation."""
        self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
        create_calls = []

        def create_container(*args, **kwargs):
            create_calls.append((args, kwargs))
            return {'Id': 'containerid'}

        self.mock_client.create_container = create_container
        volumes = ['/tmp:/foo:z']
        Service(
            'web',
            client=self.mock_client,
            image='busybox',
            volumes=volumes,
        ).create_container()
        self.assertEqual(len(create_calls), 1)
        self.assertEqual(self.mock_client.create_host_config.call_args[1]['binds'], volumes)
| |
"""
SublimeHighlight.
Licensed under MIT.
Copyright (C) 2012 Andrew Gibson <agibsonsw@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---------------------
Original code has been heavily modified by Isaac Muse <isaacmuse@gmail.com> for the ExportHtml project.
"""
import sublime
import re
from .st_color_scheme_matcher import ColorSchemeMatcher
from .st_mapping import lang_map
# HTML templates used when emitting highlighted output.
INLINE_BODY_START = '<code class="inline-highlight">'
BODY_START = '<div class="highlight"><pre>'
LINE = '%(code)s<br>'
INLINE_LINE = '%(code)s'
CODE = '<span style="color: %(color)s;%(style)s">%(content)s</span>'
CODEBG = '<span style="background-color: %(highlight)s; color: %(color)s;%(style)s">%(content)s</span>'
BODY_END = '</pre></div>\n'
INLINE_BODY_END = '</code>'
# Syntax-definition file extensions, in preferred load order.
ST_LANGUAGES = ('.sublime-syntax', '.tmLanguage')
class SublimeHighlight(object):
    """Convert the scoped, color-scheme-styled text of a Sublime view to HTML."""

    def __init__(self, scheme):
        """Resolve the scheme's base foreground/background colors."""
        self.view = None
        self.csm = ColorSchemeMatcher(scheme)
        self.fground = self.csm.get_special_color('foreground', simulate_transparency=True)
        self.bground = self.csm.get_special_color('background', simulate_transparency=True)

    def setup(self, **kwargs):
        """Get general document preferences from sublime preferences."""
        self.tab_size = 4
        self.size = self.view.size()
        self.pt = 0
        self.end = 0
        self.curr_row = 0
        self.ebground = self.bground

    def setup_print_block(self, curr_sel, multi=False):
        """Determine start and end points and whether to parse whole file or selection."""
        self.size = self.view.size()
        self.pt = 0
        self.end = 1
        self.curr_row = 1
        self.start_line = self.curr_row

    def print_line(self, line, num):
        """Wrap one converted line in the (inline or block) line template."""
        html_line = (INLINE_LINE if self.inline else LINE) % {
            "code": line,
        }
        return html_line

    def convert_view_to_html(self):
        """Convert the view line by line, accumulating HTML in self.html."""
        for line in self.view.split_by_newlines(sublime.Region(self.pt, self.size)):
            self.size = line.end()
            empty = not bool(line.size())
            line = self.convert_line_to_html(empty)
            self.html.append(self.print_line(line, self.curr_row))
            self.curr_row += 1

    def html_encode(self, text):
        """HTML-escape *text* and convert whitespace for fixed-width display."""
        # Bug fix: the escape table previously mapped '&', '>' and '<' to
        # themselves (and used plain spaces), so markup characters leaked
        # unescaped into the generated HTML.  Escape them properly.
        encode_table = {
            '&': '&amp;',
            '>': '&gt;',
            '<': '&lt;',
            '\t': '&nbsp;' * self.tab_size,
            '\n': ''
        }
        # Inline/wrapping output keeps single spaces breakable; block output
        # makes every space non-breaking so indentation survives.
        return re.sub(
            (r'(?!\s($|\S))\s' if self.inline or self.code_wrap else r'\s'),
            '&nbsp;',
            ''.join(
                encode_table.get(c, c) for c in text
            )
        )

    def format_text(self, line, text, color, bgcolor, style, empty, annotate=False):
        """Append one styled <span> for *text* to the line buffer."""
        if empty:
            # A non-breaking space keeps empty lines from collapsing.
            text = '&nbsp;'
        css_style = ''
        if style and style == 'bold':
            css_style += ' font-weight: bold;'
        if style and style == 'italic':
            css_style += ' font-style: italic;'
        if bgcolor is None:
            code = CODE % {
                "color": color, "content": text, "style": css_style
            }
        else:
            code = CODEBG % {
                "highlight": bgcolor, "color": color, "content": text, "style": css_style
            }
        line.append(code)

    def convert_line_to_html(self, empty):
        """Convert the current line to its HTML representation."""
        line = []
        do_highlight = self.curr_row in self.hl_lines
        while self.end <= self.size:
            # Gather the longest run of text sharing one scope.
            scope_name = self.view.scope_name(self.pt)
            while self.view.scope_name(self.end) == scope_name and self.end < self.size:
                self.end += 1
            color_match = self.csm.guess_color(scope_name, selected=do_highlight, explicit_background=True)
            color = color_match.fg_simulated
            bgcolor = color_match.bg_simulated
            style = color_match.style
            region = sublime.Region(self.pt, self.end)
            # Normal text formatting.
            tidied_text = self.html_encode(self.view.substr(region))
            self.format_text(line, tidied_text, color, bgcolor, style, empty)
            # Continue walking through the line.
            self.pt = self.end
            self.end = self.pt + 1
        # Get the color for the space at the end of a line.
        if self.end < self.view.size():
            end_key = self.view.scope_name(self.pt)
            color_match = self.csm.guess_color(end_key, explicit_background=True)
            self.ebground = color_match.bg_simulated
        # Join line segments.
        return ''.join(line)

    def write_body(self):
        """Write the body of the HTML.

        Note: a vestigial, never-read ``processed_rows`` accumulator from the
        original ExportHtml code was removed here; it had no effect.
        """
        if not self.no_wrap:
            self.html.append(INLINE_BODY_START if self.inline else BODY_START)
        # Convert the view to HTML.
        self.setup_print_block(self.view.sel()[0])
        self.convert_view_to_html()
        if not self.no_wrap:
            self.html.append(INLINE_BODY_END if self.inline else BODY_END)

    def set_view(self, src, lang):
        """Load *src* into a scratch output panel and pick a syntax for *lang*."""
        # Get the output panel.
        self.view = sublime.active_window().get_output_panel('mdpopups')
        # Let all plugins know to leave this view alone.
        self.view.settings().set('is_widget', True)
        # Don't translate anything.
        self.view.settings().set("translate_tabs_to_spaces", False)
        # Don't mess with my indenting Sublime!
        self.view.settings().set("auto_indent", False)
        # Insert into the view.
        self.view.run_command('insert', {'characters': src})
        # Setup the proper syntax: merge the user's language map over the
        # bundled one, then try each candidate syntax file until one loads.
        lang = lang.lower()
        user_map = sublime.load_settings('Preferences.sublime-settings').get('mdpopups.sublime_user_lang_map', {})
        keys = set(list(user_map.keys()) + list(lang_map.keys()))
        loaded = False
        for key in keys:
            v = lang_map.get(key, (tuple(), tuple()))
            user_v = user_map.get(key, (tuple(), tuple()))
            if lang in (tuple(user_v[0]) + v[0]):
                for l in (tuple(user_v[1]) + v[1]):
                    for ext in ST_LANGUAGES:
                        syntax_file = 'Packages/%s%s' % (l, ext)
                        try:
                            sublime.load_binary_resource(syntax_file)
                        except Exception:
                            continue
                        self.view.set_syntax_file(syntax_file)
                        loaded = True
                        break
                    if loaded:
                        break
            if loaded:
                break
        if not loaded:
            # Default to plain text.
            for ext in ST_LANGUAGES:
                # Just in case text one day switches to 'sublime-syntax'.
                syntax_file = 'Packages/Plain text%s' % ext
                try:
                    sublime.load_binary_resource(syntax_file)
                except Exception:
                    continue
                self.view.set_syntax_file(syntax_file)

    def syntax_highlight(self, src, lang, hl_lines=None, inline=False, no_wrap=False, code_wrap=False):
        """Syntax-highlight *src* as *lang* and return the HTML string.

        ``hl_lines`` previously used a mutable default (``[]``); it now
        defaults to ``None`` and is normalized, which is call-compatible.
        """
        self.set_view(src, 'text' if not lang else lang)
        self.inline = inline
        self.hl_lines = hl_lines if hl_lines is not None else []
        self.no_wrap = no_wrap
        self.code_wrap = code_wrap
        self.setup()
        self.html = []
        self.write_body()
        return ''.join(self.html)
| |
import sys
from . import api, model
# Mapping from a common C type name to either a resolved model type or the
# name of another common/primitive type to be resolved recursively.
COMMON_TYPES = {
    'FILE': model.unknown_type('FILE', '_IO_FILE'),
    'bool': '_Bool',
}

# Every '<name>_t' primitive (int8_t, size_t, ...) resolves to itself.
for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
    if _type.endswith('_t'):
        COMMON_TYPES[_type] = _type
del _type

# Memoization cache for resolve_common_type().
_CACHE = {}
def resolve_common_type(commontype):
    """Resolve a common C type name to a model.BaseType, memoizing the result."""
    if commontype in _CACHE:
        return _CACHE[commontype]
    resolved = COMMON_TYPES.get(commontype, commontype)
    if isinstance(resolved, str):
        # A string is an alias chain: pointers, primitives, or another alias.
        if resolved.endswith(' *'):
            pointee = resolved[:-2]
            if pointee.startswith('const '):
                resolved = model.ConstPointerType(
                    resolve_common_type(pointee[6:]))
            else:
                resolved = model.PointerType(resolve_common_type(pointee))
        elif resolved in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
            resolved = model.PrimitiveType(resolved)
        else:
            assert commontype != resolved
            resolved = resolve_common_type(resolved)  # follow the alias
    assert isinstance(resolved, model.BaseTypeByIdentity)
    _CACHE[commontype] = resolved
    return resolved
# ____________________________________________________________
# Windows common types
def win_common_types(maxsize):
    """Return the table of Win32 type aliases for a platform whose
    sys.maxsize is *maxsize* (selects 32- vs 64-bit pointer-sized types)."""
    result = {}
    if maxsize < (1<<32):
        result.update({  # Windows 32-bits
            'HALF_PTR': 'short',
            'INT_PTR': 'int',
            'LONG_PTR': 'long',
            'UHALF_PTR': 'unsigned short',
            'UINT_PTR': 'unsigned int',
            'ULONG_PTR': 'unsigned long',
        })
    else:
        result.update({  # Windows 64-bits
            'HALF_PTR': 'int',
            'INT_PTR': 'long long',
            'LONG_PTR': 'long long',
            'UHALF_PTR': 'unsigned int',
            'UINT_PTR': 'unsigned long long',
            'ULONG_PTR': 'unsigned long long',
        })
    # Pointer-width-independent aliases.  Values are either C type strings
    # (possibly aliases resolved recursively) or pre-built model types.
    result.update({
        "BYTE": "unsigned char",
        "BOOL": "int",
        "CCHAR": "char",
        "CHAR": "char",
        "DWORD": "unsigned long",
        "DWORD32": "unsigned int",
        "DWORD64": "unsigned long long",
        "FLOAT": "float",
        "INT": "int",
        "INT8": "signed char",
        "INT16": "short",
        "INT32": "int",
        "INT64": "long long",
        "LONG": "long",
        "LONGLONG": "long long",
        "LONG32": "int",
        "LONG64": "long long",
        "WORD": "unsigned short",
        "PVOID": model.voidp_type,
        "ULONGLONG": "unsigned long long",
        "WCHAR": "wchar_t",
        "SHORT": "short",
        "TBYTE": "WCHAR",
        "TCHAR": "WCHAR",
        "UCHAR": "unsigned char",
        "UINT": "unsigned int",
        "UINT8": "unsigned char",
        "UINT16": "unsigned short",
        "UINT32": "unsigned int",
        "UINT64": "unsigned long long",
        "ULONG": "unsigned long",
        "ULONG32": "unsigned int",
        "ULONG64": "unsigned long long",
        "USHORT": "unsigned short",
        "SIZE_T": "ULONG_PTR",
        "SSIZE_T": "LONG_PTR",
        "ATOM": "WORD",
        "BOOLEAN": "BYTE",
        "COLORREF": "DWORD",
        "HANDLE": "PVOID",
        "DWORDLONG": "ULONGLONG",
        "DWORD_PTR": "ULONG_PTR",
        "HACCEL": "HANDLE",
        "HBITMAP": "HANDLE",
        "HBRUSH": "HANDLE",
        "HCOLORSPACE": "HANDLE",
        "HCONV": "HANDLE",
        "HCONVLIST": "HANDLE",
        "HDC": "HANDLE",
        "HDDEDATA": "HANDLE",
        "HDESK": "HANDLE",
        "HDROP": "HANDLE",
        "HDWP": "HANDLE",
        "HENHMETAFILE": "HANDLE",
        "HFILE": "int",
        "HFONT": "HANDLE",
        "HGDIOBJ": "HANDLE",
        "HGLOBAL": "HANDLE",
        "HHOOK": "HANDLE",
        "HICON": "HANDLE",
        "HCURSOR": "HICON",
        "HINSTANCE": "HANDLE",
        "HKEY": "HANDLE",
        "HKL": "HANDLE",
        "HLOCAL": "HANDLE",
        "HMENU": "HANDLE",
        "HMETAFILE": "HANDLE",
        "HMODULE": "HINSTANCE",
        "HMONITOR": "HANDLE",
        "HPALETTE": "HANDLE",
        "HPEN": "HANDLE",
        "HRESULT": "LONG",
        "HRGN": "HANDLE",
        "HRSRC": "HANDLE",
        "HSZ": "HANDLE",
        "WINSTA": "HANDLE",
        "HWND": "HANDLE",
        "LANGID": "WORD",
        "LCID": "DWORD",
        "LCTYPE": "DWORD",
        "LGRPID": "DWORD",
        "LPARAM": "LONG_PTR",
        "LPBOOL": "BOOL *",
        "LPBYTE": "BYTE *",
        "LPCOLORREF": "DWORD *",
        "LPCSTR": "const char *",
        "LPCVOID": model.const_voidp_type,
        "LPCWSTR": "const WCHAR *",
        "LPCTSTR": "LPCWSTR",
        "LPDWORD": "DWORD *",
        "LPHANDLE": "HANDLE *",
        "LPINT": "int *",
        "LPLONG": "long *",
        "LPSTR": "CHAR *",
        "LPWSTR": "WCHAR *",
        "LPTSTR": "LPWSTR",
        "LPVOID": model.voidp_type,
        "LPWORD": "WORD *",
        "LRESULT": "LONG_PTR",
        "PBOOL": "BOOL *",
        "PBOOLEAN": "BOOLEAN *",
        "PBYTE": "BYTE *",
        "PCHAR": "CHAR *",
        "PCSTR": "const CHAR *",
        "PCTSTR": "LPCWSTR",
        "PCWSTR": "const WCHAR *",
        "PDWORD": "DWORD *",
        "PDWORDLONG": "DWORDLONG *",
        "PDWORD_PTR": "DWORD_PTR *",
        "PDWORD32": "DWORD32 *",
        "PDWORD64": "DWORD64 *",
        "PFLOAT": "FLOAT *",
        "PHALF_PTR": "HALF_PTR *",
        "PHANDLE": "HANDLE *",
        "PHKEY": "HKEY *",
        "PINT": "int *",
        "PINT_PTR": "INT_PTR *",
        "PINT8": "INT8 *",
        "PINT16": "INT16 *",
        "PINT32": "INT32 *",
        "PINT64": "INT64 *",
        "PLCID": "PDWORD",
        "PLONG": "LONG *",
        "PLONGLONG": "LONGLONG *",
        "PLONG_PTR": "LONG_PTR *",
        "PLONG32": "LONG32 *",
        "PLONG64": "LONG64 *",
        "PSHORT": "SHORT *",
        "PSIZE_T": "SIZE_T *",
        "PSSIZE_T": "SSIZE_T *",
        "PSTR": "CHAR *",
        "PTBYTE": "TBYTE *",
        "PTCHAR": "TCHAR *",
        "PTSTR": "LPWSTR",
        "PUCHAR": "UCHAR *",
        "PUHALF_PTR": "UHALF_PTR *",
        "PUINT": "UINT *",
        "PUINT_PTR": "UINT_PTR *",
        "PUINT8": "UINT8 *",
        "PUINT16": "UINT16 *",
        "PUINT32": "UINT32 *",
        "PUINT64": "UINT64 *",
        "PULONG": "ULONG *",
        "PULONGLONG": "ULONGLONG *",
        "PULONG_PTR": "ULONG_PTR *",
        "PULONG32": "ULONG32 *",
        "PULONG64": "ULONG64 *",
        "PUSHORT": "USHORT *",
        "PWCHAR": "WCHAR *",
        "PWORD": "WORD *",
        "PWSTR": "WCHAR *",
        "QWORD": "unsigned long long",
        "SC_HANDLE": "HANDLE",
        "SC_LOCK": "LPVOID",
        "SERVICE_STATUS_HANDLE": "HANDLE",
        # UNICODE_STRING is the one genuine struct in the table; the
        # trailing [-1, -1, -1] are the (unspecified) bit sizes.
        "UNICODE_STRING": model.StructType(
            "_UNICODE_STRING",
            ["Length",
             "MaximumLength",
             "Buffer"],
            [model.PrimitiveType("unsigned short"),
             model.PrimitiveType("unsigned short"),
             model.PointerType(model.PrimitiveType("wchar_t"))],
            [-1, -1, -1]),
        "PUNICODE_STRING": "UNICODE_STRING *",
        "PCUNICODE_STRING": "const UNICODE_STRING *",
        "USN": "LONGLONG",
        "VOID": model.void_type,
        "WPARAM": "UINT_PTR",
    })
    return result
# On Windows, extend the table with the Win32 type aliases sized for this
# interpreter's pointer width.
if sys.platform == 'win32':
    COMMON_TYPES.update(win_common_types(sys.maxsize))
| |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Helper function to append a preference path inside the user home"""
import os
import sys
import time
from collections import OrderedDict
def escapeString(s):
    """Make the ESC control character visible by rewriting it as the literal
    four-character sequence '\\033'.  Other control characters (\\n, \\t, \\r)
    are deliberately left untouched."""
    return s.replace("\033", "\\033")
def unescapeString(s):
    """Inverse of escapeString: turn the literal sequence '\\033' back into
    the ESC control character."""
    return s.replace("\\033", "\033")
class ConfigurationValueInvalidError(Exception):
    """The validation of the value has failed.

    Raised by ConfigurationValue.set() with (name, rejected_value) args.
    """
class ConfigurationValue(object):
    """A single preference value with an optional validation rule.

    The `help`, `min` and `max` parameters intentionally shadow builtins to
    keep the public keyword interface unchanged.
    """

    def __init__(self, default, help="", t=None, min=None, max=None, choices=None, validation=None, custom=False):
        self.value = default
        # Custom (user-added) values have no canonical default.
        self.default = default if not custom else None
        self.help = help
        self.type = t if t is not None else type(self.value)
        self.min = min if min is not None else 0
        # Portability fix: sys.maxint exists only on Python 2; fall back to
        # sys.maxsize elsewhere (identical Python 2 behavior via getattr).
        self.max = max if max is not None else getattr(sys, "maxint", sys.maxsize)
        self.choices = choices
        self.custom = custom
        # Build a default validator: membership when choices are given,
        # range check when both bounds are given, otherwise accept anything.
        if validation is None:
            if self.choices is not None:
                validation = lambda v, choices=self.choices: v in choices
            elif min is not None and max is not None:
                validation = lambda v, min=min, max=max: min <= v <= max
            else:
                validation = lambda v: True
        assert callable(validation)
        self.validation = validation
        self.name = ""

    def setName(self, name):
        """Record the key this value is stored under; returns self for chaining."""
        self.name = name
        return self

    def set(self, value):
        """Validate and store *value* (dicts are merged in-place); returns self.

        Raises ConfigurationValueInvalidError when validation rejects it.
        """
        if self.validation(value):
            if isinstance(self.value, dict):
                self.value.update(value)
            else:
                self.value = value
        else:
            raise ConfigurationValueInvalidError(self.name, value)
        return self

    def get(self):
        """Return the current value."""
        return self.value

    def __str__(self):
        """Render as a '#help\\nname = value' configuration-file line."""
        s = "#{}\n".format(self.help) if self.help != "" else ""
        s += "{} = {}\n".format(self.name, repr(self))
        return s

    def __repr__(self):
        # Strings are quoted; everything else rendered bare, escapes visible.
        if self.value.__class__ is str or self.type is str:
            return "\"{}\"".format(escapeString(str(self.value)))
        else:
            return "{}".format(escapeString(str(self.value)))

    def __eq__(self, other):
        return isinstance(other, ConfigurationValue) and self.get() == other.get()

    @property
    def isModified(self):
        """True when the value is custom or differs from its default."""
        return self.custom or self.value != self.default

    @property
    def kind(self):
        return ConfigurationValue
class ConfigurationDict(ConfigurationValue):
    """A preference value holding a mapping of named ConfigurationValues.

    NOTE: Python 2 only — relies on dict.iteritems() throughout.
    """

    def __init__(self, items, help, custom=False):
        # Wrap every raw entry in a ConfigurationValue so nested validation
        # and naming behave uniformly.
        entries = {key: ConfigurationValue(val).setName(key) if not isinstance(val, ConfigurationValue) else val for key, val in items.iteritems()}
        ConfigurationValue.__init__(self, entries, help, t=dict, custom=custom)
        if not custom:
            self.defaults = dict()
            self.defaults.update(items)

    def __str__(self):
        """Render as a '# help\\nname = {...}' configuration-file block."""
        s = "# {}\n".format(self.help)
        s += "{} = {}".format(self.name, repr(self))
        return s

    def __repr__(self):
        s = "{\n"
        for key, val in self.values.iteritems():
            if val.__class__ is str or val.type is str:
                s += "\t'{}' : \"{}\",\n".format(key, escapeString(str(val.get())))
            else:
                s += "\t'{}' : {},\n".format(key, escapeString(str(val.get())))
        s += "}\n\n"
        return s

    def set(self, values):
        """Update entries from *values*, delegating to each entry's set()."""
        for key, val in values.iteritems():
            if isinstance(self.values[key], ConfigurationValue):
                self.values[key].set(val)
            else:
                self.values[key] = val
        return self

    def get(self, key=None):
        """Return the whole dict wrapper, or a single entry's value by key."""
        return self if key is None else self[key]

    def keys(self):
        return self.values.keys()

    def __getattr__(self, attr):
        # Allow attribute-style access to entries (config.some_entry).
        if attr in self:
            return self[attr]
        else:
            raise AttributeError(attr)

    def __getitem__(self, key):
        return self.values[key].get()

    def __contains__(self, key):
        return key in self.values

    # Fixed: the explicit `raise StopIteration()` that used to follow each
    # generator loop below was redundant on Python 2 and, under PEP 479
    # (Python 3.7+), becomes a RuntimeError.  Generators end naturally.
    def __iter__(self):
        for key in self.values:
            yield key

    def itervalues(self):
        for val in self.values.itervalues():
            yield val.get()

    def iteritems(self):
        for key, val in self.values.iteritems():
            yield key, val

    @property
    def values(self):
        # The underlying mapping of key -> ConfigurationValue.
        return self.value

    @property
    def isModified(self):
        """True when custom or any entry differs from its recorded default."""
        return self.custom or any([self.defaults[key] != self.values[key].get() for key in self.values])

    def getKind(self, entry):
        """Return the kind class of *entry*, or None when absent."""
        return self.values[entry].kind if entry in self.values else None

    @property
    def kind(self):
        return ConfigurationDict
class ConfigurationValueList(ConfigurationDict):
    """A ConfigurationDict variant representing a list-like group of entries.

    ``appendable`` and ``contentType`` are simply recorded here for
    consumers to honour; no enforcement happens in this class.
    """
    def __init__(self, items, help="", appendable=True, contentType=None, custom=False):
        ConfigurationDict.__init__(self, items, help, custom)
        self.contentType = contentType
        self.appendable = appendable
    @property
    def kind(self):
        """Concrete configuration-entry class this instance represents."""
        return ConfigurationValueList
class UserPreferenceMeta(type):
    """Metaclass for User Preference"""
    def __init__(cls, name, bases, attrs):
        # The first class created under this metaclass (UserPreference
        # itself) becomes the "host" accumulating every declared entry.
        if not hasattr(cls, "__host__"):
            setattr(cls, "__host__", cls)
        if hasattr(cls, "__prefix__"):
            # Prefixed subclasses collect their entries into a fresh dict,
            # grouped under the prefix on the host (see bottom of method).
            dst = OrderedDict()
        else:
            dst = cls.__host__.__values__
        for member in cls.__dict__:
            if not member.startswith("_"):
                value = getattr(cls, member)
                if not callable(value):
                    # Each non-callable public attribute is registered as a
                    # configuration entry under its member name.  Assumes the
                    # attribute has a setName() method (i.e. is a
                    # ConfigurationValue) — TODO confirm for all subclasses.
                    dst[member] = value.setName(member)
        if hasattr(cls, "__prefix__"):
            # Expose the grouped entries on the host as one ConfigurationDict.
            cls.__host__.__values__[cls.__prefix__] = ConfigurationDict(dst, help=cls.__doc__)
class UserPreference(object):
    """Application preference store, persisted as an executable Python file
    in the per-user application-data directory.

    Entries declared on subclasses are collected into the class-level
    ``__values__`` ordered mapping by :class:`UserPreferenceMeta`.
    """
    __metaclass__ = UserPreferenceMeta
    __values__ = OrderedDict()
    def __init__(self, appname):
        # Resolve the per-user data directory for this application.
        if sys.platform == "win32":
            self.appdata = os.path.join(os.environ['APPDATA'], appname)
        else:
            self.appdata = os.path.expanduser(os.path.join("~", appname))
        self.appname = appname
        self.configfile = None   # remembered by load()/save()
        self.modules = {}        # extra globals exposed to the pref file
    def addModules(self, **modules):
        """Expose additional modules/names to the executed preference file."""
        self.modules.update(modules)
        return self
    def add(self, name, entry):
        """Register *entry* (a ConfigurationValue) under *name*."""
        assert isinstance(entry, ConfigurationValue)
        # BUG FIX: previously assigned to ``self.values`` — an attribute this
        # class never defines, so __getattr__ raised AttributeError.  Entries
        # live in the class-level __values__ mapping.
        self.__values__[name] = entry
        return self
    def addMany(self, entries):
        """Register several entries from a {name: ConfigurationValue} dict."""
        for name, entry in entries.iteritems():
            self.add(name, entry)
        return self
    def load(self, configfile="pref.py"):
        """Execute the user's preference file (when present) and merge its
        assignments into the declared entries.

        Unknown keys are kept as custom entries so they survive save().
        """
        entries = dict()
        self.configfile = configfile
        customPref = os.path.join(self.appdata, self.configfile)
        if os.path.exists(customPref):
            execfile(customPref, self.modules, entries)
        for key, val in entries.iteritems():
            if key in self.__values__:
                self.__values__[key].set(val)
            else:
                # BUG FIX: ``ConfigurationValue(key, val, custom=True)`` stored
                # the *name* as the value and the value as the help text (see
                # ConfigurationDict's constructor for the positional order).
                # Store the value and attach the name explicitly.
                self.__values__[key] = ConfigurationValue(val, custom=True).setName(key)
        return self
    def save(self, configfile=None):
        """Write the modified entries to *configfile* under appdata.

        When *configfile* is omitted, the file recorded by a previous
        load()/save() is reused (defaulting to ``pref.py``).
        """
        # BUG FIX: the configfile argument used to be ignored entirely in
        # favour of self.configfile.
        if configfile is not None:
            self.configfile = configfile
        if self.configfile is None:
            self.configfile = "pref.py"
        customPref = os.path.join(self.appdata, self.configfile)
        # BUG FIX: os.makedirs(os.path.basename(...)) created a directory
        # named after the *file* in the current directory; the parent
        # directory (dirname) is what must exist before writing.
        prefdir = os.path.dirname(customPref)
        if prefdir and not os.path.exists(prefdir):
            os.makedirs(prefdir)
        fHnd = open(customPref, "w")
        try:
            fHnd.write(str(self))
        finally:
            # Close even when str(self)/write raises.
            fHnd.close()
    def __str__(self):
        # Render a runnable Python file containing only modified entries.
        s = ""
        s += "#!/usr/bin/env python\n"
        s += "# -*- coding:utf-8 -*-\n"
        s += "\n"
        s += "# Custom User Preference for {}\n".format(self.appname)
        s += "# \n"
        s += "# Last modification: {}\n".format(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
        s += "# \n"
        s += "\n"
        for val in self.__values__.itervalues():
            if val.isModified:
                s += str(val)
        return s
    __repr__ = __str__
    def getKind(self, entry):
        """Return the entry's concrete class, or None when unknown."""
        return self.__values__[entry].kind if entry in self.__values__ else None
    def __getattr__(self, attr):
        # Attribute-style access to entries, e.g. prefs.timeout.
        if attr in self:
            return self[attr]
        else:
            raise AttributeError(attr)
    def __getitem__(self, key):
        # Returns the unwrapped value of the named entry.
        return self.__values__[key].get()
    def __contains__(self, key):
        return key in self.__values__
    def __iter__(self):
        # NOTE: the redundant ``raise StopIteration()`` trailers were removed
        # from these generators — they were no-ops in Python 2 and become
        # RuntimeError under Python 3.7+ (PEP 479).
        for key in self.__values__:
            yield key
    def itervalues(self):
        # Yields the wrapped ConfigurationValue objects themselves.
        for val in self.__values__.itervalues():
            yield val
    def iteritems(self):
        for key, val in self.__values__.iteritems():
            yield key, val
| |
"""
Spectral Algorithm for Nonlinear Equations
"""
from __future__ import division, absolute_import, print_function
import collections
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize.optimize import _check_unknown_options
from .linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng
class _NoConvergence(Exception):
    """Raised internally when the function-evaluation budget (maxfev) runs out."""
    pass
def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
                  fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
                  sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
    r"""
    Solve nonlinear equation with the DF-SANE method
    Options
    -------
    ftol : float, optional
        Relative norm tolerance.
    fatol : float, optional
        Absolute norm tolerance.
        Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
    fnorm : callable, optional
        Norm to use in the convergence check. If None, 2-norm is used.
    maxfev : int, optional
        Maximum number of function evaluations.
    disp : bool, optional
        Whether to print convergence process to stdout.
    eta_strategy : callable, optional
        Choice of the ``eta_k`` parameter, which gives slack for growth
        of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with
        `k` the iteration number, `x` the current iterate and `F` the current
        residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
        Default: ``||F||**2 / (1 + k)**2``.
    sigma_eps : float, optional
        The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
        Default: 1e-10
    sigma_0 : float, optional
        Initial spectral coefficient.
        Default: 1.0
    M : int, optional
        Number of iterates to include in the nonmonotonic line search.
        Default: 10
    line_search : {'cruz', 'cheng'}
        Type of line search to employ. 'cruz' is the original one defined in
        [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
        a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
        Default: 'cruz'
    References
    ----------
    .. [1] "Spectral residual method without gradient information for solving
           large-scale nonlinear systems of equations." W. La Cruz,
           J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
    .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
    .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).
    """
    _check_unknown_options(unknown_options)
    if line_search not in ('cheng', 'cruz'):
        raise ValueError("Invalid value %r for 'line_search'" % (line_search,))
    # Exponent of the merit function fmerit(F) = ||F||**nexp.
    nexp = 2
    if eta_strategy is None:
        # Different choice from [1], as their eta is not invariant
        # vs. scaling of F.
        def eta_strategy(k, x, F):
            # Obtain squared 2-norm of the initial residual from the outer scope
            # (f_0 is assigned below, before the first call can happen).
            return f_0 / (1 + k) ** 2
    if fnorm is None:
        def fnorm(F):
            # Obtain squared 2-norm of the current residual from the outer scope.
            # f_k is rebound on every loop iteration, so this closure always
            # sees the latest merit value.
            return f_k ** (1.0 / nexp)
    def fmerit(F):
        # Merit function whose (nonmonotone) decrease the line searches monitor.
        return np.linalg.norm(F) ** nexp
    # Evaluation counter, shared with the wrapped function via a mutable list.
    nfev = [0]
    f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args)
    k = 0
    f_0 = f_k
    sigma_k = sigma_0
    F_0_norm = fnorm(F_k)
    # For the 'cruz' line search
    prev_fs = collections.deque([f_k], M)
    # For the 'cheng' line search
    Q = 1.0
    C = f_0
    converged = False
    message = "too many function evaluations required"
    while True:
        F_k_norm = fnorm(F_k)
        if disp:
            print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))
        if callback is not None:
            callback(x_k, F_k)
        if F_k_norm < ftol * F_0_norm + fatol:
            # Converged!
            message = "successful convergence"
            converged = True
            break
        # Control spectral parameter, from [2]: clamp |sigma| into
        # [sigma_eps, 1/sigma_eps] while preserving its sign.
        if abs(sigma_k) > 1 / sigma_eps:
            sigma_k = 1 / sigma_eps * np.sign(sigma_k)
        elif abs(sigma_k) < sigma_eps:
            sigma_k = sigma_eps
        # Line search direction
        d = -sigma_k * F_k
        # Nonmonotone line search
        eta = eta_strategy(k, x_k, F_k)
        try:
            if line_search == 'cruz':
                alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta)
            elif line_search == 'cheng':
                alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta)
        except _NoConvergence:
            # Evaluation budget exhausted inside the line search; fall through
            # with the default failure message and the current iterate.
            break
        # Update spectral parameter
        s_k = xp - x_k
        y_k = Fp - F_k
        sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)
        # Take step
        x_k = xp
        F_k = Fp
        f_k = fp
        # Store function value
        if line_search == 'cruz':
            prev_fs.append(fp)
        k += 1
    # Map the (possibly real-mapped) solution back to the caller's original
    # dtype and shape.
    x = _wrap_result(x_k, is_complex, shape=x_shape)
    F = _wrap_result(F_k, is_complex)
    result = OptimizeResult(x=x, success=converged,
                            message=message,
                            fun=F, nfev=nfev[0], nit=k)
    return result
def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()):
    """
    Wrap a function and an initial value so that (i) complex values
    are wrapped to reals, and (ii) value for a merit function
    fmerit(x, f) is computed at the same time, (iii) iteration count
    is maintained and an exception is raised if it is exceeded.
    Parameters
    ----------
    func : callable
        Function to wrap
    x0 : ndarray
        Initial value
    fmerit : callable
        Merit function fmerit(f) for computing merit value from residual.
    nfev_list : list
        List to store number of evaluations in. Should be [0] in the beginning.
    maxfev : int
        Maximum number of evaluations before _NoConvergence is raised.
    args : tuple
        Extra arguments to func
    Returns
    -------
    wrap_func : callable
        Wrapped function, to be called as
        ``F, fp = wrap_func(x0)``
    x0_wrap : ndarray of float
        Wrapped initial value; raveled to 1D and complex
        values mapped to reals.
    x0_shape : tuple
        Shape of the initial value array
    f : float
        Merit function at F
    F : ndarray of float
        Residual at x0_wrap
    is_complex : bool
        Whether complex values were mapped to reals
    """
    x0 = np.asarray(x0)
    x0_shape = x0.shape
    # Initial residual; also decides whether we must work in the
    # real-mapped representation.
    F = np.asarray(func(x0, *args)).ravel()
    is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F)
    x0 = x0.ravel()
    # The evaluation above counts towards the budget.
    nfev_list[0] = 1
    if is_complex:
        def wrap_func(x):
            if nfev_list[0] >= maxfev:
                raise _NoConvergence()
            nfev_list[0] += 1
            # Reinterpret the packed real vector as complex, evaluate, then
            # map the complex residual back to interleaved reals.
            z = _real2complex(x).reshape(x0_shape)
            v = np.asarray(func(z, *args)).ravel()
            F = _complex2real(v)
            f = fmerit(F)
            return f, F
        # Map the initial value/residual to reals *after* defining the
        # closure (the closure only needs x0_shape, captured above).
        x0 = _complex2real(x0)
        F = _complex2real(F)
    else:
        def wrap_func(x):
            if nfev_list[0] >= maxfev:
                raise _NoConvergence()
            nfev_list[0] += 1
            x = x.reshape(x0_shape)
            F = np.asarray(func(x, *args)).ravel()
            f = fmerit(F)
            return f, F
    return wrap_func, x0, x0_shape, fmerit(F), F, is_complex
def _wrap_result(result, is_complex, shape=None):
"""
Convert from real to complex and reshape result arrays.
"""
if is_complex:
z = _real2complex(result)
else:
z = result
if shape is not None:
z = z.reshape(shape)
return z
def _real2complex(x):
return np.ascontiguousarray(x, dtype=float).view(np.complex128)
def _complex2real(z):
return np.ascontiguousarray(z, dtype=complex).view(np.float64)
| |
from __future__ import unicode_literals
from datetime import date
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Book, Coffee, Reviewer, FriendlyAuthor
class RawQueryTests(TestCase):
    # Fixture providing the authors/books/coffees/reviewers the raw SQL
    # below queries against.
    fixtures = ['raw_query_books.json']
    def assertSuccessfulRawQuery(self, model, query, expected_results,
            expected_annotations=(), params=[], translations=None):
        """
        Execute the passed query against the passed model and check the output
        """
        # NOTE(review): params=[] is a mutable default argument; currently
        # harmless because it is only read, never mutated.
        results = list(model.objects.raw(query, params=params, translations=translations))
        self.assertProcessed(model, results, expected_results, expected_annotations)
        self.assertAnnotations(results, expected_annotations)
    def assertProcessed(self, model, results, orig, expected_annotations=()):
        """
        Compare the results of a raw query against expected results
        """
        self.assertEqual(len(results), len(orig))
        for index, item in enumerate(results):
            orig_item = orig[index]
            # Mirror the expected annotations onto the reference object so
            # the field-by-field comparison also covers annotated attributes.
            for annotation in expected_annotations:
                setattr(orig_item, *annotation)
            for field in model._meta.fields:
                # Check that all values on the model are equal
                self.assertEqual(
                    getattr(item, field.attname),
                    getattr(orig_item, field.attname)
                )
                # This includes checking that they are the same type
                self.assertEqual(
                    type(getattr(item, field.attname)),
                    type(getattr(orig_item, field.attname))
                )
    def assertNoAnnotations(self, results):
        """
        Check that the results of a raw query contain no annotations
        """
        self.assertAnnotations(results, ())
    def assertAnnotations(self, results, expected_annotations):
        """
        Check that the passed raw query results contain the expected
        annotations
        """
        if expected_annotations:
            for index, result in enumerate(results):
                annotation, value = expected_annotations[index]
                self.assertTrue(hasattr(result, annotation))
                self.assertEqual(getattr(result, annotation), value)
    def testSimpleRawQuery(self):
        """
        Basic test of raw query with a simple database query
        """
        query = "SELECT * FROM raw_query_author"
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors)
    def testRawQueryLazy(self):
        """
        Raw queries are lazy: they aren't actually executed until they're
        iterated over.
        """
        # The cursor only materialises once the queryset is iterated.
        q = Author.objects.raw('SELECT * FROM raw_query_author')
        self.assertTrue(q.query.cursor is None)
        list(q)
        self.assertTrue(q.query.cursor is not None)
    def testFkeyRawQuery(self):
        """
        Test of a simple raw query against a model containing a foreign key
        """
        query = "SELECT * FROM raw_query_book"
        books = Book.objects.all()
        self.assertSuccessfulRawQuery(Book, query, books)
    def testDBColumnHandler(self):
        """
        Test of a simple raw query against a model containing a field with
        db_column defined.
        """
        query = "SELECT * FROM raw_query_coffee"
        coffees = Coffee.objects.all()
        self.assertSuccessfulRawQuery(Coffee, query, coffees)
    def testOrderHandler(self):
        """
        Test of raw raw query's tolerance for columns being returned in any
        order
        """
        selects = (
            ('dob, last_name, first_name, id'),
            ('last_name, dob, first_name, id'),
            ('first_name, last_name, dob, id'),
        )
        for select in selects:
            query = "SELECT %s FROM raw_query_author" % select
            authors = Author.objects.all()
            self.assertSuccessfulRawQuery(Author, query, authors)
    def testTranslations(self):
        """
        Test of raw query's optional ability to translate unexpected result
        column names to specific model fields
        """
        query = "SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author"
        translations = {'first': 'first_name', 'last': 'last_name'}
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
    def testParams(self):
        """
        Test passing optional query parameters
        """
        query = "SELECT * FROM raw_query_author WHERE first_name = %s"
        author = Author.objects.all()[2]
        params = [author.first_name]
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        # repr() of the parametrised queryset must not blow up.
        self.assertIsInstance(repr(qset), str)
    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def testPyformatParams(self):
        """
        Test passing optional query parameters
        """
        # Same as testParams, but with dict-style (pyformat) placeholders.
        query = "SELECT * FROM raw_query_author WHERE first_name = %(first)s"
        author = Author.objects.all()[2]
        params = {'first': author.first_name}
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(repr(qset), str)
    def testManyToMany(self):
        """
        Test of a simple raw query against a model containing a m2m field
        """
        query = "SELECT * FROM raw_query_reviewer"
        reviewers = Reviewer.objects.all()
        self.assertSuccessfulRawQuery(Reviewer, query, reviewers)
    def testExtraConversions(self):
        """
        Test to insure that extra translations are ignored.
        """
        query = "SELECT * FROM raw_query_author"
        translations = {'something': 'else'}
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
    def testMissingFields(self):
        # Fields omitted from the SELECT are still accessible on the model.
        query = "SELECT id, first_name, dob FROM raw_query_author"
        for author in Author.objects.raw(query):
            self.assertNotEqual(author.first_name, None)
            # last_name isn't given, but it will be retrieved on demand
            self.assertNotEqual(author.last_name, None)
    def testMissingFieldsWithoutPK(self):
        # A raw query must select the primary key; otherwise it is invalid.
        query = "SELECT first_name, dob FROM raw_query_author"
        try:
            list(Author.objects.raw(query))
            self.fail('Query without primary key should fail')
        except InvalidQuery:
            pass
    def testAnnotations(self):
        # Extra selected columns (book_count) appear as attributes on the
        # returned instances, one expected value per fixture author.
        query = "SELECT a.*, count(b.id) as book_count FROM raw_query_author a LEFT JOIN raw_query_book b ON a.id = b.author_id GROUP BY a.id, a.first_name, a.last_name, a.dob ORDER BY a.id"
        expected_annotations = (
            ('book_count', 3),
            ('book_count', 0),
            ('book_count', 1),
            ('book_count', 0),
        )
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, expected_annotations)
    def testWhiteSpaceQuery(self):
        # Leading whitespace in the SQL is tolerated.
        query = "  SELECT * FROM raw_query_author"
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors)
    def testMultipleIterations(self):
        # Iterating a RawQuerySet twice yields the same rows both times.
        query = "SELECT * FROM raw_query_author"
        normal_authors = Author.objects.all()
        raw_authors = Author.objects.raw(query)
        # First Iteration
        first_iterations = 0
        for index, raw_author in enumerate(raw_authors):
            self.assertEqual(normal_authors[index], raw_author)
            first_iterations += 1
        # Second Iteration
        second_iterations = 0
        for index, raw_author in enumerate(raw_authors):
            self.assertEqual(normal_authors[index], raw_author)
            second_iterations += 1
        self.assertEqual(first_iterations, second_iterations)
    def testGetItem(self):
        # Indexing on RawQuerySets
        query = "SELECT * FROM raw_query_author ORDER BY id ASC"
        third_author = Author.objects.raw(query)[2]
        self.assertEqual(third_author.first_name, 'Bob')
        first_two = Author.objects.raw(query)[0:2]
        self.assertEqual(len(first_two), 2)
        # Non-integer keys are rejected.
        self.assertRaises(TypeError, lambda: Author.objects.raw(query)['test'])
    def test_inheritance(self):
        # date is the end of the Cuban Missile Crisis, I have no idea when
        # Wesley was born
        f = FriendlyAuthor.objects.create(first_name="Wesley", last_name="Chun",
            dob=date(1962, 10, 28))
        query = "SELECT * FROM raw_query_friendlyauthor"
        self.assertEqual(
            [o.pk for o in FriendlyAuthor.objects.raw(query)], [f.pk]
        )
    def test_query_count(self):
        # A raw query executes exactly one SQL statement.
        self.assertNumQueries(1, list, Author.objects.raw("SELECT * FROM raw_query_author"))
| |
import numpy
def check(y, x, matrix):
    """Return True when matrix[y][x] does not conflict with any other filled
    cell in its row, column, or 3x3 box.

    Parameters
    ----------
    y, x : int
        Row and column indices (0-8) of the cell to validate.
    matrix : 9x9 indexable grid
        Current board state; 0 marks an empty cell.

    Returns
    -------
    bool
        False when the cell's value duplicates another cell it shares a
        unit with; True otherwise (an empty cell never conflicts).
    """
    value = matrix[y][x]
    if value == 0:
        # Zeros are blanks, never counted as duplicates (matches the
        # original behaviour of ignoring zero-valued cells).
        return True
    # Row and column neighbours, excluding the cell itself.
    for i in range(9):
        if i != y and matrix[i][x] == value:
            return False
    for j in range(9):
        if j != x and matrix[y][j] == value:
            return False
    # 3x3 box neighbours; (y//3)*3 replaces the old if-ladder box lookup.
    boxY, boxX = 3 * (y // 3), 3 * (x // 3)
    for i in range(boxY, boxY + 3):
        for j in range(boxX, boxX + 3):
            if (i != y or j != x) and matrix[i][j] == value:
                return False
    return True
def solve(matrix):
    """Solve a sudoku grid in place by recursive backtracking.

    Replaces the previous hand-rolled iterative backtracker, whose tangled
    cursor bookkeeping contained dead/incorrect exit paths (e.g. a success
    return reachable while still backtracking).

    Parameters
    ----------
    matrix : 9x9 mutable grid (list of lists or numpy array)
        Puzzle to solve; 0 marks an empty cell.  Pre-filled clues are
        never modified.

    Returns
    -------
    The same grid object, filled in, when a solution exists; otherwise a
    fresh ``numpy.zeros(shape=(9, 9))`` (preserving the original
    "no solution" contract).
    """
    def _fits(y, x, v):
        # True when writing v at (y, x) violates no row/column/box rule.
        for k in range(9):
            if matrix[y][k] == v or matrix[k][x] == v:
                return False
        by, bx = 3 * (y // 3), 3 * (x // 3)
        for i in range(by, by + 3):
            for j in range(bx, bx + 3):
                if matrix[i][j] == v:
                    return False
        return True

    def _solve_from(pos):
        # Depth-first search over cells 0..80 in row-major order, trying
        # digits ascending — the same ordering as the original solver, so
        # multi-solution puzzles yield the same (first) solution.
        if pos == 81:
            return True
        y, x = divmod(pos, 9)
        if matrix[y][x] != 0:
            # Pre-filled clue: skip it (clues are not re-validated, as in
            # the original implementation).
            return _solve_from(pos + 1)
        for v in range(1, 10):
            if _fits(y, x, v):
                matrix[y][x] = v
                if _solve_from(pos + 1):
                    return True
        matrix[y][x] = 0  # undo before backtracking
        return False

    if _solve_from(0):
        return matrix
    return numpy.zeros(shape=(9, 9))
# Puzzle definition: 0 marks an empty cell.  A single array literal replaces
# the previous 81 element-by-element assignments; dtype=float matches the
# old numpy.zeros-based grid so the printed output is unchanged.
matrix = numpy.array([
    [1, 9, 0, 0, 0, 6, 0, 4, 0],
    [0, 0, 5, 3, 0, 0, 0, 0, 8],
    [0, 0, 0, 0, 7, 0, 2, 0, 0],
    [0, 0, 1, 0, 5, 0, 0, 0, 3],
    [0, 6, 0, 0, 0, 9, 0, 7, 0],
    [2, 0, 0, 0, 8, 4, 1, 0, 0],
    [0, 0, 3, 0, 1, 0, 0, 0, 0],
    [8, 0, 0, 0, 0, 2, 5, 0, 0],
    [0, 5, 0, 4, 0, 0, 0, 8, 0],
], dtype=float)
#print(matrix)
print(solve(matrix))
| |
# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTCP}.
"""
__metaclass__ = type
import socket
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IResolverSimple, IConnector, IReactorFDSet)
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred, succeed, fail, maybeDeferred
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python import log
from twisted.trial.unittest import SkipTest
from twisted.test.test_tcp import ClosingProtocol
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
class Stop(ClientFactory):
    """
    A client factory which stops a reactor when a connection attempt fails.
    """
    def __init__(self, reactor):
        # Reactor whose event loop is halted on failure.
        self.reactor = reactor

    def clientConnectionFailed(self, connector, reason):
        # Any failed connection attempt ends the test's event loop.
        self.reactor.stop()
class FakeResolver:
    """
    A resolver implementation based on a C{dict} mapping names to addresses.
    """
    implements(IResolverSimple)

    def __init__(self, names):
        # Mapping of hostname -> address string.
        self.names = names

    def getHostByName(self, name, timeout):
        # Resolve from the mapping; unknown names produce a failed Deferred
        # carrying a DNSLookupError, like a real resolver would.
        try:
            address = self.names[name]
        except KeyError:
            return fail(DNSLookupError("FakeResolver couldn't find " + name))
        return succeed(address)
class _SimplePullProducer(object):
"""
A pull producer which writes one byte whenever it is resumed. For use by
L{test_unregisterProducerAfterDisconnect}.
"""
def __init__(self, consumer):
self.consumer = consumer
def stopProducing(self):
pass
def resumeProducing(self):
log.msg("Producer.resumeProducing")
self.consumer.write('x')
def _getWriters(reactor):
    """
    Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as well.
    """
    if IReactorFDSet.providedBy(reactor):
        return reactor.getWriters()
    if 'IOCP' in reactor.__class__.__name__:
        return reactor.handles
    # Cannot tell what is going on.
    raise Exception("Cannot find writers on %r" % (reactor,))
def serverFactoryFor(protocol):
    """
    Helper function which provides the signature L{ServerFactory} should
    provide.
    """
    result = ServerFactory()
    result.protocol = protocol
    return result
class TCPClientTestsBuilder(ReactorBuilder):
    """
    Builder defining tests relating to L{IReactorTCP.connectTCP}.
    """
    def _freePort(self, interface='127.0.0.1'):
        # Discover a currently-free (interface, port) pair by binding an
        # ephemeral socket and closing it.  Inherently racy: the port may be
        # reused by something else before the test binds it again.
        probe = socket.socket()
        try:
            probe.bind((interface, 0))
            return probe.getsockname()
        finally:
            probe.close()
    def test_interface(self):
        """
        L{IReactorTCP.connectTCP} returns an object providing L{IConnector}.
        """
        reactor = self.buildReactor()
        connector = reactor.connectTCP("127.0.0.1", 1234, ClientFactory())
        self.assertTrue(verifyObject(IConnector, connector))
    def test_clientConnectionFailedStopsReactor(self):
        """
        The reactor can be stopped by a client factory's
        C{clientConnectionFailed} method.
        """
        # Connecting to a just-probed free port should fail (nothing is
        # listening there), which triggers Stop and halts the reactor.
        host, port = self._freePort()
        reactor = self.buildReactor()
        reactor.connectTCP(host, port, Stop(reactor))
        reactor.run()
    def test_addresses(self):
        """
        A client's transport's C{getHost} and C{getPeer} return L{IPv4Address}
        instances which give the dotted-quad string form of the local and
        remote endpoints of the connection respectively.
        """
        host, port = self._freePort()
        reactor = self.buildReactor()
        server = reactor.listenTCP(
            0, serverFactoryFor(Protocol), interface=host)
        serverAddress = server.getHost()
        addresses = {'host': None, 'peer': None}
        class CheckAddress(Protocol):
            def makeConnection(self, transport):
                addresses['host'] = transport.getHost()
                addresses['peer'] = transport.getPeer()
                reactor.stop()
        clientFactory = Stop(reactor)
        clientFactory.protocol = CheckAddress
        # Connect via a hostname (resolved by FakeResolver below) while
        # binding the client side to the probed free port, so the expected
        # local address is known in advance.
        reactor.connectTCP(
            'localhost', server.getHost().port, clientFactory,
            bindAddress=('127.0.0.1', port))
        reactor.installResolver(FakeResolver({'localhost': '127.0.0.1'}))
        # NOTE(review): uses reactor.run() directly rather than
        # self.runReactor(reactor) — presumably loses whatever timeout
        # protection runReactor provides; TODO confirm intent.
        reactor.run() # self.runReactor(reactor)
        self.assertEqual(
            addresses['host'],
            IPv4Address('TCP', '127.0.0.1', port))
        self.assertEqual(
            addresses['peer'],
            IPv4Address('TCP', '127.0.0.1', serverAddress.port))
    def test_connectEvent(self):
        """
        This test checks that we correctly get notifications event for a
        client. This ought to prevent a regression under Windows using the GTK2
        reactor. See #3925.
        """
        reactor = self.buildReactor()
        server = reactor.listenTCP(0, serverFactoryFor(Protocol))
        connected = []
        class CheckConnection(Protocol):
            def connectionMade(self):
                connected.append(self)
                reactor.stop()
        clientFactory = Stop(reactor)
        clientFactory.protocol = CheckConnection
        reactor.connectTCP(
            '127.0.0.1', server.getHost().port, clientFactory)
        reactor.run()
        self.assertTrue(connected)
    def test_unregisterProducerAfterDisconnect(self):
        """
        If a producer is unregistered from a L{ITCPTransport} provider after the
        transport has been disconnected (by the peer) and after
        L{ITCPTransport.loseConnection} has been called, the transport is not
        re-added to the reactor as a writer as would be necessary if the
        transport were still connected.
        """
        reactor = self.buildReactor()
        # ClosingProtocol makes the server drop the connection immediately.
        port = reactor.listenTCP(0, serverFactoryFor(ClosingProtocol))
        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())
        writing = []
        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, wait for the server to disconnect from us, and
            then unregister the producer.
            """
            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(
                    _SimplePullProducer(self.transport), False)
                self.transport.loseConnection()
            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                self.unregister()
                # Record whether the transport wrongly re-registered itself
                # as a writer; asserted after the reactor stops.
                writing.append(self.transport in _getWriters(reactor))
                finished.callback(None)
            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()
        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        reactor.connectTCP('127.0.0.1', port.getHost().port, clientFactory)
        self.runReactor(reactor)
        self.assertFalse(
            writing[0], "Transport was writing after unregisterProducer.")
    def test_disconnectWhileProducing(self):
        """
        If L{ITCPTransport.loseConnection} is called while a producer
        is registered with the transport, the connection is closed
        after the producer is unregistered.
        """
        reactor = self.buildReactor()
        # XXX For some reason, pyobject/pygtk will not deliver the close
        # notification that should happen after the unregisterProducer call in
        # this test.  The selectable is in the write notification set, but no
        # notification ever arrives.
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "A pygobject/pygtk bug disables this functionality on Windows.")
        class Producer:
            def resumeProducing(self):
                log.msg("Producer.resumeProducing")
        port = reactor.listenTCP(0, serverFactoryFor(Protocol))
        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())
        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, unregister the producer, and wait for the connection to
            actually be lost.
            """
            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(Producer(), False)
                self.transport.loseConnection()
                # Let the reactor tick over, in case synchronously calling
                # loseConnection and then unregisterProducer is the same as
                # synchronously calling unregisterProducer and then
                # loseConnection (as it is in several reactors).
                reactor.callLater(0, reactor.callLater, 0, self.unregister)
            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()
                # This should all be pretty quick.  Fail the test
                # if we don't get a connectionLost event really
                # soon.
                reactor.callLater(
                    1.0, finished.errback,
                    Failure(Exception("Connection was not lost")))
            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                finished.callback(None)
        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        reactor.connectTCP('127.0.0.1', port.getHost().port, clientFactory)
        self.runReactor(reactor)
        # If the test failed, we logged an error already and trial
        # will catch it.
class TCPPortTestsBuilder(ReactorBuilder, ObjectModelIntegrationMixin):
    """
    Tests for L{IReactorRCP.listenTCP}
    """
    def getListeningPort(self, reactor):
        """
        Get a TCP port from a reactor
        """
        # Port 0 asks the OS for any free port.
        return reactor.listenTCP(0, ServerFactory())
    def getExpectedConnectionLostLogMsg(self, port):
        """
        Get the expected connection lost message for a TCP port
        """
        return "(TCP Port %s Closed)" % (port.getHost().port,)
    def test_connectionLostLogMsg(self):
        """
        When a connection is lost, an informative message should be logged
        (see L{getExpectedConnectionLostLogMsg}): an address identifying
        the port and the fact that it was closed.
        """
        loggedMessages = []
        def logConnectionLostMsg(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor)
        expectedMessage = self.getExpectedConnectionLostLogMsg(p)
        log.addObserver(logConnectionLostMsg)
        def stopReactor(ignored):
            # Detach the observer before stopping so later log traffic
            # isn't captured.
            log.removeObserver(logConnectionLostMsg)
            reactor.stop()
        def doStopListening():
            log.addObserver(logConnectionLostMsg)
            maybeDeferred(p.stopListening).addCallback(stopReactor)
        reactor.callWhenRunning(doStopListening)
        reactor.run()
        self.assertIn(expectedMessage, loggedMessages)
    def test_allNewStyle(self):
        """
        The L{IListeningPort} object is an instance of a class with no
        classic classes in its hierarchy.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor)
        self.assertFullyNewStyle(port)
# Expose the generated TestCase classes at module level so the test loader
# can discover and run them.
globals().update(TCPClientTestsBuilder.makeTestCaseClasses())
globals().update(TCPPortTestsBuilder.makeTestCaseClasses())
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Type of the optional ``cls`` callback: called with the pipeline response,
# the deserialized object and the response headers, and may return a custom
# result in place of the deserialized model.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
    """PrivateEndpointConnectionsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.rdbms.postgresql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # All four collaborators are supplied by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Gets a private endpoint connection.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.rdbms.postgresql.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> Optional["_models.PrivateEndpointConnection"]:
        """Send the initial PUT request of the create/update long-running
        operation and return the first result, if the service sent one."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 Accepted carries no body; only a 200 contains the resource.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
        """Approve or reject a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :param parameters:
        :type parameters: ~azure.mgmt.rdbms.postgresql.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.postgresql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )
        # These were consumed by the initial call; don't pass them to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the delete long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )
        # These were consumed by the initial call; don't pass them to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Send the initial PATCH request of the update-tags long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
        """Updates tags on private endpoint connection.
        Updates private endpoint connection with the specified tags.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update private endpoint connection Tags
        operation.
        :type parameters: ~azure.mgmt.rdbms.postgresql.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.postgresql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )
        # These were consumed by the initial call; don't pass them to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def list_by_server(
        self,
        resource_group_name: str,
        server_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
        """Gets all private endpoint connections on a server.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.postgresql.models.PrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_server.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections'}  # type: ignore
| |
# Class interface to the CD module.
import cd, CD
class Error(Exception):
    """Raised for caller errors: bad arguments, out-of-range tracks, no disc."""
    pass
class _Stop(Exception):
    """Internal control-flow exception raised to abort parsing/playback."""
    pass
def _doatime(self, cb_type, data):
if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
## print 'done with list entry',`self.listindex`
raise _Stop
func, arg = self.callbacks[cb_type]
if func:
func(arg, cb_type, data)
def _dopnum(self, cb_type, data):
if data > self.end:
## print 'done with list entry',`self.listindex`
raise _Stop
func, arg = self.callbacks[cb_type]
if func:
func(arg, cb_type, data)
class Readcd:
    """High-level CD audio player: plays a list of tracks and/or (start, end)
    stretches through a cd player and parser, dispatching parser events to
    user-registered callbacks.  (Python 2 / SGI `cd` module code.)"""
    def __init__(self, *arg):
        # Accept the same 0-2 positional arguments as cd.open().
        if len(arg) == 0:
            self.player = cd.open()
        elif len(arg) == 1:
            self.player = cd.open(arg[0])
        elif len(arg) == 2:
            self.player = cd.open(arg[0], arg[1])
        else:
            raise Error, 'bad __init__ call'
        self.list = []
        # One (func, arg) slot per CD callback type (bounds checked in
        # setcallback/removecallback).
        self.callbacks = [(None, None)] * 8
        self.parser = cd.createparser()
        self.playing = 0
        self.end = 0
        self.status = None
        self.trackinfo = None
    def eject(self):
        """Eject the disc and invalidate all cached state."""
        self.player.eject()
        self.list = []
        self.end = 0
        self.listindex = 0
        self.status = None
        self.trackinfo = None
        if self.playing:
            ## print 'stop playing from eject'
            raise _Stop
    def pmsf2msf(self, track, min, sec, frame):
        """Convert a (min, sec, frame) position relative to the start of
        *track* into an absolute disc (min, sec, frame) address."""
        if not self.status:
            self.cachestatus()
        if track < self.status[5] or track > self.status[6]:
            raise Error, 'track number out of range'
        if not self.trackinfo:
            self.cacheinfo()
        start, total = self.trackinfo[track]
        # Work in absolute frames: 75 frames/second, 60 seconds/minute.
        start = ((start[0] * 60) + start[1]) * 75 + start[2]
        total = ((total[0] * 60) + total[1]) * 75 + total[2]
        block = ((min * 60) + sec) * 75 + frame
        if block > total:
            raise Error, 'out of range'
        block = start + block
        min, block = divmod(block, 75*60)
        sec, frame = divmod(block, 75)
        return min, sec, frame
    def reset(self):
        """Clear the play list."""
        self.list = []
    def appendtrack(self, track):
        """Append a single whole track to the play list."""
        self.appendstretch(track, track)
    def appendstretch(self, start, end):
        """Append a (start, end) stretch to the play list.

        Each bound may be a track number, a (min, sec, frame) triple, or a
        (track, min, sec, frame) quadruple (converted via pmsf2msf)."""
        if not self.status:
            self.cachestatus()
        if not start:
            start = 1
        if not end:
            end = self.status[6]
        if type(end) == type(0):
            if end < self.status[5] or end > self.status[6]:
                raise Error, 'range error'
        else:
            l = len(end)
            if l == 4:
                prog, min, sec, frame = end
                if prog < self.status[5] or prog > self.status[6]:
                    raise Error, 'range error'
                end = self.pmsf2msf(prog, min, sec, frame)
            elif l != 3:
                raise Error, 'syntax error'
        if type(start) == type(0):
            if start < self.status[5] or start > self.status[6]:
                raise Error, 'range error'
            if len(self.list) > 0:
                # Merge with the previous entry when this track directly
                # follows a track-bounded entry.
                s, e = self.list[-1]
                if type(e) == type(0):
                    if start == e+1:
                        start = s
                        del self.list[-1]
        else:
            l = len(start)
            if l == 4:
                prog, min, sec, frame = start
                if prog < self.status[5] or prog > self.status[6]:
                    raise Error, 'range error'
                start = self.pmsf2msf(prog, min, sec, frame)
            elif l != 3:
                raise Error, 'syntax error'
        self.list.append((start, end))
    def settracks(self, list):
        """Replace the play list with the given whole tracks."""
        self.list = []
        for track in list:
            self.appendtrack(track)
    def setcallback(self, cb_type, func, arg):
        """Register func(arg, cb_type, data) for the given callback type."""
        if cb_type < 0 or cb_type >= 8:
            raise Error, 'type out of range'
        self.callbacks[cb_type] = (func, arg)
        if self.playing:
            # While playing, either PNUM or ATIME is reserved internally,
            # depending on whether the current stretch ends at a track or a
            # time; don't overwrite the internal parser callback.
            start, end = self.list[self.listindex]
            if type(end) == type(0):
                if cb_type != CD.PNUM:
                    self.parser.setcallback(cb_type, func, arg)
            else:
                if cb_type != CD.ATIME:
                    self.parser.setcallback(cb_type, func, arg)
    def removecallback(self, cb_type):
        """Remove the callback registered for the given callback type."""
        if cb_type < 0 or cb_type >= 8:
            raise Error, 'type out of range'
        self.callbacks[cb_type] = (None, None)
        if self.playing:
            start, end = self.list[self.listindex]
            if type(end) == type(0):
                if cb_type != CD.PNUM:
                    self.parser.removecallback(cb_type)
            else:
                if cb_type != CD.ATIME:
                    self.parser.removecallback(cb_type)
    def gettrackinfo(self, *arg):
        """Return cached track info for the given tracks (default: all)."""
        if not self.status:
            self.cachestatus()
        if not self.trackinfo:
            self.cacheinfo()
        if len(arg) == 0:
            return self.trackinfo[self.status[5]:self.status[6]+1]
        result = []
        for i in arg:
            if i < self.status[5] or i > self.status[6]:
                raise Error, 'range error'
            result.append(self.trackinfo[i])
        return result
    def cacheinfo(self):
        """Fill self.trackinfo so it can be indexed by absolute track number."""
        if not self.status:
            self.cachestatus()
        self.trackinfo = []
        # Pad the low indices with None so track numbers index directly.
        for i in range(self.status[5]):
            self.trackinfo.append(None)
        for i in range(self.status[5], self.status[6]+1):
            self.trackinfo.append(self.player.gettrackinfo(i))
    def cachestatus(self):
        """Fetch and cache the player status; fail if no disc is loaded."""
        self.status = self.player.getstatus()
        if self.status[0] == CD.NODISC:
            self.status = None
            raise Error, 'no disc in player'
    def getstatus(self):
        """Return the current (uncached) player status."""
        return self.player.getstatus()
    def play(self):
        """Play the current list (or the whole disc when the list is empty),
        dispatching parser events to the registered callbacks."""
        if not self.status:
            self.cachestatus()
        size = self.player.bestreadsize()
        self.listindex = 0
        self.playing = 0
        for i in range(8):
            func, arg = self.callbacks[i]
            if func:
                self.parser.setcallback(i, func, arg)
            else:
                self.parser.removecallback(i)
        if len(self.list) == 0:
            for i in range(self.status[5], self.status[6]+1):
                self.appendtrack(i)
        try:
            while 1:
                if not self.playing:
                    if self.listindex >= len(self.list):
                        return
                    start, end = self.list[self.listindex]
                    if type(start) == type(0):
                        dummy = self.player.seektrack(
                            start)
                    else:
                        min, sec, frame = start
                        dummy = self.player.seek(
                            min, sec, frame)
                    if type(end) == type(0):
                        # Track-number end: watch PNUM internally, forward
                        # ATIME to the user callback (if any).
                        self.parser.setcallback(
                            CD.PNUM, _dopnum, self)
                        self.end = end
                        func, arg = \
                            self.callbacks[CD.ATIME]
                        if func:
                            self.parser.setcallback(CD.ATIME, func, arg)
                        else:
                            self.parser.removecallback(CD.ATIME)
                    else:
                        # Time end: watch ATIME internally, forward PNUM.
                        min, sec, frame = end
                        self.parser.setcallback(
                            CD.ATIME, _doatime,
                            self)
                        self.end = (min * 60 + sec) * \
                            75 + frame
                        func, arg = \
                            self.callbacks[CD.PNUM]
                        if func:
                            self.parser.setcallback(CD.PNUM, func, arg)
                        else:
                            self.parser.removecallback(CD.PNUM)
                    self.playing = 1
                data = self.player.readda(size)
                if data == '':
                    # End of disc data: advance to the next list entry.
                    self.playing = 0
                    self.listindex = self.listindex + 1
                    continue
                try:
                    self.parser.parseframe(data)
                except _Stop:
                    # Raised by _doatime/_dopnum at the end of a stretch.
                    self.playing = 0
                    self.listindex = self.listindex + 1
        finally:
            self.playing = 0
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# try_quantopian.py
import datetime
import os
import pg8000
import urlparse
from flask import Flask, g, render_template, request \
,flash , redirect \
,session, url_for
from flask.ext.pymongo import PyMongo
import database
#from time import strftime
app = Flask(__name__)
app.config['AUTHOR'] = "Sean"
# The Mongo connection string comes from the environment; the username and
# password are parsed back out of it for convenience.
app.config['MONGO_URI'] = os.environ['MONGO_URI']
app.config['PASSWORD'] = urlparse.urlparse(app.config['MONGO_URI']).password
app.config['USERNAME'] = urlparse.urlparse(app.config['MONGO_URI']).username
# NOTE(review): secret_key is regenerated on every start, so sessions do not
# survive a restart -- confirm this is intended.
app.secret_key = os.urandom(24)
mongo = PyMongo(app)
def get_db():
    """Return the per-request Database handle cached on flask's 'g'."""
    db = getattr(g, "_database", None)
    if db is None:
        # Lazily open one connection per application context; it is closed
        # again by close_connection() at teardown.
        db = g._database = database.Database()
    return db
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped database handle stored on flask's 'g', if any."""
    db = getattr(g, '_database', None)
    if db is None:
        return
    db.close_connection()
    g._database = None
## ------------------------------------------------------ Web parts ----- ##
@app.route("/")
def show_entries():
    """Render the index page with every blog entry, newest first."""
    cursor = mongo.db.entries.find(sort=[('$natural', -1)])
    return render_template('show_entries.html', entries=cursor)
@app.route('/add', methods=['POST'])
def add_entry():
    """Create a blog post from the submitted form (login required)."""
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    entry = {
        "author": app.config['AUTHOR'],
        "title": request.form['title'],
        "text": request.form['text'],
        "date": datetime.datetime.utcnow(),
    }
    mongo.db.entries.insert(entry)
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route("/graph", methods=['GET', 'POST'])
def linechart():
    """Line chart using Rickshaw.

    Renders 'line_chart.html' with `stock_data`, a list of dictionaries:
     {name:'name', data:[ {x:time, y:123}, ... {x:time, y:567}],
      color: palette.color()},
    plus the stock-picker groups for the form. On POST the chart shows the
    stocks ticked in the form; on GET (or an empty/invalid selection) it
    defaults to aapl/goog/yhoo.
    """
    stock_data = []
    ##Create a set of the available stocks to check against
    ## the request, so we only ask for stocks that we have.
    query = """
        SELECT column_name FROM information_schema.columns
        WHERE table_name = 'close';
    """
    # NOTE(review): ('s') is a plain string, not a 1-tuple; this only behaves
    # like ('s',) because iterating a 1-char string yields 's'. Confirm
    # select()'s `columns` contract and consider ('s',).
    available_stocks = get_db().select(query, columns=('s'))
    # Each result row is a dict; fold all values into one flat set of names.
    available_stocks = reduce(
        lambda x, y: x.union(y.values()), available_stocks, set())
    # 'dt' is the date column of the `close` table, not a stock symbol.
    available_stocks.remove('dt')
    ## break it up into groups
    groups = ['a', 'b', 'c', 'de', 'fgh', 'ijkl', 'mn', 'opq', 'rs', 'tuv', 'wxyz']
    grouped_stocks = dict((g,[]) for g in groups)
    for s in available_stocks:
        for g, l in grouped_stocks.iteritems():
            if s[0] in g:
                l.append(s)
    for v in grouped_stocks.values():
        v.sort()
    stocks = ['aapl', 'goog', 'yhoo']
    if request.method == 'POST':
        stocks = []
        # The form has one checkbox list per letter group.
        for list_suffix in grouped_stocks.keys():
            stocks.extend(request.form.getlist('stocks_{}'.format(list_suffix)))
        stocks = [s.lower() for s in stocks]
        # Omit here any stocks that do not exist in our database
        stocks = [s for s in stocks if s in available_stocks]
        #Make sure we put _something_ on the chart
        if len(stocks) == 0:
            stocks = ['aapl', 'goog', 'yhoo']
    #OK, do the query
    # The .format(s) below is safe from injection only because `stocks` was
    # filtered against `available_stocks` above.
    query = """
        SELECT EXTRACT (EPOCH FROM dt), {} FROM close
        WHERE dt between '2001-01-01' AND '2010-01-31';
    """
    for s in stocks:
        data = get_db().select(query.format(s), columns=('x','y'))
        if data:
            stock_data.append(dict(name=s, data=data))
    return render_template(
        'line_chart.html',
        groups = groups,
        grouped_stocks=grouped_stocks,
        stock_data=stock_data)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log in -- username and password taken from the MongoLabs URI.

    GET renders the login form; POST checks the submitted credentials
    against app.config['USERNAME']/['PASSWORD'] and, on success, marks the
    session as logged in and redirects to the index.
    """
    # Removed leftover debug statement (`print request`) that dumped the
    # request object to stdout on every hit.
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = "Invalid username -- (Hint: it's your MongoLabs database username)"
        elif request.form['password'] != app.config['PASSWORD']:
            error = "Invalid password -- (Hint: it's your MongoLabs database password)"
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Clear the session's login flag and redirect to the index page."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
@app.route('/remove')
@app.route('/remove/<post_title>')
def remove_entry(post_title=None):
    """Delete the blog post titled *post_title* (must be logged in).

    Without a title the request is a no-op and redirects back to the index.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if post_title is None:
        return redirect(url_for('show_entries'))
    # Decode the captured path segment (byte string under Python 2's routing).
    post_title = post_title.decode("UTF-8")
    result = mongo.db.entries.remove({'title': post_title})
    # Fixed message: was the typo'd "New entry was sucessfuly deleted".
    # (Leftover debug `print result` removed.)
    if result:
        flash('Entry was successfully deleted')
    return redirect(url_for('show_entries'))
@app.route('/edit')
@app.route('/edit/<post_title>', methods=['GET', 'POST'])
def edit_entry(post_title=None):
    """Edit the blog post titled *post_title* (must be logged in).

    GET renders the edit form pre-filled with the post; POST replaces the
    stored document with the submitted form contents.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if post_title is None:
        return redirect(url_for('show_entries'))
    # Decode once for both branches (was duplicated per branch).
    post_title = post_title.decode('UTF-8')
    if request.method == 'GET':
        entry = mongo.db.entries.find_one({"title": post_title})
        if entry:
            return render_template('edit_entries.html', entry=entry)
        flash("Could not find {}".format(post_title))
        return redirect(url_for('show_entries'))
    # POST: find_and_modify replaces the matched document with `post`.
    post = {"author": app.config['AUTHOR'],
            "title": request.form['title'],
            'text': request.form['text'],
            'date': datetime.datetime.utcnow()}
    result = mongo.db.entries.find_and_modify({"title": post_title}, post)
    # Fixed message grammar (was "New entry was successfully update");
    # leftover debug print removed.
    if result:
        flash("Entry was successfully updated")
    return redirect(url_for('show_entries'))
## Decorator to create a custom filter to fix the dates
@app.template_filter("dateformat")
def datetimeformat(value, format='%m-%d-%Y'):
    """Jinja filter: format a datetime/date *value* via strftime(*format*)."""
    return value.strftime(format)
# Start the Flask development server when run as a script.
if __name__ == "__main__":
    app.run(debug=True)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import loader
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
  """Unit tests for the ast_util helpers (rename/copy/match/walk)."""

  def setUp(self):
    super(AstUtilTest, self).setUp()
    # Counts (target, source) pairs seen by _mock_apply_fn.
    self._invocation_counts = collections.defaultdict(lambda: 0)

  # --- rename_symbols -------------------------------------------------

  def test_rename_symbols_basic(self):
    node = parser.parse('a + b')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
    self.assertIsInstance(node.value.left.id, str)
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), '(renamed_a + b)')

  def test_rename_symbols_attributes(self):
    node = parser.parse('b.c = b.c.d')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')

  def test_rename_symbols_nonlocal(self):
    node = parser.parse('nonlocal a, b, c')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'nonlocal a, renamed_b, c')

  def test_rename_symbols_global(self):
    node = parser.parse('global a, b, c')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'global a, renamed_b, c')

  def test_rename_symbols_annotations(self):
    # Renaming must carry existing annotations over to the new node.
    node = parser.parse('a[i]')
    node = qual_names.resolve(node)
    anno.setanno(node, 'foo', 'bar')
    orig_anno = anno.getanno(node, 'foo')
    node = ast_util.rename_symbols(node,
                                   {qual_names.QN('a'): qual_names.QN('b')})
    self.assertIs(anno.getanno(node, 'foo'), orig_anno)

  def test_rename_symbols_function(self):
    node = parser.parse('def f():\n  pass')
    node = ast_util.rename_symbols(node,
                                   {qual_names.QN('f'): qual_names.QN('f1')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'def f1():\n    pass')

  # --- copy_clean -----------------------------------------------------

  def test_copy_clean(self):
    node = parser.parse(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    setattr(node, '__foo', 'bar')
    new_node = ast_util.copy_clean(node)
    self.assertIsNot(new_node, node)
    self.assertFalse(hasattr(new_node, '__foo'))

  def test_copy_clean_preserves_annotations(self):
    node = parser.parse(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    anno.setanno(node, 'foo', 'bar')
    anno.setanno(node, 'baz', 1)
    # Only annotations named in preserve_annos survive the copy.
    new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
    self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
    self.assertFalse(anno.hasanno(new_node, 'baz'))

  def test_keywords_to_dict(self):
    keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
    d = ast_util.keywords_to_dict(keywords)
    # Make sure we generate a usable dict node by attaching it to a variable and
    # compiling everything.
    node = parser.parse('def f(b): pass')
    node.body.append(ast.Return(d))
    result, _, _ = loader.load_ast(node)
    self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})

  # --- matches ----------------------------------------------------------

  def assertMatch(self, target_str, pattern_str):
    """Assert that target_str's AST matches the wildcard pattern."""
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertTrue(ast_util.matches(node, pattern))

  def assertNoMatch(self, target_str, pattern_str):
    """Assert that target_str's AST does not match the wildcard pattern."""
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertFalse(ast_util.matches(node, pattern))

  def test_matches_symbols(self):
    self.assertMatch('foo', '_')
    self.assertNoMatch('foo()', '_')
    self.assertMatch('foo + bar', 'foo + _')
    self.assertNoMatch('bar + bar', 'foo + _')
    self.assertNoMatch('foo - bar', 'foo + _')

  def test_matches_function_args(self):
    self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
                     'super(_).__init__(_)')
    self.assertMatch('super().__init__()', 'super(_).__init__(_)')
    self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
                       'super(_).__init__(_)')
    self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
    self.assertNoMatch('super(Foo, self).__init__()',
                       'super(Bar, _).__init__(_)')

  # --- apply_to_single_assignments --------------------------------------

  def _mock_apply_fn(self, target, source):
    # Record each (target, source) pair as unparsed text.
    target = parser.unparse(target, include_encoding_marker=False)
    source = parser.unparse(source, include_encoding_marker=False)
    self._invocation_counts[(target.strip(), source.strip())] += 1

  def test_apply_to_single_assignments_dynamic_unpack(self):
    node = parser.parse('a, b, c = d')
    ast_util.apply_to_single_assignments(node.targets, node.value,
                                         self._mock_apply_fn)
    self.assertDictEqual(self._invocation_counts, {
        ('a', 'd[0]'): 1,
        ('b', 'd[1]'): 1,
        ('c', 'd[2]'): 1,
    })

  def test_apply_to_single_assignments_static_unpack(self):
    node = parser.parse('a, b, c = d, e, f')
    ast_util.apply_to_single_assignments(node.targets, node.value,
                                         self._mock_apply_fn)
    self.assertDictEqual(self._invocation_counts, {
        ('a', 'd'): 1,
        ('b', 'e'): 1,
        ('c', 'f'): 1,
    })

  # --- parallel_walk ----------------------------------------------------

  def test_parallel_walk(self):
    src = """
      def f(a):
        return a + 1
    """
    node = parser.parse(textwrap.dedent(src))
    for child_a, child_b in ast_util.parallel_walk(node, node):
      self.assertEqual(child_a, child_b)

  def test_parallel_walk_string_leaves(self):
    src = """
      def f(a):
        global g
    """
    node = parser.parse(textwrap.dedent(src))
    for child_a, child_b in ast_util.parallel_walk(node, node):
      self.assertEqual(child_a, child_b)

  def test_parallel_walk_inconsistent_trees(self):
    node_1 = parser.parse(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    node_2 = parser.parse(
        textwrap.dedent("""
      def f(a):
        return a + (a * 2)
    """))
    node_3 = parser.parse(
        textwrap.dedent("""
      def f(a):
        return a + 2
    """))
    with self.assertRaises(ValueError):
      for _ in ast_util.parallel_walk(node_1, node_2):
        pass
    # There is not particular reason to reject trees that differ only in the
    # value of a constant.
    # TODO(mdan): This should probably be allowed.
    with self.assertRaises(ValueError):
      for _ in ast_util.parallel_walk(node_1, node_3):
        pass

  def assertLambdaNodes(self, matching_nodes, expected_bodies):
    """Assert each node is a gast.Lambda whose body unparse is expected."""
    self.assertEqual(len(matching_nodes), len(expected_bodies))
    for node in matching_nodes:
      self.assertIsInstance(node, gast.Lambda)
      self.assertIn(
          parser.unparse(node.body, include_encoding_marker=False).strip(),
          expected_bodies)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
import os
import signal
import uuid
from Queue import Queue
from contextlib import closing
import psycopg2
import pytest
from pgshovel.administration import create_set
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
)
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.streams_pb2 import (
BeginOperation,
CommitOperation,
MutationOperation,
RollbackOperation,
)
from pgshovel.relay.relay import (
Relay,
Worker,
)
from pgshovel.streams.batches import get_operation
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database,
)
from tests.pgshovel.streams.fixtures import reserialize
def configure_tick_frequency(dsn):
    """Zero pgq's ticker lag/idle settings so ticks can fire immediately."""
    statement = (
        "UPDATE pgq.queue SET queue_ticker_max_lag = %s, "
        "queue_ticker_idle_period = %s"
    )
    connection = psycopg2.connect(dsn)
    with closing(connection):
        with connection.cursor() as cursor:
            cursor.execute(statement, ('0', '0'))
            connection.commit()
def create_set_configuration(dsn):
    """Build a two-table ReplicationSetConfiguration pointing at *dsn*.

    auth_user replicates only (id, username); accounts_userprofile has no
    column whitelist and therefore replicates the full row.
    """
    config = ReplicationSetConfiguration()
    config.database.dsn = dsn
    config.tables.add(
        name='auth_user',
        columns=['id', 'username'],
        primary_keys=['id'],
    )
    config.tables.add(
        name='accounts_userprofile',
        primary_keys=['id'],
    )
    return config
def force_tick(connection, queue):
    """Manually trigger a pgq tick on *queue* so pending events get batched."""
    with connection.cursor() as cursor:
        cursor.execute('SELECT * FROM pgq.ticker(%s)', (queue,))
        connection.commit()
class QueueHandler(object):
    """Relay handler that re-serializes each pushed item onto a Queue."""

    def __init__(self, queue):
        self.queue = queue

    def push(self, items):
        """Reserialize every item in *items* and enqueue it, in order."""
        put = self.queue.put
        for entry in items:
            put(reserialize(entry))
def get_events(queue, n):
    """Pop *n* events off *queue*, blocking up to 1 second for each.

    Raises Queue.Empty if an event does not arrive in time.
    """
    events = []
    for _ in xrange(n):
        events.append(queue.get(True, 1))
    return events
def assert_same_batch(events):
    """Assert all *events* carry one and the same batch identifier.

    Protocol buffer messages aren't hashable, so the comparison is done on
    the serialized immutable form of each identifier.
    """
    identifiers = set(
        get_operation(event).batch_identifier.SerializeToString()
        for event in events
    )
    assert len(identifiers) == 1
def unwrap_transaction(events):
    """Strip the Begin/Commit-or-Rollback envelope from a list of events.

    Asserts the events form one well-formed transaction (begins first, ends
    with commit or rollback, same batch identifier at both ends) and returns
    only the operations in between.
    """
    operations = map(lambda event: get_operation(get_operation(event)), events)
    assert isinstance(operations[0], BeginOperation)
    assert isinstance(operations[-1], (CommitOperation, RollbackOperation))
    assert events[0].batch_operation.batch_identifier == events[-1].batch_operation.batch_identifier
    return operations[1:-1]
def test_worker(cluster):
    """End-to-end check that a Worker streams INSERT mutations for a set.

    Verifies both a column-whitelisted table (auth_user) and a table with
    no whitelist (accounts_userprofile, which must replicate the full row).
    """
    dsn = create_temporary_database()
    create_set(cluster, 'example', create_set_configuration(dsn))
    configure_tick_frequency(dsn)
    queue = Queue()
    worker = Worker(cluster, dsn, 'example', 'consumer', QueueHandler(queue))
    worker.start()
    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))
    # Expect begin + mutation + commit for the single insert.
    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)
    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=1),
        Column(name='username', string='example'),
    ]
    # also make sure tables without column whitelist defined replicate the entire row state
    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO accounts_userprofile (user_id, display_name) VALUES (%s, %s)', (1, 'example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))
    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)
    assert mutation.table == 'accounts_userprofile'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='display_name', string='example'),
        Column(name='id', integer64=1),
        Column(name='user_id', integer64=1),
    ]
    worker.stop_async()
    worker.result(1)
def test_relay(cluster):
    """End-to-end check that a Relay streams mutations and survives a
    terminated database connection.
    """
    primary_dsn = create_temporary_database()
    secondary_dsn = create_temporary_database()
    create_set(cluster, 'example', create_set_configuration(primary_dsn))
    configure_tick_frequency(primary_dsn)
    queue = Queue()
    relay = Relay(cluster, 'example', 'consumer', QueueHandler(queue), throttle=0.1)
    relay.start()
    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))
    # Expect begin + mutation + commit for the single insert.
    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)
    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=1),
        Column(name='username', string='example'),
    ]
    # ensure the connection recovers after being killed
    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        connection.autocommit = True
        cursor.execute('SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid != pg_backend_pid()')
    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))
    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)
    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=2),
        Column(name='username', string='example'),
    ]
    relay.stop_async()
    relay.result(1)
    # Disabled scenario kept below as an inert string literal (see XXX note).
    """
    # also test it's ability to handle zookeeper disconnection
    relay = Relay(cluster, 'example', 'consumer', QueueHandler(queue), throttle=0.1)
    relay.start()
    zookeeper_server, _ = zookeeper
    zookeeper_server.stop()
    relay.result(10)
    # XXX: have to restart for services rn, need to fix
    zookeeper_server.start()
    """
| |
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.utils.encoding import smart_unicode
class GenericForeignKey(object):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """
    def __init__(self, ct_field="content_type", fk_field="object_id"):
        # Names of the concrete fields that hold the ContentType FK and the
        # target object's primary key.
        self.ct_field = ct_field
        self.fk_field = fk_field

    def contribute_to_class(self, cls, name):
        """Install this descriptor on *cls* under *name* as a virtual field."""
        self.name = name
        self.model = cls
        # Per-instance attribute used to cache the resolved related object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)

        # For some reason I don't totally understand, using weakrefs here doesn't work.
        signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)

        # Connect myself as the descriptor for this field
        setattr(cls, name, self)

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            kwargs[self.ct_field] = self.get_content_type(obj=value)
            kwargs[self.fk_field] = value._get_pk_val()

    def get_content_type(self, obj=None, id=None, using=None):
        """Resolve the ContentType for *obj*, or look one up by *id*."""
        # Convenience function using get_model avoids a circular import when
        # using this model
        ContentType = get_model("contenttypes", "contenttype")
        if obj:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
        elif id:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # A dangling generic FK resolves to None rather than raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError(u"%s must be accessed via instance" % self.related.opts.object_name)

        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()

        # Keep the underlying fields and the cache in sync with the new value.
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(RelatedField, Field):
    """Provides an accessor to generic related objects (e.g. comments)"""

    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(to,
                            related_name=kwargs.pop('related_name', None),
                            limit_choices_to=kwargs.pop('limit_choices_to', None),
                            symmetrical=kwargs.pop('symmetrical', True))

        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")

        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        Field.__init__(self, **kwargs)

    def get_choices_default(self):
        """Return choices without the blank option (a relation is never blank)."""
        return Field.get_choices(self, include_blank=False)

    def value_to_string(self, obj):
        """Serialize the relation as the list of related primary keys."""
        qs = getattr(obj, self.name).all()
        return smart_unicode([instance._get_pk_val() for instance in qs])

    # The m2m_* methods below make this field look like a ManyToManyField
    # to the rest of the ORM machinery.
    def m2m_db_table(self):
        return self.rel.to._meta.db_table

    def m2m_column_name(self):
        return self.object_id_field_name

    def m2m_reverse_name(self):
        return self.rel.to._meta.pk.column

    def contribute_to_class(self, cls, name):
        super(GenericRelation, self).contribute_to_class(cls, name)

        # Save a reference to which model this class is on for future use
        self.model = cls

        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))

    def contribute_to_related_class(self, cls, related):
        # Generic relations add nothing to the related class.
        pass

    def set_attributes_from_rel(self):
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def db_type(self, connection):
        # Since we're simulating a ManyToManyField, in effect, best return the
        # same db_type as well.
        return None

    def extra_filters(self, pieces, pos, negate):
        """
        Return an extra filter to the queryset so that the results are filtered
        on the appropriate content type.
        """
        if negate:
            return []
        ContentType = get_model("contenttypes", "contenttype")
        content_type = ContentType.objects.get_for_model(self.model)
        prefix = "__".join(pieces[:pos + 1])
        return [("%s__%s" % (prefix, self.content_type_field_name),
                 content_type)]
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        # This import is done here to avoid circular import importing this module
        from django.contrib.contenttypes.models import ContentType

        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)

        qn = connection.ops.quote_name

        # The manager is pinned to this instance's content type and pk, so
        # queries through it only return rows related to *instance*.
        manager = RelatedManager(
            model = rel_model,
            instance = instance,
            symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table = qn(self.field.m2m_db_table()),
            source_col_name = qn(self.field.m2m_column_name()),
            target_col_name = qn(self.field.m2m_reverse_name()),
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance),
            content_type_field_name = self.field.content_type_field_name,
            object_id_field_name = self.field.object_id_field_name
        )

        return manager

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        # Assignment replaces the full related set: clear, then re-add.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                     join_table=None, source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.core_filters = core_filters or {}
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            self.join_table = join_table
            # NOTE(review): the line below immediately overwrites the
            # join_table argument with the model's own db_table; confirm the
            # parameter is intentionally ignored before relying on it.
            self.join_table = model._meta.db_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.pk_val = self.instance._get_pk_val()

        def get_query_set(self):
            # Restrict the base queryset to rows pointing at this instance
            # via the generic (content type, object id) pair.
            query = {
                '%s__pk' % self.content_type_field_name : self.content_type.id,
                '%s__exact' % self.object_id_field_name : self.pk_val,
            }
            return superclass.get_query_set(self).using(self.instance._state.db).filter(**query)

        def add(self, *objs):
            # Re-point each object's generic FK at this instance and save it.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True

        def remove(self, *objs):
            # Generic relations have no join row, so removal deletes the object.
            for obj in objs:
                obj.delete(using=self.instance._state.db)
        remove.alters_data = True

        def clear(self):
            for obj in self.all():
                obj.delete(using=self.instance._state.db)
        clear.alters_data = True

        def create(self, **kwargs):
            # Force the generic FK fields so the new object relates to us.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            return super(GenericRelatedObjectManager, self).create(**kwargs)
        create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
    """Relation descriptor used by GenericRelation (mirrors ManyToManyRel)."""
    def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
        self.to = to
        self.related_name = related_name
        self.limit_choices_to = limit_choices_to or {}
        self.symmetrical = symmetrical
        self.multiple = True
        # Generic relations never go through an explicit intermediary model.
        self.through = None
class BaseGenericInlineFormSet(BaseModelFormSet):
    """
    A formset for generic inline objects to a parent.

    The class attributes ct_field / ct_fk_field are attached by
    generic_inlineformset_factory().
    """
    def __init__(self, data=None, files=None, instance=None, save_as_new=None,
                 prefix=None, queryset=None):
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        opts = self.model._meta
        self.instance = instance
        self.rel_name = '-'.join((
            opts.app_label, opts.object_name.lower(),
            self.ct_field.name, self.ct_fk_field.name,
        ))
        if self.instance is None or self.instance.pk is None:
            # Unsaved parent: nothing can point at it yet.
            qs = self.model._default_manager.none()
        else:
            if queryset is None:
                queryset = self.model._default_manager
            # Only rows whose generic FK points at the parent instance.
            qs = queryset.filter(**{
                self.ct_field.name: ContentType.objects.get_for_model(self.instance),
                self.ct_fk_field.name: self.instance.pk,
            })
        super(BaseGenericInlineFormSet, self).__init__(
            queryset=qs, data=data, files=files,
            prefix=prefix
        )

    #@classmethod
    def get_default_prefix(cls):
        """Build a form prefix unique to this model + generic-link fields."""
        opts = cls.model._meta
        return '-'.join((opts.app_label, opts.object_name.lower(),
                        cls.ct_field.name, cls.ct_fk_field.name,
        ))
    get_default_prefix = classmethod(get_default_prefix)

    def save_new(self, form, commit=True):
        """Save a newly added inline row, wiring its generic FK to the parent."""
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        kwargs = {
            self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
            self.ct_fk_field.get_attname(): self.instance.pk,
        }
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
                                  formset=BaseGenericInlineFormSet,
                                  ct_field="content_type", fk_field="object_id",
                                  fields=None, exclude=None,
                                  extra=3, can_order=False, can_delete=True,
                                  max_num=None,
                                  formfield_callback=lambda f: f.formfield()):
    """
    Returns a ``GenericInlineFormSet`` for the given kwargs.

    You must provide ``ct_field`` and ``fk_field`` if they are different from
    the defaults ``content_type`` and ``object_id`` respectively.
    """
    opts = model._meta
    # Avoid a circular import.
    from django.contrib.contenttypes.models import ContentType
    # if there is no field called `ct_field` let the exception propagate
    ct_field = opts.get_field(ct_field)
    if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
        # Fixed message: it previously said "fk_name" although the field being
        # validated here is the content-type field.
        raise Exception("ct_field '%s' is not a ForeignKey to ContentType" % ct_field)
    fk_field = opts.get_field(fk_field)  # let the exception propagate
    # The generic-link fields are managed by the formset itself, so keep them
    # out of the rendered form.
    if exclude is not None:
        exclude = list(exclude)
        exclude.extend([ct_field.name, fk_field.name])
    else:
        exclude = [ct_field.name, fk_field.name]
    FormSet = modelformset_factory(model, form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra, can_delete=can_delete, can_order=can_order,
                                   fields=fields, exclude=exclude, max_num=max_num)
    FormSet.ct_field = ct_field
    FormSet.ct_fk_field = fk_field
    return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
    """Admin inline whose rows link to the parent via a generic foreign key."""
    # Names of the content-type / object-id fields on the inline model.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    def get_formset(self, request, obj=None):
        """Build the generic inline formset class for this admin."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        # Normalize an empty exclude list to None for the factory.
        exclude = exclude or None
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": self.formfield_for_dbfield,
            "formset": self.formset,
            "extra": self.extra,
            "can_delete": self.can_delete,
            "can_order": False,
            "fields": fields,
            "max_num": self.max_num,
            "exclude": exclude
        }
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    """Generic inline rendered with the stacked admin template."""
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    """Generic inline rendered with the tabular admin template."""
    template = 'admin/edit_inline/tabular.html'
| |
"""Sends commands to multiple devices"""
import wx
import os
from pydispatch import dispatcher
import json
from ObjectListView import ObjectListView, ColumnDefn
from scripts import mdc_gui
class SendCommandConfig(mdc_gui.MultiSend):
    """Dialog that sends one command -- or every catalogued query -- to all
    checked DXLink devices through the parent frame's telnet job queue."""
    def __init__(self, parent, device_list, dxlink_model):
        """Build the dialog, load the command catalogue and list the devices.

        parent -- owning frame; supplies preferences, telnet_job_queue
            and set_status().
        device_list -- device objects to display (all checked by default).
        dxlink_model -- catalogue key ('dxrx', 'dxtx', 'dxftx', 'dxfrx')
            selecting which command set applies.
        """
        mdc_gui.MultiSend.__init__(self, parent)
        self.parent = parent
        self.prefs = self.parent.preferences
        self.SetTitle(f"Multiple Send Command {dxlink_model}")
        try:
            # rx_tx_commands.txt is JSON: model -> {command: (port, actions,
            # description, syntax)} -- see update_action_combo for usage.
            with open(os.path.join("send_commands", "rx_tx_commands.txt"), 'rb') as command_file:
                self.rx_tx_commands = json.load(command_file)
        except IOError:
            dlg = wx.MessageDialog(parent=self, message='Cannot find ' +
                                   'rx_tx_commands.txt \nYou will now only be' +
                                   ' able to send commands manually. \nTo ' +
                                   'have the system commands auto load, ' +
                                   're-install the \nprogram or replace: ' +
                                   os.getcwd() + '\\send_commands\\' +
                                   'rx_tx_commands.txt',
                                   caption='Please re-install program.',
                                   style=wx.OK)
            dlg.ShowModal()
            dlg.Destroy()
            # Fall back to empty catalogues so manual entry still works.
            self.rx_tx_commands = {'dxrx': {},
                                   'dxtx': {},
                                   'dxftx': {},
                                   'dxfrx': {}}
        self.device_list = ObjectListView(self.olv_panel, wx.ID_ANY,
                                          size=wx.Size(-1, 200),
                                          style=wx.LC_REPORT |
                                          wx.SUNKEN_BORDER |
                                          wx.RESIZE_BORDER)
        self.device_list.SetColumns(
            [ColumnDefn("Model", "center", 130, "model"),
             ColumnDefn("IP", "center", 100, "ip_address"),
             ColumnDefn("MAC", "center", 100, "mac_address"),
             ColumnDefn("Device", "center", 80, "device"),
             ColumnDefn("Status", "left", 120, "status")])
        self.olv_sizer.Add(self.device_list, 1, wx.ALL | wx.EXPAND, 0)
        self.olv_sizer.Layout()
        self.obj = []
        self.port = None  # port of the currently selected command
        self.result_string = ''
        self.send_btn.Disable()  # enabled once a command is chosen
        self.dxlink_model = dxlink_model
        self.on_query(None)  # start in query mode
        self.completionlist = []
        self.errorlist = []
        self.waiting_result = True
        self.waiting_delay = True
        self.action_cmb.Enable(False)
        self.device_list.CreateCheckStateColumn()
        self.device_list.SetObjects(device_list)
        # Pre-check every device so "send to all" is the default.
        for obj in self.device_list.GetObjects():
            self.device_list.ToggleCheck(obj)
        self.device_list.RefreshObjects(self.device_list.GetObjects())
        dispatcher.connect(self.on_result,
                           signal="send_command result",
                           sender=dispatcher.Any)
        dispatcher.connect(self.update_window,
                           signal="Update Window",
                           sender=dispatcher.Any)
        self.time_out = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_time_out, self.time_out)
    def on_command_combo(self, _):
        """A command was picked: load its actions and enable Send."""
        if not self.query_chk.GetValue():
            self.action_cmb.Enable(True)
            self.action_cmb.SetValue('Actions')
        self.update_action_combo(self.commands_cmb.GetValue())
        self.send_btn.Enable()
    def on_action_combo(self, _):
        """An action was picked: rebuild the command string."""
        self.update_string()
    def on_query(self, _):
        """Toggle between query ('?CMD') and command entries, trying to
        carry the equivalent selection across the switch."""
        old = self.commands_cmb.GetValue()
        self.commands_cmb.Clear()
        self.description_txt.Clear()
        self.syntax_txt.Clear()
        if self.query_chk.GetValue():
            self.commands_cmb.SetValue('Query')
        else:
            self.commands_cmb.SetValue('Commands')
        for item in sorted(self.rx_tx_commands[self.dxlink_model]):
            if self.query_chk.GetValue():
                if item[:1] == '?':  # only add query
                    self.commands_cmb.Append(item)
            else:
                if item[:1] != '?':  # only add commands
                    self.commands_cmb.Append(item)
        if self.query_chk.GetValue():
            self.action_cmb.Enable(False)
            # Re-select the query form ('?' + old command) if it exists.
            for item in self.commands_cmb.GetItems():
                if item[1:] == old:
                    self.commands_cmb.SetValue(item)
                    self.on_command_combo(None)
                    break
            else:
                self.string_port_txt.Clear()
                self.string_command_txt.Clear()
        else:
            self.action_cmb.Enable(False)
            # Re-select the command form (old query minus '?') if it exists.
            for item in self.commands_cmb.GetItems():
                if item == old[1:]:
                    self.commands_cmb.SetValue(item)
                    self.on_command_combo(None)
                    break
            else:
                self.string_port_txt.Clear()
                self.string_command_txt.Clear()
    def update_action_combo(self, selection):
        """Load actions, port, description and syntax for *selection* from
        the catalogue entry (port, actions, description, syntax)."""
        self.action_cmb.Clear()
        for item in self.rx_tx_commands[self.dxlink_model][selection][1]:
            self.action_cmb.Append(item)
        self.port = self.rx_tx_commands[self.dxlink_model][selection][0]
        self.description_txt.SetValue(
            self.rx_tx_commands[self.dxlink_model][selection][2])
        self.syntax_txt.SetValue(
            self.rx_tx_commands[self.dxlink_model][selection][3])
        self.action_cmb.SetValue("Actions")
        self.update_string()
    def update_string(self):
        """Compose '<command>[-<action>]' and display it with its port."""
        if self.action_cmb.GetValue() == "Actions":
            action = ""  # 'Actions' is the placeholder entry -- no action yet
        elif self.action_cmb.GetValue() == "":
            action = ''
        else:
            action = "-" + self.action_cmb.GetValue()
        output = self.commands_cmb.GetValue() + action
        self.string_port_txt.SetValue(str(self.port))
        self.string_command_txt.SetValue(output)
    def on_get_all(self, _):
        """Toggle 'send every query' mode on or off."""
        if self.get_all_chk.GetValue():
            self.send_btn.Enable(True)
            self.action_cmb.Enable(False)
            self.commands_cmb.Enable(False)
            self.command_chk.Enable(False)
            self.query_chk.SetValue(True)
            self.on_query(None)
        else:
            self.action_cmb.Enable(False)
            self.commands_cmb.Enable(True)
            self.command_chk.Enable(True)
            self.send_btn.Enable(False)
    def on_send(self, _):
        """Queue the current command for every checked device."""
        if self.check_for_none_selected():
            return
        self.result_string = ''
        self.errorlist = []
        self.completionlist = []
        if self.get_all_chk.GetValue():
            self.on_send_all()
            return
        for obj in self.device_list.GetCheckedObjects():
            # Blank device/system fields mean "use 0" in the D:P:S address.
            if obj.device == " ":
                device = 0
            else:
                device = obj.device
            if obj.system == " ":
                system = 0
            else:
                system = obj.system
            port = self.string_port_txt.GetValue()
            command_txt = self.string_command_txt.GetValue()
            output = f"send_command {device}:{port}:{system},\"\'{command_txt}\'\""
            # print('Output: ', output)
            self.parent.telnet_job_queue.put(
                ['send_command', obj,
                 self.prefs.telnet_timeout,
                 output])
            self.parent.set_status((obj, "Queued"))
            self.device_list.RefreshObject(obj)
        # self.display_progress()
    def on_send_all(self):
        """Queue every catalogued query as one multi-command job per device."""
        for obj in self.device_list.GetCheckedObjects():
            command_list = []
            for item in self.commands_cmb.GetItems():
                command_list.append(
                    (str(item),
                     str(self.rx_tx_commands[self.dxlink_model][item][0])))
            self.parent.telnet_job_queue.put(
                ['multiple_send_command', obj,
                 self.prefs.telnet_timeout,
                 command_list])
    def update_window(self, sender):
        """Refresh the list row for *sender* as its status changes."""
        self.device_list.RefreshObject(sender)
    def check_for_none_selected(self):
        """Warn and return True when no device is checked; otherwise fall
        through and return None (falsy), letting callers proceed."""
        if len(self.device_list.GetCheckedObjects()) == 0:
            dlg = wx.MessageDialog(parent=self, message='Nothing selected...' +
                                   '\nPlease use the check box on the device' +
                                   ' you want to select',
                                   caption='Nothing Selected',
                                   style=wx.OK)
            dlg.ShowModal()
            dlg.Destroy()
            return True
    def on_time_out(self, _):
        """Timer expired: stop waiting and record a timeout message."""
        self.waiting_result = False
        self.result_string = "*** Timed out waiting for response ***"
    def on_result(self, sender):
        """Record a send result; sender appears to be (ok_flag, text) --
        confirm against the 'send_command result' dispatcher signal."""
        # self.waiting_result = False
        if sender[0]:
            self.result_string = sender[1]
        else:
            print("error ", sender[1])
    def on_exit(self, _):
        """Close the dialog."""
        self.Destroy()
    def on_abort(self, _):
        """Flag the parent to abort queued work, then close."""
        self.parent.abort = True
        self.Destroy()
| |
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# The directories end up in the debug info, so the easy way of getting
# a reproducible build is to run it in a know absolute directory.
# We use a directory in /builds/slave because the mozilla infrastructure
# cleans it up automatically.
base_dir = "/builds/slave/moz-toolchain"
source_dir = base_dir + "/src"    # tarballs are unpacked here
build_dir = base_dir + "/build"   # all out-of-tree builds happen here
aux_inst_dir = build_dir + '/aux_inst'  # prefix for helper tools (make, tar, gawk, unifdef)
old_make = aux_inst_dir + '/bin/make'   # locally built make 3.81, the default for build_package
##############################################
import urllib
import os
import os.path
import shutil
import tarfile
import subprocess
def download_uri(uri):
    """Fetch *uri* into the current directory and return the local file
    name (the URI's last path component).  A file that already exists
    under that name is reused, so downloads are cached between runs."""
    local_name = uri.split('/')[-1]
    if not os.path.exists(local_name):
        urllib.urlretrieve(uri, local_name)
    return local_name
def extract(tar, path):
    """Unpack the tar archive *tar* (any compression tarfile supports)
    into the directory *path*.

    Fix: close the archive when done -- the original leaked the open
    file handle, which can exhaust descriptors across the many
    extractions this script performs.
    """
    t = tarfile.open(tar)
    try:
        t.extractall(path)
    finally:
        t.close()
def check_run(args):
    """Run *args* as a child process and abort on a non-zero exit status."""
    status = subprocess.call(args)
    assert status == 0
def run_in(path, args):
    """Run *args* (via check_run) with *path* as the working directory.

    Fix: restore the original working directory even when the command
    fails -- the original left the process stranded in *path* when
    check_run's assertion fired, corrupting every later relative path.
    """
    d = os.getcwd()
    os.chdir(path)
    try:
        check_run(args)
    finally:
        os.chdir(d)
def patch(patch, plevel, srcdir):
    """Apply the patch file *patch* to *srcdir* at strip level *plevel*,
    silently and with no fuzz; abort (via check_run) if any hunk fails."""
    abs_patch = os.path.realpath(patch)
    cmd = ["patch", "-d", srcdir, "-p%s" % plevel,
           "-i", abs_patch, "--fuzz=0", "-s"]
    check_run(cmd)
def build_package(package_source_dir, package_build_dir, configure_args,
                  make = old_make):
    """configure / make -j8 / make install an autoconf package out-of-tree.

    *make* defaults to the locally built make (old_make) so every package
    is driven by the same known-good tool."""
    if not os.path.exists(package_build_dir):
        os.mkdir(package_build_dir)
    configure = "%s/configure" % package_source_dir
    run_in(package_build_dir, [configure] + configure_args)
    run_in(package_build_dir, [make, "-j8"])
    run_in(package_build_dir, [make, "install"])
def build_aux_tools(base_dir):
    """Build the helper tools (make, unifdef, tar, gawk) into aux_inst_dir,
    bootstrapping the first one with the system make."""
    # make comes first: everything after this can use the fresh old_make.
    build_package(make_source_dir, base_dir + '/make_build',
                  ["--prefix=%s" % aux_inst_dir], "make")
    # unifdef ships no configure script; build it in its source tree.
    run_in(unifdef_source_dir, ["make"])
    run_in(unifdef_source_dir, ["make", "prefix=%s" % aux_inst_dir, "install"])
    build_package(tar_source_dir, base_dir + '/tar_build',
                  ["--prefix=%s" % aux_inst_dir])
    build_package(gawk_source_dir, base_dir + '/gawk_build',
                  ["--prefix=%s" % aux_inst_dir])
def with_env(env, f):
    """Call *f* with os.environ temporarily augmented by *env*.

    Fix: restore the saved environment in a ``finally`` block -- the
    original left the modified environment in place when *f* raised
    (e.g. when a build command failed check_run's assertion), poisoning
    every subsequent build step.
    """
    old_env = os.environ.copy()
    os.environ.update(env)
    try:
        f()
    finally:
        os.environ.clear()
        os.environ.update(old_env)
def build_glibc(env, stage_dir, inst_dir):
    """Build glibc under *stage_dir*, installing to *inst_dir*, with *env*
    temporarily applied to the process environment."""
    with_env(env, lambda: build_glibc_aux(stage_dir, inst_dir))
def build_glibc_aux(stage_dir, inst_dir):
    """configure+build glibc in <stage_dir>/glibc, installed to *inst_dir*
    with libraries under lib64."""
    configure_args = ["--disable-profile",
                      "--enable-add-ons=nptl",
                      "--without-selinux",
                      "--enable-kernel=%s" % linux_version,
                      "--libdir=%s/lib64" % inst_dir,
                      "--prefix=%s" % inst_dir]
    build_package(glibc_source_dir, stage_dir + '/glibc', configure_args)
def build_linux_headers_aux(inst_dir):
    """Sanity-check the kernel headers, then install them under
    <inst_dir>/include (staged through the kernel tree's 'dest' dir)."""
    staging = linux_source_dir + "/dest"
    run_in(linux_source_dir, [old_make, "headers_check"])
    run_in(linux_source_dir,
           [old_make, "INSTALL_HDR_PATH=dest", "headers_install"])
    shutil.move(staging + "/include", inst_dir + '/include')
def build_linux_headers(inst_dir):
    """Install the kernel headers with the aux tools first on PATH."""
    env = {"PATH": aux_inst_dir + "/bin:%s" % os.environ["PATH"]}
    with_env(env, lambda: build_linux_headers_aux(inst_dir))
def build_gcc(stage_dir, is_stage_one):
    """configure+build gcc for one stage, installed to <stage_dir>/inst,
    against the gmp/mpfr/mpc previously installed in <stage_dir>/libinst."""
    tool_inst_dir = stage_dir + '/inst'
    lib_inst_dir = stage_dir + '/libinst'
    configure_args = ["--prefix=%s" % tool_inst_dir,
                      "--enable-__cxa_atexit",
                      "--with-gmp=%s" % lib_inst_dir,
                      "--with-mpfr=%s" % lib_inst_dir,
                      "--with-mpc=%s" % lib_inst_dir,
                      "--enable-languages=c,c++",
                      "--disable-lto",
                      "--disable-multilib",
                      "--disable-bootstrap"]
    if is_stage_one:
        # We build the stage1 gcc without shared libraries. Otherwise its
        # libgcc.so would depend on the system libc.so, which causes problems
        # when it tries to use that libgcc.so and the libc we are about to
        # build.
        configure_args.append("--disable-shared")
    build_package(gcc_source_dir, stage_dir + '/gcc', configure_args)
    if is_stage_one:
        # The glibc build system uses -lgcc_eh, but at least in this setup
        # libgcc.a has all it needs, so point libgcc_eh.a at libgcc.a.
        d = tool_inst_dir + "/lib/gcc/x86_64-unknown-linux-gnu/4.5.2/"
        os.symlink(d + "libgcc.a", d + "libgcc_eh.a")
def build_one_stage(env, stage_dir, is_stage_one):
    """Run one full toolchain stage with *env* applied to the environment."""
    with_env(env, lambda: build_one_stage_aux(stage_dir, is_stage_one))
def build_one_stage_aux(stage_dir, is_stage_one):
    """Build one toolchain stage under *stage_dir*: gcc's math deps
    (gmp, mpfr, mpc), kernel headers, zlib and binutils, then glibc and
    gcc in a stage-dependent order (see the comment near the end)."""
    os.mkdir(stage_dir)
    lib_inst_dir = stage_dir + '/libinst'
    gmp_build_dir = stage_dir + '/gmp'
    build_package(gmp_source_dir, gmp_build_dir,
                  ["--prefix=%s" % lib_inst_dir, "--disable-shared"])
    mpfr_build_dir = stage_dir + '/mpfr'
    build_package(mpfr_source_dir, mpfr_build_dir,
                  ["--prefix=%s" % lib_inst_dir, "--disable-shared",
                   "--with-gmp=%s" % lib_inst_dir])
    mpc_build_dir = stage_dir + '/mpc'
    build_package(mpc_source_dir, mpc_build_dir,
                  ["--prefix=%s" % lib_inst_dir, "--disable-shared",
                   "--with-gmp=%s" % lib_inst_dir,
                   "--with-mpfr=%s" % lib_inst_dir])
    tool_inst_dir = stage_dir + '/inst'
    os.mkdir(tool_inst_dir)
    # Pure 64-bit layout: lib is just an alias of lib64.
    os.mkdir(tool_inst_dir + '/lib64')
    os.symlink('lib64', tool_inst_dir + '/lib')
    build_linux_headers(tool_inst_dir)
    # zlib's configure only works if run from the source dir, copy the source
    zlib_build_dir = stage_dir + '/zlib'
    shutil.copytree(zlib_source_dir, zlib_build_dir)
    build_package(zlib_build_dir, zlib_build_dir,
                  ["--prefix=%s" % tool_inst_dir])
    binutils_build_dir = stage_dir + '/binutils'
    build_package(binutils_source_dir, binutils_build_dir,
                  ["--prefix=%s" % tool_inst_dir,
                   "--without-zlib"])
    # During stage one we have to build gcc first, this glibc doesn't even
    # build with gcc 4.6. During stage two, we have to build glibc first.
    # The problem is that libstdc++ is built with xgcc and if glibc has
    # not been built yet xgcc will use the system one.
    if is_stage_one:
        build_gcc(stage_dir, is_stage_one)
        build_glibc({"CC" : tool_inst_dir + "/bin/gcc",
                     "CXX" : tool_inst_dir + "/bin/g++"},
                    stage_dir, tool_inst_dir)
    else:
        build_glibc({}, stage_dir, tool_inst_dir)
        build_gcc(stage_dir, is_stage_one)
def build_tar_package(tar, name, base, directory):
    """Create archive *name* (made absolute) from *directory* inside *base*,
    pinning mtime and owner so the result is byte-for-byte reproducible."""
    archive = os.path.realpath(name)
    run_in(base, [tar, "-cf", archive, "--mtime=2012-01-01", "--owner=root",
                  directory])
##############################################
def build_source_dir(prefix, version):
    """Return the path under source_dir where <prefix><version> unpacks."""
    return "%s/%s%s" % (source_dir, prefix, version)
# Pinned component versions -- chosen for reproducibility; do not bump casually.
binutils_version = "2.21.1"
glibc_version = "2.5.1"
linux_version = "2.6.18"
tar_version = "1.26"
gawk_version = "3.1.5"
make_version = "3.81"
gcc_version = "4.5.2"
mpfr_version = "2.4.2"
zlib_version = "1.2.3"
gmp_version = "5.0.1"
mpc_version = "0.8.1"
unifdef_version = "2.6"
# Canonical download locations for every tarball.
binutils_source_uri = "http://ftp.gnu.org/gnu/binutils/binutils-%sa.tar.bz2" % \
    binutils_version
glibc_source_uri = "http://ftp.gnu.org/gnu/glibc/glibc-%s.tar.bz2" % \
    glibc_version
linux_source_uri = "http://www.kernel.org/pub/linux/kernel/v2.6/linux-%s.tar.bz2" % \
    linux_version
tar_source_uri = "http://ftp.gnu.org/gnu/tar/tar-%s.tar.bz2" % \
    tar_version
gawk_source_uri = "http://ftp.gnu.org/gnu/gawk/gawk-%s.tar.bz2" % \
    gawk_version
make_source_uri = "http://ftp.gnu.org/gnu/make/make-%s.tar.bz2" % \
    make_version
unifdef_source_uri = "http://dotat.at/prog/unifdef/unifdef-%s.tar.gz" % \
    unifdef_version
gcc_source_uri = "http://ftp.gnu.org/gnu/gcc/gcc-%s/gcc-%s.tar.bz2" % \
    (gcc_version, gcc_version)
mpfr_source_uri = "http://www.mpfr.org/mpfr-%s/mpfr-%s.tar.bz2" % \
    (mpfr_version, mpfr_version)
zlib_source_uri = "http://iweb.dl.sourceforge.net/project/libpng/zlib/%s/zlib-%s.tar.bz2" % (zlib_version, zlib_version)
gmp_source_uri = "http://ftp.gnu.org/gnu/gmp/gmp-%s.tar.bz2" % gmp_version
mpc_source_uri = "http://www.multiprecision.org/mpc/download/mpc-%s.tar.gz" % \
    mpc_version
# Fetch everything up front (download_uri reuses already-present files).
binutils_source_tar = download_uri(binutils_source_uri)
glibc_source_tar = download_uri(glibc_source_uri)
linux_source_tar = download_uri(linux_source_uri)
tar_source_tar = download_uri(tar_source_uri)
gawk_source_tar = download_uri(gawk_source_uri)
make_source_tar = download_uri(make_source_uri)
unifdef_source_tar = download_uri(unifdef_source_uri)
mpc_source_tar = download_uri(mpc_source_uri)
mpfr_source_tar = download_uri(mpfr_source_uri)
zlib_source_tar = download_uri(zlib_source_uri)
gmp_source_tar = download_uri(gmp_source_uri)
gcc_source_tar = download_uri(gcc_source_uri)
# Where each tarball unpacks under source_dir.
binutils_source_dir = build_source_dir('binutils-', binutils_version)
glibc_source_dir = build_source_dir('glibc-', glibc_version)
linux_source_dir = build_source_dir('linux-', linux_version)
tar_source_dir = build_source_dir('tar-', tar_version)
gawk_source_dir = build_source_dir('gawk-', gawk_version)
make_source_dir = build_source_dir('make-', make_version)
unifdef_source_dir = build_source_dir('unifdef-', unifdef_version)
mpc_source_dir = build_source_dir('mpc-', mpc_version)
mpfr_source_dir = build_source_dir('mpfr-', mpfr_version)
zlib_source_dir = build_source_dir('zlib-', zlib_version)
gmp_source_dir = build_source_dir('gmp-', gmp_version)
gcc_source_dir = build_source_dir('gcc-', gcc_version)
# Unpack and patch the sources only on the first run.
if not os.path.exists(source_dir):
    os.makedirs(source_dir)
    extract(binutils_source_tar, source_dir)
    patch('binutils-deterministic.patch', 1, binutils_source_dir)
    extract(glibc_source_tar, source_dir)
    extract(linux_source_tar, source_dir)
    patch('glibc-deterministic.patch', 1, glibc_source_dir)
    run_in(glibc_source_dir, ["autoconf"])
    extract(tar_source_tar, source_dir)
    extract(gawk_source_tar, source_dir)
    extract(make_source_tar, source_dir)
    extract(unifdef_source_tar, source_dir)
    extract(mpc_source_tar, source_dir)
    extract(mpfr_source_tar, source_dir)
    extract(zlib_source_tar, source_dir)
    extract(gmp_source_tar, source_dir)
    extract(gcc_source_tar, source_dir)
    patch('plugin_finish_decl.diff', 0, gcc_source_dir)
    patch('libtool-74c8993c178a1386ea5e2363a01d919738402f30.patch', 1, gcc_source_dir)
    patch('pr49911.diff', 1, gcc_source_dir)
    patch('r159628-r163231-r171807.patch', 1, gcc_source_dir)
    patch('gcc-fixinc.patch', 1, gcc_source_dir)
    patch('gcc-include.patch', 1, gcc_source_dir)
# Always rebuild from a clean build tree.
if os.path.exists(build_dir):
    shutil.rmtree(build_dir)
os.makedirs(build_dir)
build_aux_tools(build_dir)
basic_path = aux_inst_dir + "/bin:/bin:/usr/bin"
# Stage 1 bootstraps with the system compiler...
stage1_dir = build_dir + '/stage1'
build_one_stage({"PATH" : basic_path,
                 "CC" : "gcc",
                 "CXX" : "g++" },
                stage1_dir, True)
# ...then stages 2 and 3 each rebuild with the previous stage's tools.
for stage_num in range(2, 4):
    prev_stage_dir = build_dir + '/stage' + str(stage_num - 1)
    prev_stage_inst_dir = prev_stage_dir + '/inst'
    cur_stage_dir = build_dir + '/stage' + str(stage_num)
    build_one_stage({"PATH" : prev_stage_inst_dir + "/bin:" + basic_path,
                     "CC" : "gcc -fgnu89-inline",
                     "CXX" : "g++",
                     "RANLIB" : "true" },
                    cur_stage_dir, False)
# Ship the final (stage 3) toolchain with the deterministic tar we built.
stage3_dir = build_dir + '/stage3'
build_tar_package(aux_inst_dir + "/bin/tar",
                  "toolchain.tar", stage3_dir, "inst")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.